code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from Model import *
from DataLoader import *
from Setting import *
import collections
import os
import numpy as np
import pickle
import random
# -

# ## Week4 Code1 2SAT problem with local search
#
# The file format is as follows. In each instance, the number of variables and the number
# of clauses is the same, and this number is specified on the first line of the file. Each
# subsequent line specifies a clause via its two literals, with a number denoting the
# variable and a "-" sign denoting logical "not". For example, the second line of the
# first data file is "-16808 75250", which indicates the clause ¬x16808 ∨ x75250.
#
# Your task is to determine which of the 6 instances are satisfiable, and which are
# unsatisfiable. In the box below, enter a 6-bit string, where the ith bit should be 1 if
# the ith instance is satisfiable, and 0 otherwise. For example, if you think that the
# first 3 instances are satisfiable and the last 3 are not, then you should enter the
# string 111000 in the box below.
#
# It is an open problem and has many different solutions. For example, 2SAT reduces to
# computing the strongly connected components of a suitable graph (with two vertices per
# variable and two directed edges per clause). Alternatively, you can use Papadimitriou's
# randomized local search algorithm (the algorithm from lecture is probably too slow as
# stated, so one or more simple modifications are needed to make it run in a reasonable
# amount of time). A third approach is via backtracking.

# **I personally implemented the 2nd method -- 'Local search algorithm, also called
# Papadimitriou's Algorithm'. In order to complete the computation in a reasonable time, I
# also implemented the "clauses pruning" trick mentioned in the course forum here:**
# https://www.coursera.org/learn/algorithms-npcomplete/discussions/weeks/4/threads/Cl-n9enMEeavDRL9DbMJZA
#
# Course video: https://www.coursera.org/learn/algorithms-npcomplete/lecture/YltoR/analysis-of-papadimitrious-algorithm


# +
class heuristic(Model):
    """Randomized local search (Papadimitriou-style) for 2SAT, with clause pruning."""

    def __init__(self):
        super().__init__()

    def preprocess(self):
        # First record of the loaded file is the instance size; the rest are clauses.
        self.data = self.dataLoader.data[1:]
        self.size = self.dataLoader.data[0]
        self.codedict = {}

    @staticmethod
    def judge_clause(clauseidx, state, data):
        """Evaluate clause `clauseidx` under boolean assignment `state`.

        Literals are 1-indexed ints; a negative literal means logical NOT.
        Returns (clause_satisfied, literal1_value, literal2_value).
        """
        idx1 = int(data[clauseidx][0])
        idx2 = int(data[clauseidx][1])
        if idx1 > 0:
            con1 = state[idx1 - 1]
        else:
            con1 = not state[-idx1 - 1]
        if idx2 > 0:
            con2 = state[idx2 - 1]
        else:
            con2 = not state[-idx2 - 1]
        finalcon = con1 or con2
        return finalcon, con1, con2

    @staticmethod
    def judge_fix_clauses(clauseidx, state, data):
        """If the clause is unsatisfied, flip one of its two variables.

        A fair coin picks which side to inspect first; the literal that is
        currently False gets its underlying variable flipped in `state`
        (mutated in place). Returns (state, fixed) where `fixed` says whether
        a flip happened.
        """
        idx1 = int(data[clauseidx][0])
        idx2 = int(data[clauseidx][1])
        if idx1 > 0:
            con1 = state[idx1 - 1]
        else:
            con1 = not state[-idx1 - 1]
        if idx2 > 0:
            con2 = state[idx2 - 1]
        else:
            con2 = not state[-idx2 - 1]
        finalcon = con1 or con2
        fixed = False
        if not finalcon:
            fixed = True
            randomboolean = np.random.rand() > .5
            if randomboolean:
                if not con1:
                    state[abs(idx1) - 1] = not state[abs(idx1) - 1]
                else:
                    state[abs(idx2) - 1] = not state[abs(idx2) - 1]
            else:
                if not con2:
                    state[abs(idx2) - 1] = not state[abs(idx2) - 1]
                else:
                    state[abs(idx1) - 1] = not state[abs(idx1) - 1]
        return state, fixed

    @staticmethod
    def pruning_clauses(clauses):
        """Iteratively drop clauses that contain a single-polarity variable.

        If a variable only ever appears with one sign, every clause holding it
        can be satisfied by fixing that variable, so those clauses are removed.
        Repeats until a fixed point is reached and returns the reduced list.
        """
        while True:
            countdict = collections.defaultdict(list)
            removeset = set()
            for clauseidx in range(len(clauses)):
                idx1 = int(clauses[clauseidx][0])
                idx2 = int(clauses[clauseidx][1])
                countdict[abs(idx1)].append(idx1)
                countdict[abs(idx2)].append(idx2)
            for clauseidx in range(len(clauses)):
                idx1 = abs(int(clauses[clauseidx][0]))
                idx2 = abs(int(clauses[clauseidx][1]))
                # A variable's occurrence list has mean == first entry exactly
                # when all occurrences carry the same sign (single polarity).
                if countdict[idx1][0] == sum(countdict[idx1]) / len(countdict[idx1]) \
                        or countdict[idx2][0] == sum(countdict[idx2]) / len(countdict[idx2]):
                    removeset.add(clauseidx)
            newclauses = []
            for clauseidx in range(len(clauses)):
                if clauseidx not in removeset:
                    newclauses.append(clauses[clauseidx])
            if len(newclauses) == len(clauses):
                break
            else:
                clauses = newclauses
        return newclauses

    def model(self):
        """Run pruning plus up to 10 restarts of randomized local search.

        Returns True as soon as a satisfying assignment is found, else False.
        """
        # FIX: the original read the module-level global `m` (data = m.data),
        # which only worked because the driver loop happened to name its
        # instance `m`; the instance's own data is what is meant here.
        data = self.data
        # Number of variables equals the original number of clauses, so take
        # the size BEFORE pruning shrinks the clause list.
        size = len(data)
        print('Pruning clauses...')
        data = heuristic.pruning_clauses(data)
        print('Clauses size down to:', len(data))
        for round_idx in range(10):  # renamed from `round` to avoid shadowing the builtin
            print('Local search is executing round:', round_idx)
            i = 0
            randomList = np.random.uniform(0, 1, size)
            state = [True if randomList[i] > 0.5 else False for i in range(size)]
            while i < 2 * len(data) * len(data):
                i += 1
                candidates_clauses = []
                for clauseidx in range(len(data)):
                    clause_status = heuristic.judge_clause(clauseidx, state, data)[0]
                    if not clause_status:
                        candidates_clauses.append([clauseidx, clause_status])
                if not candidates_clauses:
                    print('Satisfiable solution found!')
                    return True
                # Pick a random violated clause and flip one of its variables.
                choice = random.choice(candidates_clauses)
                state, fixed = heuristic.judge_fix_clauses(choice[0], state, data)
        print('Satisfiable solution not found!')
        return False
# -

# +
# %%time
processLine = lambda x: [i.replace('\n', '') for i in x.split(' ')]

for file in ['2sat1.txt', '2sat2.txt', '2sat3.txt', '2sat4.txt', '2sat5.txt', '2sat6.txt', ]:
    arg = {'fileName': './data/' + file, 'numLines': None, 'processLine': processLine}
    d = DataLoader(**arg)
    m = heuristic()
    s = Setting(d, m, False)
    s.run()
    print('--------------------------------------------------')
# -
course4/Week4Code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Github link to Colab link

# In this program, you can paste your github link. Then, you will receive a link to colab!

def getlink():
    """Read a GitHub URL from stdin and print the corresponding Colab URL.

    Accepts either a full ``https://github.com/...`` URL or a bare
    ``github.com/...`` one; anything else is reported as invalid.
    """
    x = input("Paste your Github link here: ")
    print()
    # FIX: the original tested x[0] before the prefix comparison, which raised
    # IndexError on empty input; str.startswith covers both checks safely.
    if x.startswith("https://github.com/"):
        y = x[len("https://github.com/"):]
    elif x.startswith("github.com/"):
        y = x[len("github.com/"):]
    else:
        print("Seems your link is invalid, please have a check on it!")
        return
    print("The Colab link is here:")
    print("https://colab.research.google.com/github/" + y)

# Let's have a try!
#
# You may use the URL of this website.

getlink()
Preliminary/Github link to Colab link.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import nltk
import gensim
import codecs
from sner import Ner
import spacy
from sklearn.metrics import confusion_matrix, accuracy_score, average_precision_score
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.internals import find_jars_within_path
from nltk.tag import StanfordPOSTagger
from nltk.tag import StanfordNERTagger
from sklearn import linear_model
from sklearn import svm
from sklearn.metrics import fbeta_score
from scipy.sparse import hstack

# +
# NOTE(review): filenames kept verbatim ("traininig" typo included) -- they
# must match the actual files on disk.
f_train = open('traininig_dataset (1) (1).txt', 'r+')
f_test = open('validation_dataset (1) (1).txt', 'r+')
train = pd.DataFrame(f_train.readlines(), columns=['Question'])
test = pd.DataFrame(f_test.readlines(), columns=['Question'])
# -

# Each raw line is "<COARSE:fine> <question text>"; split off the label parts.
train['QType'] = train.Question.apply(lambda x: x.split(' ', 1)[0])
train['Question'] = train.Question.apply(lambda x: x.split(' ', 1)[1])
train['QType-Coarse'] = train.QType.apply(lambda x: x.split(':')[0])
train['QType-Fine'] = train.QType.apply(lambda x: x.split(':')[1])

test['QType'] = test.Question.apply(lambda x: x.split(' ', 1)[0])
test['Question'] = test.Question.apply(lambda x: x.split(' ', 1)[1])
test['QType-Coarse'] = test.QType.apply(lambda x: x.split(':')[0])
test['QType-Fine'] = test.QType.apply(lambda x: x.split(':')[1])

train.head()
test.describe()
test.head()

# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat yields the same combined frame.
pd.concat([train, test]).describe()

# As can be observed, the train set consists of some duplicate questions (81 to be exact). <br>
# The number of unique Coarse:Fine classes is 50 whereas entries corresponding to 42 are present in the test set. <br>
# The number of fine classes overall is 47 whereas entries corresponding to 39 are present in test.

from sklearn.preprocessing import LabelEncoder

# Fit each encoder on train+test together so both splits share one label space.
le = LabelEncoder()
le.fit(pd.Series(train.QType.tolist() + test.QType.tolist()).values)
train['QType'] = le.transform(train.QType.values)
test['QType'] = le.transform(test.QType.values)

le2 = LabelEncoder()
le2.fit(pd.Series(train['QType-Coarse'].tolist() + test['QType-Coarse'].tolist()).values)
train['QType-Coarse'] = le2.transform(train['QType-Coarse'].values)
test['QType-Coarse'] = le2.transform(test['QType-Coarse'].values)

le3 = LabelEncoder()
le3.fit(pd.Series(train['QType-Fine'].tolist() + test['QType-Fine'].tolist()).values)
train['QType-Fine'] = le3.transform(train['QType-Fine'].values)
test['QType-Fine'] = le3.transform(test['QType-Fine'].values)

train.head()

all_corpus = pd.Series(train.Question.tolist() + test.Question.tolist()).astype(str)

# Obtaining Dotwords.<br>
# Also, performing text cleaning and pre-processing in the next two blocks

# +
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer

# dot_words = []
# for row in all_corpus:
#     for word in row.split():
#         if '.' in word and len(word)>2:
#             dot_words.append(word)
# -

def text_clean(corpus, keep_list):
    '''
    Purpose : Function to keep only alphabets, digits and certain words (punctuations, qmarks, tabs etc. removed)

    Input : Takes a text corpus, 'corpus' to be cleaned along with a list of words, 'keep_list',
            which have to be retained even after the cleaning process

    Output : Returns the cleaned text corpus
    '''
    # FIX: the original grew the result with Series.append (removed in
    # pandas 2.0, and quadratic); collect rows in a list and wrap once.
    cleaned_rows = []
    for row in corpus:
        qs = []
        for word in row.split():
            if word not in keep_list:
                p1 = re.sub(pattern='[^a-zA-Z0-9]', repl=' ', string=word)
                p1 = p1.lower()
                qs.append(p1)
            else:
                qs.append(word)
        cleaned_rows.append(' '.join(qs))
    return pd.Series(cleaned_rows)


def preprocess(corpus, keep_list, cleaning=True, stemming=False, stem_type=None,
               lemmatization=False, remove_stopwords=True):
    '''
    Purpose : Function to perform all pre-processing tasks (cleaning, stemming, lemmatization,
              stopwords removal etc.)

    Input : 'corpus' - Text corpus on which pre-processing tasks will be performed
            'keep_list' - List of words to be retained during cleaning process
            'cleaning', 'stemming', 'lemmatization', 'remove_stopwords' - Boolean variables indicating whether
            a particular task should be performed or not
            'stem_type' - Choose between Porter stemmer or Snowball(Porter2) stemmer. Default is "None", which
            corresponds to Porter Stemmer. 'snowball' corresponds to Snowball Stemmer

    Note : Either stemming or lemmatization should be used. There's no benefit of using both of them together

    Output : Returns the processed text corpus (a list of strings)
    '''
    if cleaning == True:
        corpus = text_clean(corpus, keep_list)
    if remove_stopwords == True:
        # Keep the wh-words: they are the strongest signal for question type.
        wh_words = ['who', 'what', 'when', 'why', 'how', 'which', 'where', 'whom']
        stop = set(stopwords.words('english'))
        for word in wh_words:
            stop.remove(word)
        corpus = [[token for token in row.split() if token not in stop] for row in corpus]
    else:
        corpus = [row.split() for row in corpus]
    if lemmatization == True:
        lem = WordNetLemmatizer()
        corpus = [[lem.lemmatize(token, pos='v') for token in row] for row in corpus]
    if stemming == True:
        if stem_type == 'snowball':
            stemmer = SnowballStemmer(language='english')
        else:
            stemmer = PorterStemmer()
        corpus = [[stemmer.stem(token) for token in row] for row in corpus]
    corpus = [' '.join(row) for row in corpus]
    return corpus


common_dot_words = ['U.S.', 'St.', 'Mr.', 'Mrs.', 'D.C.']
all_corpus = preprocess(all_corpus, keep_list=common_dot_words, remove_stopwords=True)

# # Splitting the preprocessed combined corpus again into train and test set

train_corpus = all_corpus[0:train.shape[0]]
test_corpus = all_corpus[train.shape[0]:]

# Loading the English model for Spacy.<br>
# NLTK version for the same performs too slowly, hence opting for Spacy.

# NOTE(review): the 'en' shortcut was removed in spaCy 3; on a modern install
# this needs spacy.load('en_core_web_sm'). Kept as-is for the pinned env.
nlp = spacy.load('en')

# # Obtaining Features from Train Data, which would be fed to CountVectorizer

# Creating list of Named Entities, Lemmas, POS Tags, Syntactic Dependency Relation and
# Orthographic Features using shape.<br>
# Later, these would be used as features for our model.

def _spacy_features(corpus):
    """Run the spaCy pipeline over `corpus` and return five parallel lists of
    space-joined strings: NER labels, lemmas, POS tags, dependency relations
    and orthographic shapes (one entry per question).

    Factored out of the previously duplicated train/test loops.
    """
    all_ner, all_lemma, all_tag, all_dep, all_shape = [], [], [], [], []
    for row in corpus:
        doc = nlp(row)
        present_lemma = [token.lemma_ for token in doc]
        present_tag = [token.tag_ for token in doc]
        present_dep = [token.dep_ for token in doc]
        present_shape = [token.shape_ for token in doc]
        present_ner = [ent.label_ for ent in doc.ents]
        all_lemma.append(" ".join(present_lemma))
        all_tag.append(" ".join(present_tag))
        all_dep.append(" ".join(present_dep))
        all_shape.append(" ".join(present_shape))
        all_ner.append(" ".join(present_ner))
    return all_ner, all_lemma, all_tag, all_dep, all_shape


all_ner, all_lemma, all_tag, all_dep, all_shape = _spacy_features(train_corpus)

# Converting the attributes obtained above into vectors using CountVectorizer.

count_vec_ner = CountVectorizer(ngram_range=(1, 2)).fit(all_ner)
ner_ft = count_vec_ner.transform(all_ner)

count_vec_lemma = CountVectorizer(ngram_range=(1, 2)).fit(all_lemma)
lemma_ft = count_vec_lemma.transform(all_lemma)

count_vec_tag = CountVectorizer(ngram_range=(1, 2)).fit(all_tag)
tag_ft = count_vec_tag.transform(all_tag)

count_vec_dep = CountVectorizer(ngram_range=(1, 2)).fit(all_dep)
dep_ft = count_vec_dep.transform(all_dep)

count_vec_shape = CountVectorizer(ngram_range=(1, 2)).fit(all_shape)
shape_ft = count_vec_shape.transform(all_shape)

# Combining the features obtained into 1 matrix

#x_all_ft_train = hstack([ner_ft, lemma_ft, tag_ft, dep_ft, shape_ft])
x_all_ft_train = hstack([ner_ft, lemma_ft, tag_ft])
x_all_ft_train

# Converting from COOrdinate format to Compressed Sparse Row format for easier mathematical computations.

x_all_ft_train = x_all_ft_train.tocsr()
x_all_ft_train

# # Now we will obtain the Feature vectors for the test set using the CountVectorizers Obtained from the Training Corpus

all_test_ner, all_test_lemma, all_test_tag, all_test_dep, all_test_shape = _spacy_features(test_corpus)

ner_test_ft = count_vec_ner.transform(all_test_ner)
lemma_test_ft = count_vec_lemma.transform(all_test_lemma)
tag_test_ft = count_vec_tag.transform(all_test_tag)
dep_test_ft = count_vec_dep.transform(all_test_dep)
shape_test_ft = count_vec_shape.transform(all_test_shape)

#x_all_ft_test = hstack([ner_test_ft, lemma_test_ft, tag_test_ft, dep_test_ft, shape_test_ft])
x_all_ft_test = hstack([ner_test_ft, lemma_test_ft, tag_test_ft])
x_all_ft_test

x_all_ft_test = x_all_ft_test.tocsr()
x_all_ft_test

# # Model Training

# Literature study over the years has shown Linear SVM performs best in this Use Case.

model = svm.LinearSVC()

# First Modelling for Coarse Classes

model.fit(x_all_ft_train, train['QType-Coarse'].values)

# # Model Evaluation

preds = model.predict(x_all_ft_test)
preds

accuracy_score(test['QType-Coarse'].values, preds)

# Glad to announce, Feature Engineering has enabled us to achieve an Accuracy of 88.2% on the validation set.<br>
# The obtained accuracy is way higher than the 73% accuracy obtained without feature engineering

# Next, we will obtain accuracies for Coarse:Fine combinations

model.fit(x_all_ft_train, train['QType'].values)
preds = model.predict(x_all_ft_test)
accuracy_score(test['QType'].values, preds)

# Woah, up to 81.4% accuracy from 68% obtained earlier when modelled without Feature Engineering.

# Finally, we would evaluate our performance for the fine classes

model.fit(x_all_ft_train, train['QType-Fine'].values)
preds = model.predict(x_all_ft_test)
accuracy_score(test['QType-Fine'].values, preds)

# Not bad, we have achieved an accuracy of 81.2% over the Fine Classes.

# # Conclusion
#
# We achieved great accuracies using Feature Engineering as compared to accuracies obtained without feature engineering.
# (The notebook for models obtained without feature engineering is not being shared and one can try implementing it easily.)
#
# Experimenting with informer hypernyms can further help in accuracy improvement as suggested in
# https://nlp.stanford.edu/courses/cs224n/2010/reports/olalerew.pdf
Question Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Set things up

# +
import numpy as np
import tensorflow as tf

from nn_policy import FeedForwardCritic
from nn_policy import FeedForwardPolicy
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.exploration_strategies.ou_strategy import OUStrategy
from sandbox.rocky.tf.algos.ddpg import DDPG as ShaneDDPG
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.deterministic_mlp_policy import \
    DeterministicMLPPolicy
from sandbox.rocky.tf.q_functions.continuous_mlp_q_function import \
    ContinuousMLPQFunction

from ddpg import DDPG as MyDDPG
from testing_utils import are_np_arrays_equal
# -

# +
env = TfEnv(HalfCheetahEnv())
action_dim = env.action_dim
obs_dim = env.observation_space.low.shape[0]

# A tiny random batch shared by both implementations so outputs are comparable.
batch_size = 2
rewards = np.random.rand(batch_size)
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int produces the same dtype here.
terminals = (np.random.rand(batch_size) > 0.5).astype(int)
obs = np.random.rand(batch_size, obs_dim)
actions = np.random.rand(batch_size, action_dim)
next_obs = np.random.rand(batch_size, obs_dim)

ddpg_params = dict(
    batch_size=64,
    n_epochs=0,
    epoch_length=0,
    eval_samples=0,
    discount=0.99,
    qf_learning_rate=1e-3,
    policy_learning_rate=1e-4,
    soft_target_tau=0.001,
    replay_pool_size=1000000,
    min_pool_size=1000,
    scale_reward=0.1,
)
discount = ddpg_params['discount']
# -

print(rewards)
print(terminals)
print(obs)
print(actions)
print(next_obs)

# ## Create my stuff

sess_me = tf.Session()
with sess_me.as_default():
    es = OUStrategy(env_spec=env.spec)
    ddpg_params['Q_weight_decay'] = 0.
    qf_params = dict(
        embedded_hidden_sizes=(100, ),
        observation_hidden_sizes=(100, ),
        hidden_nonlinearity=tf.nn.relu,
    )
    policy_params = dict(
        observation_hidden_sizes=(100, 100),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=tf.nn.tanh,
    )
    qf = FeedForwardCritic(
        "critic",
        env.observation_space.flat_dim,
        env.action_space.flat_dim,
        **qf_params
    )
    policy = FeedForwardPolicy(
        "actor",
        env.observation_space.flat_dim,
        env.action_space.flat_dim,
        **policy_params
    )
    my_algo = MyDDPG(
        env,
        es,
        policy,
        qf,
        **ddpg_params
    )
    my_policy = my_algo.actor
    my_qf = my_algo.critic
    my_target_policy = my_algo.target_actor
    my_target_qf = my_algo.target_critic

# ## Set up Shane

sess_shane = tf.Session()
with sess_shane.as_default():
    es = OUStrategy(env_spec=env.spec)
    policy = DeterministicMLPPolicy(
        name="init_policy",
        env_spec=env.spec,
        hidden_sizes=(100, 100),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=tf.nn.tanh,
    )
    qf = ContinuousMLPQFunction(
        name="qf",
        env_spec=env.spec,
        hidden_sizes=(100, 100),
    )
    # Shane's DDPG does not take the extra weight-decay knob my version added.
    ddpg_params.pop('Q_weight_decay')
    shane_algo = ShaneDDPG(
        env,
        policy,
        qf,
        es,
        **ddpg_params
    )
    sess_shane.run(tf.initialize_all_variables())
    shane_algo.init_opt()  # This initializes the optimizer parameters
    sess_shane.run(tf.initialize_all_variables())
    f_train_policy = shane_algo.opt_info['f_train_policy']
    f_train_qf = shane_algo.opt_info['f_train_qf']
    shane_target_qf = shane_algo.opt_info["target_qf"]
    shane_target_policy = shane_algo.opt_info["target_policy"]
    shane_policy = shane_algo.policy
    shane_qf = shane_algo.qf

# ## Measure stuff from Shane's algo

with sess_shane.as_default():
    shane_policy_param_values = shane_policy.flat_to_params(
        shane_policy.get_param_values()
    )
    shane_qf_param_values = shane_qf.flat_to_params(
        shane_qf.get_param_values()
    )
    # TODO(vpong): why are these two necessary?
    shane_target_policy.set_param_values(shane_policy.get_param_values())
    shane_target_qf.set_param_values(shane_qf.get_param_values())
    shane_actions, _ = shane_policy.get_actions(obs)
    shane_qf_out = shane_qf.get_qval(obs, actions)
    shane_next_actions, _ = shane_target_policy.get_actions(next_obs)
    shane_next_target_qf_values = shane_target_qf.get_qval(next_obs, shane_next_actions)
    shane_ys = rewards + (1. - terminals) * discount * shane_next_target_qf_values

# ## Copy things to my algo

with sess_me.as_default():
    my_policy.set_param_values(shane_policy_param_values)
    my_target_policy.set_param_values(shane_policy_param_values)
    my_qf.set_param_values(shane_qf_param_values)
    my_target_qf.set_param_values(shane_qf_param_values)

# ## Measure stuff from my algo

# +
feed_dict = my_algo._update_feed_dict(rewards, terminals, obs, actions, next_obs)
my_actions = sess_me.run(
    my_policy.output,
    feed_dict=feed_dict
)
my_qf_out = sess_me.run(
    my_qf.output,
    feed_dict=feed_dict
).flatten()
my_next_actions = sess_me.run(
    my_target_policy.output,
    feed_dict=feed_dict
)
my_next_target_qf_values = sess_me.run(
    my_algo.target_critic.output,
    feed_dict=feed_dict).flatten()
my_ys = sess_me.run(my_algo.ys, feed_dict=feed_dict).flatten()
my_policy_loss = sess_me.run(
    my_algo.actor_surrogate_loss,
    feed_dict=feed_dict)
my_qf_loss = sess_me.run(
    my_algo.critic_loss,
    feed_dict=feed_dict)
# -

# ## Check that Shane and my params stayed the same

shane_policy = shane_algo.policy
shane_qf = shane_algo.qf
with sess_shane.as_default():
    shane_policy_param_values_new = shane_policy.flat_to_params(
        shane_policy.get_param_values()
    )
    shane_qf_param_values_new = shane_qf.flat_to_params(
        shane_qf.get_param_values()
    )
    shane_target_policy_param_values_new = shane_target_policy.flat_to_params(
        shane_target_policy.get_param_values()
    )
    shane_target_qf_param_values_new = shane_target_qf.flat_to_params(
        shane_target_qf.get_param_values()
    )

my_policy_params_values_new = my_algo.actor.get_param_values()
my_qf_params_values_new = my_algo.critic.get_param_values()
my_target_policy_params_values_new = my_algo.target_actor.get_param_values()
my_target_qf_params_values_new = my_algo.target_critic.get_param_values()

print(all((a == b).all() for a, b in zip(shane_policy_param_values, shane_policy_param_values_new)))
print(all((a == b).all() for a, b in zip(shane_policy_param_values, my_policy_params_values_new)))
print(all((a == b).all() for a, b in zip(shane_policy_param_values, shane_target_policy_param_values_new)))
print(all((a == b).all() for a, b in zip(shane_policy_param_values, my_target_policy_params_values_new)))
print(all((a == b).all() for a, b in zip(shane_qf_param_values, shane_qf_param_values_new)))
print(all((a == b).all() for a, b in zip(shane_qf_param_values, my_qf_params_values_new)))
print(all((a == b).all() for a, b in zip(shane_qf_param_values, shane_target_qf_param_values_new)))
print(all((a == b).all() for a, b in zip(shane_qf_param_values, my_target_qf_params_values_new)))

# ## Check critic outputs are the same

# +
# Recompute the critic forward pass by hand with NumPy as an independent check.
# NOTE(review): assumes the critic concatenates actions after the first hidden
# layer -- matches the unpacked (W1,b1,W2,b2,W3,b3) parameter layout above.
W1, b1, W2, b2, W3, b3 = shane_qf_param_values
output = np.matmul(obs, W1) + b1
output = np.maximum(output, 0)
output = np.hstack((output, actions))
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_qf_out = output.flatten()
print(my_qf_out)
print(shane_qf_out)
print(expected_qf_out)
# -

# ## Check actor outputs are the same

# +
W1, b1, W2, b2, W3, b3 = shane_policy_param_values
output = np.matmul(obs, W1) + b1
output = np.maximum(output, 0)
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_action = output
print(my_actions)
print(shane_actions)
print(expected_action)
# -

# ## Check that next action outputs are the same

# +
W1, b1, W2, b2, W3, b3 = shane_policy_param_values
output = np.matmul(next_obs, W1) + b1
output = np.maximum(output, 0)
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_next_action = output
print(my_next_actions)
print(shane_next_actions)
print(expected_next_action)
# -

# ## Check next critic outputs are the same

# +
W1, b1, W2, b2, W3, b3 = shane_qf_param_values
output = np.matmul(next_obs, W1) + b1
output = np.maximum(output, 0)
output = np.hstack((output, expected_next_action))
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_target_qf_values = output.flatten()
print(shane_next_target_qf_values)
print(my_next_target_qf_values)
print(expected_target_qf_values)
# -

my_expected_ys = rewards + (1. - terminals) * discount * my_next_target_qf_values
shane_expected_ys = rewards + (1. - terminals) * discount * shane_next_target_qf_values
expected_ys = rewards + (1. - terminals) * discount * expected_target_qf_values
print(shane_ys)
print(shane_expected_ys)
print(my_ys)
print(my_expected_ys)
print(expected_ys)

# ## Check losses are the same

# Only do this once since it changes the params!
with sess_shane.as_default():
    shane_policy_loss, _ = f_train_policy(obs)
    shane_qf_loss, qval, _ = f_train_qf(shane_ys, obs, actions)

print(my_policy_loss)
print(shane_policy_loss)
print(shane_qf_loss)
print(my_qf_loss)

# FIX: the original called sess.close(), but no variable named `sess` exists
# in this notebook (NameError); close the two sessions actually opened.
sess_me.close()
sess_shane.close()
notebooks/compare_ddpg.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import pickle
from gensim.models import Word2Vec
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.models import load_model
from keras.utils.np_utils import to_categorical
from keras.metrics import top_k_categorical_accuracy
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# a modern env needs `import joblib` instead (kept for the pinned env).
from sklearn.externals import joblib
from sklearn import tree
from sklearn.preprocessing import normalize
import tensorflowjs as tfjs

import analysis_functions
# -

# +
meta_dict = pickle.load(open("meta_dict.p", "rb"))
train_data = np.load("data/train_data.npy")
train_y = np.load("data/train_y.npy")
test_data = np.load("data/test_data.npy")
test_y = np.load("data/test_y.npy")
# -

data_df = pd.read_csv("../cleaning/cleaned_memes.tsv", sep='\t')
data_meme_classes = dict(tuple(data_df.groupby('meme')))

# Per-meme 80/20 split keyed on meme_id so captions of one image stay together.
train_data_dict = dict()
test_data_dict = dict()
for meme_name, memes in data_meme_classes.items():
    validation_split = int(np.floor(len(np.unique(memes['meme_id'])) * .8))
    ans = dict(tuple(memes.groupby(lambda index: int(memes.loc[index]['meme_id'] > validation_split))))
    train_data_dict[meme_name] = ans[0]
    test_data_dict[meme_name] = ans[1]

train_sentences = analysis_functions.data_dict_to_sents(train_data_dict)

# +
meta_dict = dict()
meta_dict["memes"] = analysis_functions.categorical_dict_from_list(np.unique(data_df['meme']))
meta_dict["meme_names"] = np.unique(data_df['meme'])
meta_dict["pos"] = analysis_functions.categorical_dict_from_list(np.unique(data_df['pos']))
meta_dict["sentiment"] = analysis_functions.categorical_dict_from_list(np.unique(data_df['sentiment']))
# NOTE(review): `size=` is the gensim<4 keyword; gensim 4 renamed it to
# `vector_size=`. Kept for the pinned env.
meta_dict["embeddings"] = Word2Vec(train_sentences, size=50, min_count=5)
meta_dict["sentence_size"] = 50
pickle.dump(meta_dict, open("meta_dict.p", "wb"))
# -

train_data, train_y = analysis_functions.vectorize_meme_data(meta_dict, train_data_dict)
test_data, test_y = analysis_functions.vectorize_meme_data(meta_dict, test_data_dict)

model = analysis_functions.create_model(meta_dict)

# +
model = Sequential()
model.add(Dropout(.2, input_shape=(meta_dict["sentence_size"],
                                   meta_dict["embeddings"].vector_size
                                   + len(meta_dict["sentiment"])
                                   + len(meta_dict["pos"]))))
model.add(LSTM(units=len(meta_dict["meme_names"]),
               recurrent_dropout=.6,
               ))
model.add(Dense(len(meta_dict["meme_names"]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# -

model = load_model('model.hdf5')

model.fit(train_data, train_y, epochs=50, batch_size=200, validation_split=0.0)

# +
#model.save("model.hdf5")
# -

print(model.evaluate(train_data, train_y))
print(model.evaluate(test_data, test_y))

test_cross = pd.crosstab(model.predict_classes(test_data), np.argmax(test_y, axis=1))
train_cross = pd.crosstab(model.predict_classes(train_data), np.argmax(train_y, axis=1))

# FIX: DataFrame.as_matrix was deprecated in pandas 0.23 and removed in 1.0;
# to_numpy() returns the same ndarray.
train_cross_mat = train_cross.to_numpy()
test_cross_mat = test_cross.to_numpy()

# +
import matplotlib.pyplot as plt
import numpy as np

fig = plt.figure()
plt.imshow(normalize(test_cross_mat, axis=0), cmap="magma")
fig.suptitle('test', fontsize=20)
plt.xlabel('true', fontsize=16)
plt.ylabel('predicted', fontsize=16)
fig.savefig('../paper/test_confusion_matrix.jpg')
# -

normalize(train_cross_mat, axis=0)


def eval_top_k(y_, y, k):
    """Fraction of rows where the true class is among the k highest scores."""
    y_real = np.argmax(y, axis=1)[:, None]
    sol = np.any(np.argsort(y_)[:, -k:] == y_real, axis=1)
    return(sum(sol) / len(sol))


train_data.shape

pred_train = model.predict(train_data)
pred_test = model.predict(test_data)

train_cross = pd.crosstab(np.argmax(pred_train, axis=1), np.argmax(train_y, axis=1))
test_cross = pd.crosstab(np.argmax(pred_test, axis=1), np.argmax(test_y, axis=1))

# FIX: as_matrix removed -- see note above.
train_cross_mat = train_cross.to_numpy()
test_cross_mat = test_cross.to_numpy()

meme_num = 38

test_y[np.argmax(test_y, axis=1) == meme_num]


def top_k_specific(predictions, answers, meme_num, k):
    """Top-k accuracy restricted to samples whose true class is `meme_num`."""
    y = answers[np.argmax(answers, axis=1) == meme_num]
    y_ = predictions[np.argmax(answers, axis=1) == meme_num]
    return(eval_top_k(y_, y, k))


# NOTE(review): this first loop just re-creates the same array 10 times; it
# looks like a leftover line, kept to preserve the notebook's behavior.
for k in range(1, 11): meme_scores_test = np.ones((10, 40)) * -1
for k in range(1, 11):
    for meme_num in range(40):
        score = top_k_specific(pred_test, test_y, meme_num, k)
        meme_scores_test[k - 1, meme_num] = score

meme_scores_train = np.ones((10, 40)) * -1
for k in range(1, 11):
    for meme_num in range(40):
        score = top_k_specific(pred_train, train_y, meme_num, k)
        meme_scores_train[k - 1, meme_num] = score

model.summary()

model.layers[0].batch_input_shape = (None, 50, 98)
model.save("model.hdf5")

tfjs.converters.save_keras_model(model, "js_memes_model")

from matplotlib import pyplot
pyplot.plot(meme_scores_test.T)
pyplot.title("test accuracy by top k")
pyplot.xlabel("meme number")
pyplot.ylabel("accuracy")
pyplot.legend(range(1, 11), loc='lower left', ncol=5)
pyplot.yticks(np.arange(0, 1.1, .1))
pyplot.show()

np.arange(0, 1.1, .1)

for k in range(1, 11):
    print("{0:.3f}".format(np.min(meme_scores_test[k - 1, ])))

for k in range(1, 11):
    worst_meme = -1
    worst_score = 2
    for meme_num in range(40):
        score = top_k_specific(pred_test, test_y, meme_num, k)
        if(score < worst_score):
            worst_score = score
            worst_meme = meme_num
    #print(worst_meme)
    print("{0:.3f}".format(worst_score))

for i in range(1, 11):
    print("{0:.3f}".format(eval_top_k(pred_test, test_y, i)))

analysis_meta_dict = pickle.load(open("../analysis/meta_dict.p", "rb"))

lda_model = LDA()
lda_train = train_data.reshape(-1, train_data.shape[-1])
lda_test = test_data.reshape(-1, test_data.shape[-1])

lda_train.shape

lda_model.fit(lda_train, np.argmax(train_y, axis=1).repeat(50))

# NOTE(review): this passes a Word2Vec model where predict expects an array of
# feature rows -- it almost certainly raises; kept verbatim as exploration.
lda_model.predict(meta_dict["embeddings"])

pred_test = lda_model.predict_proba(lda_test)

np.argmax(test_y, axis=1)[2001]

for item in lda_model.predict_proba(lda_test[2005:2006]):
    for index, num in enumerate(item):
        print(str(index) + ": {0:.3f}".format(num))

np.argmax(test_y, axis=1)[0:10]

lda_train = train_data.reshape(train_data.shape[0], -1)

train_data.shape
analysis/Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from Portfolio_Construction import * # Portfolio_Construction: ObjectiveFunc, nominal_mvo, ObjectiveFunc_SR, long_only_constraint, Portfolio_Construction # nominal_mvo2, calculate_portfolio_var, risk_budget_objective,total_weight_constraint from Portfolio_Evaluation import * def load_bbg(path): sheet_to_df_map = pd.read_excel(path, sheet_name=None, skiprows = 5) is_first = True for key in list(sheet_to_df_map.keys()): if not sheet_to_df_map[key].empty: sheet_to_df_map[key].rename(columns={'TOT_RETURN_INDEX_GROSS_DVDS':key}, inplace=True) if is_first: df_data = sheet_to_df_map[key] is_first = False else: df_data = pd.merge(df_data, sheet_to_df_map[key], on='Dates') return df_data # + import pandas as pd from scipy.stats import norm from tabulate import tabulate import numpy as np import scipy.stats as stats from scipy.optimize import minimize from scipy.optimize import LinearConstraint # data will not be disclosed LowRisk_path = r'./data/LOW_RISK_NEW2.xlsx' HighRisk_path = r'./data/HIGH_RISK_NEW2.xlsx' FX_path = r'./data/fx_daily.xlsx' LowRisk_data = load_bbg(LowRisk_path) MidRisk_data = LowRisk_data.copy() HighRisk_data = load_bbg(HighRisk_path) FX_data = load_bbg(FX_path) # USD/CAD # - Low_portfolio_value,Low_portfolio_investment,Low_pl,Low_portfolio_returns,Low_portfolio_PnL,Low_weights_initial,Low_weights_end,Low_portfolio_holdings_list =Portfolio_Construction(LowRisk_data,"RiskParity","Low") Mid_portfolio_value,Mid_portfolio_investment,Mid_pl,Mid_portfolio_returns,Mid_portfolio_PnL,Mid_weights_initial,Mid_weights_end,Mid_portfolio_holdings_list =Portfolio_Construction(LowRisk_data,"MVO_0","Mid") 
High_portfolio_value,High_portfolio_investment,High_pl,High_portfolio_returns,High_portfolio_PnL,High_weights_initial,High_weights_end,High_portfolio_holdings_list =Portfolio_Construction(HighRisk_data,"MVO_0.5","High") # + # Low Risk PnL import matplotlib.pyplot as plt # PnL with deposit pl_df1 = pd.DataFrame({'Dates':Low_pl[:,0],'PnL':Low_pl[:,1]}) plt.plot(pl_df1['Dates'], pl_df1['PnL']) plt.show() # PnL without deposit pl_df2 = pd.DataFrame({'Dates':Low_pl[1:,0],'PnL':Low_pl[1:,1]+Low_portfolio_investment[1:,1]}) plt.plot(pl_df2['Dates'], pl_df2['PnL']) plt.show() # cumulative PnL without deposit plt.plot(pl_df2['Dates'], pl_df2['PnL'].cumsum()) plt.show() # PnL only through adjusting allocation pl_df3 = pd.DataFrame({'Dates':Low_portfolio_PnL[:,0],'PnL':Low_portfolio_PnL[:,1]}) plt.plot(pl_df3['Dates'], pl_df3['PnL']) plt.show() # + # Mid Risk PnL import matplotlib.pyplot as plt # PnL with deposit pl_df1 = pd.DataFrame({'Dates':Mid_pl[:,0],'PnL':Mid_pl[:,1]}) plt.plot(pl_df1['Dates'], pl_df1['PnL']) plt.show() # PnL without deposit pl_df2 = pd.DataFrame({'Dates':Mid_pl[1:,0],'PnL':Mid_pl[1:,1]+Mid_portfolio_investment[1:,1]}) plt.plot(pl_df2['Dates'], pl_df2['PnL']) plt.show() # cumulative PnL without deposit plt.plot(pl_df2['Dates'], pl_df2['PnL'].cumsum()) plt.show() # PnL only through adjusting allocation pl_df3 = pd.DataFrame({'Dates':Mid_portfolio_PnL[:,0],'PnL':Mid_portfolio_PnL[:,1]}) plt.plot(pl_df3['Dates'], pl_df3['PnL']) plt.show() # + # High Risk PnL import matplotlib.pyplot as plt # PnL with deposit pl_df1 = pd.DataFrame({'Dates':High_pl[:,0],'PnL':High_pl[:,1]}) plt.plot(pl_df1['Dates'], pl_df1['PnL']) plt.show() # PnL without deposit pl_df2 = pd.DataFrame({'Dates':High_pl[1:,0],'PnL':High_pl[1:,1] +High_portfolio_investment[1:,1]}) plt.plot(pl_df2['Dates'], pl_df2['PnL']) plt.show() # cumulative PnL without deposit plt.plot(pl_df2['Dates'], pl_df2['PnL'].cumsum()) plt.show() # PnL only through adjusting allocation pl_df3 = 
pd.DataFrame({'Dates':High_portfolio_PnL[:,0], 'PnL':High_portfolio_PnL[:,1]}) plt.plot(pl_df3['Dates'], pl_df3['PnL']) plt.show() # + # Returns Graph # Low Risk Low_returns_df2 = pd.DataFrame({'Dates':Low_portfolio_returns[:,0], 'returns':Low_portfolio_returns[:,1]}) plt.plot(Low_returns_df2['Dates'], Low_returns_df2['returns']) plt.show() fig=plt.figure(figsize=(10,5)) plt.plot(Low_returns_df2['Dates'], Low_portfolio_returns[:,1].cumprod()) fig.suptitle('Cumulative Returns') plt.xlabel('Dates') plt.ylabel('Cumulative Returns') plt.show() fig.savefig('Low Risk Trading Period Cumulative Returns.jpg') Low_Merge=pd.merge(Low_returns_df2,Benchmark_Low_df,on='Dates') fig=plt.figure(figsize=(10,5)) plt.plot(Low_Merge['Dates'], Low_Merge.iloc[:,1].cumprod()) plt.plot(Low_Merge['Dates'], Low_Merge.iloc[:,2].cumprod()) fig.suptitle('Cumulative Returns') plt.xlabel('Dates') plt.ylabel('Cumulative Returns') plt.legend(['portfolio','benchmark']) plt.show() fig.savefig('Low Risk Trading Period Cumulative Returns_wBenchmark.jpg') print("Sharpe Ratio:",SharpeRatio((Low_returns_df2.iloc[:,1]-1),52)) print("Annualized Returns:",AnnualizedReturns((Low_returns_df2.iloc[:,1]-1),52)) print("Annualized Volatility:",AnnualizedVolatility((Low_returns_df2.iloc[:,1]-1),52)) print("VaR using normal distribuction (as percentage of the portfolio)", VaR_normal((Low_returns_df2.iloc[:,1]-1),52,0.05)) print("VaR using historical distribuction (as percentage of the portfolio)", VaR_historical((Low_returns_df2.iloc[:,1]-1),52,0.05)) print("CVaR (as percentage of the portfolio)",CVaR((Low_returns_df2.iloc[:,1]-1),52,0.05)) #money weighted returns CashFlow = np.append(np.array([0]*(len(Low_portfolio_PnL[:,1])-1)), Low_portfolio_value[-1,1]) + Low_portfolio_investment[:,1]+Low_portfolio_PnL[:,1] print("Money weighted returns",(1+np.irr(CashFlow))**52-1) print("Time weighted returns",52*((Low_portfolio_returns[:,1].cumprod()[-1])**( 1/len(Low_portfolio_returns[:,1]))-1)) # + # Returns Graph # Mid Risk 
# Mid-risk portfolio: weekly returns, cumulative returns (with benchmark),
# risk statistics and money-/time-weighted returns.
Mid_returns_df2 = pd.DataFrame({'Dates': Mid_portfolio_returns[:, 0],
                                'returns': Mid_portfolio_returns[:, 1]})
plt.plot(Mid_returns_df2['Dates'], Mid_returns_df2['returns'])
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(Mid_returns_df2['Dates'], Mid_portfolio_returns[:, 1].cumprod())
fig.suptitle('Cumulative Returns')
plt.xlabel('Dates')
plt.ylabel('Cumulative Returns')
plt.show()
fig.savefig('Mid Risk Trading Period Cumulative Returns.jpg')

Mid_Merge = pd.merge(Mid_returns_df2, Benchmark_Mid_df, on='Dates')
fig = plt.figure(figsize=(10, 5))
plt.plot(Mid_Merge['Dates'], Mid_Merge.iloc[:, 1].cumprod())
plt.plot(Mid_Merge['Dates'], Mid_Merge.iloc[:, 2].cumprod())
fig.suptitle('Cumulative Returns')
plt.xlabel('Dates')
plt.ylabel('Cumulative Returns')
plt.legend(['portfolio', 'benchmark'])
plt.show()
fig.savefig('Mid Risk Trading Period Cumulative Returns_wBenchmark.jpg')

# Risk/return statistics; returns are stored as gross returns, hence the -1.
# Typo fixed in the printed labels: "distribuction" -> "distribution".
print("Sharpe Ratio:", SharpeRatio((Mid_returns_df2.iloc[:, 1] - 1), 52))
print("Annualized Returns:", AnnualizedReturns((Mid_returns_df2.iloc[:, 1] - 1), 52))
print("Annualized Volatility:", AnnualizedVolatility((Mid_returns_df2.iloc[:, 1] - 1), 52))
print("VaR using normal distribution (as percentage of the portfolio)",
      VaR_normal((Mid_returns_df2.iloc[:, 1] - 1), 52, 0.05))
print("VaR using historical distribution (as percentage of the portfolio)",
      VaR_historical((Mid_returns_df2.iloc[:, 1] - 1), 52, 0.05))
print("CVaR (as percentage of the portfolio)", CVaR((Mid_returns_df2.iloc[:, 1] - 1), 52, 0.05))

# money weighted returns: final value as the terminal cash flow, deposits and
# PnL as intermediate flows.
# NOTE(review): np.irr was removed in NumPy >= 1.20; consider numpy_financial.irr.
CashFlow_Mid = np.append(np.array([0] * (len(Mid_portfolio_PnL[:, 1]) - 1)),
                         Mid_portfolio_value[-1, 1]) + Mid_portfolio_investment[:, 1] + Mid_portfolio_PnL[:, 1]
print("Money weighted returns", (1 + np.irr(CashFlow_Mid)) ** 52 - 1)
print("Time weighted returns",
      52 * ((Mid_portfolio_returns[:, 1].cumprod()[-1]) ** (1 / len(Mid_portfolio_returns[:, 1])) - 1))

# +
# Returns Graph
# High Risk
High_returns_df2 = pd.DataFrame({'Dates': High_portfolio_returns[:, 0],
                                 'returns': High_portfolio_returns[:, 1]})
plt.plot(High_returns_df2['Dates'], High_returns_df2['returns'])
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(High_returns_df2['Dates'], High_portfolio_returns[:, 1].cumprod())
fig.suptitle('Cumulative Returns')
plt.xlabel('Dates')
plt.ylabel('Cumulative Returns')
plt.show()
fig.savefig('High Risk Trading Period Cumulative Returns.jpg')

High_Merge = pd.merge(High_returns_df2, Benchmark_High_df, on='Dates')
print(High_Merge)
fig = plt.figure(figsize=(10, 5))
plt.plot(High_Merge['Dates'], High_Merge.iloc[:, 1].cumprod())
plt.plot(High_Merge['Dates'], High_Merge.iloc[:, 2].cumprod())
fig.suptitle('Cumulative Returns')
plt.xlabel('Dates')
plt.ylabel('Cumulative Returns')
plt.legend(['portfolio', 'benchmark'])
plt.show()
fig.savefig('High Risk Trading Period Cumulative Returns_wBenchmark.jpg')

print("Sharpe Ratio:", SharpeRatio((High_returns_df2.iloc[:, 1] - 1), 52))
print("Annualized Returns:", AnnualizedReturns((High_returns_df2.iloc[:, 1] - 1), 52))
print("Annualized Volatility:", AnnualizedVolatility((High_returns_df2.iloc[:, 1] - 1), 52))
print("VaR using normal distribution (as percentage of the portfolio)",
      VaR_normal((High_returns_df2.iloc[:, 1] - 1), 52, 0.05))
print("VaR using historical distribution (as percentage of the portfolio)",
      VaR_historical((High_returns_df2.iloc[:, 1] - 1), 52, 0.05))
print("CVaR (as percentage of the portfolio)", CVaR((High_returns_df2.iloc[:, 1] - 1), 52, 0.05))

# money weighted returns
# BUG FIX: the original added Mid_portfolio_PnL[:, 1] here (copy-paste from the
# Mid-risk cell), which corrupted the High-risk money-weighted return.
CashFlow_High = np.append(np.array([0] * (len(High_portfolio_PnL[:, 1]) - 1)),
                          High_portfolio_value[-1, 1]) + High_portfolio_investment[:, 1] + High_portfolio_PnL[:, 1]
print("Money weighted returns", (1 + np.irr(CashFlow_High)) ** 52 - 1)
print("Time weighted returns",
      52 * ((High_portfolio_returns[:, 1].cumprod()[-1]) ** (1 / len(High_portfolio_returns[:, 1])) - 1))

# +
# Quarterly portfolio-value charts for the three risk profiles.
Low_values_df0 = pd.DataFrame({'Dates': Low_portfolio_value[:, 0],
                               'Values': Low_portfolio_value[:, 1]})
Low_values_df = week_to_quarter(Low_values_df0)
fig = plt.figure(figsize=(10, 5))
plt.plot(Low_values_df['Dates'], Low_values_df['Values'])
plt.xlabel('Dates')
plt.ylabel('Values')
fig.suptitle('Low Risk Portfolio Value (Quarterly)')
plt.show()
fig.savefig('Low Risk Portfolio Value (Quarterly)')

Mid_values_df0 = pd.DataFrame({'Dates': Mid_portfolio_value[:, 0],
                               'Values': Mid_portfolio_value[:, 1]})
Mid_values_df = week_to_quarter(Mid_values_df0)
fig = plt.figure(figsize=(10, 5))
plt.plot(Mid_values_df['Dates'], Mid_values_df['Values'])
plt.xlabel('Dates')
plt.ylabel('Values')
fig.suptitle('Mid Risk Portfolio Value (Quarterly)')
plt.show()
fig.savefig('Mid Risk Portfolio Value (Quarterly)')

High_values_df0 = pd.DataFrame({'Dates': High_portfolio_value[:, 0],
                                'Values': High_portfolio_value[:, 1]})
High_values_df = week_to_quarter(High_values_df0)
fig = plt.figure(figsize=(10, 5))
plt.plot(High_values_df['Dates'], High_values_df['Values'])
plt.xlabel('Dates')
plt.ylabel('Values')
fig.suptitle('High Risk Portfolio Value (Quarterly)')
plt.show()
fig.savefig('High Risk Portfolio Value (Quarterly)')
# print(Low_values_df)
Portfolio_Construction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

from doctest import run_docstring_examples
#import logging
#logging.basicConfig(level=logging.INFO)

# # Damenproblem (n-queens problem)
#
# Source: https://de.wikipedia.org/wiki/Damenproblem
#
# Place $m$ queens on an $n$ x $n$ chessboard so that no two queens can
# capture each other according to the queen's movement rules in chess.

# ## Helper functions

# +
def buchstaben(n):
    """Return the first n file letters of the board: ['A', 'B', ...]."""
    return [(chr(ord('A') + i)) for i in range(n)]


def zahlen(n):
    """Return the rank numbers 1..n of the board."""
    return range(1, n + 1)


def schachbrett(n):
    """Build an n x n board as a set of (letter, number) squares."""
    spielfeld = set()
    for zahl in zahlen(n):
        spielfeld = spielfeld.union(
            tuple(zip(buchstaben(n), [zahl for i in range(n)])))
    return spielfeld


def geschlagene_felder(dame=('A', 1), schachbrett=schachbrett(8)):
    """Return all squares of *schachbrett* attacked by the queen on *dame*.

    Queens attack each other iff x1 == x2 or y1 == y2 or |x1 - x2| == |y1 - y2|.
    The queen's own square is included in the result. The default board is
    built once at definition time, which is fine because it is only read.
    """
    x1 = ord(dame[0])
    y1 = dame[1]
    felder = set()
    for feld in schachbrett:
        x2 = ord(feld[0])
        y2 = feld[1]
        if x1 == x2 or y1 == y2 or abs(x1 - x2) == abs(y1 - y2):
            felder.add(feld)
    return felder
# -

# ## Your solution

# +
def damenproblem(damen=8, schachbrett=schachbrett(8), positionen=None):
    """Find a solution of the queens problem.

    >>> damenproblem(damen=2, schachbrett=schachbrett(4))
    ('A1', 'B3')
    """
    # Fixed: the original default `positionen=set()` is a mutable default
    # argument shared across calls; create a fresh set per call instead
    # (backward-compatible: callers passing their own set are unaffected).
    if positionen is None:
        positionen = set()
    pass  # exercise stub: to be implemented by the student


run_docstring_examples(damenproblem, locals())
# -

# ## Apply the solution

m = 8
n = 8
loesung = damenproblem(m, schachbrett(n))
print("Eine Lösung, um {m} Damen auf einem Schachfeld der Grösse {n}x{n} zu platzieren, lautet:\n{loesung}".format(m=m, n=n, loesung=loesung))
exercises/zero/problems/Damenproblem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import torch import transformers from life_after_bert import MCDataset, evaluate_encoder_decoder # + device = "cuda" if torch.cuda.is_available() else "cpu" model_name = "t5-large" task_name = "Age Comparison" num_choices = 2 # - model = transformers.T5ForConditionalGeneration.from_pretrained(model_name) tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, mask_token="<extra_id_0>") # Set sentinel token as mask_token static_decoder_input_ids = tokenizer("<pad> <extra_id_0>", add_special_tokens=False, return_tensors="pt").input_ids dataset = MCDataset.load_data(task_name, num_choices, tokenizer) accuracy, (all_answers, all_preds) = evaluate_encoder_decoder(model, dataset, static_decoder_input_ids, device=device) accuracy
notebooks/eval_encoder_decoder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # tfbuilder tutorial
# The whole machinery of tfbuilder can be used by importing the convert function from the tfbuilder library.

from os import path
from tfbuilder import convert

# ### arguments `convert()`
# input_path = folder path containing the files to be converted
# output_path = folder path to which all tf-modules are to be written
# file_elem = define the necessary part in the source filename
# csv_delimiter = default is ',' but it can be replaced by any token
# tlg_out = `True` if one wants TLG codes as folder names, `False` if folder names come from metadata
# ignore_empty = `True` if source files that don't produce slot numbers need to be ignored
# generic = generic metadata to be present in every tf-file to be produced
# lang = language (referring to languages available in `langsettings`)
# typ = subtype of a language, if special behaviour is required, like `tlge` (tlg-e cdrom)
# header = if True, the converter expects csv-files to have a header
# version = version number to be assigned to the tf-module
# langsettings = langsettings to be imported; usually, this is the langsettings provided by tfbuilder
# multiprocessing = False --> no multiprocessing
#                 = True --> active multiprocessing; automatic assignment of the number of processor threads
#                 = int --> manual assignment of the number of processor threads
# chunksize = number of files to be assigned to each thread each cycle
# silent = if True, all TF-messages are suppressed
#
# #### remarks on `generic` and `langsettings`:
# Both are accessible and changeable in tf_config.py. However, one is able to pass his/her own settings (=dictionary) to the convert function...
# + # # convert('/media/ernstboogert/ResearchDat/sources', # convert(path.expanduser('~/github/pthu/sources/greek_sources/canonical-greekLit/data/tlg2046'), # # path.expanduser('~/github/pthu/OUT'), # '/media/ernstboogert/ResearchDat/TF_dissertation/Perseus', # ignore_empty=False, # tlg_out=False, # lang='greek', # typ=False, # header=False, # multiprocessing=False, # chunksize=1, # silent=False, # ) # + # convert(path.expanduser('~/github/pthu/sources/greek_sources'), # # path.expanduser('~/github/pthu/OUT'), # '/media/ernstboogert/ResearchDat/TF_dissertation/Perseus/12032020', # file_elem='grc', # ignore_empty=False, # tlg_out=True, # lang='greek', # typ=False, # header=False, # multiprocessing=False, # chunksize=10, # silent=False, # ) # + # convert(path.expanduser('~/github/TLG_E/tsv'), # path.expanduser('~/github/TF_dissertation/TLG_E'), # file_elem='tlg', # csv_delimiter='\t', # ignore_empty=True, # tlg_out=True, # lang='greek', # typ='tlge', # header=True, # multiprocessing=False, # chunksize=10, # silent=True, # ) # + # convert('/media/ernstboogert/ResearchDat/mss_out', # # path.expanduser('~/github/pthu/OUT'), # '/media/ernstboogert/ResearchDat/TF_dissertation/mss/17032020', # file_elem='', # ignore_empty=False, # tlg_out=False, # lang='greek', # typ=False, # header=False, # multiprocessing=False, # chunksize=10, # silent=False, # ) # + # from os import path # convert(path.expanduser('~/github/manuscripts/Muenster/preprocessed'), # # path.expanduser('~/github/pthu/OUT'), # path.expanduser('~/github/TF_dissertation/mss_muenster'), # file_elem='', # ignore_empty=False, # tlg_out=False, # lang='greek', # typ=False, # header=False, # multiprocessing=False, # chunksize=10, # silent=False, # ) # + from os import path convert(path.expanduser('~/github/manuscripts/Muenster/preprocessed'), # path.expanduser('~/github/pthu/OUT'), path.expanduser('~/github/TF_dissertation/Muenster_mss'), file_elem='', ignore_empty=False, tlg_out=False, lang='greek', typ=False, 
header=False, multiprocessing=3, chunksize=10, silent=True, ) # + # from os import path # convert(path.expanduser('~/github/manuscripts/Birmingham/John_preprocessed'), # # convert(path.expanduser('~/github/manuscripts/Birmingham/test_in'), # # path.expanduser('~/github/pthu/OUT'), # path.expanduser('~/github/TF_dissertation/Birmingham_John_mss'), # # path.expanduser('~/github/manuscripts/Birmingham/test_out'), # file_elem='', # ignore_empty=False, # tlg_out=False, # lang='greek', # typ=False, # header=False, # multiprocessing=3, # chunksize=10, # silent=True, # ) # + from os import path convert(path.expanduser('~/github/manuscripts/Muenster/preprocessed'), # path.expanduser('~/github/pthu/OUT'), path.expanduser('~/github/TF_dissertation/Muenster_mss'), file_elem='', ignore_empty=False, tlg_out=False, lang='greek', typ=False, header=False, multiprocessing=3, chunksize=10, silent=True, ) # -
tfbuilder/tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Read one person's data from stdin and store it in a dictionary.
cadastro = {}
for i in range(0, 1):
    nome = str(input('Nome: '))
    idade = int(input('Idade: '))
    sexo = str(input('Sexo [M/F]: ')).upper()
    # Bug fix: the original assigned cadastro[nome] twice (first the age,
    # then the sex), so the age was silently overwritten. Store both fields
    # together instead. The unused `lista = []` was also removed.
    cadastro[nome] = {'idade': idade, 'sexo': sexo}
print(cadastro)
Python/Exercicios_Curso_em_Videos/ex056.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import keras from keras.layers import Dense from keras.models import Model from keras.models import Sequential from keras.utils.np_utils import to_categorical from collections import Counter import numpy as np import matplotlib.pyplot as plt # - max_iter = 50 seq_fn = lambda z, c: z ** 2 + c def iterate_sequence(seq_fn, max_iter, c): return 1 if c.real > 0 else 0 def iterate_sequence(seq_fn, max_iter, c): z = c for i in range(max_iter): z = seq_fn(z, c) if (z.real * z.real + z.imag * z.imag) > 4: return 1 return 0 # # Model results visualization def generate_X(unit): c_list = [] width = 3 * unit height = 2 * unit for x in range(height): im = x * 2. / height - 1 for y in range(width): re = y * 3. / width - 2 c_list.append(np.array([re, im])) return np.stack(c_list) def generate_visualization(model, unit): width = 3 * unit height = 2 * unit X = generate_X(unit) y = model.predict_classes(X, batch_size = 64) return y.reshape((2 * unit, 3 * unit)) class FakeModel(): def predict_classes(self, X, **kwargs): return np.array([iterate_sequence(seq_fn, max_iter, complex(*sample)) for sample in X]) fake_model = FakeModel() res = generate_visualization(fake_model, 48) plt.imshow(res) # # Training samples generation nb_samples = 100000 samples = np.random.rand(nb_samples, 2) samples[:, 0] = samples[:, 0] * 3 - 2 samples[:, 1] = samples[:, 1] * 2 - 1 sample_img = np.array([iterate_sequence(seq_fn, max_iter, complex(*sample)) for sample in samples]) outside = samples[sample_img == 1] inside = samples[sample_img == 0][np.random.choice(samples.shape[0] - outside.shape[0], outside.shape[0])] X = np.concatenate([inside, outside]) y = np.concatenate([np.zeros(inside.shape[0]), np.zeros(outside.shape[0]) + 1]).astype(np.int32) y = 
to_categorical(y) # # Model definition model = Sequential([ Dense(512, input_dim = 2, activation = 'relu'), Dense(512, activation = 'relu'), Dense(512, activation = 'relu'), Dense(512, activation = 'relu'), Dense(512, activation = 'relu'), Dense(512, activation = 'relu'), Dense(512, activation = 'relu'), Dense(2, activation = 'softmax') ]) model.compile('adam', 'binary_crossentropy') model.fit(X, y, nb_epoch = 3, batch_size = 256, shuffle = True) res = generate_visualization(model, 32) plt.imshow(res) model.optimizer.lr = 0.0001 model.fit(X, y, nb_epoch = 3, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 3, batch_size = 256, shuffle = True) plt.imshow(generate_visualization(model, 32)) model.optimizer.lr = 1e-5 model.fit(X, y, nb_epoch = 3, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 3, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 3, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) plt.imshow(generate_visualization(model, 128)) model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) model.optimizer.lr = 1e-6 model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) plt.imshow(generate_visualization(model, 256)) model.optimizer.lr = 1e-7 model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) model.fit(X, y, nb_epoch = 6, batch_size = 256, shuffle = True) plt.imshow(generate_visualization(model, 256))
lambdas/es/indexer/test/data/normal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import argparse import logging import math import os import random import shutil import time from collections import OrderedDict import numpy as np import torch import torch.nn.functional as F import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from dataset.custom import DATASET_GETTERS from utils import AverageMeter, accuracy logger = logging.getLogger(__name__) best_acc = 0 def save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar'): filepath = os.path.join(checkpoint, filename) torch.save(state, filepath) if is_best: shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar')) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=7./16., last_epoch=-1): def _lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) no_progress = float(current_step - num_warmup_steps) / \ float(max(1, num_training_steps - num_warmup_steps)) return max(0., math.cos(math.pi * num_cycles * no_progress)) return LambdaLR(optimizer, _lr_lambda, last_epoch) def interleave(x, size): s = list(x.shape) return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:]) def de_interleave(x, size): s = list(x.shape) return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:]) def main(): parser = 
argparse.ArgumentParser(description='PyTorch FixMatch Training') parser.add_argument('--gpu-id', default='0', type=int, help='id(s) for CUDA_VISIBLE_DEVICES') parser.add_argument('--num-workers', type=int, default=4, help='number of workers') parser.add_argument('--dataset', default='cifar10', type=str, choices=['cifar10', 'cifar100'], help='dataset name') parser.add_argument('--num-labeled', type=int, default=4000, help='number of labeled data') parser.add_argument("--expand-labels", action="store_true", help="expand labels to fit eval steps") parser.add_argument('--arch', default='wideresnet', type=str, choices=['wideresnet', 'resnext'], help='dataset name') parser.add_argument('--total-steps', default=2**20, type=int, help='number of total steps to run') parser.add_argument('--eval-step', default=1024, type=int, help='number of eval steps to run') parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)') parser.add_argument('--batch-size', default=64, type=int, help='train batchsize') parser.add_argument('--lr', '--learning-rate', default=0.03, type=float, help='initial learning rate') parser.add_argument('--warmup', default=0, type=float, help='warmup epochs (unlabeled data based)') parser.add_argument('--wdecay', default=5e-4, type=float, help='weight decay') parser.add_argument('--nesterov', action='store_true', default=True, help='use nesterov momentum') parser.add_argument('--use-ema', action='store_true', default=True, help='use EMA model') parser.add_argument('--ema-decay', default=0.999, type=float, help='EMA decay rate') parser.add_argument('--mu', default=7, type=int, help='coefficient of unlabeled batch size') parser.add_argument('--lambda-u', default=1, type=float, help='coefficient of unlabeled loss') parser.add_argument('--T', default=1, type=float, help='pseudo label temperature') parser.add_argument('--threshold', default=0.95, type=float, help='pseudo label threshold') parser.add_argument('--out', 
default='result', help='directory to output the result') parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)') parser.add_argument('--seed', default=None, type=int, help="random seed") parser.add_argument("--amp", action="store_true", help="use 16-bit (mixed) precision through NVIDIA apex AMP") parser.add_argument("--opt_level", type=str, default="O1", help="apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--no-progress', action='store_true', help="don't use progress bar") args = parser.parse_args() global best_acc def create_model(args): if args.arch == 'wideresnet': import models.wideresnet as models model = models.build_wideresnet(depth=args.model_depth, widen_factor=args.model_width, dropout=0, num_classes=args.num_classes) elif args.arch == 'resnext': import models.resnext as models model = models.build_resnext(cardinality=args.model_cardinality, depth=args.model_depth, width=args.model_width, num_classes=args.num_classes) logger.info("Total params: {:.2f}M".format( sum(p.numel() for p in model.parameters())/1e6)) return model if args.local_rank == -1: device = torch.device('cuda', args.gpu_id) args.world_size = 1 args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device('cuda', args.local_rank) torch.distributed.init_process_group(backend='nccl') args.world_size = torch.distributed.get_world_size() args.n_gpu = 1 args.device = device logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning( f"Process rank: {args.local_rank}, " f"device: {args.device}, " f"n_gpu: {args.n_gpu}, " f"distributed training: {bool(args.local_rank != 
-1)}, " f"16-bits training: {args.amp}",) logger.info(dict(args._get_kwargs())) args.dataset == 'custom' args.num_classes = 8 if args.seed is not None: set_seed(args) if args.local_rank in [-1, 0]: os.makedirs(args.out, exist_ok=True) args.writer = SummaryWriter(args.out) if args.arch == 'wideresnet': args.model_depth = 28 args.model_width = 2 elif args.arch == 'resnext': args.model_cardinality = 4 args.model_depth = 28 args.model_width = 4 if args.local_rank not in [-1, 0]: torch.distributed.barrier() labeled_dataset, unlabeled_dataset, test_dataset = DATASET_GETTERS[args.dataset]( args, './data') if args.local_rank == 0: torch.distributed.barrier() train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler labeled_trainloader = DataLoader( labeled_dataset, sampler=train_sampler(labeled_dataset), batch_size=args.batch_size, num_workers=args.num_workers, drop_last=True) unlabeled_trainloader = DataLoader( unlabeled_dataset, sampler=train_sampler(unlabeled_dataset), batch_size=args.batch_size*args.mu, num_workers=args.num_workers, drop_last=True) test_loader = DataLoader( test_dataset, sampler=SequentialSampler(test_dataset), batch_size=args.batch_size, num_workers=args.num_workers) if args.local_rank not in [-1, 0]: torch.distributed.barrier() model = create_model(args) if args.local_rank == 0: torch.distributed.barrier() model.to(args.device) no_decay = ['bias', 'bn'] grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any( nd in n for nd in no_decay)], 'weight_decay': args.wdecay}, {'params': [p for n, p in model.named_parameters() if any( nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = optim.SGD(grouped_parameters, lr=args.lr, momentum=0.9, nesterov=args.nesterov) args.epochs = math.ceil(args.total_steps / args.eval_step) scheduler = get_cosine_schedule_with_warmup( optimizer, args.warmup, args.total_steps) if args.use_ema: from models.ema import ModelEMA ema_model = ModelEMA(args, model, 
args.ema_decay) args.start_epoch = 0 if args.resume: logger.info("==> Resuming from checkpoint..") assert os.path.isfile( args.resume), "Error: no checkpoint directory found!" args.out = os.path.dirname(args.resume) checkpoint = torch.load(args.resume) best_acc = checkpoint['best_acc'] args.start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) if args.use_ema: ema_model.ema.load_state_dict(checkpoint['ema_state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) if args.amp: from apex import amp model, optimizer = amp.initialize( model, optimizer, opt_level=args.opt_level) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) logger.info("***** Running training *****") logger.info(f" Task = {args.dataset}@{args.num_labeled}") logger.info(f" Num Epochs = {args.epochs}") logger.info(f" Batch size per GPU = {args.batch_size}") logger.info( f" Total train batch size = {args.batch_size*args.world_size}") logger.info(f" Total optimization steps = {args.total_steps}") model.zero_grad() train(args, labeled_trainloader, unlabeled_trainloader, test_loader, model, optimizer, ema_model, scheduler) def test(args, test_loader, model, epoch): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() end = time.time() if not args.no_progress: test_loader = tqdm(test_loader, disable=args.local_rank not in [-1, 0]) with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(test_loader): data_time.update(time.time() - end) model.eval() inputs = inputs.to(args.device) targets = targets.to(args.device) outputs = model(inputs) loss = F.cross_entropy(outputs, targets) prec1, prec5 = accuracy(outputs, targets, topk=(1, 5)) losses.update(loss.item(), inputs.shape[0]) top1.update(prec1.item(), inputs.shape[0]) 
top5.update(prec5.item(), inputs.shape[0]) batch_time.update(time.time() - end) end = time.time() if not args.no_progress: test_loader.set_description("Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. ".format( batch=batch_idx + 1, iter=len(test_loader), data=data_time.avg, bt=batch_time.avg, loss=losses.avg, top1=top1.avg, top5=top5.avg, )) if not args.no_progress: test_loader.close() logger.info("top-1 acc: {:.2f}".format(top1.avg)) logger.info("top-5 acc: {:.2f}".format(top5.avg)) return losses.avg, top1.avg if __name__ == '__main__': main()
train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Micromagnetic standard problem 4 # # ## Problem specification # # The sample is a thin film cuboid with dimensions: # # - length $l_{x} = 500 \,\text{nm}$, # - width $l_{y} = 125 \,\text{nm}$, and # - thickness $l_{z} = 3 \,\text{nm}$. # # The material parameters (similar to permalloy) are: # # - exchange energy constant $A = 1.3 \times 10^{-11} \,\text{J/m}$, # - magnetisation saturation $M_\text{s} = 8 \times 10^{5} \,\text{A/m}$. # # Magnetisation dynamics are governed by the Landau-Lifshitz-Gilbert equation # # $$\frac{d\mathbf{m}}{dt} = \underbrace{-\gamma_{0}(\mathbf{m} \times \mathbf{H}_\text{eff})}_\text{precession} + \underbrace{\alpha\left(\mathbf{m} \times \frac{d\mathbf{m}}{dt}\right)}_\text{damping}$$ # # where $\gamma_{0} = 2.211 \times 10^{5} \,\text{m}\,\text{A}^{-1}\,\text{s}^{-1}$ and Gilbert damping $\alpha=0.02$. # # In the standard problem 4, the system is first relaxed at zero external magnetic field and then, starting from the obtained equilibrium configuration, the magnetisation dynamics are simulated for two external magnetic fields $\mathbf{B}_{1} = (-24.6, 4.3, 0.0) \,\text{mT}$ and $\mathbf{B}_{2} = (-35.5, -6.3, 0.0) \,\text{mT}$. # # More detailed specification of Standard problem 4 can be found in Ref. 1. # # ## Simulation # # # In the first step, we import the required `discretisedfield` and `mumaxc` modules. import discretisedfield as df import mumaxc as mc # Now, we can set all required geometry and material parameters. 
# + # Geometry lx = 500e-9 # x dimension of the sample(m) ly = 125e-9 # y dimension of the sample (m) lz = 3e-9 # sample thickness (m) # Material (permalloy) parameters Ms = 8e5 # saturation magnetisation (A/m) A = 1.3e-11 # exchange energy constant (J/m) # Dynamics (LLG equation) parameters gamma = 2.211e5 # gyromagnetic ratio (m/As) alpha = 0.02 # Gilbert damping # - # ## First stage # # In the first stage, we need to relax the system at zero external magnetic field. # # We choose `stdprob4` to be the name of the system. This name will be used to name all output files created by OOMMF. system = mc.System(name='stdprob4') # In order to completely define the micromagnetic system, we need to provide: # # 1. hamiltonian $\mathcal{H}$ # 2. dynamics $\text{d}\mathbf{m}/\text{d}t$ # 3. magnetisation $\mathbf{m}$ # # The mesh is created by providing two points `p1` and `p2` between which the mesh domain spans and the size of a discretisation cell. We choose the discretisation to be $(5, 5, 3) \,\text{nm}$. # + cell = (5e-9, 5e-9, 3e-9) # mesh discretisation (m) mesh = mc.Mesh(p1=(0, 0, 0), p2=(lx, ly, lz), cell=cell) # Create a mesh object. # - # We can visualise the mesh domain and a discretisation cell: # %matplotlib inline mesh # **Hamiltonian:** In the second step, we define the system's Hamiltonian. In this standard problem, the Hamiltonian contains only exchange and demagnetisation energy terms. Please note that in the first simulation stage, there is no applied external magnetic field. Therefore, we do not add Zeeman energy term to the Hamiltonian. system.hamiltonian = mc.Exchange(A) + mc.Demag() # We can check what is the continuous model of system's Hamiltonian. system.hamiltonian # **Dynamics:** Similarly, the system's dynamics is defined by providing precession and damping terms (LLG equation). 
# + system.dynamics = mc.Precession(gamma) + mc.Damping(alpha) system.dynamics # check the dynamics equation # - # **Magnetisation:** Finally, we have to provide the magnetisation configuration that is going to be relaxed subsequently. We choose the uniform configuration in $(1, 0.25, 0.1)$ direction, and as norm (magnitude) we set the magnetisation saturation $M_\text{s}$. In order to create the magnetisation configuration, we create a `Field` object from the `discretisedfield` module. system.m = df.Field(mesh, value=(1, 0.25, 0.1), norm=Ms) # Now, the system is fully defined. # # **Energy minimisation:** The system (its magnetisation) is evolved using a particular driver. In the first stage, we need to relax the system - minimise its energy. Therefore, we create `MinDriver` object and drive the system using its `drive` method. md = mc.MinDriver() # create energy minimisation driver md.drive(system) # minimise the system's energy # The system is now relaxed. We can now obtain some data characteristic to the magnetisation field. # + print('The average magnetisation is {}.'.format(system.m.average)) print('The magnetisation at the mesh centre {} is {}.'.format( system.m.mesh.centre, system.m(system.m.mesh.centre))) # - # ## Second stage: field $\mathbf{B}_{1}$ # In the second stage, we need to apply an external magnetic field $\mathbf{B}_{1} = (-24.6, 4.3, 0.0) \,\text{mT}$ to the system. In other words, we have to add Zeeman energy term to the Hamiltonian. # Add Zeeman energy term to the Hamiltonian H1 = (-24.6e-3/mc.mu0, 4.3e-3/mc.mu0, 0.0) system.hamiltonian += mc.Zeeman(H1) # If we now inspect the Hamiltonian, we see that an additional Zeeman term is added. system.hamiltonian # Finally, we can run the simulation using `TimeDriver` this time. We run the magnetisation evolution for $t=1 \,\text{ns}$, during which we save the system's state $n=200$ times. 
# + t = 1e-9 # simulation time (s) n = 200 # number of data saving steps td = mc.TimeDriver() # create time driver td.drive(system, t=t, n=n) # drive the system # - # ### Postprocessing # When we drove the system using the `TimeDriver`, we specified that we want to save the magnetisation configuration $n=200$ times. A detailed table of all computed parameters from the last simulation can be shown from the datatable (`system.dt`), which is a `pandas` dataframe [2]. # # For instance, if we want to show the last few rows in the table, we run: system.dt.tail() # Finally, we want to plot the average magnetisation configuration `my` as a function of time `t`: myplot = system.dt.plot("t", "my") # ## References # # [1] µMAG Site Directory: http://www.ctcms.nist.gov/~rdm/mumag.org.html # # [2] Pandas: http://pandas.pydata.org/
docs/ipynb/standard_problem4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Reset the camera

# !echo 'jetson' | sudo -S systemctl restart nvargus-daemon && printf '\n'

# ### Initialisation

# +
import torch
import torchvision
from torch2trt import torch2trt
from torch2trt import TRTModule

TASK = "test"
CATEGORIES = ['apex']
device = torch.device('cuda')

# Load the road-following (path) model and convert it to TensorRT.
modelroad = torchvision.models.resnet18(pretrained=False)
modelroad.fc = torch.nn.Linear(512, 2 * len(CATEGORIES))
modelroad = modelroad.cuda().eval().half()
modelroad.load_state_dict(torch.load(TASK+'/path/model.pth'))
data = torch.zeros((1, 3, 224, 224)).cuda().half()
modelroad_trt = torch2trt(modelroad, [data], fp16_mode=True)
torch.save(modelroad_trt.state_dict(), TASK+'/road_model_trt.pth')
modelroad_trt = TRTModule()
modelroad_trt.load_state_dict(torch.load(TASK+'/road_model_trt.pth'))

# Load the traffic-sign detection model (frozen TF graph via TensorRT).
# from jetbot import ObjectDetector
# model = ObjectDetector('ssd_mobilenet_v2_coco.engine')
from PIL import Image
import sys
import os
import urllib
import tensorflow.contrib.tensorrt as trt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import tensorflow as tf
import numpy as np
import time

outputNames = ['detection_boxes', 'detection_classes', 'detection_scores', 'num_detections']
inputNames = ['image_tensor']
from tensorflow.python.platform import gfile

tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_sess = tf.Session(config=tf_config)
with gfile.FastGFile('./model_tf/v2/trt_graph.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
tf_sess.graph.as_default()
tf.import_graph_def(graph_def, name='')  # import the frozen computation graph

# Fixed: the original read `input_names[0]`, but the variable defined above is
# `inputNames` -- that was a guaranteed NameError.
tf_input = tf_sess.graph.get_tensor_by_name(inputNames[0] + ':0')
tf_scores = tf_sess.graph.get_tensor_by_name('detection_scores:0')
tf_boxes = tf_sess.graph.get_tensor_by_name('detection_boxes:0')
tf_classes = tf_sess.graph.get_tensor_by_name('detection_classes:0')
tf_num_detections = tf_sess.graph.get_tensor_by_name('num_detections:0')

from jetracer.nvidia_racecar import NvidiaRacecar
car = NvidiaRacecar()
# -

# Initialise the camera

# +
from jetcam.csi_camera import CSICamera
camera = CSICamera(width=300, height=300, capture_fps=50)
# -

# Start driving

# +
from utils import preprocess
import numpy as np
import threading
import ipywidgets
import time

car.steering_gain = -0.55
car.steering_offset = -0.13
car.throttle_gain = 0.5
# car.throttle_gain = 0.5
# car.throttle = 0.25

speed_widget = ipywidgets.IntText(description='速度', value=20)
run_button = ipywidgets.Button(description='启动')
state = 1  # 1 = stopped, 0 = driving (toggled by the button callback below)


# Main driving loop.
def run(car, camera, speed_widget, run_button):
    """Drive the car: follow the road with the path model and react to
    traffic signs detected by the TF object-detection graph.

    NOTE(review): the original notebook body was not valid Python (C-style
    braces on the inner defs, a bare `elif`, `//` comments, dicts used as
    sets).  The code below restores it to valid Python with the apparent
    intent preserved; the nesting of the crossing logic was reconstructed
    from comments and should be confirmed on the vehicle.
    """
    global state
    loop = 0
    # Traffic-light state: 0 none seen, 1 must stop for 5 s, 2 already handled.
    signalRGB = 0
    # Direction at a crossing: 0 straight, -1 left, 1 right, 2 undecided.
    chooseDirect = 2
    forbiddenDirect = set()  # was `{}` (a dict); set operations below need a set
    enableDirect = set()     # was `{}` (a dict)
    # 1 while on the road, 0 once a crossing has been entered.
    inRoad = 0
    # Confidence threshold for the crossing classifier.
    condition = 0.9

    # Detection class names (by detector class id):
    # "corner", "no left turn", "no right turn", "no direct line", "no entry",
    # "left turn", "right turn", "direct line", "signal", "footway"

    # Sign handlers.  The original declared these with C-style braces and
    # used `forbiddenDirect.append(...)` (dicts have no append).
    def noLeftTurn():
        forbiddenDirect.add(-1)

    def noRightTurn():
        forbiddenDirect.add(1)

    def noDirectTurn():  # also used for "no entry"
        forbiddenDirect.add(0)

    def leftTurn():
        nonlocal chooseDirect
        chooseDirect = -1

    def rightTurn():
        nonlocal chooseDirect
        chooseDirect = 1

    def directTurn():
        nonlocal chooseDirect
        chooseDirect = 0

    def footway():
        pass  # not handled yet

    def signal():
        nonlocal signalRGB
        if signalRGB == 0:
            signalRGB = 1  # was `signalRGB==1`: a comparison, not an assignment

    def corner():
        pass  # not handled yet

    # Classify the crossing type and set the physically available directions.
    def checkCrossing(output):
        # TODO: use the position of the "corner" markers to confirm the crossing.
        # FIXME(review): CROSS_CATEGORIES is never defined in this notebook --
        # presumably it comes from a separate crossing-classification model.
        nonlocal enableDirect, inRoad
        if output[CROSS_CATEGORIES.index('十字路口')] > condition:
            enableDirect = {-1, 1, 0}
        elif output[CROSS_CATEGORIES.index('左三叉路口')] > condition:
            enableDirect = {-1, 0}
        elif output[CROSS_CATEGORIES.index('右三叉路口')] > condition:
            enableDirect = {1, 0}
        elif output[CROSS_CATEGORIES.index('左右三叉路口')] > condition:
            enableDirect = {-1, 1}
        else:
            # Original ended in a bare `elif` with no condition; treating
            # "no crossing recognised" as "still on the road".
            inRoad = 1

    # Pick a direction from the allowed minus forbidden ones and steer.
    # If signalRGB == 1 (red light), stop for 5 seconds first.
    def inCrossing():
        nonlocal signalRGB, chooseDirect, enableDirect
        if signalRGB == 1:
            signalRGB = 2
            car.throttle = -0.1
            time.sleep(0.5)
            car.throttle = 0
            time.sleep(4.5)
            car.throttle = speed_widget.value * 0.01
        if chooseDirect == 2:
            enableDirect = enableDirect - forbiddenDirect
            if len(enableDirect) != 0:
                # was `enableDirect[0]` -- sets are not indexable
                chooseDirect = sorted(enableDirect)[0]
        if chooseDirect != 2:
            car.steering = 0.45 * chooseDirect

    # Function references, not calls: the original invoked every handler once
    # while building the list and stored the (None) return values.
    signalsFunc = [corner, noLeftTurn, noRightTurn, noDirectTurn, noDirectTurn,
                   leftTurn, rightTurn, directTurn, signal, footway]

    while True:
        if state == 1:
            car.throttle = 0
            car.steering = 0
            time.sleep(0.5)
        else:
            # Grab and preprocess a camera frame.
            image = camera.read()
            image = preprocess(image).half()
            # detections = model(image)  # FIXME(review): `model` is never
            # defined (the ObjectDetector import above is commented out);
            # the dead call is disabled here to avoid a NameError.
            if loop % 5 == 0 and inRoad == 1:
                # Recognise traffic signs every 5 frames (skipped once a
                # crossing has been entered).
                scores, boxes, classes, num_detections = tf_sess.run(
                    [tf_scores, tf_boxes, tf_classes, tf_num_detections],
                    feed_dict={tf_input: image[None, ...]})
                boxes = boxes[0]  # index by 0 to remove batch dimension
                scores = scores[0]
                classes = classes[0]
                num_detections = num_detections[0]
                # was `for index, scores in enumerate(scores)`: the loop
                # variable shadowed the scores array
                for index, score in enumerate(scores):
                    if score > 0.6:
                        box = boxes[index]
                        # detector classes come back as floats; indexing a
                        # list with a float raises TypeError
                        cls = int(classes[index])
                        # NOTE(review): box[2]-box[1] mixes y/x coordinates of
                        # a [ymin, xmin, ymax, xmax] box -- confirm intent.
                        if cls == 1:
                            signalsFunc[cls]()
                        elif cls == 9 and box[2] - box[1] > 0.15:
                            signalsFunc[cls]()
                        elif 2 <= cls <= 8 and box[2] - box[1] > 0.2 and chooseDirect == 2:
                            signalsFunc[cls]()
                if len(enableDirect) == 0:
                    # FIXME(review): `output` is undefined at this point --
                    # checkCrossing presumably expects crossing-classifier
                    # scores, not the road model output computed below.
                    checkCrossing(output)
                # Entered a crossing.
                if len(enableDirect) != 0:
                    inRoad = 0
                    inCrossing()
            # Not in a crossing: reset the per-crossing state.
            if len(enableDirect) == 0 or inRoad == 1:
                inRoad = 1
                chooseDirect = 2
                forbiddenDirect = set()
                enableDirect = set()
            loop += 1
            # Only the road model steers while on the road.
            if inRoad == 1:
                output = modelroad_trt(image).detach().cpu().numpy().flatten()
                x = float(output[0])
                car.steering = x
                car.throttle = speed_widget.value * 0.01


execute_thread = threading.Thread(target=run, args=(car, camera, speed_widget, run_button))


def runclick(c):
    """Toggle driving on button click by flipping the global `state`."""
    global state
    print(run_button.description)
    if run_button.description == '启动':
        run_button.description = '停止'
        state = 0
    else:
        run_button.description = '启动'
        state = 1


run_button.on_click(runclick)
data_collection_widget = ipywidgets.HBox([run_button, speed_widget])
display(data_collection_widget)
execute_thread.start()
# -
notebooks/record_train/t_city_sim_ob.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from PIL import Image


def rotate_and_save(im, filename, angle, **rotate_kwargs):
    """Rotate *im* by *angle* degrees and save the result under ``data/dst/``.

    Extra keyword arguments (``resample``, ``expand``, ``center``,
    ``translate``, ``fillcolor``) are forwarded to ``Image.rotate``.
    The rotated image is returned so callers can inspect it.
    """
    rotated = im.rotate(angle, **rotate_kwargs)
    rotated.save('data/dst/' + filename, quality=95)
    return rotated


im = Image.open('data/src/lena.jpg')

# ![lena](data/src/lena.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_90.jpg', 90)

# ![lena_rotate_90](data/dst/lena_rotate_90.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45.jpg', 45)

# ![lena_rotate_45](data/dst/lena_rotate_45.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_bicubic.jpg', 45, resample=Image.BICUBIC)

# ![lena_rotate_45_bicubic](data/dst/lena_rotate_45_bicubic.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_90_expand.jpg', 90, expand=True)

# ![lena_rotate_90_expand](data/dst/lena_rotate_90_expand.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_expand.jpg', 45, expand=True)

# ![lena_rotate_45_expand](data/dst/lena_rotate_45_expand.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_change_center.jpg', 45, center=(0, 60))

# ![lena_rotate_45_change_center](data/dst/lena_rotate_45_change_center.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_change_center_expand.jpg', 45,
                            center=(0, 60), expand=True)

# ![lena_rotate_45_change_center_expand](data/dst/lena_rotate_45_change_center_expand.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_0_translate.jpg', 0, translate=(100, 50))

# ![lena_rotate_0_translate](data/dst/lena_rotate_0_translate.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_translate.jpg', 45, translate=(100, 50))

# ![lena_rotate_45_translate](data/dst/lena_rotate_45_translate.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_translate_expand.jpg', 45,
                            translate=(100, 50), expand=True)

# ![lena_rotate_45_translate_expand](data/dst/lena_rotate_45_translate_expand.jpg)

im_rotate = rotate_and_save(im, 'lena_rotate_45_fillcolor_expand.jpg', 45,
                            fillcolor=(255, 128, 0), expand=True)

# ![lena_rotate_45_fillcolor_expand](data/dst/lena_rotate_45_fillcolor_expand.jpg)
notebook/pillow_rotate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 7, "hidden": false, "row": 47, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "skip"} # # Presentation notes: # * Install `adaptive`: `conda install -c conda-forge adaptive` # * Run this presentation with [`RISE`](https://github.com/damianavila/RISE): `conda install -c damianavila82 rise` # * Install [`nbextensions`](https://github.com/ipython-contrib/jupyter_contrib_nbextensions): `conda install -c conda-forge jupyter_contrib_nbextensions` # * Enable "Hide code" plugin with `jupyter nbextension enable hide_input/main` # * Click on the "Enter RISE Slideshow" button # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 8, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "skip"} # %%HTML <style> div.prompt {display: none} div.cell {padding: 0px} .slides{ width: 95% !important; height: 95% !important; bottom: 0px; overflow-y: hidden !important; overflow-x: hidden !important; } </style> # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 8, "height": 4, "hidden": false, "row": 17, "width": 4}, "report_default": {}}}} hide_input=false slideshow={"slide_type": "skip"} import adaptive adaptive.notebook_extension() # Import other modules that are used later from functools import partial import random import holoviews as hv # Plotting from matplotlib import pyplot as plt import numpy as np def f(x, offset=0.07357338543088588): a = 0.01 return x + a**2 / (a**2 + (x - offset)**2) def plot_sharp_peak(figsize): plt.xkcd(randomness=0.5) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(1, 1, 1) ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') plt.xticks([]) 
plt.yticks([]) xs = np.linspace(0, 1, 300) ys = xs + 0.005**2 / (0.005**2 + (xs - 0.5)**2) plt.annotate( r'SHARP PEAK', xy=(0.5, 1.2), arrowprops=dict(arrowstyle='->'), xytext=(0.6, 0.8)) plt.text(0.02, 1, r'$f(x) = x + \frac{a^2}{a^2 + {(x-x_0)}^2}$', fontdict={'size': 20}); plt.plot(xs, ys) plt.title("1D example function") plt.xlabel('x') plt.ylabel('y') return fig def plot_loss_interval(learner): if learner.npoints >= 2: x_0, x_1 = max(learner.losses, key=learner.losses.get) y_0, y_1 = learner.data[x_0], learner.data[x_1] plot = hv.Scatter(([x_0, x_1], [y_0, y_1])) else: plot = hv.Scatter([]) return plot.opts(style=dict(size=6, color='r')) learner = adaptive.Learner1D(f, bounds=(-1, 1)) plots = {0: learner.plot()} for n in range(1, 101): xs, _ = learner.ask(1) learner.tell(xs, map(learner.function, xs)) plots[n] = (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1] hm_plots = hv.HoloMap(plots, kdims=['npoints']).relabel('something smarter') hom_learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.uniform_loss) hom_plots = {0: hom_learner.plot()} for n in range(1, 101): xs, _ = hom_learner.ask(1) hom_learner.tell(xs, map(hom_learner.function, xs)) hom_plots[n] = (hom_learner.plot() * plot_loss_interval(hom_learner))[:, -1.1:1.1] hm_hom_plots = hv.HoloMap(hom_plots, kdims=['npoints']).relabel('homogeneous') # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {}}}} hide_input=true slideshow={"slide_type": "slide"} # <div> # <img style="float: left; vertical-align:middle" src="presentation/logo-small.png"> # <h1><span style="">adaptive</span></h1> # </div> # a tool for adaptive and parallel evaluation of functions # # – [github.com/python-adaptive/adaptive](https://github.com/python-adaptive/adaptive) <br> # – <NAME>: [<EMAIL>](mailto:<EMAIL>) <br> # + [markdown] extensions={"jupyter_dashboards": 
{"version": 1, "views": {"grid_default": {"col": 4, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "fragment"} # <table> # <tr> # <th><img src="presentation/bas.jpg" alt="<NAME>" height="200" width="200">me</th> # <th><img src="presentation/joe.jpg" alt="<NAME>" height="200" width="200"><NAME></th> # <th><img src="presentation/anton.png" alt="<NAME>" height="200" width="200"><NAME></th> # </tr> # </table> # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 7, "hidden": false, "row": 54, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "slide"} # # This talk # * Who am I? # * Which problem are we solving? # * A motivating example # * Core concepts of `adaptive` # * Live coding example in the Jupyter notebook # * Quantum transport example on the Azure cluster (very dangerous) # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 5, "hidden": false, "row": 61, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "slide"} # # Who am I? # * Microsoft Research Intern in Santa Barbara # * Ph.D. 
student in Delft (Netherlands) with <NAME> in the Quantum Tinkerer group # * Theoretical quantum mechanics (Majorana hybrid semiconductor superconductor nanowire devices) # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 4, "height": 4, "hidden": false, "row": 20, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "subslide"} # # Heavy numerics # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 20, "hidden": false, "row": 66, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "fragment"} # Proof: cluster usage of last 60 days, I burned `0.5 yr/day` of CPU time # <img src="presentation/cluster_new.png" width="40%"> # Source: http://hpc05.quantumtinkerer.tudelft.nl/ # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 19, "hidden": false, "row": 86, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "subslide"} # from arXiv:1807.01940 <img src="presentation/example_plot3.png" width="55%"> # # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 5, "hidden": false, "row": 4, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "slide"} # # The problems # * How to sample a function? # * When did we sufficiently sample the function? # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 11, "height": 4, "hidden": false, "row": 13, "width": null}, "report_default": {}}}} slideshow={"slide_type": "slide"} # # Simplest example, 1D function # We start with the most common use-case: sampling a 1D function $\ f: ℝ → ℝ$. 
# # We will use the following function: # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 10, "hidden": false, "row": 105, "width": 4}, "report_default": {}}}} hide_input=true slideshow={"slide_type": "-"} plot_sharp_peak((8, 6)); # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": false, "row": 9, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "slide"} # ## Homogeneous sampling, _the usual_ # ```python # xs = np.linspace(-1, 1, 100) # ys = f(xs) # ``` # _Strategy_: choose a point in the middle of the largest interval $\Delta$x # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 4, "height": 11, "hidden": false, "row": 9, "width": 4}, "report_default": {}}}} hide_input=true slideshow={"slide_type": "-"} # %%output size=150 hm_hom_plots # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 8, "height": 4, "hidden": false, "row": 9, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "slide"} # ## Better sampling, minimize the distance between the points # _Strategy_: choose a point in the middle of the largest interval $\sqrt{\Delta x^2 + \Delta y^2}$ # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 11, "hidden": false, "row": 13, "width": 4}, "report_default": {}}}} hide_input=true slideshow={"slide_type": "-"} # %%output size=120 hm_hom_plots + hm_plots # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 12, "hidden": false, "row": 115, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "slide"} # # the `adaptive` package # [`adaptive`](https://gitlab.kwant-project.org/qt/adaptive-evaluation) is an open-source package: # * 5675 lines of code and 732 commits # * first version released Feb 2018 # # **It does**: # * Smarter 
sampling of functions (or experiments) # * 1D, 2D # * random functions (0D) # * numerical integration # * N-D _(in development version)_ # * easy parallelization # * provide tools for live-plotting of the data # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 9, "hidden": false, "row": 24, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "slide"} # # The `learner` object # # A learner takes the function to "learn" and the `bounds`. # ```python # learner = adaptive.Learner1D(f, bounds=(-1, 1)) # ``` # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 4, "height": 4, "hidden": false, "row": 105, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "fragment"} # The three most important methods: # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 8, "height": 4, "hidden": false, "row": 105, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "fragment"} # `loss = learner.loss()` "quality factor" how well do the points describe the function? 
# + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 4, "height": 4, "hidden": false, "row": 109, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "fragment"} # `xs, loss_improvements = learner.ask(n=10)` give me new points, and tell me how "good" the points are # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 8, "height": 4, "hidden": false, "row": 109, "width": 4}, "report_default": {}}}} slideshow={"slide_type": "fragment"} # # `learner.tell(x_new, y_new)` add the newly calculated data # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 14, "hidden": false, "row": 33, "width": 12}, "report_default": {}}}} hide_input=false slideshow={"slide_type": "slide"} # # The `runner` object, _running the `learner`_ # # We basically want to do something like _(not really how it works internally)_: # # ```python # learner = adaptive.Learner1D(f, bounds=(-1, 1)) # while learner.loss() > 0.01: # xs, _ = learner.ask(4) # we do nothing with the `loss_improvements` now # for x in xs: # y = learner.function(x) # learner.tell(x, y) # ``` # # This has some problems: <br> # • Not using all the resources<br> # • Blocks the kernel, so we cannot plot the data while the calculation is in progress # # The `Runner` solves these problems. 
# ```python # learner = adaptive.Learner1D(f, bounds=(-1, 1)) # runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, executor=client) # ``` # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 19, "hidden": false, "row": 127, "width": 12}, "report_default": {}}}} hide_input=true slideshow={"slide_type": "slide"} # # switch to the a notebook: [tutorial-notebook.ipynb](tutorial-notebook.ipynb) # ![](presentation/live-coding-demos.jpg) # + hide_input=true slideshow={"slide_type": "slide"} from IPython.display import IFrame, HTML display(HTML('<h1>Thank you!</h2>')) IFrame('https://ghbtns.com/github-btn.html?user=python-adaptive&repo=adaptive&type=star&count=true&size=large', width=160, height=30) # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 11, "hidden": false, "row": 150, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "-"} # ### Install with # ```bash # conda install -c conda-forge adaptive # recommended # pip install adaptive # ``` # ### Questions on # * Gitter chat [https://gitter.im/python-adaptive/adaptive](gitter.im/python-adaptive/adaptive) # * Github issues [https://github.com/python-adaptive/adaptive](github.com/python-adaptive/adaptive) # + [markdown] slideshow={"slide_type": "slide"} # <img src="presentation/majoranas_on_fire.png" height="80%"/> # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 25, "hidden": false, "row": 161, "width": 12}, "report_default": {}}}} slideshow={"slide_type": "slide"} # ![](presentation/trump.gif)
tutorial-slides.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Network interpretability of lung X-rays # # In this tutorial, we demonstrate visualising network interpretability through a classification task. # # The data are a set of X-rays collated from a variety of sources. The labels used are: # - normal (the absence of the following classes) # - pneumonia # - covid # # We then use GradCam and occlusion sensitivity to interpret the trained network's classification choices. # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/modules/interpretability/covid_classification.ipynb) # !python -c "import monai" || pip install -q "monai[tqdm]" # !python -c "import matplotlib" || pip install -q matplotlib # + import os import zipfile from tqdm import tqdm from glob import glob from enum import Enum import numpy as np import torch import random import matplotlib.pyplot as plt import matplotlib.patches as mpatches from sklearn.metrics import ( classification_report, confusion_matrix, ConfusionMatrixDisplay ) import monai from monai.networks.utils import eval_mode from monai.networks.nets import densenet121 from monai.transforms import ( Compose, LoadImage, Lambda, AddChannel, ScaleIntensity, ToTensor, RandRotate, RandFlip, Rand2DElastic, RandZoom, Resize, ) monai.config.print_config() random_seed = 42 random.seed(random_seed) monai.utils.set_determinism(random_seed) np.random.seed(random_seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # - # # Download the data # # The data is currently hosted on Kaggle: # https://www.kaggle.com/ericspod/project-monai-2020-bootcamp-challenge-dataset # # Unfortunately, there's no way to download this automatically, so you'll have to download it 
yourself and then point this notebook to look at the downloaded `zip` file. data_path_zip = os.path.join(os.environ.get( "MONAI_DATA_DIRECTORY"), "archive.zip") data_path = os.path.join(os.environ.get( "MONAI_DATA_DIRECTORY"), "covid_xray_combined_small") if not os.path.isdir(data_path): if not os.path.isfile(data_path_zip): raise RuntimeError("TODO: Download needed.") with zipfile.ZipFile(data_path_zip, 'r') as zf: for member in tqdm(zf.infolist(), desc='Extracting '): try: zf.extract(member, data_path) except zipfile.error: pass # # Load images # # For ease, we'll only use the images in the training folder # + crop_size = (320, 320) # set size of images for network class Diagnosis(Enum): normal = 0 pneumonia = 1 covid = 2 num_class = len(Diagnosis) def get_label(path): fname = os.path.basename(path) if fname[:6] == "normal": return Diagnosis.normal.value elif fname[:9] == "pneumonia": return Diagnosis.pneumonia.value elif fname[:5] == "covid": return Diagnosis.covid.value else: raise RuntimeError(f"Unknown label: {path}") class CovidImageDataset(torch.utils.data.Dataset): def __init__(self, files, transforms, even_balance=True): self.image_files = files self.labels = list(map(get_label, self.image_files)) self.transforms = transforms # For even balance, find out which diagnosis has the fewest images # and then get that many of each diagnosis if even_balance: # fewest images of any diagnosis num_to_keep = min(self.labels.count(i.value) for i in Diagnosis) print(f"num to keep per class: {num_to_keep}") self.image_files = [] for d in Diagnosis: files_for_diagnosis = \ [file for file in files if get_label(file) == d.value] self.image_files += files_for_diagnosis[:num_to_keep] random.shuffle(self.image_files) self.labels = list(map(get_label, self.image_files)) def __len__(self): return len(self.image_files) def __getitem__(self, index): return self.transforms(self.image_files[index]), self.labels[index] train_transforms = Compose([ LoadImage(image_only=True), 
Lambda(lambda im: im if im.ndim == 2 else im[..., 0]), AddChannel(), Resize(crop_size, "area"), ScaleIntensity(), RandRotate(range_x=15, prob=0.5, keep_size=True), RandFlip(spatial_axis=0, prob=0.5), Rand2DElastic((0.3, 0.3), (1.0, 2.0)), RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5), ToTensor(), ]) val_transforms = Compose([ LoadImage(image_only=True), Lambda(lambda im: im if im.ndim == 2 else im[..., 0]), AddChannel(), Resize(crop_size, "area"), ScaleIntensity(), ToTensor(), ]) training_data_path = os.path.join(data_path, "training_data", "training_data") all_files = glob(os.path.join(training_data_path, "*.png")) random.shuffle(all_files) train_frac = 0.9 num_training_files = round(train_frac * len(all_files)) train_files = all_files[:num_training_files] val_files = all_files[num_training_files:] batch_size = 100 train_ds = CovidImageDataset(train_files, train_transforms, False) train_loader = torch.utils.data.DataLoader( train_ds, batch_size=batch_size, shuffle=True, num_workers=10) val_ds = CovidImageDataset(val_files, val_transforms, False) val_loader = torch.utils.data.DataLoader( val_ds, batch_size=batch_size, shuffle=True, num_workers=10) # - # Display examples fig, axes = plt.subplots(1, 3, figsize=(25, 15), facecolor='white') for true_label in Diagnosis: fnames = [v for v in val_files if true_label.name in os.path.basename(v)] random.shuffle(fnames) fname = fnames[0] im = val_transforms(fname) ax = axes[true_label.value] im_show = ax.imshow(im[0], cmap='gray') ax.set_title(os.path.basename(fname), fontsize=25) ax.axis('off') # # Training def create_new_net(): return densenet121( spatial_dims=2, in_channels=1, out_channels=num_class ).to(device) # + # %matplotlib notebook max_epochs = 30 val_interval = 1 lr = 1e-5 epoch_loss_values = [] auc = [] acc = [] best_acc = -1 net = create_new_net() net = net.to(device) loss = torch.nn.CrossEntropyLoss() opt = torch.optim.Adam(net.parameters(), lr) # Plotting stuff fig, ax = plt.subplots(1, 1, facecolor='white') 
ax.set_xlabel('Epoch') ax.set_ylabel('Metrics') plt.ion() fig.show() fig.canvas.draw() for epoch in range(max_epochs): net.train() epoch_loss = 0 for batch_data in train_loader: inputs, labels = batch_data[0].to(device), batch_data[1].to(device) opt.zero_grad() outputs = net(inputs) lossval = loss(outputs, labels) lossval.backward() opt.step() epoch_loss += lossval.item() epoch_loss /= len(train_loader) epoch_loss_values.append(epoch_loss) if (epoch + 1) % val_interval == 0: with eval_mode(net): y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: val_images, val_labels = val_data[0].to( device), val_data[1].to(device), outputs = net(val_images) y_pred = torch.cat([y_pred, outputs], dim=0) y = torch.cat([y, val_labels], dim=0) auc_metric = monai.metrics.compute_roc_auc( y_pred, y, to_onehot_y=True, softmax=True) auc.append(auc_metric) acc_value = torch.eq(y_pred.argmax(dim=1), y) acc_metric = acc_value.sum().item() / len(acc_value) acc.append(acc_metric) if acc_metric > best_acc: best_acc = acc_metric torch.save(net.state_dict(), "best_acc_lung_xray_densenet.pth") ax.clear() train_epochs = np.linspace(1, epoch + 1, epoch + 1) ax.plot(train_epochs, epoch_loss_values, label='Avg. 
loss') val_epochs = np.linspace( 1, epoch + 1, np.floor( (epoch + 1) / val_interval).astype(np.int32)) ax.plot(val_epochs, acc, label='ACC') ax.plot(val_epochs, auc, label='AUC') ax.set_xlabel('Epoch') ax.set_ylabel('Metrics') ax.legend() fig.canvas.draw() # + # %matplotlib inline # Load best model net = create_new_net().to(device) net.load_state_dict(torch.load("best_acc_lung_xray_densenet.pth")) net.eval() with eval_mode(net): y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: val_images, val_labels = val_data[0].to( device), val_data[1].to(device), outputs = net(val_images) y_pred = torch.cat([y_pred, outputs.argmax(dim=1)], dim=0) y = torch.cat([y, val_labels], dim=0) print(classification_report( y.cpu().numpy(), y_pred.cpu().numpy(), target_names=[d.name for d in Diagnosis])) cm = confusion_matrix( y.cpu().numpy(), y_pred.cpu().numpy(), normalize='true', ) disp = ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=[d.name for d in Diagnosis], ) disp.plot(ax=plt.subplots(1, 1, facecolor='white')[1]) # - # # Interpretability # # Use GradCAM and occlusion sensitivity for network interpretability. # # The occlusion sensitivity returns two images: the sensitivity image and the most probable class. # # * Sensitivity image -- how the probability of an inferred class changes as the corresponding part of the image is occluded. # * Big decreases in the probability imply that that region was important in inferring the given class # * The output is the same as the input, with an extra dimension of size N appended. Here, N is the number of inferred classes. To then see the sensitivity image of the class we're interested (maybe the true class, maybe the predcited class, maybe anything else), we simply do ``im[...,i]``. # * Most probable class -- if that part of the image is covered up, does the predicted class change, and if so, to what? 
# # In this example the network has been sufficiently trained that the predicted class doesn't change as parts of the image are occluded. However, one can imagine how this feature might be useful when the results are less than satisfactory. # + # for name, _ in net.named_modules(): print(name) target_layer = "class_layers.relu" gradcam = monai.visualize.GradCAM(nn_module=net, target_layers=target_layer) occ_sens = monai.visualize.OcclusionSensitivity( nn_module=net, mask_size=10, n_batch=batch_size, stride=10) # + # Display examples subplot_shape = [4, num_class] fig, axes = plt.subplots(*subplot_shape, figsize=(25, 20), facecolor='white') for true_label in Diagnosis: fnames = [v for v in val_files if true_label.name in os.path.basename(v)] random.shuffle(fnames) # Find a correctly predicted example for fname in fnames: img = val_transforms(fname)[None].to(device) y_pred = net(img) pred_label = Diagnosis(y_pred.argmax(1).item()) if pred_label == true_label: break im_title = f"{os.path.basename(fname)}\npredicted as {pred_label.name}" for d in Diagnosis: im_title += f"\n{d.name}: {y_pred[0,d.value]:.3}" res_cam = gradcam(x=img, class_idx=true_label.value) occ_map, occ_most_prob = occ_sens(x=img) occ_map = occ_map[..., true_label.value] # the rest is for visualisations for row, (im, title) in enumerate(zip( [img, res_cam, occ_map, occ_most_prob], [im_title, "CAM", "Occ. sens.", "Occ. 
sens.\nmost probable"], )): cmap = 'gray' if row == 0 else 'jet' col = true_label.value ax = axes[row, col] if isinstance(im, torch.Tensor): im = im.detach().cpu() if row != 3: im_show = ax.imshow(im[0][0], cmap=cmap) else: im_show = ax.imshow(im[0][0], cmap=cmap, vmin=0, vmax=num_class - 1) # for the most probable, need to give the classes in the legend handles = [mpatches.Patch( color=im_show.cmap(im_show.norm(d.value)), label=d.name) for d in Diagnosis] ax.legend(handles=handles, loc='upper center', bbox_to_anchor=(0.5, -0.05), fontsize=20) ax.set_title(title, fontsize=25) ax.axis('off') fig.colorbar(im_show, ax=ax)
modules/interpretability/covid_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Design mockup for "promise" tensors in PySyft: a promise stands in for a
# remote tensor that is fulfilled (bound to real data) later, letting workers
# queue up computations before their inputs arrive.
# NOTE(review): this notebook is an API sketch and is not expected to run
# end-to-end against a released version of syft.

import torch as th
import syft as sy

# populate globals() with sandbox workers (alice, bob, ...)
sy.create_sandbox(globals(), False, False)

# # Step 1: Action Events

# model parameters already living on each remote worker
alices_params = th.tensor([1, 2, 3, 4]).send(alice)
bobs_params = th.tensor([1, 2, 3, 4]).send(bob)

# the input data is still local at this point
input_data = th.tensor([1, 2, 3, 4])

# these ways of forming promises are identical
alice_data_promise = sy.promise(alice, id=input_data.id)
# FIX(review): this was bound as `bob_data_promise`, but every later reference
# uses `bobs_data_promise`, which raised a NameError; the binding is renamed so
# the notebook is self-consistent.
bobs_data_promise = input_data.promise(bob)

# +
# Sketch of the worker-side bookkeeping (pseudo-code):
# FIX(review): was `alices.promise_queue` — no `alices` object exists; the
# sandbox worker created above is named `alice`.
alice.promise_queue = {}
# key = tensor ID
# value = list of function which required this tensor ID

# whenever alice's recv_obj is called, after it serializes the object, it
# checks the keys in the queue to see if there are any outstanding commands
# -

# build deferred computations on top of the promises
alices_result_promise = alices_params * alice_data_promise

# +
bobs_result_promise = bobs_params * bobs_data_promise
# -

# average of the (not yet fulfilled) remote results
new_averaged_model = (alices_result_promise.get() + bobs_result_promise.get()) / 2

# +
# ASYNC using sockets

# input_data.send(bob)
# input_data.send(alice)

# fulfilling the promises supplies the real data and triggers the queued work
alice_data_promise.fulfill(input_data)
bobs_data_promise.fulfill(input_data)
# -

print(new_averaged_model.get())


# # Scratch


class Promise():
    """Minimal local sketch of a promise object (stub methods only)."""

    def __init__(self, id):
        # operations queued against this promise, to replay on fulfillment
        self.operations = list()
        # id of the tensor whose arrival triggers this promise
        self.trigger_id = id

    def fulfill(self, tensor):
        ""

    def __add__(self, other_promise):
        # stub: intentionally returns None for now
        return


x = Promise(10)
y = x + x
y
examples/experimental/PromiseTensor Mockup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # A note on dust torus thermal emission # # `agnpy` is meant for calculations of non-thermal processes occurring in the jets of active galaxies. The dust torus (DT) emission is considered as a target radiation field for inverse Compton scattering or pair production. # # The contribution of the thermal emission of the DT to the overall MWL SED is usually considered in Flat Spectrum Radio Quasars (FSRQs), where still, this component is typically dominated by synchrotron radiation (see Fig. 15 in [Aleksic et al. 2016](https://ui.adsabs.harvard.edu/abs/2014A%26A...569A..46A/abstract)). The function computing the DT thermal SED in `agnpy` is mostly meant to check that this emission does not overtake the synchrotron one, and **not for precise modelling of the DT emission**. # # In this notebook we will illustrate that the single-temperature black-body (BB) radiation computed by `agnpy` (in `RingDustTorus.sed_flux`) does not accurately model the thermal emission observed from a DT. At the same time, we illustrate that for the sake of the inverse Compton calculation, even the strongest approximation of the emission as monochromatic (at the BB peak) is satisfactory. 
# import numpy, astropy and matplotlib for basic functionalities import numpy as np from scipy.interpolate import interp1d import astropy.units as u import astropy.constants as const from astropy.coordinates import Distance import matplotlib.pyplot as plt import pkg_resources # + # import agnpy classes from agnpy.emission_regions import Blob from agnpy.compton import ExternalCompton from agnpy.targets import SSDisk, SphericalShellBLR, RingDustTorus from agnpy.utils.plot import load_mpl_rc, sed_x_label, sed_y_label load_mpl_rc() # - # ## Using single- and multi-temperature black body to model the DT thermal emission # # We will consider, in this example, the DT emission in NGC 1068, using measurement from [Rieke and Low, 1975](https://ui.adsabs.harvard.edu/abs/1975ApJ...199L..13R/abstract) and a dedicated model of the emission by [Pier and Krolik, 1993](https://ui.adsabs.harvard.edu/abs/1992ApJ...401...99P/abstract). # + # load the Rieke and Low 1975 spectral points, they are included in the agnpy data sed_file = pkg_resources.resource_filename( "agnpy", "data/dt_seds/NGC1068_rieke_low_1975.txt" ) sed_data = np.loadtxt(sed_file) _lambda = sed_data[:, 0] * u.um flux = sed_data[:, 1] * u.Jy flux_err = sed_data[:, 2] * u.Jy # get a nuFnu SED nu = _lambda.to("Hz", equivalencies=u.spectral()) sed = (flux * nu).to("erg cm-2 s-1") sed_err = (flux_err * nu).to("erg cm-2 s-1") # flip the arrays (data were in wavelengths) nu = np.flip(nu) sed = np.flip(sed) sed_err = np.flip(sed_err) # + # load the Pier and Krolik 1992 model model_file = pkg_resources.resource_filename( "agnpy", "data/dt_seds/pier_krolik_1992.txt" ) model_data = np.loadtxt(model_file) _lambda_model = model_data[:, 0] * u.um flux_model = model_data[:, 1] * u.Unit("Jy um-1") # get a nuFnu SED nu_model = _lambda_model.to("Hz", equivalencies=u.spectral()) sed_model = (flux_model * const.c).to("erg cm-2 s-1") # flip the arrays (data were in wavelengths) nu_model = np.flip(nu_model) sed_model = np.flip(sed_model) # 
create a function interpolating the model points pier_krolik_sed_flux = interp1d(nu_model, sed_model) # - # Now that we have the measured flux and an accurate model, let us try to reproduce the DT emission with a single- and multi-temperature black body, using `agnpy`. # + # single-temperature black body # this is computed by default by agnpy's RingDustTorus.sed_flux L_disk = 0.6 * 4.7e11 * const.L_sun R_dt = 1 * u.pc d_L = 22 * u.Mpc T = 500 * u.K z = Distance(d_L).z dt_single = RingDustTorus(L_disk=L_disk, T_dt=T, xi_dt=1.0, R_dt=R_dt) # recompute the SED on the same frequency of the Pier Krolik model sed_single_t = dt_single.sed_flux(nu_model, z) # - # Given 20 wavelength values from $2$ to $30$ $\mu{\rm m}$, we will consider the same number of DT whose BB emission peaks at each $\lambda$ value. To generate the multi-temperature BB we will simply sum their emission. The total luminosity is the same of the single-temperature BB, additionally we scale each BB component following the Pier & Krolik 1993 model. 
# + def get_T_from_nu_peak(lambdas): """for each peak wavelgength get the corresponding BB peak T using Wien's displacement law: lambda_peak = b / T""" b = 2898 * u.um * u.K T = b / lambdas return T.to("K") # multi-T black body: # let us consider a range of wavelengths and extract the corresponding T for the BB to peak there number_bb = 20 lambdas = np.logspace(np.log10(3), np.log10(30), number_bb) * u.um nu_bb = lambdas.to("Hz", equivalencies=u.spectral()) T = get_T_from_nu_peak(lambdas) # + # to create a multi-T BB, we create a list of DTs with different T dts = [] seds_multi_t = [] for _T, _nu in zip(T, nu_bb): # scale the luminosity of each BB following the Pier Krolik model L_scale_factor = pier_krolik_sed_flux(_nu) / np.sum(pier_krolik_sed_flux(nu_bb)) dt = RingDustTorus(L_disk=L_scale_factor * L_disk, T_dt=_T, xi_dt=1.0, R_dt=R_dt) dts.append(dt) seds_multi_t.append(dt.sed_flux(nu_model, z)) # compute their sum sed_multi_t = np.sum(np.asarray(seds_multi_t), axis=0) # - # Let us see how they compare to each other # + fig, ax = plt.subplots() ax.loglog(nu_model, sed_model, ls="--", color="dodgerblue", label="Pier & Krolik, 1992") ax.loglog(nu_model, sed_single_t, ls="-", color="goldenrod", label="single-T BB") for i in range(len(seds_multi_t)): ax.loglog(nu_model, seds_multi_t[i], ls="-", lw=1.2, color="gray") ax.loglog(nu_model, sed_multi_t, ls="-", color="crimson", label="multi-T BB") ax.errorbar( nu.value, sed.value, yerr=sed_err.value, ls="", marker="o", color="k", label="Rieke & Low, 1975", ) ax.legend(fontsize=10) ax.set_xlabel(sed_x_label) ax.set_ylabel(sed_y_label) ax.set_ylim([1e-12, 1e-6]) plt.show() # - # It is clear that the single-temperature black body does not accurately reproduce the broad $(100-1\,{\rm \mu m})$ band observed flux. It does not span the entire range of data and it peaks in the wrong energy range. A multi-temperature black body is clearly better suited to reproduces the observed DT SED. 
# ## Impact on external Compton scattering # # Let us consider now the impact of using a single monochromatic approximation for the DT emission in the EC calculation by exploring the difference when using a multi-temperature (always monochormatic) DT as target. To realise the latter we just re-use the previously created list of DT peaking at different temperatures and compute the EC scattering on their photon fields. # + # arbitrary emission region norm = 1.5e5 * u.Unit("cm-3") parameters1 = { "p1": 2.0, "p2": 3.9, "gamma_b": 300.0, "gamma_min": 2.5, "gamma_max": 3.0e4, } spectrum_dict = {"type": "BrokenPowerLaw", "parameters": parameters1} R_b = 1.0e16 * u.cm B = 1.0 * u.G delta_D = 20 Gamma = 17 blob = Blob(R_b, z, delta_D, Gamma, B, norm, spectrum_dict) blob.set_gamma_size(500) # let us consider the emission region at a distance smaller than the DT radius r = 0.3 * u.pc ec = ExternalCompton(blob, dt_single, r) # compute the SED from EC nu_ec = np.logspace(15, 26, 100) * u.Hz sed_ec_single_t = ec.sed_flux(nu_ec) # + # re-calculate the SED considering each of the previously generated DT () seds_ec_multi_t = [] for dt in dts: ec = ExternalCompton(blob, dt, r) seds_ec_multi_t.append(ec.sed_flux(nu_ec)) ec_dt_seds_sum = np.sum(np.asarray(seds_ec_multi_t), axis=0) # + fig, ax = plt.subplots() for i in range(len(seds_ec_multi_t)): ax.loglog(nu_ec, seds_ec_multi_t[i], ls="-", lw=1.2, color="gray") ax.loglog(nu_ec, sed_ec_single_t, ls="-", color="crimson", label="EC on single-T DT") ax.loglog( nu_ec, ec_dt_seds_sum, lw=2, ls="-", color="dodgerblue", label="EC on multi-T DT" ) ax.legend() ax.set_xlabel(sed_x_label) ax.set_ylabel(sed_y_label) ax.set_ylim([1e-13, 1e-6]) plt.show() # - # As we can see, beside the low-energy branch of the SED, usually dominated by other radiative processes, considering a single- or multi-temperature DT target does not significantly impact the EC computation. 
The small offset between the two curves reflects the difference in peak energy between the single delta-function approximation and the full DT model.
docs/tutorials/dt_thermal_emission.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare the running time of the original and optimized pipelines with a
# side-by-side plotly table and bar chart.

# +
import pandas as pd
import numpy as np

import plotly.offline as py
import plotly.graph_objs as go

py.init_notebook_mode(connected=True)
# -


# plot table and bar chart
def plot_table_bar(time_array, columns, index, **titles):
    """Render ``time_array`` as a plotly table (left) and bar chart (right).

    Parameters
    ----------
    time_array : 2-D array-like, shape (len(index), len(columns))
        Values to display; rows are labelled by `index`, columns by `columns`.
    columns : sequence of str
        Column labels (pipeline stages).
    index : sequence of str
        Row labels; only the first two rows are also drawn as bars.
    **titles : dict
        Must contain 'fig_title', 'table_title' and 'bar_title'.
    """
    fig_title = titles['fig_title']
    table_title = titles['table_title']
    bar_title = titles['bar_title']

    # change numpy array to pandas dataFrame
    df = pd.DataFrame(time_array, columns=columns, index=index)

    # table trace
    trace_table = go.Table(
        domain=dict(x=[0.0, 0.5], y=[0, 1.0]),
        columnwidth=[1.4] + [1] * len(columns),
        header=dict(
            values=[''] + ['<b>' + col for col in columns],
            fill=dict(color='rgb(54,56,128)'),
            align=['center'],
            font=dict(color='white', size=13),
            height=35,
        ),
        cells=dict(
            # FIX(review): the row labels previously used the notebook-global
            # `algorithms` instead of the `index` parameter, silently coupling
            # this function to a variable defined elsewhere (NameError if
            # called before that cell ran).
            values=[['<b>' + idx for idx in list(index)]]
                   + [df.iloc[:, j] for j in range(df.shape[1])],
            fill=dict(color='#F5F8FF'),
            align=['right'],
            font=dict(size=13),
            height=35,
        )
    )

    # bar traces: only the first two rows (the raw timings) are drawn as bars
    trace_b1 = go.Bar(
        x=columns,
        y=df.loc[index[0]],
        name=index[0],
        marker=dict(color='rgb(54,141,202)'),
        width=0.35,
    )
    trace_b2 = go.Bar(
        x=columns,
        y=df.loc[index[1]],
        name=index[1],
        marker=dict(color='rgb(104,85,201)'),
        width=0.35,
    )
    trace_bar = [trace_b1, trace_b2]

    # layout: shared axis/annotation defaults; table occupies the left half,
    # the bar chart the right
    axis = dict(showline=False, zeroline=True, mirror=False, ticklen=4,
                tickfont=dict(size=11))
    title = dict(showarrow=False, font=dict(size=15), xref='paper',
                 yref='paper', y=1.01, xanchor='left', yanchor='bottom')
    layout = dict(
        width=950, height=400, autosize=False, title='<b>' + fig_title,
        margin=dict(t=100, l=0, r=0, b=100),
        xaxis=dict(axis, **dict(domain=[0.62, 0.96])),
        yaxis=dict(axis, **dict(domain=[0.3, 1], title='Time(s)',
                                titlefont=dict(size=12))),
        annotations=[dict(title, **dict(text='<b>' + table_title, x=0.2)),
                     dict(title, **dict(text='<b>' + bar_title, x=0.73))]
    )

    # plot table and figure
    fig = dict(data=[trace_table] + trace_bar, layout=layout)
    py.iplot(fig)


def get_time_array(*arrs):
    """Round the given timings to 2 decimals and append their total."""
    time_array = np.array(arrs)
    time_array = np.around(np.append(time_array, time_array.sum()), decimals=2)
    return time_array


# measured stage timings in seconds: original pipeline ...
elapsed_pred0 = 24.8564356
elapsed_inv0 = 37.55436456
elapsed_deconv0 = 136.26543645
# ... and optimized pipeline
elapsed_pred1 = 1.6654363
elapsed_inv1 = 2.1656534
elapsed_deconv1 = 36.70543

# +
origin = get_time_array([elapsed_pred0, elapsed_inv0, elapsed_deconv0])
optimized = get_time_array([elapsed_pred1, elapsed_inv1, elapsed_deconv1])
speedup = ['x{:.1f}'.format(s) for s in origin / optimized]
# NOTE: mixing floats and strings promotes the whole array to strings, which
# is acceptable here since the values are only displayed.
summary = np.array([origin, optimized, speedup])

columns = ['Predict', 'Invert', 'Deconv', 'Total']
algorithms = ['Origin', 'Optimized', 'Speedup']
fig_title = 'Single Image Processing Pipeline'
table_title = 'Running Time(s)'
bar_title = 'Compare'

plot_table_bar(summary, columns, algorithms,
               fig_title=fig_title, table_title=table_title,
               bar_title=bar_title)
# -
arl-python/examples/arl/.ipynb_checkpoints/Untitled4-Copy1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # # Train in a remote VM (MLC managed DSVM) # * Create Workspace # * Create Experiment # * Upload data to a blob in workspace # * Configure ACI run config # * Submit the experiment in ACI # * Register the retrained model # # Prerequisites # Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't. # # ## Install Azure ML SDK # # * !pip install azureml-core # * !pip install azureml-contrib-iot # * !pip install azure-mgmt-containerregistry # # ## Check the conda environment # Make sure you have started the notebook from the correct conda environment import os print(os.__file__) # + # Check core SDK version number import azureml.core as azcore print("SDK version:", azcore.VERSION) # - # ## Initialize Workspace # # Initialize a workspace object from persisted configuration. # + from azureml.core import Workspace ws = Workspace.from_config('./aml_config/config.json') print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # - # ## Create Experiment # # **Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments. # + experiment_name = 'cats_dogs' from azureml.core import Experiment exp = Experiment(workspace = ws, name = experiment_name) # - # ## Upload data files into datastore # Register your existing azure storage as a new datastore with the workspace. The datastore should be backed by the Azure blob storage account. We can use it to transfer data from local to the cloud, and access it from the compute target. 
from azureml.core.datastore import Datastore ds = Datastore.register_azure_blob_container(workspace=ws, datastore_name='mycatdog', container_name='cat-dog', account_name='mytraindata', account_key='<KEY>', create_if_not_exists=False) data_path = "training_images" # This is the path to the folder in the blob container. Set this to None to get all the contents. print(ds.name, ds.datastore_type, ds.account_name, ds.container_name) # ## Configure for using ACI # Linux-based ACI is available in `West US`, `East US`, `West Europe`, `North Europe`, `West US 2`, `Southeast Asia`, `Australia East`, `East US 2`, and `Central US` regions. See details [here](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-quotas#region-availability). # Create a `DataReferenceConfiguration` object to inform the system what data folder to download to the copmute target. from azureml.core.runconfig import DataReferenceConfiguration dr = DataReferenceConfiguration(datastore_name=ds.name, path_on_datastore=data_path, mode='download', # download files from datastore to compute target overwrite=True) # Set the system to build a conda environment based on the run configuration. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs. # + from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # choose a name for your cluster cluster_name = "cpucluster3" try: compute_target = ComputeTarget(workspace=ws, name=cluster_name) print('Found existing compute target.') except ComputeTargetException: print('Creating a new compute target...') compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_D3', max_nodes=2) # create the cluster compute_target = ComputeTarget.create(ws, cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) # Use the 'status' property to get a detailed status for the current AmlCompute. 
print(compute_target.status.serialize()) # + from azureml.core.runconfig import RunConfiguration, DEFAULT_CPU_IMAGE from azureml.core.conda_dependencies import CondaDependencies # create a new runconfig object run_config = RunConfiguration(framework = "python") # Set compute target run_config.target = compute_target.name # set the data reference of the run configuration run_config.data_references = {ds.name: dr} # enable Docker run_config.environment.docker.enabled = True # set Docker base image to the default CPU-based image run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE # use conda_dependencies.yml to create a conda environment in the Docker image for execution run_config.environment.python.user_managed_dependencies = False # auto-prepare the Docker image when used for execution (if it is not already prepared) run_config.auto_prepare_environment = True # specify CondaDependencies obj run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['tensorflow==1.8.0']) # - # ### Submit the Experiment # Submit script to run in the Docker image in the remote VM. If you run this for the first time, the system will download the base image, layer in packages specified in the `conda_dependencies.yml` file on top of the base image, create a container and then execute the script in the container. 
# + from azureml.core import Run from azureml.core import ScriptRunConfig src = ScriptRunConfig(source_directory = './scripts', script = 'retrain.py', run_config = run_config, # pass the datastore reference as a parameter to the training script arguments=['--image_dir', str(ds.as_download()), '--architecture', 'mobilenet_1.0_224', '--output_graph', 'outputs/retrained_graph.pb', '--output_labels', 'outputs/output_labels.txt', '--model_download_url', 'https://raw.githubusercontent.com/rakelkar/models/master/model_output/', '--model_file_name', 'imagenet_2_frozen.pb' ]) run = exp.submit(config=src) # - # ### View run history details run run.wait_for_completion(show_output=True) # ### Register the Model # + from azureml.core.model import Model model = run.register_model(model_name = experiment_name, model_path = 'outputs/') print(model.name, model.url, model.version, model.id, model.created_time) # - # ## Convert Model # + from azureml.contrib.iot.model_converters import SnpeConverter # submit a compile request compile_request = SnpeConverter.convert_tf_model( ws, source_model=model, input_node="input", input_dims="1,224,224,3", outputs_nodes = ["final_result"], allow_unconsumed_nodes = True) print(compile_request._operation_id) # - # wait for the request to complete compile_request.wait_for_completion(show_output=True) # get the compiled model compiled_model = compile_request.result print(compiled_model.name, compiled_model.url, compiled_model.version, compiled_model.id, compiled_model.created_time) compiled_model.download(target_dir="./converted/", exist_ok=True) # ### Create Docker Image # ### Show the sample application file with open('./main.py', 'r') as f: print(f.read()) # + from azureml.core.image import Image from azureml.contrib.iot import IotContainerImage image_config = IotContainerImage.image_configuration( architecture="arm32v7", execution_script="main.py", dependencies=["cameraapi.py","iot.py","ipcprovider.py","utility.py"], docker_file="Dockerfile", 
tags = ["mobilenet"], description = "MobileNet based demo module") image = Image.create(name = "peabodymobilenet", # this is the model object models = [compiled_model], image_config = image_config, workspace = ws) # - image.wait_for_creation(show_output = True) # ### Enter your container registry credentials # #### List the image to get URI container_reg = ws.get_details()["containerRegistry"] reg_name=container_reg.split("/")[-1] resource_group_name = ws.resource_group container_url = "\"" + image.image_location + "\"," subscription_id = ws.subscription_id print('{}'.format(image.image_location)) print('{}'.format(reg_name)) print('{}'.format(subscription_id)) from azure.mgmt.containerregistry import ContainerRegistryManagementClient from azure.mgmt import containerregistry client = ContainerRegistryManagementClient(ws._auth,subscription_id) result= client.registries.list_credentials(resource_group_name, reg_name, custom_headers=None, raw=False) username = result.username password = result.passwords[0].value # ### Build your Deployment.json file # + # %%writefile ./deploymentpb.json { "modulesContent": { "$edgeAgent": { "properties.desired": { "schemaVersion": "1.0", "runtime": { "type": "docker", "settings": { "minDockerVersion": "v1.25", "loggingOptions": "", "registryCredentials": { # - #Automatically adding your acr details acr_details = "\"" + reg_name +"\": {\n\t\t\t\"username\": \""+ username + "\",\n\t\t\t" + "\"password\":\"" + password + "\",\n\t\t\t" + "\"address\":\"" + reg_name + ".azurecr.io\"" + ",\n\t\t}" print('{}'.format(acr_details)) # %store acr_details >> deploymentpb.json # %%writefile -a ./deploymentpb.json } } }, "systemModules": { "edgeAgent": { "type": "docker", "settings": { "image": "mcr.microsoft.com/azureiotedge-agent:1.0", "createOptions": "{}", "env": { "UpstreamProtocol": { "value": "MQTT" } } } }, "edgeHub": { "type": "docker", "status": "running", "restartPolicy": "always", "settings": { "image": 
"mcr.microsoft.com/azureiotedge-hub:1.0", "createOptions": "{\"User\":\"root\",\"HostConfig\":{\"PortBindings\":{\"5671/tcp\":[{\"HostPort\":\"5671\"}], \"8883/tcp\":[{\"HostPort\":\"8883\"}],\"443/tcp\":[{\"HostPort\":\"443\"}]}}}", "env": { "UpstreamProtocol": { "value": "MQTT " } } } } }, "modules": { "VisionSampleModule": { "version": "1.0", "type": "docker", "status": "running", "restartPolicy": "always", "settings": { "image": #adding your container URL # %store container_url >> deploymentpb.json # %%writefile -a ./deploymentpb.json "createOptions": "{\"HostConfig\":{\"Binds\":[\"/data/misc/camera:/app/vam_model_folder\"],\"NetworkMode\":\"host\"},\"NetworkingConfig\":{\"EndpointsConfig\":{\"host\":{}}}}" } } } } }, "$edgeHub": { "properties.desired": { "schemaVersion": "1.0", "routes": { "route": "FROM /messages/* INTO $upstream" }, "storeAndForwardConfiguration": { "timeToLiveSecs": 7200 } } } } } # ## Deploy image as an IoT module # ### Set subscription to the same as your workspace # %%writefile ./setsub az account set --subscription iot_sub=ws.subscription_id # %store iot_sub >> setsub # !sh setsub print ('{}'.format(iot_sub)) # ### Provision Azure IoT Hub #RG and location to create hub iot_rg="vaidk_"+resource_group_name iot_location=ws.get_details()["location"] #temp to delete iot_location="eastus2" iot_hub_name="iothub-"+ ws.get_details()["name"] iot_device_id="vadik_"+ ws.get_details()["name"] iot_deployment_id="dpl"+ "cstmvaidk" print('{}'.format(iot_hub_name)) # %%writefile ./create #Command to create hub and device # Adding Intialization steps regcommand="\n echo Installing Extension ... \naz extension add --name azure-cli-iot-ext \n"+ "\n echo CREATING RG "+iot_rg+"... \naz group create --name "+ iot_rg +" --location "+ iot_location+ "\n" +"\n echo CREATING HUB "+iot_hub_name+"... 
\naz iot hub create --name "+ iot_hub_name + " --resource-group "+ iot_rg +" --sku S1" #print('{}'.format(regcommand)) # %store regcommand >> create # ### Create Identity for your device #Adding Device ID create_device="\n echo CREATING DEVICE ID "+iot_device_id+"... \n az iot hub device-identity create --device-id "+ iot_device_id + " --hub-name " + iot_hub_name +" --edge-enabled" #print('{}'.format(create_device)) # %store create_device >> create #Create command and vonfigure device # !sh create # ### Create Deployment # %%writefile ./deploy #Command to create hub and device #Add deployment command deploy_device="\necho DELETING "+iot_deployment_id+" ... \naz iot edge deployment delete --deployment-id \"" + iot_deployment_id +"\" --hub-name \"" + iot_hub_name +"\"\necho DEPLOYING "+iot_deployment_id+" ... \naz iot edge deployment create --deployment-id \"" + iot_deployment_id + "\" --content \"deploymentpb.json\" --hub-name \"" + iot_hub_name +"\" --target-condition \"deviceId='"+iot_device_id+"'\" --priority 1" print('{}'.format(deploy_device)) # %store deploy_device >> deploy #run deployment to stage all work for when the model is ready # !sh deploy # ### Use this conenction string on your camera to Initialize it # %%writefile ./showdetails #Command to create hub and device #Add deployment command get_string="\n echo THIS IS YOUR CONNECTION STRING ... \naz iot hub device-identity show-connection-string --device-id \"" + iot_device_id + "\" --hub-name \"" + iot_hub_name+"\"" #print('{}'.format(get_string)) # %store get_string >> showdetails # !sh showdetails
Transfer learning cats and dogs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark 2.3 (Python 3) # language: python # name: pyspark3 # --- # # Bucketing # # We already saw that using a prepartitioned DataFrame for joins and grouped aggregations can accelerate execution time, but so far the prepartitioning had to be performed every time data is loaded from disk. It would be really nice, if there was some way to store prepartitioned data, such that Spark understands which columns were used to create the partitions. # # This is where bucketing comes into play, which is a special way to store data, such that Spark actually understands the partitioning schema. # ### Adjust Spark config # Again, we need to disable automatic broadcast joins and make sure that bucketing is enabled. spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) spark.conf.set("spark.sql.sources.bucketing.enabled", True) # # 1 Load Data # # First we load the weather data, which consists of the measurement data and some station metadata. storageLocation = "s3://dimajix-training/data/weather" # ## 1.1 Load Measurements # # Measurements are stored in multiple directories (one per year). But we will limit ourselves to a single year in the analysis to improve readability of execution plans. # + from pyspark.sql.functions import * from functools import reduce # Read in all years, store them in an Python array raw_weather_per_year = [spark.read.text(storageLocation + "/" + str(i)).withColumn("year", lit(i)) for i in range(2003,2015)] # Union all years together raw_weather = reduce(lambda l,r: l.union(r), raw_weather_per_year) # - # Use a single year to keep execution plans small raw_weather = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003)) # ### Extract Measurements # # Measurements were stored in a proprietary text based format, with some values at fixed positions. 
We need to extract these values with a simple `SELECT` statement. weather = raw_weather.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban"), substring(col("value"),16,8).alias("date"), substring(col("value"),24,4).alias("time"), substring(col("value"),42,5).alias("report_type"), substring(col("value"),61,3).alias("wind_direction"), substring(col("value"),64,1).alias("wind_direction_qual"), substring(col("value"),65,1).alias("wind_observation"), (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"),70,1).alias("wind_speed_qual"), (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"),93,1).alias("air_temperature_qual") ) # ## 1.2 Load Station Metadata # # We also need to load the weather station meta data containing information about the geo location, country etc of individual weather stations. stations = spark.read \ .option("header", True) \ .csv(storageLocation + "/isd-history") # # 2 Bucketing Data # # Now we want to create a so called *bucketed table* of the weather measurements data. Bucketing is only possible within Hive, because additional meta data about the bucketing is required. That meta data is not stored on HDFS but persisted in the Hive metastore instead. # ## 2.1 Create Hive Table # # A bucketed Hive table can easily be created from within Spark by using the `bucketBy` and optionally `sortBy` method of the `DataFrameWriter` class. # + ## YOUR CODE HERE # - # ## 2.2 Inspect Table # # We can inspect the Hive table, which unverils that both bucketing columns and sorting columns are present in the Hive table. # + ## YOUR CODE HERE # - # ### `CREATE TABLE` statement # # We could also have used Hive SQL to create the table. Let's retrieve the statement via SQL `SHOW CREATE TABLE` # + ## YOUR CODE HERE # - # ### Inspect Files # Of couse there also need to be some files in HDFS now. 
These are stored in the directory `/user/hive/warehouse/weather_buckets` # + ## YOUR CODE HERE # - # # 3 Bucketing & Joins # # Now we can perform the same join again, but this time against the bucketed version of the weather table. # ## 3.1 Normal Join # # To see the effect of bucketing, we first perform a traditional join to see the execution plan as a reference. result = weather.join(stations, (weather["usaf"] == stations["usaf"]) & (weather["wban"] == stations["wban"])) result.explain() # ## 3.2 Bucketed Join # # Now we want to replace the original `weather` DataFrame by a bucketed version. This means that first we have to create a bucketed version in HDFS. This is only possible by creating a Hive table, since this is the only way to persist the bucketing information as table properties. weather_hive = ## YOUR CODE HERE result = ## YOUR CODE HERE result.explain() # #### Remarks # The execution plan now looks differently than before. # * The station meta data is still shuffled (we didn't bucketize it) # * The weather data does not require a shuffle any more, the join can be executed almost directly (A sort will still be performed, maybe a bug?) # ### Bucketing Strategie # # The following attributes have to match # * bucketing columns # * number of buckets = number of partitions # # 4 Bucketing & Aggregation # # Similar to `JOIN` operations, grouped aggregations (`GROUP BY`) also require a shuffle operation. Again this can be avoided if a Hive table is used that is already bucketed according to the grouping columns. # ## 4.1 Normal Aggregation # # First let us analyze the execution plan of a normal grouped aggregation operation without a bucketed table. 
This will result in an execution plan containing a shuffle operation result = weather.groupBy(weather["usaf"], weather["wban"]).agg( min(when(weather.air_temperature_qual == lit(1), weather.air_temperature)).alias('min_temp'), max(when(weather.air_temperature_qual == lit(1), weather.air_temperature)).alias('max_temp'), ) result.explain() # ### Remarks # As expected the execution plan has three steps related to the grouped aggregation: # 1. Partial aggregate (`HashAggregate`) # 2. Shuffle operation (`Exchange hashpartitioning`) # 3. Final aggregate (`HashAggregate`) # ## 4.2 Bucketed Aggregation # # Now let's perform the same operation, but this time using the bucketed Hive table. result = ## YOUR CODE HERE result.explain() # ### Remarks # As we hoped for, Spark will not perform a shuffle operation any more, since the data is already partitioned as needed. The execution plan now only contains two steps for implementing the grouped aggregation # 1. Partial aggregate (`HashAggregate`) # 2. Final aggregate (`HashAggregate`) # # 5 Bucketing & Filtering # # Unfortunately Spark does not use bucketing information for filtering yet. Let's prove that by a simple example. # ## 5.1 Filter without bucketing # # Let read in the raw data and add a filter operation that refers to the bucketing columns. result = ## YOUR CODE HERE result.explain() # + ## YOUR CODE HERE # - # ## 5.2 Filter with bucketing # # Now let's try the same example, but this time we use the bucketed Hive table instead of the raw data. result = ## YOUR CODE HERE result.explain() # + ## YOUR CODE HERE # - # ### Remarks # The execution plan contains *`PushedFilters`*, but the Spark web ui will reveil, that these filters are only pushed down to the parquet reader and Spark still reads all files.
spark-training/spark-python/jupyter-advanced/09 - Bucketing - Skeleton.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + tags=["skip-execution"] print('will fail') 1/0 # - print(42)
tests/resources/ignore_tag.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploration notebook: load the "images_manufacturer_train.txt" label file
# into a DataFrame and build helpers to attach image paths and preview
# samples.  NOTE(review): assumes each line is "<image_id> <label words...>"
# separated by single spaces — confirm against the dataset.

# +
import pathlib

from matplotlib.colors import ListedColormap
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from PIL import Image
import datetime
# -

DATA_BASE_DIR=pathlib.Path('../dataset/data')

# First (naive) load: split on spaces and keep only two columns.  This
# silently drops label words beyond the first one.
manufacturer_df= pd.read_csv(DATA_BASE_DIR/"images_manufacturer_train.txt",
                             sep=" ",
                             usecols=["image_id","manufacturer"],
                             names=["image_id","manufacturer"],
                             dtype={"image_id": str}, #to force the image_id to be a string,
                             )

# # Check integrity of the file

manufacturer_df['manufacturer'].value_counts(dropna=False)

manufacturer_df.isna().sum()

# Fail fast if any column contains a missing value.
for col in manufacturer_df.columns :
    assert manufacturer_df[col].isna().sum() == 0 , "Missing value unexpected"

# # Deal with N columns

# ## Make assumptions about number of spaces (here = 2)

# Naive method
# Re-load assuming labels are exactly two words, then re-join them.
manufacturer_df= pd.read_csv(DATA_BASE_DIR/"images_manufacturer_train.txt",
                             sep=" ",
                             names=["image_id","m1","m2"],
                             usecols=["image_id","m1","m2"],
                             dtype={"image_id": str} #to force the image_id to be a string
                             )
manufacturer_df["manufacturer"]= manufacturer_df["m1"]+ ' '+manufacturer_df["m2"]

manufacturer_df["manufacturer"].unique()

# Inspect the raw file to see how many fields a label can really have.
# !grep "T" ../data/dataset/data/images_manufacturer_train.txt | head -3

# !grep "T" ../data/dataset/data/images_manufacturer_train.txt | wc -l

# Robust load: read each whole line, split off the first token as the image
# id, and re-join the remaining tokens as the (possibly multi-word) label.
manufacturer_df= pd.read_csv(DATA_BASE_DIR/"images_manufacturer_train.txt",
                             names=["all"],
                             )

manufacturer_df["image_id"]=manufacturer_df["all"].apply(lambda x: x.split(' ')[0])
manufacturer_df["manufacturer"]=manufacturer_df["all"].apply(lambda x: ' '.join(x.split(' ')[1:]))

manufacturer_df["manufacturer"].unique()

manufacturer_df.head()

# Attach the on-disk path of each image (relative to the notebook).
manufacturer_df["path"]= manufacturer_df['image_id'].apply( lambda x: pathlib.Path("../dataset/data")/"images"/(x+'.jpg'))

manufacturer_df.head()

def build_image_database(path,target):
    """Build a DataFrame with an image id, a target class and an image path.

    Each line of the input file is expected to be
    "<image_id> <label words...>"; the first space-separated token becomes
    ``image_id`` and the remaining tokens are re-joined into ``target``.

    Parameters:
    - path (Path): path to the text file containing image labels
    - target (str): name of the label column to create

    Return:
    -------
    A pandas DataFrame with columns ``image_id``, ``<target>`` and ``path``.
    """
    #Load file
    _df= pd.read_csv(path,
                     names=["all"],
                     )
    #Recover data
    _df["image_id"]=_df["all"].apply(lambda x: x.split(' ')[0])
    _df[target]=_df["all"].apply(lambda x: ' '.join(x.split(' ')[1:]))
    _df[target].unique()  # NOTE(review): result unused — kept for behavioral parity
    #Create path
    _df["path"]= _df['image_id'].apply( lambda x: pathlib.Path("../dataset/data")/"images"/(x+'.jpg'))
    return _df.drop(columns=["all"])

# # Load manufacturer dataset

build_image_database(DATA_BASE_DIR/"images_manufacturer_train.txt","manufacturer").head(2)

build_image_database(DATA_BASE_DIR/"images_family_train.txt","family").head(2)

build_image_database(DATA_BASE_DIR/"images_variant_train.txt","variant").head(2)

manufacturer_df= build_image_database(DATA_BASE_DIR/"images_manufacturer_train.txt","manufacturer")
manufacturer_df.head(2)

def show_image(df,row,target):
    """Display the image at index ``row`` and print its ``target`` label.

    Args:
        df (pandas.DataFrame): dataframe of images (must have a 'path' column)
        row (int): the index of the row to display
        target (str): the column name of the associated label

    Return
    ------
    None
    """
    assert target in df.columns, f"Column {target} not found in dataframe"
    # NOTE(review): message below has a typo but is a runtime string — left as-is.
    assert 'path' in df.columns, f"Column path doens't not exit in dataframe"
    _img = plt.imread(df.loc[row,'path'])
    print(df.loc[row,target])
    plt.imshow(_img)
    return

show_image(manufacturer_df,42,'manufacturer')
show_image(build_image_database(DATA_BASE_DIR/'images_family_train.txt','family'),24,'family') manufacturer_df["image_shape"] =manufacturer_df["path"].apply(lambda p: plt.imread(p).shape) manufacturer_df["image_shape"].apply(lambda x: x[0]).value_counts() manufacturer_df["image_shape"].apply(lambda x: x[1]).value_counts() # # Constante IMAGE_WIDTH=128 IMAGE_HEIGHT=IMAGE_WIDTH IMAGE_DEPTH = 3 def load_resize_image(path,height,width): """Load an image and resize it to the target size Parameters: - path (Path): path to the file to load and resize - height (int): the height of the final resized image - width(int): the width of the resized image Return ------ numpy.array containing resized image """ return np.array(Image.open(path).resize((width,height))) manufacturer_df.head(10).apply(lambda r: load_resize_image(r['path'],IMAGE_HEIGHT,IMAGE_WIDTH),axis=1) manufacturer_df['resized_image'] = manufacturer_df.apply(lambda r: load_resize_image(r['path'],IMAGE_HEIGHT,IMAGE_WIDTH),axis=1) plt.imshow(manufacturer_df.loc[42,'resized_image']) def build_classification_model(df: pd.DataFrame,target: str, images: str): """Build a tensorflow model using information from target and images columns in dataframes Parameters ---------- - df (pandas.dataFrame): dataframe with target and images columns - target (str): column name for target variable - images (str): column name for images Returns ------ tensorflow model built & compiled """ #Compute number of classes for output layer nb_classes = df[target].nunique() # Computer images size for input layer size = df[images].iloc[0].shape # Building the model model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=size)) model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(rate=0.25)) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) 
model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(rate=0.25)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(rate=0.5)) model.add(Dense(nb_classes , activation='softmax')) #Compilation of the model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) #output layer of nb_classes return model build_classification_model(manufacturer_df,'manufacturer','resized_image') # Compute the number of classes to shape the output layer nb_classes = manufacturer_df.manufacturer.nunique() nb_classes # + #Building the model model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=(IMAGE_WIDTH,IMAGE_HEIGHT,IMAGE_DEPTH))) model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(rate=0.25)) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(rate=0.25)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(rate=0.5)) model.add(Dense(nb_classes , activation='softmax')) #Compilation of the model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) #output layer of nb_classes # - y=tf.keras.utils.to_categorical (manufacturer_df['manufacturer'].astype('category').cat.codes) # # Build train & Test set def build_x_and_y(df: pd.DataFrame, target: str, images: str, stratify: str=None): """build x tensor and y tensor for model fitting. 
parameters ---------- df(pd.DataFrame): dataframe target(str): name of target column images (str): name of images column Returns ------- x (numpy.array): tensor of x values y (numpy.array): tensor of y values """ x= np.array(df[images].to_list()) y=tf.keras.utils.to_categorical (df[target].astype('category').cat.codes) return x,y X_train, X_test,y_train,y_test = train_test_split(manufacturer_df[['resized_image',"manufacturer"]],y,test_size=0.2,stratify=manufacturer_df['manufacturer']) assert X_train.shape[0]+X_test.shape[0]== manufacturer_df.shape[0] assert y_train.shape[0]+y_test.shape[0]== y.shape[0] X_train['manufacturer'].value_counts(normalize=True) X_test['manufacturer'].value_counts(normalize=True) # # Train model # %%time epochs = 5 history = model.fit(np.array(X_train['resized_image'].to_list()),y_train,batch_size = 32,epochs = epochs , validation_data = (np.array(X_test['resized_image'].to_list()),y_test)) # + #Load train & test dataset train_df = build_image_database(DATA_BASE_DIR/'images_manufacturer_train.txt','manufacturer') test_df = build_image_database(DATA_BASE_DIR/'images_manufacturer_test.txt','manufacturer') train_df['resized_image'] = train_df.apply(lambda r: load_resize_image(r['path'],IMAGE_HEIGHT,IMAGE_WIDTH),axis=1) test_df['resized_image'] = test_df.apply(lambda r: load_resize_image(r['path'],IMAGE_HEIGHT,IMAGE_WIDTH),axis=1) #Build tensors for training & testing X_train,y_train = build_x_and_y(train_df,'manufacturer','resized_image') X_test,y_test = build_x_and_y(test_df,'manufacturer','resized_image') model = build_classification_model(train_df,"manufacturer","resized_image") # + # Load the TensorBoard notebook extension # %load_ext tensorboard # !rm -rf ./logs log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) # - # %%time epochs = 5 history = model.fit(X_train,y_train,batch_size = 32,epochs = epochs , validation_data = 
(X_test,y_test), callbacks=[tensorboard_callback] ) # + tags=[] # %tensorboard --logdir logs/fit # - model.predict(X_test[10:11]) show_image(test_df,10,'manufacturer') np.argmax(model.predict(X_test[10:20]),axis=1) def classify_images(images,model,classes_names=None)->int: """Classify images through a tensorflow model. Parameters: ----------- images(np.array): set of images to classify model (tensorflow.keras.Model): tensorflow/keras model Returns ------- predicted classes """ results = model.predict(images) classes = np.argmax(results,axis=1) if classes_names is not None: classes = np.array(classes_names[classes]) return classes fig, ax = plt.subplots(figsize=(12,10)) sns.heatmap(pd.crosstab(np.argmax(y_test,axis=1),classify_images(X_test,model), normalize='index'), cmap='vlag', ax=ax) fig, ax = plt.subplots(figsize=(12,10)) sns.heatmap(pd.crosstab(test_df['manufacturer'],classify_images(X_test,model,test_df['manufacturer'].astype('category').cat.categories), normalize='index'), cmap='vlag', ax=ax) fig, ax = plt.subplots(figsize=(12,10)) sns.heatmap(pd.crosstab(np.argmax(y_train,axis=1),classify_images(X_train,model), normalize='index'), cmap='vlag', ax=ax) fig, ax = plt.subplots(figsize=(12,10)) sns.heatmap(pd.crosstab(train_df['manufacturer'],classify_images(X_train,model,train_df['manufacturer'].astype('category').cat.categories), normalize='index'), cmap='vlag', ax=ax) def save_model(model, basename): """Save tf/Keras model. Model file is named model + timestamp. Parameters ---------- model (tf/Keras model): model to be saved basename: location to save model file """ model.save('{}_{}.h5'.format(basename, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))) return model.save("../model/my_model.h5") reloaded_model = load_model('../model/my_model.h5') np.argmax(reloaded_model.predict(X_test[10:20]), axis=1)
notebooks/train_classification_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tensorflow # language: python # name: tensorflow # --- # # Unity ML Agents # ## Proximal Policy Optimization (PPO) # Contains an implementation of PPO as described [here](https://arxiv.org/abs/1707.06347). # + import numpy as np import os import tensorflow as tf from ppo.history import * from ppo.models import * from ppo.trainer import Trainer from unityagents import * # - # ### Hyperparameters # + ### General parameters max_steps = 1e7 # Set maximum number of steps to run environment. run_path = "jump2" # The sub-directory name for model and summary statistics load_model = True # Whether to load a saved model. train_model = True # Whether to train the model. summary_freq = 10000 # Frequency at which to save training statistics. save_freq = 10000 # Frequency at which to save model. env_name = "build2/jump.exe" # Name of the training environment file. curriculum_file = "jump2.json" ### Algorithm-specific parameters for tuning gamma = 0.99 # Reward discount rate. lambd = 0.95 # Lambda parameter for GAE. time_horizon = 2048 # How many steps to collect per agent before adding to buffer. beta = 1e-3 # Strength of entropy regularization num_epoch = 5 # Number of gradient descent steps per batch of experiences. num_layers = 1 # Number of hidden layers between state/observation encoding and value/policy layers. epsilon = 0.2 # Acceptable threshold around ratio of old and new policy probabilities. buffer_size = 2048 # How large the experience buffer should be before gradient descent. learning_rate = 3e-4 # Model learning rate. hidden_units = 32 # Number of units in hidden layer. batch_size = 512 # How many experiences per gradient descent update step. 
normalize = True ### Logging dictionary for hyperparameters hyperparameter_dict = {'max_steps':max_steps, 'run_path':run_path, 'env_name':env_name, 'curriculum_file':curriculum_file, 'gamma':gamma, 'lambd':lambd, 'time_horizon':time_horizon, 'beta':beta, 'num_epoch':num_epoch, 'epsilon':epsilon, 'buffe_size':buffer_size, 'leaning_rate':learning_rate, 'hidden_units':hidden_units, 'batch_size':batch_size} # - # ### Load the environment env = UnityEnvironment(file_name=env_name, curriculum=curriculum_file) print(str(env)) brain_name = env.external_brain_names[0] # ### Train the Agent(s) # + tf.reset_default_graph() if curriculum_file == "None": curriculum_file = None def get_progress(): if curriculum_file is not None: if env._curriculum.measure_type == "progress": return steps / max_steps elif env._curriculum.measure_type == "reward": return last_reward else: return None else: return None # Create the Tensorflow model graph ppo_model = create_agent_model(env, lr=learning_rate, h_size=hidden_units, epsilon=epsilon, beta=beta, max_step=max_steps, normalize=normalize, num_layers=num_layers) is_continuous = (env.brains[brain_name].action_space_type == "continuous") use_observations = (env.brains[brain_name].number_observations > 0) use_states = (env.brains[brain_name].state_space_size > 0) model_path = './models/{}'.format(run_path) summary_path = './summaries/{}'.format(run_path) if not os.path.exists(model_path): os.makedirs(model_path) if not os.path.exists(summary_path): os.makedirs(summary_path) init = tf.global_variables_initializer() saver = tf.train.Saver() with tf.Session() as sess: # Instantiate model parameters if load_model: print('Loading Model...') ckpt = tf.train.get_checkpoint_state(model_path) saver.restore(sess, ckpt.model_checkpoint_path) else: sess.run(init) steps, last_reward = sess.run([ppo_model.global_step, ppo_model.last_reward]) summary_writer = tf.summary.FileWriter(summary_path) info = env.reset(train_mode=train_model, 
progress=get_progress())[brain_name] trainer = Trainer(ppo_model, sess, info, is_continuous, use_observations, use_states, train_model) if train_model: trainer.write_text(summary_writer, 'Hyperparameters', hyperparameter_dict, steps) while steps <= max_steps: if env.global_done: info = env.reset(train_mode=train_model, progress=get_progress())[brain_name] # Decide and take an action new_info = trainer.take_action(info, env, brain_name, steps, normalize) info = new_info trainer.process_experiences(info, time_horizon, gamma, lambd) if len(trainer.training_buffer['actions']) > buffer_size and train_model: # Perform gradient descent with experience buffer trainer.update_model(batch_size, num_epoch) if steps % summary_freq == 0 and steps != 0 and train_model: # Write training statistics to tensorboard. trainer.write_summary(summary_writer, steps, env._curriculum.lesson_number) if steps % save_freq == 0 and steps != 0 and train_model: # Save Tensorflow model save_model(sess, model_path=model_path, steps=steps, saver=saver) steps += 1 sess.run(ppo_model.increment_step) if len(trainer.stats['cumulative_reward']) > 0: mean_reward = np.mean(trainer.stats['cumulative_reward']) sess.run(ppo_model.update_reward, feed_dict={ppo_model.new_reward: mean_reward}) last_reward = sess.run(ppo_model.last_reward) # Final save Tensorflow model if steps != 0 and train_model: save_model(sess, model_path=model_path, steps=steps, saver=saver) env.close() export_graph(model_path, env_name) # - env.close() # ### Export the trained Tensorflow graph # Once the model has been trained and saved, we can export it as a .bytes file which Unity can embed. export_graph(model_path, env_name)
python/jump2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rafaelaquinoo/Linear-Algebra-58019/blob/main/Python_Exercise_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="OhM5xkel4TWD"
# Two 2-D vectors represented as NumPy arrays.
import numpy as np

A = np.array([4,3])
B = np.array([-2,-5])
print("Vector A is", A)
print("Vector B is", B)

# + id="F9Fb5x9G4kvy"
# Stack two 3-D vectors into a 2x3 matrix and inspect its shape.
ball1 = np.array ([1,2,3])
ball2 = np.array ([0,1,-1])
pool = np.array ([ball1,ball2])
pool.shape

# + id="z0suVs2U41Na"
U = np.array([[1,2,3],[4,5,6]])
U

# + id="AEV7vbWl5B_d"
# shape: (rows, columns) of the matrix.
U = np.array([[1,2,3],[4,5,6]])
U.shape

# + id="eTqCvgsS5JEt"
# ndim: number of axes (2 for a matrix).
U = np.array([[1,2,3],[4,5,6]])
U.ndim

# + id="TDZ2m5aN5gcC"
# size: total number of elements.
U = np.array([[1,2,3],[4,5,6]])
U.size

# + id="9pOjtCDl5RUD"
U = np.array([[1,2,3],[4,5,6]])
U
U.size

# + [markdown] id="xe4tfk2Q5shr"
# #Addition of Vector

# + id="vkSSAG7651zi"
addend1 = np.array([0,0,0])
addend2 = np.array([1,1,0])
# Renamed from `sum` so the Python builtin sum() is no longer shadowed.
vector_sum = addend1 + addend2
vector_sum

# + id="lKjhuUVb6CvT"
# Same addition via the explicit NumPy function.
addend1 = np.array([0,0,0])
addend2 = np.array([1,1,0])
resultant = np.add(addend1,addend2)
resultant

# + [markdown] id="EK7cFIHe6IHO"
# #Subtraction of Vector

# + id="tBlzlDVA6Kiu"
difference = addend1-addend2
difference

# + id="TaOn5XJz6QAl"
difference2 = np.subtract(addend1,addend2)
difference2

# + [markdown] id="mDKXoOB96WfJ"
# #Scaling

# + id="lFNCUWgp6XtS"
# Scalar multiplication broadcasts over every element.
A = np.array([1,5,8,9])
S = 5*A
S

# + [markdown] id="2ouF3dAG6eM7"
# #Cross Product

# + id="mVYmN4_A6gCx"
# 2-D cross product returns the scalar z-component.
A = np.array([2,3])
B = np.array([1,7])
cross = np.cross(A,B)
print(cross)

# + id="kYcjtrzi6rys"
# 3-D cross product returns the perpendicular vector.
A = np.array([2,3,4])
B = np.array([1,7,1])
cross = np.cross(A,B)
print(cross)

# + [markdown] id="cC_6fuMg7TIb"
# #Dot Product

# + id="q2hr4enn68xk"
A = np.array ([2,3])
B = np.array ([1,7])
dot = np.dot(A,B)
print(dot)
Python_Exercise_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="N9bElv2TGB4f" executionInfo={"status": "ok", "timestamp": 1613490090637, "user_tz": -330, "elapsed": 4868, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} import torch import torch.nn as nn import numpy as np import torchvision import torchvision.transforms as transforms import matplotlib.pyplot as plt import math from torch.utils.data import Dataset, DataLoader import pandas as pd import random import matplotlib.pyplot as plt # + id="TtXG8KrDGD1F" executionInfo={"status": "ok", "timestamp": 1613490092093, "user_tz": -330, "elapsed": 6180, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} filename = '/content/drive/My Drive/annotedData.csv' data = pd.read_csv(filename) file = open(filename, 'r') raw_text = file.read() file.close() # + id="gKGexWgH2GRL" executionInfo={"status": "ok", "timestamp": 1613490092093, "user_tz": -330, "elapsed": 6039, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} def to_Int(x): s = 0 for i in x: if(i >= '0' and i <= '9'): s *= 10 s += int(i) return s # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="kMq-1VgW1td3" executionInfo={"status": "ok", "timestamp": 1613490092094, "user_tz": -330, "elapsed": 5884, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="1086272c-97a8-4010-9d3c-171c16d3f833" data.dropna(inplace = True) data = data[:int(99*len(data)/100)] data.Sent = data.Sent.apply(to_Int) data.tail() # + colab={"base_uri": "https://localhost:8080/"} id="W88v5JtC2LdD" executionInfo={"status": "ok", "timestamp": 1613490095221, "user_tz": -330, "elapsed": 8855, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} 
outputId="e27dfa84-9ec0-4332-aab7-ad4bb9812982" #Generates a list of tuples of (tweets, character wise classification whether the character is in an NER or not) inp_tweets = [] val = 0 j = 0 for val in range(1521): s1 = "" s2 = "" while(int(data['Sent'].iloc[j]) == val): s1 += str((data['Word']).iloc[j]) s1 += " " if(str(data['Tag'].iloc[j]) == 'Other'): c = '0' else: c = '1' for y in range(len(data['Word'].iloc[j])): s2 += c s2 += '0' j += 1 assert(len(s1) == len(s2)) if(s1 != ''): inp_tweets.append([s1, s2]) inp_tweets[0] # + id="gCAEnYse2ZAa" executionInfo={"status": "ok", "timestamp": 1613490095222, "user_tz": -330, "elapsed": 8705, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} #Test-Train split 80-20 EIGHTY_TWENTY = int(80/100 * len(inp_tweets)) train = inp_tweets[:EIGHTY_TWENTY] test = inp_tweets[EIGHTY_TWENTY:] # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="cuPGODGz2_KJ" executionInfo={"status": "ok", "timestamp": 1613490095222, "user_tz": -330, "elapsed": 8540, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="4b621f47-47f6-4688-d0d6-e1408c7ebb46" #Distribution of lengths of tweets in characters cnts = {} MAX_TWEET_LEN = 0 for i in range(len(inp_tweets)): if(len(inp_tweets[i][0]) not in cnts): cnts[len(inp_tweets[i][0])] = 1 else: cnts[len(inp_tweets[i][0])] += 1 MAX_TWEET_LEN = max(MAX_TWEET_LEN, len(inp_tweets[i][0])) plt.bar(cnts.keys(), cnts.values()) print(f'The longest tweet is of length {MAX_TWEET_LEN}') # + colab={"base_uri": "https://localhost:8080/"} id="NoWZ9RIo3E2C" executionInfo={"status": "ok", "timestamp": 1613490095223, "user_tz": -330, "elapsed": 8383, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="104b3e66-59dc-40cf-c256-73aac5176390" #Making a mapping of characters for one-hot encoding char_list = ['^'] + sorted(set(raw_text)) mapping = dict((c, i) for i, c in enumerate(char_list)) 
N_CHARS = len(char_list)
print(f'The number of distinct characters in the dataset are {N_CHARS}')
# -

# +
# Separate the (tweet, label) pairs into parallel input/target lists.
x_train = [pair[0] for pair in train]
y_train = [pair[1] for pair in train]
x_test = [pair[0] for pair in test]
y_test = [pair[1] for pair in test]
# -


# +
# Helpers that turn characters / strings into one-hot tensors.

def letter_to_index(letter):
    """Index of *letter* in the one-hot character vocabulary."""
    return mapping[letter]


def letter_to_tensor(letter):
    """One-hot vector of size N_CHARS for a single character."""
    one_hot = torch.zeros(N_CHARS)
    one_hot[letter_to_index(letter)] = 1
    return one_hot


def line_to_tensor(line):
    """len(line) x N_CHARS matrix of one-hot rows for a whole string."""
    rows = torch.zeros(len(line), N_CHARS)
    for pos, ch in enumerate(line):
        rows[pos][letter_to_index(ch)] = 1
    return rows


def output_to_tensor(line):
    """1-D float tensor holding the 0/1 label characters of *line*."""
    labels = torch.zeros(len(line))
    for pos, ch in enumerate(line):
        labels[pos] = int(ch)
    return labels
# -

# +
# Pad every tweet (with '^') and every label string (with '0') to the longest
# tweet length so the RNN sees fixed-size sequences.
print(f'Tweet before padding {x_train[0]}')
print(f'Answer before padding {y_train[0]}\n')
x_train = [s.ljust(MAX_TWEET_LEN, '^') for s in x_train]
y_train = [s.ljust(MAX_TWEET_LEN, '0') for s in y_train]
x_test = [s.ljust(MAX_TWEET_LEN, '^') for s in x_test]
y_test = [s.ljust(MAX_TWEET_LEN, '0') for s in y_test]
print(f'Tweet after padding {x_train[0]}')
print(f'Answer after padding {y_train[0]}')
colab={"base_uri": "https://localhost:8080/"} id="oBnaap4wE4am" executionInfo={"status": "ok", "timestamp": 1613490101365, "user_tz": -330, "elapsed": 13853, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="fd296ca6-8bd3-428a-bea2-6d3f0f721126" #Convert strings to list of one-hot encodings of characters str_x_test = x_test.copy() for i in range(len(x_train)): x_train[i] = line_to_tensor(x_train[i]) y_train[i] = output_to_tensor(y_train[i]) for i in range(len(x_test)): x_test[i] = line_to_tensor(x_test[i]) y_test[i] = output_to_tensor(y_test[i]) x_train[:2] y_train[:2] # + id="XyBIK7K3GA3T" executionInfo={"status": "ok", "timestamp": 1613490101365, "user_tz": -330, "elapsed": 13665, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} #Convert lists to tensors x_train = torch.stack(x_train) y_train = torch.stack(y_train) x_test = torch.stack(x_test) y_test = torch.stack(y_test) # + colab={"base_uri": "https://localhost:8080/"} id="1VD2LbE2_bv1" executionInfo={"status": "ok", "timestamp": 1613490101366, "user_tz": -330, "elapsed": 13117, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="01f81009-61e3-4666-f999-de6bf6dca0ec" print(x_train.shape, y_train.shape, x_test.shape, y_test.shape) # + id="r_dtsCfmGNDm" executionInfo={"status": "ok", "timestamp": 1613490101366, "user_tz": -330, "elapsed": 12723, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} #Define bi-directional RNN module class GRU(nn.Module): def __init__(self, input_size, hidden_size, num_layers): super(GRU, self).__init__() self.num_layers = num_layers self.hidden_size = hidden_size self.input_size = input_size self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True) self.fc = nn.Linear(hidden_size, 1) self.sigmoid = nn.Sigmoid() def forward(self, x): num_batches = x.shape[0] h0 = torch.zeros(self.num_layers, x.size(0), 
self.hidden_size) out, _ = self.gru(x, h0) #print(f'The shape of out tensor is: {out.shape}') out = self.fc(out) #print(f'The shape of out tensor is: {out.shape}') out = out.view(num_batches, -1) out = self.sigmoid(out) #print(f'The shape of out tensor is: {out.shape}') return out # + id="8O25nLl7Nbw0" executionInfo={"status": "ok", "timestamp": 1613490101367, "user_tz": -330, "elapsed": 12127, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} #HYPER-PARAMETERS loss_function = nn.BCELoss() batch_size = 100 learning_rate = 0.002 num_epochs = 15 # + colab={"base_uri": "https://localhost:8080/"} id="MR8xirlEPQgG" executionInfo={"status": "ok", "timestamp": 1613490101367, "user_tz": -330, "elapsed": 11956, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="56e28264-6eff-48f7-8f6e-b6664a00cec1" model = GRU(x_train.shape[2], 128, 1) # input_tensor = line_to_tensor('Cat is named Kalyan') # input_tensor = input_tensor.view(1, -1, N_CHARS) # out_tensor = torch.zeros(1, 19) # out = model(input_tensor) # print(loss_function(out, out_tensor)) # + id="2Lm3BQc2Mrdk" executionInfo={"status": "ok", "timestamp": 1613490101368, "user_tz": -330, "elapsed": 11569, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} train = [(x_train[i], y_train[i]) for i in range(len(x_train))] test = [(x_test[i], y_test[i]) for i in range(len(x_test))] # + id="1AdOmZDCNMl9" executionInfo={"status": "ok", "timestamp": 1613490101368, "user_tz": -330, "elapsed": 11183, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} train_loader = torch.utils.data.DataLoader(dataset = train, batch_size = batch_size, shuffle = True) test_loader = torch.utils.data.DataLoader(dataset = test, batch_size = batch_size, shuffle = True) # + colab={"base_uri": "https://localhost:8080/"} id="KqGFam7MGRHd" executionInfo={"status": "ok", "timestamp": 1613490187458, "user_tz": -330, 
"elapsed": 96857, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="a16191bf-b5fc-4c03-8693-1f837b3008c5" #Train loss_function = nn.BCELoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(num_epochs): loss_g = 0 for i, (inp, out) in enumerate(train_loader): output = model(inp) loss = loss_function(output, out) optimizer.zero_grad() loss.backward() optimizer.step() loss_g = loss print(f'Epoch #{epoch + 1}, Loss = {loss_g}') #if((i + 1) % 500): #print(f'Epoch #{epoch + 1}, iter #{i+1}, loss = {loss.item()}') # + id="Pxj0_6cLALw4" executionInfo={"status": "ok", "timestamp": 1613490187459, "user_tz": -330, "elapsed": 96423, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} punctuation = ['^', ' ', '.', ',', '!', '?'] def get_acc(s, orig, mod): match, mismatch = 0, 0 curr_len = 0 ner = True correct = True ner_correct, ner_wrong = 0, 0 nner_correct, nner_wrong = 0, 0 for i in range(len(s)): if(s[i] in punctuation): if(curr_len != 0): if(correct == True and ner == True): ner_correct += 1 elif(correct == True and ner == False): nner_correct += 1 elif(correct == False and ner == True): ner_wrong += 1 else: nner_wrong += 1 curr_len = 0 correct = True else: if(orig[i] != mod[0][i].item()): correct = False curr_len += 1 if(mod[0][i].item() == 0): ner = False else: ner = True return ner_correct, nner_correct, ner_wrong, nner_wrong # + id="BvxTRgvU_HTc" executionInfo={"status": "ok", "timestamp": 1613490192662, "user_tz": -330, "elapsed": 100911, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} #Accuracy of words from test-set with torch.no_grad(): ner_correct, nner_correct, ner_wrong, nner_wrong = 0, 0, 0, 0 for i in range(len(str_x_test)): tens = line_to_tensor(str_x_test[i]).view(1, -1, N_CHARS) tens = model(tens) tens = (tens >= 0.5).long() a, b, c, d = (get_acc(str_x_test[i], y_test[i], tens)) ner_correct += a 
nner_correct += b ner_wrong += c nner_wrong += d # + id="zUr3me7sGZpe" executionInfo={"status": "ok", "timestamp": 1613490192663, "user_tz": -330, "elapsed": 100523, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} total = ner_correct + nner_correct + ner_wrong + nner_wrong # + id="bq4MeDg0Gc0-" executionInfo={"status": "ok", "timestamp": 1613490488744, "user_tz": -330, "elapsed": 1006, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} overall_accuracy = 100 * (0.0 + ner_correct + nner_correct) / total nner_accuracy = 100 * (0.0 + nner_correct) / (nner_correct + nner_wrong) ner_accuracy = 100 * (0.0 + ner_correct) / (ner_correct + ner_wrong) # + colab={"base_uri": "https://localhost:8080/"} id="HNg7eTrtDJ2A" executionInfo={"status": "ok", "timestamp": 1613490489158, "user_tz": -330, "elapsed": 1232, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} outputId="02e1cf6c-de41-4850-cad8-7534c2396078" print(f'The accuracy of the model on the test set is: {overall_accuracy:.2f}%') print(f'The accuracy of the model on predicting Named Entities is: {ner_accuracy:.2f}%') print(f'The accuracy of the model on predicting Non-Named Entities is: {nner_accuracy:.2f}%') # + id="rSJk3LjbGZZO" executionInfo={"status": "aborted", "timestamp": 1613473095989, "user_tz": -330, "elapsed": 46605, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}} # + id="Mxc7idl_DQG_" executionInfo={"status": "ok", "timestamp": 1613467538615, "user_tz": -330, "elapsed": 48552, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16629777065147676607"}}
NER_Detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Library imports
import numpy as np
import pandas as pd
import datetime
# -

# Constants
DIRECTORIO_BASE = 'D:/anaconda/data/'
FECHA_MINIMA = '2014-02-01'
FECHA_MAXIMA = '2015-07-01'
NOMBRE_FICHERO_ORIGEN = 'CLEAN_House'
TAMANO_VENTANA = 60  # rolling-window size
CAMPO = 'Aggregate'
DECIMALES = 2


def obtenerDatosFiltrados(fichero, columna):
    """Read *fichero*, keep only *columna* and clip to the configured dates.

    Args:
        fichero: input file name (without the .csv extension).
        columna: column to select (e.g. total consumption).

    Returns:
        DataFrame with a single column, restricted to the date range with
        the fewest missing readings (FECHA_MINIMA, FECHA_MAXIMA).
    """
    datos_hogar = pd.read_csv(DIRECTORIO_BASE + fichero + '.csv', delimiter=',',
                              parse_dates=[0], index_col=0)
    datos_hogar = datos_hogar[[columna]]
    df_filtrado_fechas = datos_hogar[(datos_hogar.index > FECHA_MINIMA) &
                                     (datos_hogar.index < FECHA_MAXIMA)]
    return df_filtrado_fechas


def limpiarDatos(dt_filtrado, rango):
    """Select the rows of one daily hour slot and interpolate missing values.

    Args:
        dt_filtrado: full 6-hourly table (must contain a 'Hora' column).
        rango: hour slot to keep (0, 6, 12 or 18).
    """
    # FIX(review): the original filtered on the *global* dt_filtrado_6H
    # instead of the dt_filtrado parameter; it only worked because the caller
    # happened to pass that same global. Filtering on the parameter keeps
    # current behaviour and makes the function correct for any caller.
    dt_filtrado_Rango = dt_filtrado[dt_filtrado.Hora == rango]
    # Linear interpolation of remaining NA values
    dt_filtrado_Rango = dt_filtrado_Rango.interpolate(method='linear',
                                                      limit_direction='forward')
    return dt_filtrado_Rango


def dfTransforma6H(fichero, columna):
    """Resample the file's data to 6-hour means and tag each row's slot hour.

    Args:
        fichero: input file name.
        columna: column to select.
    """
    df_filtrado = obtenerDatosFiltrados(fichero, columna)
    # Group into 6-hour buckets within each day
    dt_filtrado_muestreo6H = df_filtrado.resample('6H').mean()
    # Insert the hour-of-day column (0, 6, 12 or 18)
    hora = dt_filtrado_muestreo6H.index.hour
    dt_filtrado_muestreo6H = pd.concat(
        [dt_filtrado_muestreo6H,
         pd.DataFrame(hora, index=dt_filtrado_muestreo6H.index)], axis = 1)
    dt_filtrado_muestreo6H.columns = [columna, 'Hora']
    return dt_filtrado_muestreo6H


def generarFicheroSemanal(dt_filtrado_6H, campo):
    """Build the weekly consumption table for the four daily hour slots.

    Each slot series (00/06/12/18 h) is cleaned, smoothed with a centred
    rolling mean of TAMANO_VENTANA samples and resampled to weekly means
    rounded to DECIMALES places.

    Args:
        dt_filtrado_6H: 6-hourly data with a 'Hora' column.
        campo: value column to aggregate.
    """
    # Clean each hour slot independently
    datos_filtrar_0 = limpiarDatos(dt_filtrado_6H, 0)
    datos_filtrar_6 = limpiarDatos(dt_filtrado_6H, 6)
    datos_filtrar_12 = limpiarDatos(dt_filtrado_6H, 12)
    datos_filtrar_18 = limpiarDatos(dt_filtrado_6H, 18)

    # Smooth and aggregate each slot to weekly means
    dt_filtrado_6H_Semana = datos_filtrar_0.rolling(window=TAMANO_VENTANA, center=True).mean().resample("W").mean().apply(lambda x: round(x, DECIMALES))
    w6 = datos_filtrar_6.rolling(window=TAMANO_VENTANA, center=True).mean().resample("W").mean().apply(lambda x: round(x, DECIMALES))
    w12 = datos_filtrar_12.rolling(window=TAMANO_VENTANA, center=True).mean().resample("W").mean().apply(lambda x: round(x, DECIMALES))
    w18 = datos_filtrar_18.rolling(window=TAMANO_VENTANA, center=True).mean().resample("W").mean().apply(lambda x: round(x, DECIMALES))

    # Assemble one column per slot
    dt_filtrado_6H_Semana['Rango 06-12'] = w6[campo]
    dt_filtrado_6H_Semana['Rango 12-18'] = w12[campo]
    dt_filtrado_6H_Semana['Rango 18-00'] = w18[campo]
    dt_filtrado_6H_Semana = dt_filtrado_6H_Semana.drop(columns=['Hora'])
    dt_filtrado_6H_Semana = dt_filtrado_6H_Semana.rename(columns={campo: 'Rango 00-06'})
    return dt_filtrado_6H_Semana


# Generate the filtered output files.
# TODO(review): range(1, 2) only processes household 1, yet the branches below
# reference households up to 21 (and the `!= 14` guard contradicts 14 appearing
# in the last list) -- confirm whether range(1, 22) was intended.
for num_hogar in range(1, 2):
    if num_hogar != 14:
        fichero = NOMBRE_FICHERO_ORIGEN + str(num_hogar)
        # Date-filter the selected column's data
        dt_filtrado_6H = dfTransforma6H(fichero, CAMPO)
        print(dt_filtrado_6H.head(10))
        # Build the weekly per-slot table
        dt_filtrado_6H_Semana = generarFicheroSemanal(dt_filtrado_6H, CAMPO)
        # Trim household-specific edge weeks before writing
        if num_hogar in [6]:
            dt_filtrado_6H_Semana.iloc[9:-4, :].to_csv('Hogar_' + str(num_hogar) + '_filtro_semanal_rango.csv', sep=',', encoding='utf-8')
        if num_hogar in [3, 11]:
            dt_filtrado_6H_Semana.iloc[4:-5, :].to_csv('Hogar_' + str(num_hogar) + '_filtro_semanal_rango.csv', sep=',', encoding='utf-8')
        if num_hogar in [4, 5, 12, 15, 16, 18, 21]:
            dt_filtrado_6H_Semana.iloc[5:-4, :].to_csv('Hogar_' + str(num_hogar) + '_filtro_semanal_rango.csv', sep=',', encoding='utf-8')
        if num_hogar in [10]:
            dt_filtrado_6H_Semana.iloc[6:-5, :].to_csv('Hogar_' + str(num_hogar) + '_filtro_semanal_rango.csv', sep=',', encoding='utf-8')
        if num_hogar in [1, 2, 7, 8, 9, 13, 14, 17, 19, 20]:
            dt_filtrado_6H_Semana.iloc[4:-4, :].to_csv('Hogar_' + str(num_hogar) + '_filtro_semanal_rango.csv', sep=',', encoding='utf-8')
01_Generar_datos_limpiados_semanal_rango_horario.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np

# Series basics: positional slicing and fancy indexing
ser1 = pd.Series([1,2,3,4])
print(ser1)
print(ser1[0:2])
print(ser1[[0,1,2]])
# -

# Build a Series from a dict; the keys become the index
sdata = {'python' : 1, 'pandas' : 2, 'matplotlib' : 3, 'numpy' : 4, 'scipy' : 5}
ser2 = pd.Series(sdata)
print(ser2)
print(sdata.keys())
print(sdata.items())
print(sdata.values())
'python' in sdata.keys()

# Reindexing: labels missing from sdata become NaN
ser3 = pd.Series(sdata, index = ['apple', 'python', 'numpy', 'jupyter', 'scipy'])
print(ser3)
print('index:', ser3.index)
print('value:', ser3.values)
print(ser3[0:2])
print(ser3['apple':'numpy'])  # label slicing is inclusive of the end label
print(ser3[['apple', 'numpy']])
ser3.name = 'try'
print(ser3)

# DataFrame construction and summary statistics
data = {'packages':['pandas', 'numpy', 'matplotlib', 'pandas', 'pandas'],
        'id':[1, 2, 3, 4, 5],
        'num':[1.2, 3.4, 4, 5.6, 7.0]}
frame = pd.DataFrame(data)
# FIX(review): describe is a method -- the original `frame.describe` (no
# parentheses) only showed the bound-method repr, not the summary table.
frame.describe()

# +
# Missing-data handling
data = pd.DataFrame(np.arange(16).reshape(4,4))
data[(data/2-1)%3==0] = np.nan
print(data)
# drop NA
print(data.dropna())
print(data.dropna(how = 'all'))
print(data.dropna(axis = 1))
# fill NA
print(data.fillna(0))
print(data.fillna({0:1, 2:10}))
print(data.fillna(method = 'bfill', limit = 1))
print(data.fillna(method = 'ffill', limit = 1))
# -

# Hierarchical (MultiIndex) rows and columns
frame = pd.DataFrame(np.arange(16).reshape(4,4),
                     index = [['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
                     columns = [['pandas', 'pandas', 'numpy', 'numpy'],
                                ['Series', 'DataFrame', 'array', 'array']])
print(frame)
print(frame['numpy'])
print(frame.loc[:,'numpy'].loc[:,'array'])
print(frame.unstack())  # pivot the inner row level into columns

# Dummy/indicator variables
df = pd.DataFrame({'key':['b', 'b', 'a', 'c', 'a', 'b'], 'data1':range(6)})
print(df)
dummies = pd.get_dummies(df['key'], prefix = 'key')
df_with_dummy = df[['data1']].join(dummies)
print(df_with_dummy)

# +
# Combining get_dummies with cut (bucketed indicators)
df = pd.DataFrame({'name':['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
                   'score':np.arange(10)})
print(df)
dummies = pd.get_dummies(pd.cut(df['score'], [0, 3, 6, 10]), prefix='interval')
df_with_dummy = df[['name']].join(dummies)
print(df_with_dummy)
# -

# +
# Storing a list inside a single DataFrame cell
# NOTE(review): the key1 literal was redacted to '<KEY>' in this copy; it is
# reconstructed as the 5-element list used by the identical cells below.
df = pd.DataFrame({'key1':['a','a','b','b','a'],
                   'key2':['one','two','one','two','one'],
                   'data1':np.random.randn(5),
                   'data2':np.random.randn(5)})
df.at[0, 'key1'] = ['a', 'b']
print(df)
# -

# +
# Grouping data with groupby
df = pd.DataFrame({'key1':['a','a','b','b','a'],
                   'key2':['one','two','one','two','one'],
                   'data1':np.random.randn(5),
                   'data2':np.random.randn(5)})
df['data1'].groupby(df['key1']).mean()
df['data1'].groupby([df['key1'], df['key2']]).min()
# syntactic sugar
df.groupby('key1')[['data1']].max()
df.groupby(['key1', 'key2'])[['data1']].count()
# common aggregation functions:
#   min, max, first, last
#   sum, mean, count, prop
#   size, median, std, var
# -

# +
# Iterating over groups
df = pd.DataFrame({'key1':['a','a','b','b','a'],
                   'key2':['one','two','one','two','one'],
                   'data1':np.random.randn(5),
                   'data2':np.random.randn(5)})
for name,group in df.groupby('key1'):
    print(name)
    print(group)
for (key1, key2),group in df.groupby(['key1', 'key2']):
    print(key1, key2)
    print(group)
# -

# +
# Grouping columns via a mapping / function / mixed keys
people = pd.DataFrame(np.random.randn(5,5),
                      columns = ['a','b','c','d','e'],
                      index = ['Joe','Steve','Wes','Jim','Travis'])
# FIX(review): the index holds strings, so `people.loc[2:3, ...]` raises a
# TypeError on modern pandas; select the intended rows positionally instead
# (same rows the deprecated `.ix[2:3]` addressed).
people.loc[people.index[2:3], ['b','c']] = np.nan
#print(people)
mapping = {'a':'red','b':'red','c':'blue', 'd':'blue','e':'red','f':'orange'}
people.groupby(mapping, axis=1).sum()
people.groupby(len).sum()
key_list = ['one','one','one','two','two']
people.groupby([len, key_list]).sum()
# -

# +
# Aggregating over one level of a hierarchical column index
# method 1:
columns = pd.MultiIndex.from_arrays([['US', 'US', 'US', 'CN', 'CN'],[1,3,5,1,3]],names=['cty','tenor'])
hier_df = pd.DataFrame(np.random.randn(4,5), columns = columns)
# method 2:
hier_df = pd.DataFrame(np.random.randn(4,5),
                       columns = [['US','US','US','CN','CN'],[1,3,5,1,3]])
hier_df.columns.names = ['city', 'tenor']
#hier_df.index.names = ['name1', 'name2']
print(hier_df)
hier_df.groupby(level='city',axis=1).count()
# -

# +
# agg with a custom function
def peak_to_peak(arr):
    """Range (max - min) of a group."""
    result = arr.max() - arr.min()
    return result

tips = pd.read_csv('tips.csv')
# add a "tip as a fraction of the bill" column
tips['tip_pct'] = tips['tip']/tips['total_bill']
tips[:6]
# apply the custom function per group
tips.groupby(['sex','smoker'])[['tip_pct']].agg(peak_to_peak)
# several functions over several columns
tips.groupby(['sex','smoker'])[['tip','tip_pct']].agg(['mean','max',peak_to_peak])
# -

# +
# Group-level operations and transforms
df = pd.DataFrame({'key1':['a','a','b','b','a'],
                   'key2':['one','two','one','two','one'],
                   'data1':np.random.randn(5),
                   'data2':np.random.randn(5)})
k1_means = df.groupby('key1').mean().add_prefix('mean_')
pd.merge(df, k1_means, left_on='key1', right_index = True)
df.groupby('key1').transform(np.mean)  # like group_by %>% mutate %>% ungroup
# -

# +
def top(df,n=5,column='tip_pct'):
    """Top-n rows of *df* ordered by *column* (ascending tail)."""
    return df.sort_values(by=column)[-n:]

top(tips,n=6)
# apply the function per group
tips.groupby('smoker').apply(top)
# apply with extra arguments
tips.groupby(['sex','smoker']).apply(top, n = 2, column = 'total_bill')
# -

# +
# Bucketed statistics: cut + groupby.apply
frame=pd.DataFrame({'data1':np.random.randn(1000),
                    'data2':np.random.randn(1000)})
factor = pd.cut(frame.data1, 4)
factor[:10]

def get_status(group):
    """Per-bucket summary dict."""
    return {
        'min':group.min(),
        'max':group.max(),
        'count':group.count(),
        'mean':group.mean()
    }

frame.data2.groupby(factor).apply(get_status).unstack()
# -

# +
# Pivot tables
tips.pivot_table(index = ['sex','smoker'])
# -

# +
tips.pivot_table(['tip_pct','size'], index = ['sex','day'], columns='smoker')
# with subtotal margins; the default aggfunc is mean
tips.pivot_table(['tip_pct','size'], index = ['sex','day'], columns='smoker', margins=True)
# -

# FIX(review): selecting several columns from a GroupBy requires a list --
# the bare tuple form ['tip_pct','size'] was deprecated and removed in
# pandas 1.0.
tips.groupby(['sex','day'])[['tip_pct','size']].mean()
tips.pivot_table('tip_pct', index = ['sex', 'smoker'], columns='day', aggfunc=len, margins=True, fill_value=0)

# +
# Cross-tabulations
pd.crosstab(tips.sex, tips.smoker, margins=True)
pd.crosstab([tips.sex, tips.smoker], tips.day, margins=True)
# -

# +
# Excel round-trip
df.to_excel('try.xlsx', sheet_name = 'Sheet1')
frame = pd.read_excel('try.xlsx', 'Sheet1', index_col=None, na_values=['NA'])
frame
pandas_learn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Plotly and Cufflinks
# Interactive visualisation: cufflinks binds plotly charts directly onto
# pandas DataFrames (the .iplot accessor).

import pandas as pd
import numpy as np
from plotly import __version__
# %matplotlib inline
print(__version__)

import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot

# we gonna host our plot offline
init_notebook_mode(connected=True)
cf.go_offline()

# sample data
df1 = pd.DataFrame(np.random.randn(100,4), columns = 'A B C D'.split())
df1.head()

df2 = pd.DataFrame({'Category':['A', 'B', 'C'], 'Value': [32, 43, 50]})
df2

# line plot (default)
df1.iplot()

# scatter plot
df1.iplot(kind='scatter', x='A', y='B', mode='markers', size=10)

# bar plot
df2.iplot(kind='bar', x='Category', y='Value')
#df1.count().iplot(kind='bar') you can call any aggregate function
df1.sum().iplot(kind='bar')

# box plot
df1.iplot(kind='box')

# 3D surface plot
df3 = pd.DataFrame({'x':[1, 2, 3, 4, 5], 'y': [10, 20, 30, 20, 10], 'z': [5, 4, 3, 2, 1]})
df3
df3.iplot(kind='surface', colorscale='rdylbu')

# histogramme
df1['A'].iplot(kind='hist', bins=50)

# spread plot
df1[['A', 'B']].iplot(kind='spread')

# bubble plot
df1.iplot(kind='bubble', x='A', y='B', size='C')

# scatter matrix plot
# FIX(review): the original called df.scatter_matrix(), but no `df` exists in
# this notebook (NameError); cufflinks attaches scatter_matrix to DataFrames,
# so call it on df1.
df1.scatter_matrix()

# area plot
df1.iplot(kind='area', fill=True,opacity=1)
07.Python for Data Visualization - Plotly and Cufflinks/Data Visualization - Plotly and Cufflinks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Variable Scope
# ### The scope of a variable is the part of a program that can 'see' that variable.

# A module-level assignment creates a *global* variable, visible to every
# function defined in this module.
pressure = 103.9

# ## Exercises

# %load ../exercises/scope-local-global.py

# ## Key Points
#
# - The scope of a variable is the part of a program that can 'see' that variable.
files/notebooks/15-Variable_Scope.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Fine-tuning Sentence Pair Classification with BERT # # Pre-trained language # representations have been shown to improve many downstream NLP tasks such as # question answering, and natural language inference. To apply pre-trained # representations to these tasks, there are two strategies: # # 1. **feature-based** approach, which uses the pre-trained representations as additional # features to the downstream task. # 2. **fine-tuning** based approach, which trains the downstream tasks by # fine-tuning pre-trained parameters. # # While feature-based # approaches such as ELMo [3] (introduced in the previous tutorial) are effective # in improving many downstream tasks, they require task-specific architectures. # Devlin, Jacob, et al proposed BERT [1] (Bidirectional Encoder Representations # from Transformers), which **fine-tunes** deep bidirectional representations on a # wide range of tasks with minimal task-specific parameters, and obtained state- # of-the-art results. # # In this tutorial, we will focus on fine-tuning with the # pre-trained BERT model to classify semantically equivalent sentence pairs. # Specifically, we will: # # 1. load the state-of-the-art pre-trained BERT model. # 2. # process and transform sentence pair data to be used for fine-tuning. # 3. fine- # tune BERT model for sentence classification. # # ## Preparation # # To run this tutorial locally, please [install gluonnlp](http://gluon-nlp.mxnet.io/#installation) # and click the download button at the top of the tutorial page to get all related code. # # Then we start with some usual preparation such as importing libraries # and setting the environment. 
# # ### Load MXNet and GluonNLP # + import warnings warnings.filterwarnings('ignore') import random import numpy as np import mxnet as mx from mxnet import gluon import gluonnlp as nlp # - # ### Set Environment np.random.seed(100) random.seed(100) mx.random.seed(10000) ctx = mx.gpu(0) # ## Use the Pre-trained BERT model # # The list of pre-trained BERT model available in GluonNLP can be found # [here](../../model_zoo/bert/index.rst). # # In this tutorial, we will load the BERT # BASE model trained on uncased book corpus and English Wikipedia dataset in # GluonNLP model zoo. # # ### Get BERT # # Let's first take a look at the BERT model # architecture for sentence pair classification below: # # <div style="width: # 500px;">![bert-sentence-pair](bert-sentence-pair.png)</div> # # where the model takes a pair of # sequences and **pools** the representation of the first token in the sequence. # Note that the original BERT model was trained for masked language model and next # sentence prediction tasks, which includes layers for language model decoding and # classification and are not useful for sentence pair classification. # # We load the # pre-trained BERT using the model API in GluonNLP, which returns the vocabulary # along with the model. To include the pooler layer of the pre-trained model, # `use_pooler` is set to `True`. # + from bert import * bert_base, vocabulary = nlp.model.get_model('bert_12_768_12', dataset_name='book_corpus_wiki_en_uncased', pretrained=True, ctx=ctx, use_pooler=True, use_decoder=False, use_classifier=False) print(bert_base) # - # ### Model Definition for Sentence Pair Classification # # Now that we have loaded # the BERT model, we only need to attach an additional layer for classification. # The `BERTClassifier` class uses a BERT base model to encode sentence # representation, followed by a `nn.Dense` layer for classification. 
# +
model = bert.BERTClassifier(bert_base, num_classes=2, dropout=0.1)
# only need to initialize the classifier layer.
model.classifier.initialize(init=mx.init.Normal(0.02), ctx=ctx)
model.hybridize(static_alloc=True)

# softmax cross entropy loss for classification
loss_function = gluon.loss.SoftmaxCELoss()
loss_function.hybridize(static_alloc=True)

metric = mx.metric.Accuracy()
# -

# ## Data Preprocessing for BERT
#
# ### Dataset
#
# For demonstration we use the dev set of the Microsoft Research Paraphrase
# Corpus [2]. Each example contains a pair of sentences and a label
# indicating whether the two sentences are semantically equivalent.
#
# FIX(review): the original text said "the 3rd example" while sample_id is 0;
# the comment now matches the code.
# Let's take a look at the first example in the dataset:

data_train = dataset.MRPCDataset('dev', root='.')
sample_id = 0
# sentence a
print(data_train[sample_id][0])
# sentence b
print(data_train[sample_id][1])
# 1 means equivalent, 0 means not equivalent
print(data_train[sample_id][2])

# To use the pre-trained BERT model we must preprocess the data the same way
# it was trained (see bert-embed.png for the input representation).
# `BERTDatasetTransform` performs the following transformations:
# - tokenize the input sequences
# - insert [CLS], [SEP] as necessary
# - generate segment ids to indicate whether a token belongs to the first
#   sequence or the second sequence
# - generate valid length

# +
# use the vocabulary from pre-trained model for tokenization
bert_tokenizer = nlp.data.BERTTokenizer(vocabulary, lower=True)

# maximum sequence length
max_len = 128
all_labels = ["0", "1"]
transform = dataset.BERTDatasetTransform(bert_tokenizer, max_len,
                                         labels=all_labels,
                                         label_dtype='int32')
data_train = data_train.transform(transform)

print('token ids = \n%s'%data_train[sample_id][0])
print('valid length = \n%s'%data_train[sample_id][1])
print('segment ids = \n%s'%data_train[sample_id][2])
print('label = \n%s'%data_train[sample_id][3])
# -

# ## Fine-tune BERT Model
#
# Putting everything together, we fine-tune the model for a few epochs. For
# demonstration we use a fixed learning rate and skip validation steps.

# +
batch_size = 32
lr = 5e-6

# Bucket batches by valid length so padding inside a batch is minimal.
train_sampler = nlp.data.FixedBucketSampler(lengths=[int(item[1]) for item in data_train],
                                            batch_size=batch_size,
                                            shuffle=True)
bert_dataloader = mx.gluon.data.DataLoader(data_train, batch_sampler=train_sampler)

trainer = gluon.Trainer(model.collect_params(), 'adam',
                        {'learning_rate': lr, 'epsilon': 1e-9})

# collect all differentiable parameters
# grad_req == 'null' indicates no gradients are calculated (e.g. constant
# parameters); the gradients for the remaining params are clipped below
params = [p for p in model.collect_params().values() if p.grad_req != 'null']
grad_clip = 1

log_interval = 4
num_epochs = 3
for epoch_id in range(num_epochs):
    metric.reset()
    step_loss = 0
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(bert_dataloader):
        with mx.autograd.record():
            # load data to GPU
            token_ids = token_ids.as_in_context(ctx)
            valid_length = valid_length.as_in_context(ctx)
            segment_ids = segment_ids.as_in_context(ctx)
            label = label.as_in_context(ctx)

            # forward computation
            out = model(token_ids, segment_ids, valid_length.astype('float32'))
            ls = loss_function(out, label).mean()

        # backward computation
        ls.backward()

        # gradient clipping
        trainer.allreduce_grads()
        # FIX(review): use the grad_clip constant defined above; the original
        # passed the literal 1, leaving grad_clip unused (same value, so
        # behaviour is unchanged, but the knob now actually works).
        nlp.utils.clip_grad_global_norm(params, grad_clip)
        trainer.update(1)

        step_loss += ls.asscalar()
        metric.update([label], [out])
        if (batch_id + 1) % (log_interval) == 0:
            print('[Epoch {} Batch {}/{}] loss={:.4f}, lr={:.7f}, acc={:.3f}'
                  .format(epoch_id, batch_id + 1, len(bert_dataloader),
                          step_loss / log_interval,
                          trainer.learning_rate, metric.get()[1]))
            step_loss = 0
# -

# ## Conclusion
#
# We showed how to fine-tune a sentence pair classification model with
# pre-trained BERT parameters in GluonNLP: apply BERT-style data
# transformation to preprocess the data, automatically download the
# pre-trained model, and feed the transformed data into the model. The warmup
# learning-rate schedule and dev-set validation used in the original
# implementation were skipped for demonstration; see
# ../../model_zoo/bert/index.rst for the complete fine-tuning scripts.
#
# ## References
#
# [1] Devlin, Jacob, et al. "Bert: Pre-training of deep
# bidirectional transformers for language understanding." arXiv preprint
# arXiv:1810.04805 (2018).
#
# [2] Dolan, William B., and Chris Brockett.
# "Automatically constructing a corpus of sentential paraphrases."
# Proceedings of
# the Third International Workshop on Paraphrasing (IWP2005). 2005.
#
# [3] Peters,
# Matthew E., et al. "Deep contextualized word representations." arXiv preprint
# arXiv:1802.05365 (2018).
06_bert/bert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Housing Price Estimation + Keras # + import shutil import math import multiprocessing from datetime import datetime import matplotlib.pyplot as plt # %matplotlib inline import numpy as np import pandas as pd import tensorflow as tf from tensorflow.python.feature_column import feature_column from tensorflow import data print(tf.__version__) # + MODEL_NAME = 'housing-price-model-01' DATA_FILE = 'data/housingdata.csv' TRAIN_DATA_FILES_PATTERN = 'data/housing-train-01.csv' TEST_DATA_FILES_PATTERN = 'data/housing-test-01.csv' RESUME_TRAINING = False PROCESS_FEATURES = True MULTI_THREADING = True # + HEADER = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'] HEADER_DEFAULTS = [[0.0],[0.0],[0.0],['NA'],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0]] NUMERIC_FEATURE_NAMES = ['CRIM', 'ZN','INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT'] CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {'CHAS':['0', '1']} CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys()) FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES TARGET_NAME = 'MEDV' UNUSED_FEATURE_NAMES = list(set(HEADER)-set(FEATURE_NAMES) - {TARGET_NAME}) print("Header: {}".format(HEADER)) print("Numeric Features: {}".format(NUMERIC_FEATURE_NAMES)) print("Categorical Features: {}".format(CATEGORICAL_FEATURE_NAMES)) print("Target: {}".format(TARGET_NAME)) print("Unused Features: {}".format(UNUSED_FEATURE_NAMES)) # - housing_dataset = pd.read_csv('./data/housingdata.csv', header=None, names=HEADER) housing_dataset.head() # ## 准备训练数据 DATA_SIZE = housing_dataset.shape[0] print('Dataset size: {}'.format(DATA_SIZE)) # + train_data = housing_dataset.sample(frac=0.70, 
random_state=19830610) test_data = housing_dataset[~housing_dataset.index.isin(train_data.index)] TRAIN_DATA_SIZE = train_data.shape[0] TEST_DATA_SIZE = test_data.shape[0] print('Train set size: {}'.format(TRAIN_DATA_SIZE)) print('Test set size: {}'.format(TEST_DATA_SIZE)) print() # - # ## 保存训练和测试的数据 # + train_data.to_csv(path_or_buf=TRAIN_DATA_FILES_PATTERN, header=False, index=False) test_data.to_csv(path_or_buf=TEST_DATA_FILES_PATTERN, header=False, index=False) pd.read_csv(TEST_DATA_FILES_PATTERN, header=None, names=HEADER).head() # - # ## 定义数据输入函数 # # + def parse_csv_row(csv_row): columns = tf.decode_csv(records=csv_row, record_defaults=HEADER_DEFAULTS) features = dict(zip(HEADER, columns)) for columns in UNUSED_FEATURE_NAMES: features.pop(columns) target = features.pop(TARGET_NAME) return features, target def process_features(features): features['CRIM'] = tf.log(features['CRIM'] + 0.01) features['B'] = tf.clip_by_value(features['B'], clip_value_min=300, clip_value_max=500) return features # - # ### 建立数据输入函数 def csv_input_fn(files_name_pattern, mode=tf.estimator.ModeKeys.EVAL, skip_header_lines=0, num_epochs=None, batch_size=200): shuffle = True if mode == tf.estimator.ModeKeys.TRAIN else False num_threads = multiprocessing.cpu_count() if MULTI_THREADING else 1 print("") print("* data input_fn:") print("================") print("Input file(s): {}".format(files_name_pattern)) print("Batch size: {}".format(batch_size)) print("Epoch Count: {}".format(num_epochs)) print("Mode: {}".format(mode)) print("Thread Count: {}".format(num_threads)) print("Shuffle: {}".format(shuffle)) print("================") print("") file_names = tf.matching_files(files_name_pattern) dataset = data.TextLineDataset(file_names) dataset = dataset.skip(skip_header_lines) if shuffle: dataset = dataset.shuffle(buffer_size=2 * batch_size + 1) dataset = dataset.batch(batch_size) dataset = dataset.map(lambda csv_row: parse_csv_row(csv_row), num_parallel_calls=num_threads) if PROCESS_FEATURES: 
dataset = dataset.map(lambda features, target: (process_features(features), target), num_parallel_calls=num_threads) dataset = dataset.repeat(num_epochs) iterator = dataset.make_one_shot_iterator() features, target = iterator.get_next() return features, target features, target = csv_input_fn(files_name_pattern='') print('Features in CSV: {}'.format(list(features.keys()))) print('Target ni CSV: {}'.format(target)) # ### 创建一个 Feature Columns # + def get_feature_columns(hparams): numeric_columns = [ tf.feature_column.numeric_column(key) for key in NUMERIC_FEATURE_NAMES ] indicator_columns = [ tf.feature_column.indicator_column( tf.feature_column.categorical_column_with_vocabulary_list(key=key, vocabulary_list=CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY[key])) for key in CATEGORICAL_FEATURE_NAMES ] return numeric_columns + indicator_columns feature_columns = get_feature_columns(tf.contrib.training.HParams( num_buckets=5, embedding_size=3 )) print('Feature Columns: {}'.format(feature_columns)) # - # ## 用 Keras 自定义一个 Estimator def model_fn(features, labels, mode, params, config): feature_columns = get_feature_columns(params) # features,就是输入的每行数据 input_layer = feature_column.input_layer(features, feature_columns) input_layer_dimension = input_layer.shape.as_list()[1] inputs = tf.keras.Input(shape=(input_layer_dimension, )) x = tf.keras.layers.Dense(params.hidden_units[0], activation=tf.nn.relu)(inputs) for layer_size in params.hidden_units[1:]: x = tf.keras.layers.Dense(layer_size, activation=tf.nn.relu)(x) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) logits = model(input_layer) def _train_op_fn(loss): """返回优化loss的 op """ optimizer = tf.train.AdamOptimizer() train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step()) return train_op head = tf.contrib.estimator.regression_head( label_dimension=1, name='regression_head' ) return head.create_estimator_spec( features, mode, logits, labels=labels, 
train_op_fn=_train_op_fn ) # ### b. 定义新的 metrics def metric_fn(labels, predictions): metrics = {} pred_values = predictions['predictions'] metrics['mae'] = tf.metrics.mean_absolute_error(labels=labels, predictions=pred_values) metrics['rmse'] = tf.metrics.root_mean_squared_error(labels=labels, predictions=pred_values) return metrics # ### c. 定义 Estimator def create_estimator(run_config, hparams): estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params=hparams ) estimator = tf.contrib.estimator.add_metrics(estimator, metric_fn) return estimator # ## 进行实验 # + TRAIN_SIZE= TRAIN_DATA_SIZE NUM_EPOCHS = 10000 BATCH_SIZE = 177 EVAL_AFTER_SEC = 30 TOTAL_STEPS = (TRAIN_SIZE / BATCH_SIZE) * NUM_EPOCHS hparams = tf.contrib.training.HParams( num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, hidden_units=[16, 8, 4], max_steps=TOTAL_STEPS ) model_dir = './trained_models/{}'.format(MODEL_NAME) run_config = tf.estimator.RunConfig( model_dir=model_dir, log_step_count_steps=1000, tf_random_seed=19830610 ) print(hparams) print('Model Directory: {}'.format(run_config.model_dir)) print('') print('Dataset Size: {}'.format(TRAIN_SIZE)) print('Batch Size: {}'.format(BATCH_SIZE)) print('Steps per epoch: {}'.format(TRAIN_SIZE // BATCH_SIZE)) print('Total Steps: ', TOTAL_STEPS) print("That is 1 evaluation step after each",EVAL_AFTER_SEC," training seconds") # + train_spec = tf.estimator.TrainSpec( input_fn=lambda :csv_input_fn( TRAIN_DATA_FILES_PATTERN, mode=tf.estimator.ModeKeys.TRAIN, num_epochs=hparams.num_epochs, batch_size=hparams.batch_size ), max_steps=hparams.max_steps, hooks=None ) eval_spec = tf.estimator.EvalSpec( input_fn=lambda: csv_input_fn( TEST_DATA_FILES_PATTERN, mode=tf.estimator.ModeKeys.EVAL, num_epochs=1, batch_size=hparams.batch_size ), throttle_secs=EVAL_AFTER_SEC, # 多少秒后eval hooks=None ) # - # ### c. 
用 train_and_evaluate 跑实验 # + if not RESUME_TRAINING: print('Removing previous artifacts...') shutil.rmtree(model_dir, ignore_errors=True) else: print('Resuming training...') # Sets the threshold for what messages will be logged. tf.logging.set_verbosity(tf.logging.INFO) time_start = datetime.utcnow() print("Experiment started at {}".format(time_start.strftime("%H:%M:%S"))) print(".......................................") estimator = create_estimator(run_config, hparams) tf.estimator.train_and_evaluate( estimator=estimator, train_spec=train_spec, eval_spec=eval_spec ) time_end = datetime.utcnow() print(".......................................") print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S"))) print("") time_elapsed = time_end - time_start print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds())) # - # ## 评估模型 # + train_input_fn = lambda: csv_input_fn(files_name_pattern= TRAIN_DATA_FILES_PATTERN, mode= tf.estimator.ModeKeys.EVAL, batch_size= TRAIN_DATA_SIZE) test_input_fn = lambda: csv_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN, mode= tf.estimator.ModeKeys.EVAL, batch_size= TEST_DATA_SIZE) estimator = create_estimator(run_config, hparams) train_results = estimator.evaluate(input_fn=train_input_fn, steps=1) train_rmse = round(math.sqrt(train_results["rmse"]),5) print() print("############################################################################################") print("# Train RMSE: {} - {}".format(train_rmse, train_results)) print("############################################################################################") test_results = estimator.evaluate(input_fn=test_input_fn, steps=1) test_rmse = round(math.sqrt(test_results["rmse"]),5) print() print("############################################################################################") print("# Test RMSE: {} - {}".format(test_rmse, test_results)) 
print("############################################################################################") # - # ## Prediction # + import itertools predict_input_fn = lambda: csv_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN, mode= tf.estimator.ModeKeys.PREDICT, batch_size= 5) predictions = estimator.predict(input_fn=predict_input_fn) values = list(map(lambda item: item["predictions"][0],list(itertools.islice(predictions, 5)))) print() print("Predicted Values: {}".format(values)) # -
01_Regression/cypan09 - TF Regression Example - Housing Price Estimation + Keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Check setup # This small notebook checks if your environment is correctly set up. # # It is also the default notebook which opens when you git-pull the introML-2022 repository directly to Noto. import sys print(f'Python path: {sys.executable}') # If you are running this notebook locally (not on Noto), the path should have "introml" in it # ## 1. Packages # Let's check that all the necessary packages are installed by importing them. # + import numpy as np import scipy import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly import sklearn import ipywidgets as widgets import tqdm print("All good for packages :)") # - # ## 2. JupyterLab extensions # ### 2.1. JupyterLab Plotly # Run the following cell. If a plot is displayed, then the JupyterLab Plotly extension is correctly installed. import plotly.express as px df = px.data.iris() fig = px.scatter(df, x="sepal_length", y="sepal_width", color="species", title="This title and interactive plot should be displayed if the plotly extension is installed correctly") fig.show() # ### 2.2. JupyterLab Table of Contents # The Table of Contents extension automatically generates a Table of Contents for the currently open notebook. This can be very useful when navigating through the exercises. If it is installed, you should see the following icon (the one shown in the red box) in the left sidebar. # # <img src="images/toc_icon.png" width=300/> # # Clicking on it should display a table of contents, like so: # # <img src="images/toc_view.png" width=300/> # If everything worked correctly so far, then your setup should be **all good** and you can use it for the exercises and graded homework.
exercises/00-setup/check_setup.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.2 64-bit
#     name: python38264bit4424ac2c25b9430296d778061d0259c2
# ---

# Scrape pro-football-reference.com and write every player's profile URL,
# one per line, to the local file `players`.

import requests
from bs4 import BeautifulSoup
import string
import os

# Index page listing all players whose surname starts with a given letter,
# and the site root used to turn relative hrefs into absolute URLs.
url = 'https://www.pro-football-reference.com/players/{}/'
base_url = 'https://www.pro-football-reference.com{0}'
file_path = 'players'

# + tags=[]
letters = list(string.ascii_uppercase)

# + tags=["outputPrepend"]
# Start from a clean output file so repeated runs don't append duplicates.
if os.path.exists(file_path):
    os.remove(file_path)

for letter in letters:
    print(letter)
    response = requests.get(url.format(letter))
    html = response.content
    # Name the parser explicitly: `BeautifulSoup(html)` auto-guesses one,
    # emits a GuessedAtParserWarning, and can pick a different parser on
    # different machines, yielding inconsistent parse trees.
    soup = BeautifulSoup(html, 'html.parser')
    players_list = soup.find('div', {'id': 'div_players'})
    players = players_list.find_all('a')
    # Open the output file once per letter instead of once per player:
    # identical file contents, far fewer open/close cycles on index pages
    # that list hundreds of players.
    with open(file_path, 'a') as fout:
        for player in players:
            fout.write(base_url.format(player['href'] + '\n'))
            print(base_url.format(player['href']))
# -
Jupyter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8 (''DL_env'': conda)' # name: pythonjvsc74a57bd0bde1ba50c9c31d1dffdbf30b8d5293acc1b85b7a129ef469b25ff0114d8c4815 # --- # # LeNet5 Relu PGD # %cd ../ import numpy as np etas = [0.1, 0.01, 0.2, 0.02, 0.3, 0.4, 0.04, 0.5, 0.05, 0.6, 0.7, 0.007, 0.8, 0.08, 0.9, 0.014, 0.16, 0.028, 0.32, 0.0035, 0.056, 0.64, 0.112, 0.224, 0.448, 1.0, 1.1, 1.2, 1.3, 1.4, 1.6, 1.28, 2.0, 2.56] + np.round(np.arange(0.15, 0.5, 0.018), 2).tolist() + np.round(np.arange(0.03, 0.7, 0.06), 2).tolist() + [0.00000005, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.02, 0.023] print(etas) # + tags=["outputPrepend"] for eta in etas: #eta = eta0 * (2**count) loc = "PGD/LeNet5_relu_"+str(eta) # !python main.py -b 64 -n LeNet5 -a relu -l $eta -g 0 # !python adv_PGD.py -n LeNet5 -a relu -l $eta -f $loc -g 0 # !python main.py -b 64 -n LeNet5 -a relu -l $eta -r2 -f $loc -g 0 # + # PGD relu plot import numpy as np from plot import lr_plot unsorted_etas = [0.1, 0.01, 0.2, 0.02, 0.3, 0.4, 0.04, 0.5, 0.05, 0.6, 0.7, 0.007, 0.8, 0.08, 0.9, 0.014, 0.16, 0.028, 0.32, 0.0035, 0.056, 0.64, 0.112, 0.224, 0.448, 1.0, 1.1, 1.2, 1.3, 1.4, 1.6, 1.28, 2.0, 2.56] + np.round(np.arange(0.15, 0.5, 0.018), 2).tolist() + [0.00000005, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.023] + np.round(np.arange(0.03, 0.7, 0.06), 2).tolist() + [0.00000005, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.02, 0.023] etas = sorted(unsorted_etas, reverse=False) list = [] for eta in etas: list.append("PGD/LeNet5_relu_{}".format(eta)) lr_plot(list, etas) # - from plot import get_adv_err, get_val_err print(etas) print('2/L: val acc {} | adv acc {}'.format(1-get_val_err('PGD/LeNet5_relu_0.63'), 1-get_adv_err('PGD/LeNet5_relu_0.63'))) print('1/L: val acc {} | adv acc {}'.format(1-get_val_err('PGD/LeNet5_relu_0.31'), 
1-get_adv_err('PGD/LeNet5_relu_0.31')))
MNIST-figure-3/notebooks-figure-3/run_LeNet5_relu.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _kg_hide-input=true import os import cv2 import math import warnings import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, fbeta_score from keras import optimizers from keras import backend as K from keras import applications from keras.models import Sequential from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler, EarlyStopping from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Activation, BatchNormalization # Set seeds to make the experiment more reproducible. from tensorflow import set_random_seed from numpy.random import seed set_random_seed(0) seed(0) # %matplotlib inline sns.set(style="whitegrid") warnings.filterwarnings("ignore") # + _kg_hide-input=true train = pd.read_csv('../input/imet-2019-fgvc6/train.csv') labels = pd.read_csv('../input/imet-2019-fgvc6/labels.csv') test = pd.read_csv('../input/imet-2019-fgvc6/sample_submission.csv') train["attribute_ids"] = train["attribute_ids"].apply(lambda x:x.split(" ")) train["id"] = train["id"].apply(lambda x: x + ".png") test["id"] = test["id"].apply(lambda x: x + ".png") print('Number of train samples: ', train.shape[0]) print('Number of test samples: ', test.shape[0]) print('Number of labels: ', labels.shape[0]) display(train.head()) display(labels.head()) # - # ### Bottleneck features using a pre-trained model # Model parameters BATCH_SIZE = 64 EPOCHS = 200 LEARNING_RATE = 0.1 HEIGHT = 128 WIDTH = 128 CANAL = 3 N_CLASSES = labels.shape[0] classes = list(map(str, range(N_CLASSES))) # + _kg_hide-input=true def f2_score_thr(threshold=0.5): def f2_score(y_true, 
y_pred): beta = 2 y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold), K.floatx()) true_positives = K.sum(K.clip(y_true * y_pred, 0, 1), axis=1) predicted_positives = K.sum(K.clip(y_pred, 0, 1), axis=1) possible_positives = K.sum(K.clip(y_true, 0, 1), axis=1) precision = true_positives / (predicted_positives + K.epsilon()) recall = true_positives / (possible_positives + K.epsilon()) return K.mean(((1+beta**2)*precision*recall) / ((beta**2)*precision+recall+K.epsilon())) return f2_score def step_decay(epoch): initial_lrate = LEARNING_RATE drop = 0.5 epochs_drop = 10 lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop)) return lrate # + _kg_hide-input=true train_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_dataframe( dataframe=train, directory="../input/imet-2019-fgvc6/train", x_col="id", y_col="attribute_ids", batch_size=BATCH_SIZE, shuffle=False, class_mode=None, target_size=(HEIGHT, WIDTH)) test_datagen = ImageDataGenerator(rescale=1./255) test_generator = test_datagen.flow_from_dataframe( dataframe=test, directory = "../input/imet-2019-fgvc6/test", x_col="id", target_size=(HEIGHT, WIDTH), batch_size=1, shuffle=False, class_mode=None) # + # Build the bottleneck network model_base = applications.InceptionV3(weights=None, include_top=False) model_base.load_weights('../input/inceptionv3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5') STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size train_data = model_base.predict_generator(train_generator, STEP_SIZE_TRAIN) train_labels = [] for label in train['attribute_ids'][:train_data.shape[0]].values: zeros = np.zeros(N_CLASSES) for label_i in label: zeros[int(label_i)] = 1 train_labels.append(zeros) train_labels = np.asarray(train_labels) X_train, X_val, Y_train, Y_val = train_test_split(train_data, train_labels, test_size=0.2, random_state=0) # - # ### Model # + model = Sequential() 
model.add(Flatten(input_shape=train_data.shape[1:])) model.add(Dense(1024, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(N_CLASSES, activation="sigmoid")) optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.8, decay=0.0, nesterov=False) thresholds = [0.15, 0.2, 0.25, 0.3, 0.4, 0.5] metrics = ["accuracy", "categorical_accuracy", f2_score_thr(0.15), f2_score_thr(0.2), f2_score_thr(0.25), f2_score_thr(0.3), f2_score_thr(0.4), f2_score_thr(0.5)] lrate = LearningRateScheduler(step_decay) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) callbacks = [lrate, es] model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics) # + _kg_hide-input=true _kg_hide-output=true history = model.fit(x=X_train, y=Y_train, validation_data=(X_val, Y_val), epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=callbacks, verbose=2) # - # ### Model graph loss # + _kg_hide-input=true sns.set_style("whitegrid") fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex='col', figsize=(20,7)) ax1.plot(history.history['loss'], label='Train loss') ax1.plot(history.history['val_loss'], label='Validation loss') ax1.legend(loc='best') ax1.set_title('Loss') ax2.plot(history.history['acc'], label='Train Accuracy') ax2.plot(history.history['val_acc'], label='Validation accuracy') ax2.legend(loc='best') ax2.set_title('Accuracy') ax3.plot(history.history['categorical_accuracy'], label='Train Cat Accuracy') ax3.plot(history.history['val_categorical_accuracy'], label='Validation Cat Accuracy') ax3.legend(loc='best') ax3.set_title('Cat Accuracy') plt.xlabel('Epochs') sns.despine() plt.show() # + _kg_hide-input=true _kg_hide-output=false fig, axes = plt.subplots(3, 2, sharex='col', figsize=(20,7)) axes[0][0].plot(history.history['f2_score'], label='Train F2 Score') axes[0][0].plot(history.history['val_f2_score'], label='Validation F2 Score') axes[0][0].legend(loc='best') axes[0][0].set_title('F2 Score threshold 0.15') 
axes[0][1].plot(history.history['f2_score_1'], label='Train F2 Score') axes[0][1].plot(history.history['val_f2_score_1'], label='Validation F2 Score') axes[0][1].legend(loc='best') axes[0][1].set_title('F2 Score threshold 0.2') axes[1][0].plot(history.history['f2_score_2'], label='Train F2 Score') axes[1][0].plot(history.history['val_f2_score_2'], label='Validation F2 Score') axes[1][0].legend(loc='best') axes[1][0].set_title('F2 Score threshold 0.25') axes[1][1].plot(history.history['f2_score_3'], label='Train F2 Score') axes[1][1].plot(history.history['val_f2_score_3'], label='Validation F2 Score') axes[1][1].legend(loc='best') axes[1][1].set_title('F2 Score threshold 0.3') axes[2][0].plot(history.history['f2_score_4'], label='Train F2 Score') axes[2][0].plot(history.history['val_f2_score_4'], label='Validation F2 Score') axes[2][0].legend(loc='best') axes[2][0].set_title('F2 Score threshold 0.4') axes[2][1].plot(history.history['f2_score_5'], label='Train F2 Score') axes[2][1].plot(history.history['val_f2_score_5'], label='Validation F2 Score') axes[2][1].legend(loc='best') axes[2][1].set_title('F2 Score threshold 0.5') plt.xlabel('Epochs') sns.despine() plt.show() # - # ### Find best threshold value # + _kg_hide-input=true best_thr = 0 best_thr_val = history.history['val_f2_score'][-1] for i in range(1, len(metrics)-2): if best_thr_val < history.history['val_f2_score_%s' % i][-1]: best_thr_val = history.history['val_f2_score_%s' % i][-1] best_thr = i threshold = thresholds[best_thr] print('Best threshold is: %s' % threshold) # - # ### Apply model to test set and output predictions # + _kg_hide-input=true test_generator.reset() STEP_SIZE_TEST = test_generator.n//test_generator.batch_size bottleneck_preds = model_base.predict_generator(test_generator, steps=STEP_SIZE_TEST) preds = model.predict(bottleneck_preds) # + _kg_hide-input=true predictions = [] for pred_ar in preds: valid = '' for idx, pred in enumerate(pred_ar): if pred > threshold: if len(valid) == 0: 
valid += str(idx) else: valid += (' %s' % idx) if len(valid) == 0: valid = str(np.argmax(pred_ar)) predictions.append(valid) # + _kg_hide-input=true filenames = test_generator.filenames results = pd.DataFrame({'id':filenames, 'attribute_ids':predictions}) results['id'] = results['id'].map(lambda x: str(x)[:-4]) results.to_csv('submission.csv',index=False) results.head(10)
Model backlog/Deep Learning/InceptionV3/[20th] - Bottleneck InceptionV3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="../../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # ## _*Quantum Random Number Generation*_ # # The latest version of this notebook is available on https://github.com/QISKit/qiskit-tutorial. # # *** # ### Contributors # <NAME>, IBM Research # *** # One important application of quantum computers is the generation of random numbers. This could be a simple as a coin flip. import random for n in range(5): print('Flip '+str(n+1)) if random.random()<0.5: print('HEADS\n') else: print('TAILS\n') # The above code doesn't use Qiskit. Instead it uses an entirely classical package unimaginatively called `random`. There are many classical methods that can be used for this purpose, but none are truly random. Instead, they are *pseudorandom number generators*. They produce an output by taking a definite input and processing it in a deterministic way. These inputs are chosen such that the result appears random, and the processing is usually designed to accentuate this. But nevertheless, if you knew what was going on under the hood, you'd know exactly what random numbers would come out. # # For example, suppose we take an extremely precise measure of the time at which the random number was asked for. We could then look at the last digit (which changes most quickly), and assign our heads or tails depending on whether this is even or odd. Slight fluctations in the speed of our process will result in random seeming numbers. 
import time for n in range(5): t = str(time.time()) print('Flip '+str(n+1)+' (system time = ' + t + ')') if int(t[-1])%2==0: print('HEADS\n') else: print('TAILS\n') # This already does a fairly good job at seeming random, and more sophisticated methods will do even better, but all have their limits. This can be an important issue, since some applications rely on having good quality random numbers. For example: # # * Simulations of random processes may have skewed statistics for bad quality randomness; # # # * Cryptography can be cracked if the key is created using poorly generated random numbers. # # We can solve these problems using the most genuine source of randomness that we know: measurements of quantum superpositions. Unlike a pseudorandom number generator, a qubit does not know what result it will give. It does not contain a definite set of details that tell it how to respond to any measurement it may experience. The result is completely undefined before the moment of measurement. This means we can use quantum computers for the ultimate coin flip. # ## Quantum coin flip # # To generate a single random bit, we need only one qubit. # + from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit q = QuantumRegister(1) c = ClassicalRegister(1) circ = QuantumCircuit(q, c) # - # We need to generate an equally weighted superposition of $|0\rangle$ and $|1\rangle$ to get a completely random bit. This is done most simply with the Hadamard. circ.h(q) # To extract the output, we then measure. circ.measure(q, c) # Now the circuit is complete, let's run it for 5 shots to get 5 results. # + from qiskit import BasicAer, execute backend = BasicAer.get_backend('qasm_simulator') job = execute(circ, backend, shots=5, memory=True) # - # Our results are extracted from `job.result()` using `get_memory()`. data = job.result().get_memory() print(data) # For a coin flip, we can simply use `0` to mean heads and `1` to mean tails. 
for output in data: print('Flip with output bit value ' +output) if output=='0': print('HEADS\n') else: print('TAILS\n') # We cheated above by using the simulator. Since this is a classical reproduction of a qubit, the results here were actually pseudorandom. For real randomness, we need a real device. So let's load an IBMQ account and use a 5 qubit quantum processor. # + from qiskit import IBMQ IBMQ.load_accounts() backend = IBMQ.get_backend('ibmq_5_tenerife') job = execute(circ, backend, shots=5, memory=True) # - for output in job.result().get_memory(): print('Flip with output bit value ' +output) if output=='0': print('HEADS\n') else: print('TAILS\n') # Here's the list of results as we get it from `get_memory()`. job.result().get_memory() # Note that noise results in these results typically showing a bias towards `0`. So some post-processing is required if these numbers are to be used for statistical studies. Also, though the universe ensures the cryptographic security of these numbers, their journey between the qubit and your computer might not be so secure. You'll need to take additional precautions before using these results for security applications. # ## More general probability distributions # Sometimes we need something more complex than just a coin flip. Many applications require generation of many possible numbers within a certain range (rather than just $0$ and $1$), and to do according probability distribitions for which each outcome could occur with a different probability. # # This is also possible for quantum computers. In fact, quantum computers will be able to efficiently sample from probability distributions that would be intractable for classical computers. # # To represent numbers larger than $0$ and $1$, we need more qubits. The $n$ bit string from the output of $n$ qubits can then be interpreted as the binary representation of numbers from $0$ to $2^n-1$. For example, let's choose $n=3$ to generate numbers from $0$ to $7$. 
n = 3 # To get a uniform distribition over all these numbers, we could simply do a Hadamard on each. Here's an implementation of this, with the histogram obtained from many samples. # + q = QuantumRegister(n) c = ClassicalRegister(n) circ = QuantumCircuit(q, c) for j in range(n): circ.h(q[j]) circ.measure(q,c) job = execute(circ, BasicAer.get_backend('qasm_simulator'), shots=8192) # get the histogram of bit string results, convert it to one of integers and plot it bit_counts = job.result().get_counts() int_counts = {} for bitstring in bit_counts: int_counts[ int(bitstring,2) ] = bit_counts[bitstring] from qiskit.tools.visualization import plot_histogram plot_histogram(int_counts) # - # Now let's try something slightly more complex, like a bias towards smaller numbers. This can be done by using the `rx` rotation on each qubit, by using an angle for each `rx` that decreases as we go from `q[0]` (whose output corresponds to the least significant binary digit) to `q[n-1]` (the most significant). # # Below is one possible implementation of this. It was chosen fairly arbitrarily, so why not try your own variation? # + q = QuantumRegister(n) c = ClassicalRegister(n) circ = QuantumCircuit(q, c) for j in range(n): circ.rx(3.14159/(2.5+j),q[j]) circ.measure(q,c) job = execute(circ, BasicAer.get_backend('qasm_simulator'), shots=8192) bit_counts = job.result().get_counts() int_counts = {} for bitstring in bit_counts: int_counts[ int(bitstring,2) ] = bit_counts[bitstring] plot_histogram(int_counts) # - # Now let's get some actual random samples from this distribution. Specifically, let's get ten of them. job = execute(circ, BasicAer.get_backend('qasm_simulator'), shots=10, memory=True) data = job.result().get_memory() print(data) # These are written in binary, but it is straightforward to convert them into decimal integers. 
int_data = [] for bitstring in data: int_data.append( int(bitstring,2) ) print(int_data) # Here we see, as we expected, a bias towards the smaller numbers in the range.
community/terra/qis_adv/random_number_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os, sys, time, copy import numpy as np import matplotlib.pyplot as plt import myokit sys.path.append('../') sys.path.append('../Protocols') sys.path.append('../Models') sys.path.append('../Lib') import protocol_lib from br1977 import BR1977 # - ''' Beeler and Reuter 1977 ''' end_time = 500 # + import simulator_myokit ''' Simulation with Myokit ''' model_path = "../mmt-model-files/br-1977.mmt" model_myokit, protocol_myokit, script = myokit.load(model_path) sim_myokit = simulator_myokit.Simulator(model_myokit, protocol_myokit, max_step=None, abs_tol=1e-08, rel_tol=1e-10) # 1e-12, 1e-14 # 1e-08, 1e-10 sim_myokit.name = "br1977" times = np.linspace(0, end_time, 5000) sim_myokit.bcl = 1000 simulated_models_myokit = [] start_time = time.time() # y0 = sim_myokit.pre_simulate( 1000*100, sim_type=1) d = sim_myokit.simulate(end_time=end_time) simulated_models_myokit.append(d) print("--- %s seconds ---"%(time.time()-start_time)) # + import simulator_scipy ''' Simulation with BDF ''' protocol = protocol_lib.PacingProtocol(level=1, start=100, length=2, period=1000, multiplier=0, default_time_unit='ms') model = BR1977(protocol) sim_bdf = simulator_scipy.Simulator(model) simulated_models_BDF = [] start_time = time.time() # t_eval = np.linspace(0, t_span[1], 5000) sim_bdf.simulate(t_span=(0, end_time), method='BDF', max_step=1, atol=1E-6, rtol=1E-3) simulated_models_BDF.append(copy.copy(model)) print("--- %s seconds ---"%(time.time()-start_time)) # + import simulator_euler ''' Simulation with Euler ''' sim_euler = simulator_euler.Simulator(model) simulated_models_Euler = [] start_time = time.time() sim_euler.dt = 0.02 sim_euler.simulate(end_time=end_time) simulated_models_Euler.append(copy.copy(model)) print("--- %s seconds 
---"%(time.time()-start_time)) # + ''' Plot ''' fig, ax = plt.subplots(figsize=(6,4)) fig.suptitle(model.name, fontsize=14) myokit = simulated_models_myokit[0] bdf = simulated_models_BDF[0] euler = simulated_models_Euler[0] # ax.set_title('Simulation %d'%(simulationNo)) ax.set_xlim(bdf.times.min(), bdf.times.max()) # ax.set_ylim(ylim[0], ylim[1]) ax.set_ylabel('Membrane Potential (mV)') ax.set_xlabel('Time (ms)') ax.plot( myokit['engine.time'], myokit['membrane.V'], label='Myokit', linewidth=8, color='y') ax.plot(bdf.times, bdf.V, label='BDF', linewidth=5, color='r') ax.plot(euler.times, euler.V, label='Euler', linewidth=2, color='k') # textstr = "GNa : %1.4f\nGNaL : %1.4f\nGto : %1.4f\nPCa : %1.4f\nGKr : %1.4f\nGKs : %1.4f\nGK1 : %1.4f\nGf : %1.4f"%(GNa/g_fc[0], \ # GNaL/g_fc[1], Gto/g_fc[2], PCa/g_fc[3], GKr/g_fc[4], GKs/g_fc[5], GK1/g_fc[6], Gf/g_fc[7]) # props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) # place a text box in upper left in axes coords # ax.text(0.67, 0.60, textstr, transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props) # fig1 = plt.gcf() ax.legend() ax.grid() plt.show() fig.savefig(os.path.join('Results', "BR1977.jpg"), dpi=100) # - print("Complete")
Examples/BeelerReuter1977_AP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd from sklearn.preprocessing import OrdinalEncoder import os #Read data raw_data_path =os.path.join(os.path.pardir,os.path.pardir,'data','raw', 'Dataset3') train_file_path = os.path.join(raw_data_path,'data.csv') df = pd.read_csv(train_file_path) df["IsMale"] = np.where(df.Gender== "M", 1 ,0) df =df.drop(['Gender','CLIENTNUM','Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1','Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2'], axis=1) ord_enc = OrdinalEncoder() df["Attrition_Flag"] = ord_enc.fit_transform(df[["Attrition_Flag"]]) df["Education_Level"] = ord_enc.fit_transform(df[["Education_Level"]]) df["Card_Category"] = ord_enc.fit_transform(df[["Card_Category"]]) df["Income_Category"] = ord_enc.fit_transform(df[["Income_Category"]]) df["Marital_Status"] = ord_enc.fit_transform(df[["Marital_Status"]]) df = df.rename(columns={'Attrition_Flag': 'Churn', }) df.info() # - proccessed_data_path =os.path.join(os.path.pardir,os.path.pardir,'data','processed') write_train_path = os.path.join(proccessed_data_path,'dataset3.csv') df.to_csv(write_train_path) df.Churn.value_counts().plot(kind= 'pie', title='Churned proportion of customers', explode = [0,0.1],autopct='%1.1f%%', shadow=True);
project/notebooks/Dataset3/Dataset3 - EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <NAME> - 810098015 - Final Project # # Imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split,GridSearchCV from sklearn.preprocessing import StandardScaler from sklearn import metrics from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction import DictVectorizer from sklearn.metrics import mean_squared_log_error from sklearn.linear_model import SGDRegressor from sklearn.pipeline import Pipeline from scipy.sparse import hstack import copy import hazm import time # # Preprocessing & Previsualizations # first we find that the date which the row has created is weekend or not (friday and thursday)<br> # then we find the time within 24 hours, based on what we expect from the price if one letter has created on night or morning<br> after that, we clean brand to english brands, and then one-hot-encode # + df = pd.read_csv('mobile_phone_dataset.csv') df.drop('Unnamed: 0',axis=1,inplace=True) # created_at df['is_weekend'] = 'Nan' df['time'] = 'Nan' for i in range(len(df)): d = df['created_at'][i].split()[0] if ((d=='Friday')or(d=='Thursday')): df['is_weekend'][i] = 1 else: df['is_weekend'][i] = 0 t = df['created_at'][i].split()[1] am_pm = t[-2] + t[-1] hour = int(t[0] + t[1]) if (am_pm=='AM'): df['time'][i] = hour else: df['time'][i] = hour+12 # beand df['brand'] = [df['brand'][i].split('::')[0] for i in range(len(df))] # get dummies df = pd.get_dummies(df, columns=['city','brand']) df.drop('created_at',axis=1,inplace=True) # - dd = df.copy() # For keeping the main dataset # ## Correlation Heatmap # As it 
has been clarified in the plot bellow, without description and title columns, the correlations of other columns with price is pretty low. most of correlations are between some of the brands like apple and cities like tehran. plt.figure(figsize=(15,15)) sns.heatmap(dd.corr(),annot=True) # ## Text Preprocesses # As a result of that, lets preprocess the two desc and title columns. Lets start with normalizing (for better tokenizing) and tokenizing the column strings. by doing that, words will be separated from each other. # + normalizer = hazm.Normalizer() dd['title'] = [normalizer.normalize(df['title'][i]) for i in range(len(dd))] dd['desc'] = [normalizer.normalize(df['desc'][i]) for i in range(len(dd))] tokenizer = hazm.WordTokenizer() dd['title'] = [tokenizer.tokenize(df['title'][i]) for i in range(len(dd))] dd['desc'] = [tokenizer.tokenize(df['desc'][i]) for i in range(len(dd))] # - # Now let us normalize the words with the module "informal normalizer" for informal words. After that, the words may concatinate again with each othe. So we tokenize them again. # + normalizer_inf = hazm.InformalNormalizer() for i in range(len(dd)): temp = [] for j in dd['desc'][i]: temp.append(normalizer_inf.normalized_word(j)[0]) dd['desc'][i] = copy.deepcopy(temp) temp = [] for j in dd['title'][i]: temp.append(normalizer_inf.normalized_word(j)[0]) dd['title'][i] = copy.deepcopy(temp) dd['title'] = [tokenizer.tokenize(df['title'][i]) for i in range(len(dd))] dd['desc'] = [tokenizer.tokenize(df['desc'][i]) for i in range(len(dd))] # - # Personally, I guess finding word stems will reduce the precision of our model. as a result of that, I made 2 data frames; one with stemming (dd2) and one without it(dd). Also I removed '\u200c' from both. 
dd2 = dd.copy() # + stemmer = hazm.Stemmer() for i in range(len(dd2)): temp = [] temp2 = [] for j in dd2['desc'][i]: temp = j.split('\u200c') for q in temp: temp2.append(stemmer.stem(q)) dd2['desc'][i] = copy.deepcopy(temp2) for i in range(len(dd2)): temp = [] temp2 = [] for j in dd2['title'][i]: temp = j.split('\u200c') for q in temp: temp2.append(stemmer.stem(q)) dd2['title'][i] = copy.deepcopy(temp2) for i in range(len(dd)): temp = [] temp2 = [] for j in dd['desc'][i]: temp = j.split('\u200c') for q in temp: temp2.append(q) dd['desc'][i] = copy.deepcopy(temp2) for i in range(len(dd)): temp = [] temp2 = [] for j in dd['title'][i]: temp = j.split('\u200c') for q in temp: temp2.append(q) dd['title'][i] = copy.deepcopy(temp2) # - # ## Remove stopwords and Special Chars # Because they are useless to the meaning of sentences and as a result, to prices. # + special_chars = ['!','"','#','(',')','*',',','-','.','/','\'','«','»','،','؛','؟','.','…','$'] stopwords = hazm.stopwords_list() for i in range(len(dd2)): for j in dd2['desc'][i]: if (j in special_chars)or(j in stopwords): while (j in dd2['desc'][i]): dd2['desc'][i].remove(j) for j in dd2['title'][i]: if (j in special_chars)or(j in stopwords): while (j in dd2['desc'][i]): dd2['desc'][i].remove(j) for i in range(len(dd)): for j in dd['desc'][i]: if (j in special_chars)or(j in stopwords): while (j in dd['desc'][i]): dd['desc'][i].remove(j) for j in dd['title'][i]: if (j in special_chars)or(j in stopwords): while (j in dd['desc'][i]): dd['desc'][i].remove(j) # - # ## Just one word columns! # Because only words and the count of them will be important for us, I merged Two "title" and "desc" columns into one column and remove the othe two. Also I filled empty new columns with "missing" for further conciderations. 
# + dd['titile_desc'] = dd['desc'] + dd['title'] dd.drop('title',axis=1,inplace=True) dd.drop('desc',axis=1,inplace=True) dd2['titile_desc'] = dd2['desc'] + dd2['title'] dd2.drop('title',axis=1,inplace=True) dd2.drop('desc',axis=1,inplace=True) dd['titile_desc'].fillna(value='Missing',inplace=True) dd2['titile_desc'].fillna(value='Missing',inplace=True) # - # ## Column Normalizations # purpose of normalization : scale numeric data from different columns down to an equivalent scale so that the model doesn’t get skewed due to huge variance in a few columns. for example, prices are vary through dataframe and distances between them are very high. As a result, I splited test columns(unknown prices) and then I used standard scaler normalizer. # # After normalization, the RMSE has increased; reason being prices are large numbers and the regression models just try to calculate given numbers to predict the prices. As a result, low numbers could not lead us to the specific large numbers, so that I commented out normalization section. # + main1 = dd.loc[dd['price']!=-1] main2 = dd2.loc[dd['price']!=-1] test1 = dd.loc[dd['price']==-1] test2 = dd2.loc[dd['price']==-1] # temp = list(main1.columns) # temp.remove('price') # temp.remove('titile_desc') # ss = StandardScaler() # main1[temp] = ss.fit_transform(main1.drop(['price','titile_desc'],axis=1)) # main2[temp] = ss.fit_transform(main2.drop(['price','titile_desc'],axis=1)) # test1[temp] = ss.fit_transform(test1.drop(['price','titile_desc'],axis=1)) # test2[temp] = ss.fit_transform(test2.drop(['price','titile_desc'],axis=1)) # - # ## Title-Description Feature Extraction : TF-IDF vectorizer # A machine understands only numbers, it does not directly understand letters or text that we as humans can read. That means we need to convert our text and categorical data to numbers. This process is called feature extraction or featurization. 
# # TF-IDF (term frequency-inverse document frequency) is a statistical measure that evaluates how relevant a word is to a document in a collection of documents. In other words, that is intended to reflect how important a word is to a document in a collection or corpus. This is done by multiplying two metrics: how many times a word appears in a document, and the inverse document frequency of the word across a set of documents. n-gram is a contiguous sequence of n items from a given sample of text or speech. The items here are just words! # # I first used sklean's built-in TfidfVectorizer and I have encoded name and item_descriptions into TF-IDF vectors of uni-grams, bi-grams and tri-grams because I guess will be the most important features. I also limited the number of features to 1M in the pursuit of avoiding very high dimensional vectors. # # But before doing that, splited words in merged title+description column should be convert to sentences. # + for i in list(main1.index): main1['titile_desc'][i] = ' '.join(main1['titile_desc'][i]) for i in list(main1.index): main2['titile_desc'][i] = ' '.join(main2['titile_desc'][i]) # - vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=1, max_features=1000000) tfidf_dd = vectorizer.fit_transform(main1['titile_desc']) tfidf_dd2 = vectorizer.fit_transform(main2['titile_desc']) # + dictvectorizer = DictVectorizer() dd_make_dict = main1.drop(['price','titile_desc'],axis=1).to_dict('records') categorised_dd = dictvectorizer.fit_transform(dd_make_dict) X_dd = hstack([tfidf_dd,categorised_dd]) dd2_make_dict = main2.drop(['price','titile_desc'],axis=1).to_dict('records') categorised_dd2 = dictvectorizer.fit_transform(dd2_make_dict) X_dd2 = hstack([tfidf_dd2,categorised_dd2]) # - # # Train-Validation Split seed = 101 X_train1, X_test1, y_train1, y_test1= train_test_split(X_dd, main1['price'], test_size=0.2, random_state=seed) X_train2, X_test2, y_train2, y_test2= train_test_split(X_dd2, main2['price'], test_size=0.2, 
random_state=seed) # # Reports # Here is the function that reports each model mean squared error, Root mean Squared error, and and its R2 score. It also generates random predictions in order to allow us to compare model scores to a random model. # + def makeRandom(y_test): rands = [] for counter in range(len(y_test)): rands.append(np.random.choice(list(range(1,300)))) return rands def reportRegressor(model,X_cross,y_cross,X_test,y_test): validationSetMSE = metrics.mean_squared_error(y_cross,model.predict(X_cross)) validationSetR2 = metrics.r2_score(y_cross,model.predict(X_cross)) validationSetRMSE = np.sqrt(validationSetMSE) testSetMSE = metrics.mean_squared_error(y_test,model.predict(X_test)) testSetR2 = metrics.r2_score(y_test,model.predict(X_test)) testSetRMSE = np.sqrt(testSetMSE) random_predicts = makeRandom(y_test) randomMSE = metrics.mean_squared_error(y_test,random_predicts) randomR2 = metrics.r2_score(y_test,random_predicts) randomRMSE = np.sqrt(randomMSE) print('Validation-set:\n\tMean Squared Error: ' , validationSetMSE ,'\n\tRoot Mean Squared Error: ',validationSetRMSE, '\n\tR2 Score: ' , validationSetR2) print('\nTest-set:\n\tMean Squared Error: ' , testSetMSE ,'\n\tRoot Mean Squared Error: ',testSetRMSE, '\n\tR2 Score: ' , testSetR2) print('\nRandom Predicts on Test-set:\n\tMean Squared Error: ' , randomMSE ,'\n\tRoot Mean Squared Error: ',randomRMSE, '\n\tR2 Score: ' , randomR2) # - # # Models & Evaluations # Some the models have been grid searched (with the commented code at the end of this part)...But because I didn't have the time, I just applied for some of them with best params_! # # As models below indicate, in most of them, with stemming(train-test2) and without it (train-test1) doesn't make any differences as much, although without stemming the model is Slightly better. 
# # Also we should note that high values of MSE and RMSE is because of great number of prices; for instance, if the model predicts the all prices with 20 Thousand Tomans, It performs pretty well, although MSE will be high. We should rely more on R2 score, which we know what it is from the class! # # Fitting times was very high, So I was just able to make limited models. # ## Linear Regression from sklearn.linear_model import LinearRegression linear_regressor = LinearRegression() linear_regressor.fit(X_train1,y_train1) reportRegressor(linear_regressor,X_train1,y_train1,X_test1,y_test1) # It is completely overfitted! hyperparameters need to be changed. # ## Decision Tree: # from sklearn.tree import DecisionTreeRegressor dtr = DecisionTreeRegressor(max_depth=7) dtr.fit(X_train2, y_train2) reportRegressor(dtr,X_train2,y_train2,X_test2,y_test2) dtr.fit(X_train1, y_train1) reportRegressor(dtr,X_train1,y_train1,X_test1,y_test1) # Not very accurate...right?! But no overfitting. # ## Ridge # alpha changed to where test-set had the best scores. # # Also we should note that Ridge Regression is a technique for analyzing multiple regression data that suffer from multicollinearity; thats why it performs a great model! from sklearn.linear_model import Ridge ridge1 = Ridge(alpha=0.8, random_state=seed, solver='auto') ridge1.fit(X_train1, y_train1) reportRegressor(ridge1,X_train1,y_train1,X_test1,y_test1) ridge2 = Ridge(alpha=0.8, random_state=seed, solver='auto') ridge2.fit(X_train2, y_train2) reportRegressor(ridge2,X_train2,y_train2,X_test2,y_test2) # ## Support Vector Regressor # Not very good. Hyperparameters need to be grid-seached! from sklearn.svm import SVR svr = SVR(C = 100 , epsilon=0.2,gamma=1,kernel='rbf') svr.fit(X_train1,y_train1) reportRegressor(svr,X_train1,y_train1,X_test1,y_test1) # As the ridge was the best model, I used it to predict unknown prices. Also, I just used without stemming description preprocess. 
Below is the code for preparing test set for being predicted by ridge. # + # for i in list(test1.index): # test1['titile_desc'][i] = ' '.join(test1['titile_desc'][i]) # vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=1, max_features=1000000) # tfidf_dd = vectorizer.fit_transform(test1['titile_desc']) # dd_make_dict = test1.drop(['price','titile_desc'],axis=1).to_dict('records') # categorised_dd = dictvectorizer.fit_transform(dd_make_dict) # X_dd = hstack([tfidf_dd,categorised_dd]) # - # Bellow is the code I used for grid-searching in the pursuit of finding best parameters for some of the models mentioned above. # + # model = X! # params = {'':['','',''] , '':[ , , , ]} # gs = GridSearchCV(estimator=model, param_grid=params, scoring='mean_squared_error', n_jobs=1, cv=5, verbose=3) # start = time.time() # gs.fit(X_train1, y_train1) # end = time.time() # print('Time to train model: %0.2fs' % (end -start)) # model = gs.best_estimator_ # print(gs.best_params_) # print(gs.best_score_) # - # # Other Ideas: # 1- Visualizing more at the beggining could lead us to better preprocesses and algorithm selections. # # 2- Other regression algorithms could be easily done by the sklearn; and we could evaluate them using the repportRegressor function! Algorithms such as Logistic Regression, Polynomial Regression, Stepwise Regression, Lasso Regression, ElasticNet Regression, and of course neural nets. Also I guess voting models based on grid searched mention Algorithms could do a great job! # # 3- I didn't have the time to grid-search for some of the Algorithms and ofcourse more parameters. I applied, But we would have much better models with that.
NLP for Phone Price Prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Yaniii2021/Linear-Algebra-58019/blob/main/Final_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="76_Gfp8WHeBz"
import numpy as np
from scipy.linalg import solve
from numpy.linalg import eig

# + [markdown] id="3gWiCYWyHLkW"
# Problem 1. Student A, Student B, and Student C have a total of Php 89 cash in their banks. Student A has 6 less than Student C. Student B has 3 times what Student C has. How much does each student have? (30 points)

# + id="46Mj7SivGa6K" colab={"base_uri": "https://localhost:8080/"} outputId="b113c413-079c-4a35-81f0-7d213159bc94"
# Encode the three statements directly as equations in (A, B, C):
#   A + B + C = 89   (total cash)
#   A     - C = -6   (A has 6 less than C)
#   B    - 3C = 0    (B has 3 times what C has)
# BUG FIX: the previous system ([[1,1,1],[1,0,4],[0,0,5]] with an all-89
# right-hand side) encoded the last equation as 5C = 89, but eliminating
# A and B actually gives 5C = 95; the old answer (17.8 each for A and C)
# violated "A has 6 less than C". Correct amounts: A=13, B=57, C=19.
students = np.array([[1, 1, 1],
                     [1, 0, -1],
                     [0, 1, -3]])
total = np.array([[89],
                  [-6],
                  [0]])
per_student = np.linalg.inv(students) @ total
print(per_student)

# + [markdown] id="AsvhONaFHRli"
# Problem 2. Solve each system: (30 points)
#
# 3x - y + z = 5
#
# 9x - 3y + 3z = 15
#
# -12x + 4y - 4z = -20

# + id="z1cg5pLJHWZ2" colab={"base_uri": "https://localhost:8080/"} outputId="b8822a61-4a47-4056-94ec-1ad7549f73b5"
coefficients = np.array([[3, -1, 1], [9, -3, 3], [-12, 4, -4]])
constants = np.array([[5], [15], [-20]])
# The rows are scalar multiples of each other, so the matrix is singular and
# has no ordinary inverse; the pseudo-inverse yields the minimum-norm
# least-squares solution instead.
unknown_values = np.linalg.pinv(coefficients) @ constants
print(unknown_values)

# + colab={"base_uri": "https://localhost:8080/", "height": 451} id="OtCuchXcT5dI" outputId="c55b2c03-fcb9-4a70-8613-c502aab613c8"
coefficients = np.array([[3, -1, 1], [9, -3, 3], [-12, 4, -4]])
constants = np.array([[5], [15], [-20]])
# ROBUSTNESS FIX: np.linalg.inv raises LinAlgError for this singular matrix
# and previously aborted the run; catch it so the failure is demonstrated
# (as the markdown below discusses) without crashing the notebook.
try:
    unknown_values = np.linalg.inv(coefficients) @ constants
    print(unknown_values)
except np.linalg.LinAlgError as err:
    print("Cannot invert the coefficient matrix:", err)

# + [markdown] id="Ytj3evfkVEFs"
# For problem number 2, there was an error called "singular matrix", which means the coefficients matrix cannot be inverted. This also means its determinant is zero, so the system has no unique solution. Hence, the matrix should be non-singular (non-zero determinant) for a unique solution to exist.
#

# + [markdown] id="xmV8axRvHZ1p"
# Problem 3. Consider the matrix, (40 points)

# + id="Wy-ilPIfHcLc" colab={"base_uri": "https://localhost:8080/"} outputId="e13ba496-10b6-460d-b8d5-eb2d6f5aa2c1"
A = np.array([[8, 5, -6], [-12, -9, 12], [-3, -3, 5]])
value, vector = np.linalg.eig(A)  # eigenvalues and right eigenvectors of the square matrix A
print(f'\nThe Eigenvalue/s is/are: {value}')  # print the eigenvalues
print(f'\nThe right Eigenvectors are: \n{vector}')  # print the eigenvectors
print(f'\nThe right Eigenvectors are: \n{vector.round()}')  # print the rounded eigenvectors
Final_Exam.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Final model
#
# Train the best model on the bigger dataset and evaluate once more.

# Imports
import findspark
findspark.init()
findspark.find()
import pyspark

# Imports for creating spark session
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
conf = pyspark.SparkConf().setAppName('sparkify-capstone-model').setMaster('local')
sc = pyspark.SparkContext(conf=conf)
spark = SparkSession(sc)

# Imports for modelling, tuning and evaluation
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder

# Imports for visualization and output
import matplotlib.pyplot as plt
from IPython.display import HTML, display

# Read in dataset
# NOTE(review): setting this conf key after the SparkContext has already been
# created likely has no effect — Spark configuration is fixed at context
# creation; confirm and move before pyspark.SparkContext(...) if needed.
conf.set("spark.driver.maxResultSize", "0")
path = "out/features.parquet"
df = spark.read.parquet(path)

def createSubset(df, factor):
    """
    Return a random subset of the dataframe.

    INPUT:
        df: The dataset to split
        factor: Fraction (0..1) of the dataset to keep
    OUTPUT:
        df_subset: The randomly selected subset (approximately factor * len(df) rows)
    """
    df_subset, df_dummy = df.randomSplit([factor, 1 - factor])
    return df_subset

# +
def printConfusionMatrix(tp, fp, tn, fn):
    """
    Display a 2x2 confusion matrix as an HTML table.

    INPUT:
        tp, fp, tn, fn: true-positive, false-positive, true-negative and
            false-negative counts
    OUTPUT:
        Renders the table via IPython display; returns nothing.
    """
    html = "<table><tr><td></td><td>Act. True</td><td>False</td></tr>"
    html += "<tr><td>Pred. Pos.</td><td>{}</td><td>{}</td></tr>".format(tp, fp)
    html += "<tr><td>Negative</td><td>{}</td><td>{}</td></tr>".format(fn, tn)
    html += "</table>"
    display(HTML(html))

def showEvaluationMetrics(predictions):
    """
    Calculate and print evaluation metrics for the passed predictions.

    INPUT:
        predictions: dataframe with 'churn' label and 'prediction' columns
    OUTPUT:
        Prints the confusion matrix, F1, accuracy, recall and precision;
        returns nothing.
    """
    # Calculate true, false positives and negatives to calculate further metrics later:
    tp = predictions[(predictions.churn == 1) & (predictions.prediction == 1)].count()
    tn = predictions[(predictions.churn == 0) & (predictions.prediction == 0)].count()
    fp = predictions[(predictions.churn == 0) & (predictions.prediction == 1)].count()
    fn = predictions[(predictions.churn == 1) & (predictions.prediction == 0)].count()
    printConfusionMatrix(tp, fp, tn, fn)
    # Calculate and print metrics
    # NOTE(review): recall/precision divide by tp+fn / tp+fp — these raise
    # ZeroDivisionError if a class is entirely absent from the predictions.
    f1 = MulticlassClassificationEvaluator(labelCol = "churn", metricName = "f1") \
        .evaluate(predictions)
    accuracy = float((tp + tn) / (tp + tn + fp + fn))
    recall = float(tp / (tp + fn))
    precision = float(tp / (tp + fp))
    print("F1: ", f1)
    print("Accuracy: ", accuracy)
    print("Recall: ", recall)
    print("Precision: ", precision)

def printAUC(predictions, labelCol = "churn"):
    """
    Print the area under the ROC curve for the predictions.

    INPUT:
        predictions: the predictions to evaluate
        labelCol: name of the label column, "churn" by default
    OUTPUT:
        Prints the AUC; returns nothing.
    """
    print("Area under curve: ", BinaryClassificationEvaluator(labelCol = labelCol).evaluate(predictions))
# -

def undersampleNegatives(df, ratio, labelCol = "churn"):
    """
    Undersample the negatives (0's) in the given dataframe by ratio.

    NOTE: The "selection" method here is of course very crude and in a real
    version should be randomized and shuffled.

    INPUT:
        df: dataframe to undersample negatives from
        ratio: undersampling ratio (fraction of negatives to keep)
        labelCol: label column name in the input dataframe
    OUTPUT:
        A new dataframe with negatives undersampled by ratio
    """
    zeros = df.filter(df[labelCol] == 0)
    ones = df.filter(df[labelCol] == 1)
    zeros = createSubset(zeros, ratio)
    return zeros.union(ones)

def gbtPredictions(df_train, df_test, maxIter = 10, labelCol = "churn", featuresCol = "features"):
    """
    Fit a GBTClassifier on df_train and return its predictions on df_test.

    NOTE(review): not used in the final training cell below, which fits a
    GBTClassifier with explicit parameters directly.

    INPUT:
        df_train: the training data set
        df_test: the testing data set
        maxIter: number of maximum iterations in the gradient boost
        labelCol: the label column name, "churn" by default
        featuresCol: the features column name, "features" by default
    OUTPUT:
        predictions: the model's predictions on df_test
    """
    # Fit and train model
    gbt = GBTClassifier(labelCol = labelCol, featuresCol = featuresCol, maxIter = maxIter).fit(df_train)
    return gbt.transform(df_test)

# +
# Final training run: 90/10 split, negatives undersampled to 70% in training.
df_train, df_test = df.randomSplit([0.9, 0.1])
gbt = GBTClassifier(labelCol = "churn", featuresCol = "features", maxIter = 120, maxDepth = 5).fit(undersampleNegatives(df_train, .7))
predictions = gbt.transform(df_test)
showEvaluationMetrics(predictions)
printAUC(predictions)
# -

# Persist the trained model for later use.
gbt.save("out/model")

# Output the notebook to an html file
from subprocess import call
call(['python', '-m', 'nbconvert', 'final-model.ipynb'])
udacity/data-scientist-nanodegree/sparkify/.ipynb_checkpoints/final-model-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <NAME> 315794057 import numpy as np import pandas as pd # solution to question 1: url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv' Data = pd.read_csv(url) Data.head() # solution to question 2: # I chose Latvia becuse it begins with "L" # L_locations = Data[Data['location'].str.contains('L')] L_locations # solution to question 3: # there are 87 rows in my file, I computed the answer in the following way: Latvia = Data[Data['location'].str.contains('Latvia')] Latvia['location'].count() # solution to question 4: # finding max value of vaccination in Latvia and then find the date whice has that value max_vacc_Lat = Latvia.daily_vaccinations.max() max_vacc_Lat Latvia.loc[Latvia['date'] == max_vacc_Lat] # solution to question 5: Latvia.iloc[0:3,[0,2,6]] # solution to question 6: Latvia.daily_vaccinations.mean() # solution to question 7: Drop_Lat = Latvia.dropna(subset=['people_vaccinated_per_hundred']) Drop_Lat # solution to question 8: # hermonic mean is 1.2716135337832166. I computed the answer in the following way: # + sum_of_rows = Drop_Lat.people_vaccinated_per_hundred.sum() counter=0 for i in Drop_Lat['people_vaccinated_per_hundred']: counter = counter +(1/i) sum_of_rows/counter # - # solution to question 9: # The mean is bigger. # looking at the column, we can see although vaccination is increasing doring time, the numnbers of the vaccination isnt in a consistant growth. so the value of the median couldnt be predict. it could have been bigger the the mean or smaller then him. # # Latvia.daily_vaccinations.mean() Latvia.daily_vaccinations.median() Latvia
submitted_assiments/1/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings warnings.filterwarnings('ignore') import ipywidgets as widgets from IPython.display import display, clear_output # + # #!jupyter nbextension enable --py widgetsnbextension --sys-prefix # #!jupyter serverextension enable voila --sys-prefix # + #choose the name Name = widgets.ToggleButtons( options=['Osama', 'Wafaa', 'Rana'] ) # -
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import kaggleSurvey as pf # 아래는 로컬에 설치해야할 library들임. 없는 경우에는 따로 설치해주세요. # import matplotlib as mpl # mpl.use('Agg') # import matplotlib.pylab as plt # import seaborn as sns # sns.set() # sns.set_style("whitegrid") # sns.set_color_codes() pd.set_option('display.max_columns', 5000) pd.set_option('display.max_colwidth', -1) # Pretty display for notebooks # %matplotlib inline # Ignore the warnings import warnings warnings.filterwarnings('ignore') # + ks_1 = pf.KaggleSurvey(is_update=False) # ks_2 = pf.KaggleSurvey(is_update=False) # 업데이트 소식이 있었던 경우에는 # ks = pf.KaggleSurvey(is_update=True) # - ks_1.set_df_I_want_by_country_name(["s"]) ks_1.set_df_I_want_by_country_name(['United States of America', 'India', 'China']) # ks_2.set_df_I_want_by_country_name(['Brazil', 'Switzerland', 'South Africa', 'South Korea']) q_df = ks_1.get_q_df() q_df ks_1.draw_plot(1) ks_1.draw_plot(2)
Danial/EDA_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Train logistic regression on the first two Iris classes (sklearn and a
# hand-rolled gradient-ascent version), then search for an additive input
# perturbation that flips the hand-rolled model's prediction (a simple
# adversarial-example attack).

# +
import math
import copy
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Only the first 100 rows are used, i.e. the two linearly separable classes
# (setosa / versicolor) — a binary problem.
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X[:100], y[:100], test_size=0.20)

# Reference model: sklearn's logistic regression.
reg = LogisticRegression(solver='lbfgs', max_iter=500)
reg.fit(X_train, y_train)
print("Training accuracy is %s"% reg.score(X_train,y_train))
print("Test accuracy is %s"% reg.score(X_test,y_test))

# +
import numpy as np
from sklearn.metrics import accuracy_score

def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied elementwise."""
    z = 1 / (1 + np.exp(-x))
    return z

def add_b(dataMatrix):
    """Append a constant column of ones (the bias term) to the data matrix.

    Returns a np.mat of shape (n_samples, n_features + 1).
    """
    dataMatrix = np.column_stack((np.mat(dataMatrix),np.ones(np.shape(dataMatrix)[0])))
    return dataMatrix

def LogisticRegression_(x_train,y_train,x_test,y_test,alpha = 0.001 ,maxCycles = 500):
    """Train logistic regression by full-batch gradient ascent.

    Parameters:
        x_train, y_train: training features / 0-1 labels
        x_test, y_test: test features / 0-1 labels (used only for reporting)
        alpha: learning rate
        maxCycles: number of gradient steps
    Returns:
        weights: (n_features + 1, 1) np.mat, last entry is the bias.
    Prints train and test accuracy as a side effect.
    """
    x_train = add_b(x_train)
    x_test = add_b(x_test)
    # Column vectors for matrix arithmetic.
    y_train = np.mat(y_train).transpose()
    y_test = np.mat(y_test).transpose()
    m,n = np.shape(x_train)
    weights = np.ones((n,1))
    for i in range(0,maxCycles):
        h = sigmoid(x_train*weights)
        error = y_train - h
        # Gradient-ascent step on the log-likelihood: w += alpha * X^T (y - h).
        weights = weights + alpha * x_train.transpose() * error
    # Threshold the training predictions at 0.5 and report accuracy.
    y_pre = sigmoid(np.dot(x_train, weights))
    for i in range(len(y_pre)):
        if y_pre[i] > 0.5:
            y_pre[i] = 1
        else:
            y_pre[i] = 0
    print("Train accuracy is %s"% (accuracy_score(y_train, y_pre)))
    # Same for the test split.
    y_pre = sigmoid(np.dot(x_test, weights))
    for i in range(len(y_pre)):
        if y_pre[i] > 0.5:
            y_pre[i] = 1
        else:
            y_pre[i] = 0
    print("Test accuracy is %s"% (accuracy_score(y_test, y_pre)))
    return weights

weights = LogisticRegression_(X_train, y_train,X_test,y_test)

# +
import itertools
import copy

# Attack on LogisticRegression
def LogisticRegression_attack(weights, X_predict, y_predict):
    """Brute-force additive perturbation attack on a single sample.

    Tries adding/subtracting a fixed step to every non-empty combination of
    the 4 input features until the thresholded model prediction differs from
    y_predict, then prints the perturbed sample and its new label.

    Parameters:
        weights: trained (n_features + 1, 1) weight vector from LogisticRegression_
        X_predict: one sample, shape (1, 4) before the bias column is appended
        y_predict: the sample's original label (0 or 1)
    Returns nothing; mutates its local copy of X_predict and prints results.
    """
    X_predict = add_b(X_predict)
    # Each row of m is a one-hot vector scaled to 2.0 (0.5 * 4), i.e. the
    # per-feature perturbation step.
    # NOTE(review): if a step of 0.5 was intended, the trailing *4 is a bug —
    # confirm against the original experiment.
    m = np.diag([0.5,0.5,0.5,0.5])*4
    flag = True
    # Try perturbing 1, then 2, 3, and finally all 4 features.
    for i in range(1,5):
        for ii in list(itertools.combinations([0,1,2,3],i)):
            # Build the candidate perturbation; the bias column gets 0.
            delta = np.zeros(4)
            for jj in ii:
                delta += m[jj]
            delta = np.append(delta, 0.)
            # Test the positive direction (deepcopy avoids mutating X_predict).
            y_pre = sigmoid(np.dot(copy.deepcopy(X_predict)+delta, weights))
            if y_pre > 0.5:
                y_pre = 1
            else:
                y_pre = 0
            if y_predict != y_pre:
                X_predict += delta
                flag = False
                break
            # Test the negative direction.
            y_pre = sigmoid(np.dot(copy.deepcopy(X_predict)-delta, weights))
            if y_pre > 0.5:
                y_pre = 1
            else:
                y_pre = 0
            if y_predict != y_pre:
                X_predict -= delta
                flag = False
                break
        if not flag:
            break
    # Report the (possibly perturbed) sample and the model's label for it.
    y_pre = sigmoid(np.dot(X_predict, weights))
    if y_pre > 0.5:
        y_pre = 1
    else:
        y_pre = 0
    print('attack data: ', X_predict[0,:-1])
    print('predict label: ', y_pre)

# Attack the first test sample.
X_test_ = X_test[0:1]
y_test_ = y_test[0]
print('original data: ', X_test_)
print('original label: ', y_test_)
LogisticRegression_attack(weights, X_test_, y_test_)
# -
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![image.png](attachment:image.png) # ## Section 0: Preview # - Data Types: Number, Boolean, List, String, Dictionary and DataFrame # - Data Type Conversion: int(), float(), bool(), list(), str() # - File I/O: open(), .readlines(), .write() # - For Loop # + #Create a file named 'test.txt' and wirte three lines to it and close it. # + #Repeat every line in above 'test.txt' three times, i.e. copy each line and paste it three times to the file. Save the outputs. # + #Cut lines in above created file by words. One word, one line. Save the outputs. # - # ## Practice: # 1. Download the file from: https://juniorworld.github.io/python-workshop-2018/doc/uid.txt # 2. The file contains a list of students' HKU id # 3. Please convert them into HKU email addresses accordingly # 4. Conversion rule: "3040040896" -> "<EMAIL>" # 5. Save the results to a new file named "emails.txt" # + #Write your code here # - # ## Section 2: Built-in Function (Continued) # <strong> If/Else Statement </strong> # - If/Else Statement is used to test whether a condition is True. If yes, do something. If not, do something else. Else statement is optional. # - Format: `if logical_condition1 :... 
(else: ...)` # - Example: # >```python # if a==1: # print('yes') #Block A # else: # print('no') #Block B``` # + #print out the smaller value between variable a and b # - # <div class="alert alert-block alert-success"> # **<b>Extra Knowledge</b>** <font style='color:red;font-weight:bold;'>If/Else</font> statement can be upgraded into a <font style='color:red;font-weight:bold;'>If/Elif/Else</font> statement.</div> a=1 if a<0: print('negative') elif a==0: print('neutral') else: print('positive') # #### Practice # <img src='img/week2-decision-tree.jpg'> # + #Use If/Elif/Else statement to allocate a patient with records as below: a={'new patient':False,'unpaid bill':False} # - # ## Section 3: Build our own function # - Function is a block of reusable codes. Annotation: y=f(x), where x is a list of input variables and y is a list of output variables. # - Terminology: input variables = <b>parameters</b>, output variables = <b>returned variables</b> and their actual values = <b>arguments</b> # - <b>Global vs Local</b>: function can create its local variables that are only used inside its boundary. Local variables can use same names as global variables without overriding their values. # - Format: # >```python # def function_name(input1[,input2,input3...]): # command line # return # ``` # - The function of function is to transform x into y. Like a magic trick turning a girl into a tiger. # <img src='img/week2-function.png' width='200px'> # + #Wrap our preview If/Elif/Else statements into a customer function, which takes patient record dictionary as input and return. a={'new patient':False,'unpaid bill':False} # - # <img src='img/week2-presidents.png'> # Presidential inauguration speeches capture the sentiment of the time. # ## Practice: Inauguration Speech # Download the dataset from: https://juniorworld.github.io/python-workshop-2018/doc/presidents.rar # # <p>Expected Objectives:</p> # # 1. Total number of sentences in the speech # 2. 
Total number of words in the speech # 3. Average length of sentences # 4. Coleman–Liau index of Readablity # # #### Coleman–Liau index: # ><b>CLI = 0.0588 &ast; L - 0.296 &ast; S - 15.8</b> # <br>L is the average number of letters per 100 words and S is the average number of sentences per 100 words. presidents=['Washington','Jefferson','Lincoln','Roosevelt','Kennedy','Nixon','Reagan','Bush','Clinton','W Bush','Obama','Trump'] for president in presidents: file=open('presidents\\'+president+'.txt','r') paragraphs=file.readlines() paragaraph_count= #Write your command here sentence_count,word_count,letter_count=readablity_test(paragraphs) CLI=0.0588*(letter_count/word_count*100)-0.296*(sentence_count/word_count*100) - 15.8 if CLI <= 6: grade_level='primary' elif CLI<=12: grade_level='secondary' elif CLI<=16: grade_level='undergrad' else: grade_level='postgrad' print(president,':',sentence_count,'sentences,',word_count,'words,',round(word_count/sentence_count),'words/sentence, CLI at',round(CLI),',',grade_level,' level') def readablity_test(paragraphs): #Define a customer function readablity_test() to output sentence_count,word_count and letter_count return sentence_count,word_count,letter_count # + #Save results to a new file # - # <img src='img\week2-flow.png'> # *** # # Break # *** # ## Section 4: Data Visualization # - Pie Chart: Compare Percentages # - Bar Chart: Compare Scores across groups # - Histogram: Show frequency of values/value range # - Line Chart: Show trend of Scores # - Scatter Plot: Show Relationship between a pair of Scores # - Map: Show Geo Distribution of data # |Type|Variable Y|Variable X| # |:--:|:--:|:--:| # |Pie Chart|Fractions|None| # |Bar Chart|Numbers|Categories| # |Histogram|Integer|Categories/Value Range| # |Line Chart|Numbers|Time/Date/Period| # |Scatter Plot|Numbers|Numbers| # |Map|Latitude|Longtitude| # ### Sign up for Plot.ly # 1. Sign up for Plot.ly: https://plot.ly/Auth/login/?action=signup# # 2. 
Get your API token: Settings -> API Keys -> Regenerate Key -> Copy your newly created key # 3. Save your API key somewhere # <div class="alert alert-block alert-warning"> # **<b>Reminder</b>** Free account can only call Plot.ly API 100 times per day and generate up to 25 graphs.</div> # + import plotly.plotly as py #Import library and give it an abbreviated name import plotly.graph_objs as go #go: graph object from plotly import tools py.sign_in('USER NAME', 'API TOKEN') #fill in your user name and API token # - # *** # ## Pie Chart # + labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values) py.iplot([trace], filename='pie_chart') # + #change data labels by re-defining parameter "textinfo" # + #change color setting by re-defining "marker" parameter # + #turn the pie chart into a donut by re-defining "hole" parameter # + #change the graph size to 400*300 and add a title by re-defining "width" and "height" in "layout" labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values) layout=go.Layout(width=400,height=300,title='Gender Distribution') fig=go.Figure(data=[trace],layout=layout) py.iplot(fig, filename='pie_chart') # - # #### <font style="color: blue">Practice:</font> # --- # <font style="color: blue"> Please download the Hong Kong census data about educational attainment from <a href='https://juniorworld.github.io/python-workshop-2018/doc/Hong Kong Census Educational Attainment.csv'>this link</a>. # <p>Create a pie chart to visualize the percentages of different education levels in 2016. The pie chart should meet following requirements:</p> # 1. Donut style # 2. 
Change slice colors # </font> # + #Write down your code here #--------------------------------------------------------- # - # *** # ## Bar Chart # <br>For more details: https://plot.ly/python/reference/#bar # + x = ['Female','Male'] y = [40,20] trace = go.Bar(x=x,y=y) py.iplot([trace], filename='bar_chart') # + #Widen the gap between bars by increasing "bargap" parameters in layout # + #Grouped bar chart x = ['Female','Male'] y1 = [40,20] y2 = [30,50] trace1 = go.Bar(x=x,y=y1) trace2 = go.Bar(x=x,y=y2) fig = go.Figure(data=[trace1,trace2]) py.iplot(fig, filename='bar_chart') # + #Stacked/Relative bar chart by re-defining "barmode" in layout # + #100% Stacked bar chart by re-defining "barnorm" as "percent" in layout # - # #### <font style="color: blue">Practice:</font> # --- # <font style="color: blue"> Please refer to "Hong Kong Census Educational Attainment.csv". # <p>Create a bar chart to visualize the percentages of different education levels in different years, i.e. 2006, 2011 and 2016. The bar chart should meet following requirements:</p> # 1. A bar represents a year # 2. 100% Stacked bar chart: higher education levels stacked on top of lower ones and the bar's full length is 100% # 2. The gap between bar groups = 0.2 # </font> # + #Write down your code here #--------------------------------------------------------- # - # *** # ## Break # *** # ## Histogram # Histogram is a special type of bar chart where one's y value is its count. It is used to show data distribution: viusalize the skewness and central tendency. 
# <br>For more details: https://plot.ly/python/reference/#histogram a=[1,2,3,3,4,4,4,5,5,6,7,3,3,2] trace=go.Histogram(x=a) py.iplot([trace],filename='Histogram') # + #Change the bins by re-defining "size" parameter in xbins # + #Convert into a 100% Histogram whose y value is percentage of getting a value #Re-define the "histnorm" to a "percent" mode # + #Decrease every element in "a" by one unit to create a new list "b" #Grouped Histogram # + #Overlay Histogram of a and b #Increase the transparency by re-defining "opacity" parameter #Change color by re-defining "color" parameter in "marker" #Change the value of "barmode" parameter in layout to "overlay" # - # #### <font style="color: blue">Practice:</font> # --- # <font style="color: blue"> <font style="color: blue"> Please download YouTube Popularity data from <a href='https://juniorworld.github.io/python-workshop-2018/doc/Youtube.csv'>this link</a>. # <p>Create two Histograms to visualize the distribution of views, likes, dislikes and comments. The histograms should meet following requirements:</p> # 1. One basic histogram to show distribution of "views" # 2. One basic histogram to show distribution of "log(views)" # 3. One 100% group histogram to show distributions of log(likes), log(dislikes) and log(comments) # Hint: to apply logarithmic transformation, you can use numpy's log10 function. For example: to calcualte the logrithm of a variable "a". # </font> # # >```python # import numpy as np # a=np.log10(a)``` # + #Write your code here # - # ## Line Chart # In Plot.ly, line chart is defined as a special scatter plot whose scatters are connected by lines. 
# <br>For more details: https://plot.ly/python/reference/#scatter # + #create your first line chart a=[1,2,3] b=[10,22,34] trace1=go.Scatter(x=a,y=b,mode='lines') #mode='lines','markers','lines+markers' py.iplot([trace1],filename='line chart') # + #add markers to it by changing mode to "lines+markers" # + #make it a dashed line by re-defining the "dash" parameters in "line" #try other alternative shapes: "solid", "dot", "dash", "longdash", "dashdot", or "longdashdot" # + #fill the area below a=[1,2,3] b=[10,22,34] trace1=go.Scatter(x=a,y=b,mode='lines',fill='tozeroy') #mode='lines' py.iplot([trace1],filename='line chart') # - #add another trace to it a=[1,2,3] b=[10,22,34] c=[34,22,10] trace1=go.Scatter(x=a,y=b,mode='lines') trace2=go.Scatter(x=a,y=c,mode='lines') py.iplot([trace1,trace2],filename='line chart') # + #change the range of axis # + #stacked line chart by re-defining "stackgroup" parameter # - # #### <font style="color: blue">Practice:</font> # --- # <font style="color: blue"> <font style="color: blue"> Please download stock price data from <a href='https://juniorworld.github.io/python-workshop-2018/doc/stock.csv'>this link</a>. # <p>Create a line chart to visualize the trend of these five listed companies. The line chart should meet following requirements:</p> # 1. 
Name lines after companies # </font> # + #Write your code here # - # ## Scatter Plot # <br>For more details: https://plot.ly/python/reference/#scatter # + #create your first scatter plot a=[1,2,3,4,5] b=[10,22,34,40,50] trace1=go.Scatter(x=a,y=b,mode='markers') py.iplot([trace1],filename='scatter') # + #style the markers a=[1,2,3,4,5] b=[10,22,34,40,50] trace1=go.Scatter(x=a,y=b,mode='markers',marker={'size':10,'color':'red'}) py.iplot([trace1],filename='scatter') # + #give their names by re-defining "text" # + #assign different sizes and colors to markers # + #assign color according to values in colorscale #"Colorscale" options: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis # + #try plotting scatters in a 3D space a=[1,2,3,4,5] b=[10,22,34,40,50] c=[2,3,4,5,6] trace1=go.Scatter3d(x=a,y=b,z=c,mode='markers') py.iplot([trace1],filename='scatter') # + #Change axis titles # - # #### <font style="color: blue">Practice:</font> # --- # <font style="color: blue"> <font style="color: blue"> Please download box office data from <a href='https://juniorworld.github.io/python-workshop-2018/doc/movies.csv'>this link</a>. # <p>Create a 3D scatter plot to visualize these movies. The scatter plot should meet following requirements:</p> # 1. X axis represents "Production Budget" # 2. Y axis represents "Box Office" # 3. Z axis represents "ROI" (Return on Investment) # 4. Size scatters according to their "IMDB Ratings" # 5. Color scatters according to their "Genre" # 6. 
Name scatters after movies # </font> colors_=[] for color in colors: if color =='Comedy': colors_.extend([1]) else: colors_.extend([len(color)]) size=[i*2 for i in movies['Rating IMDB']] trace1=go.Scatter3d(x=movies['Production Budget (millions)'],y=movies['Box Office (millions)'],z=movies['ROI'],mode='markers', marker={'size':size,'color':colors_,'colorscale':'Rainbow'},text=movies['Movie']) layout=go.Layout(scene={'xaxis':{'title':'Production Budget (millions)'},'yaxis':{'title':'Box Office (millions)'},'zaxis':{'title':'ROI'}}) fig=go.Figure(data=[trace1],layout=layout) py.iplot(fig,filename='scatter') # <div class="alert alert-block alert-info"> # **<b>Tips</b>** Two tools to better work with colors in Python: # <br>1. W3S color palette: https://www.w3schools.com/colors/colors_palettes.asp # <br>2. colorlover: https://github.com/jackparmer/colorlover</div>
doc/Class3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Correlation Functions # # A correlation function is a measure of order in a system. It's main purpose is to determine the spatial relationship between a single atom or group of atoms to other surrounding atoms or groups of atoms. # # To understand the expected output of a correlation function calculation, it is easiest to start with a 1D example. # # In scenario 1 below, liquid crystal monomers are stacked in a perfect column. The distance, d, between the center of mass of each head group (head group atoms are highlighted black) is exactly the same in all cases. The associated z-direction (the z-axis runs in the same direction that the monomers stack) correlation function shows equally spaced, equally intense, sharp spikes located at d, 2d, 3d ... # # <table><tr> # <td> <img src="images/ordered_monomers_stacked.png" style="width: 300px;" caption> <td> # <td> <img src="images/ordered_correlation_function.png" style="width: 400px;"> <td> # </tr></table> # # In the more realistic scenario 2, shown below, the liquid crystal monomers are still stacked in a column, but the head groups are far less ordered. The peaks of the correlation function broaden in response to this disorder, but their maxima are still located at about the same place as scenario 1. The amplitudes of the maxima, however, decays exponentially. The exponent describing this decay is related to the **correlation length** of the system which is a measure of the distance at which two particle positions are no longer correlated. 
# # <table><tr> # <td> <img src="images/disordered_monomers_stacked.png" style="width: 300px;" caption> <td> # <td> <img src="images/disordered_correlation_function.png" style="width: 400px;"> <td> # </tr></table> # # Now, let's see how to plot a correlation function from a molecular dyanamics trajectory using the classes and functions that are a part of the python script *correlation_function.py* located in the LLC_Membranes repository in LLC_Membranes/analysis/correlation_function.py. # # For more detail, see the [Documentation](https://llc-membranes.readthedocs.io/en/latest/correlation.html) for this script! # Import necessary modules from LLC_Membranes.analysis import correlation_function # + # Define path where trajectory files are located, and the names of the .gro and trajectory files in that directory path = "/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11/MET/10wt" gro = 'berendsen.gro' traj = 'PR_nojump.xtc' # + # Start by initializing the correlation function with the class 'Correlation' atoms = ['C', 'C1', 'C2', 'C3', 'C4', 'C5'] # we will calculated the center of mass of these atoms residue = 'HII' # name of residue to which atoms belong (the name that appears in the second column of the .gro file) bins = [100, 100, 100] # The unit cell will be histogrammed. Specify the number of bins in the x, y and z directions. g = correlation_function.Correlation('%s/%s' %(path, gro), trajectory='%s/%s' %(path, traj), atoms=atoms, res=residue, bins=bins) # - # At this point, we have processed the trajectory so that it is in the correct format for calculating the 3d correlation function. Two main steps just took place: # 1. We narrowed the trajectory down to only the atoms we are interested in. If this was a group of atoms, then the center of mass of each group was also calculated. # 2. We converted the unit cell to a cube. 
We can't easily apply a discrete fourier transform to a hexagonal unit cell, so the \__init__ function transforms the coordinates into a cubic cell. This transformation will need to be reversed later. # + # Now perform the calculation g.calculate_correlation_function() # that's it # - # Now we have a 3D correlation function which is stored in an array that can be accessed by calling the attribute *correlation3d*. Each dimesion of it's shape should be equal to n - 1 of the bin dimensions. print(g.correlation3d.shape) # The 3D correlation function has a ton of information, but it's difficult to visualize and extract meaningful conclusions from it. Instead, we can plot 1D and 2D slices of it. # # Currently, only 1D slices are supported. More slices can be added if necessary in the future. # # Let's slice the correlation function straight up the z-axis. # + axis = 2 # the axis along which to slice. x = 0, y = 1, z = 2 g.make_slice(axis) # - # We ran into an error! (If you didn't, which is possible, just continue to follow along) The error stems from the fact that the center of mass of a given head group is not directly stacked on top of any other head group. The correlation function is 0 at all points, so we run into a divided by zero error. Sure enough, printing the slice will show a bunch of NaNs (not a number). print(g.slice) # In the actual LLC membrane system, the head groups are not single points that interact. They are clouds of atoms and electrons whose center of mass does not need to be situated directly above another in order to interact or be considered stacked. In fact, pi-stacking is most stable when phenyl rings are slightly offset from one another on the xy plane. # # To overcome the issue, we can define a radius, R. Any center of mass that is within this radius, on the xy plane, can be counted as a part of the 1D correlation function. 
# # <img src="images/head_group_labeled.png" style="width: 300px;"> r = 0.225 # approximately the van-der-waals radius of benzene g.make_slice(axis, radius=r) # No error! g.plot_slice(2) # We have now plotted a correlation function. But you'll notice that it appears roughly symmetric. That is because we are working with a periodic system. A particle situated at the origin feels about the same amount of correlation from a particle 1 nm above it as it does 1 nm below it. Therefore, we can safely cut off the correlation function halfway without losing any information. limits = ([0, 4.5], []) # Plot between 0 and 4.5 on the x-axis. Do not impose any limits on the y-axis (i.e. let matplotlib choose) g.plot_slice(2, limits=limits)
notebooks/Correlation Function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + slideshow={"slide_type": "skip"} from IPython.display import IFrame from IPython.display import Markdown # Additional styling ; should be moved into helpers from IPython.core.display import display, HTML HTML('<style>{}</style>'.format(open('rise.css').read())) # + [markdown] slideshow={"slide_type": "slide"} # # Class 2C: Introduction to Programming in Python II # # We will begin at 12:00 PM! Until then, feel free to use the chat to socialize, and enjoy the music! # # # <div align = "right"> # July 16, 2021 <br> # <NAME> # </div> # + [markdown] slideshow={"slide_type": "slide"} # ## Announcements # + [markdown] slideshow={"slide_type": "fragment"} # 1. Lab 1 Feedback is now released, please check the feedback! # + [markdown] slideshow={"slide_type": "fragment"} # 2. Milestone 1 was due last night at 6 PM # + [markdown] slideshow={"slide_type": "fragment"} # 3. The "Teamwork Reflection" portion of milestone 1 is in Learning Log 2 # + [markdown] slideshow={"slide_type": "fragment"} # 4. Test 1 window will start this evening at 6 PM! # - [Test 1 details are here](../test1.md) # - Make sure to check and read the rules carefully! # + [markdown] slideshow={"slide_type": "fragment"} # 5. Lab 2 is due tomorrow, don't forget about the explainer video! # + [markdown] slideshow={"slide_type": "fragment"} # 6. Alternative way of learning Python (through slides) # + [markdown] slideshow={"slide_type": "slide"} # ## Loading Data into a Jupyter Notebook # + [markdown] slideshow={"slide_type": "fragment"} # The last task of milestone 1 is to load in your data. # Sorry about this, but I forgot to mention this on Monday (ran out of time), so I'll mention it now. 
# + [markdown] slideshow={"slide_type": "fragment"} # ### To load your dataset into a Jupyter notebook: # # ``` # # - start a new Jupyter Lab session in your project repo # # - Create a new notebook in the analysis directory/folder # # - import pandas as pd # # - Use the pd.read_csv('path_to_data') # # import pandas as pd # pd.read_csv('../data/raw/data.csv') # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ### Demo # + slideshow={"slide_type": "fragment"} # Live Demo # + [markdown] slideshow={"slide_type": "slide"} # # Python II # + [markdown] slideshow={"slide_type": "fragment"} # In this class, we go through a notebook by a former colleague, Dr. <NAME>, option co-director of the UBC-Vancouver MDS program. # # If you prefer, you can also watch his recording of the same material. # + [markdown] slideshow={"slide_type": "skip"} # <div class="youtube"> # <iframe class="responsive-iframe" height="350px" width="622px" src="https://www.youtube-nocookie.com/embed/7FLv1ACEl-E" frameborder="0" allow="accelerometer; autoplay="0"; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> # </div> # + [markdown] slideshow={"slide_type": "subslide"} # ## Class Outline # # - Comments (0 min) # - Why Python? (0 min) # - Loops (15 min) # - Comprehensions (5 min) # - Functions intro (10 min) # - DRY principle (15 min) # - Break (5 min) # - Keyword arguments (5 min) # - Docstrings (10 min) # - Unit tests, corner cases (10 min) # - Multiple return values (5 min) # + [markdown] slideshow={"slide_type": "subslide"} # ### Attribution # # - The original version of these Python lectures were by [<NAME>](https://www.math.ubc.ca/~pwalls/). # - These lectures were delivered by [<NAME>](https://mikegelbart.com) and are [available publicly here](https://www.youtube.com/watch?v=7FLv1ACEl-E&list=PLWmXHcz_53Q26aQzhknaT3zwWvl7w8wQE&index=2). 
# + [markdown] slideshow={"slide_type": "subslide"} # ## Comments in python (0 min) # + slideshow={"slide_type": "fragment"} x = 1 # this is a comment # + slideshow={"slide_type": "fragment"} """ this is a string, which does nothing and can be used as a comment """ 7 x = 1 # + [markdown] slideshow={"slide_type": "subslide"} # ## Why Python? (0 min) # # - Why did we choose Python in Data 301? # - Extremely popular in DS (and beyond!) # - Relatively easy to learn # - Good documentation # - **Huge user community** # - Lots of Stack Overflow and other forums # - Lots of useful packages (more onm this next week) # + [markdown] slideshow={"slide_type": "subslide"} # ## Loops (10 min) # + [markdown] slideshow={"slide_type": "fragment"} # - Loops allow us to execute a block of code multiple times. # - We will focus on [`for` loops](https://docs.python.org/3/tutorial/controlflow.html#for-statements) # + slideshow={"slide_type": "fragment"} for n in [2, 7, -1, 5]: print("The number is", n, "its square is", n**2) # this is inside the loop print(n) # this is outside the loop n # + [markdown] slideshow={"slide_type": "fragment"} # The main points to notice: # # * Keyword `for` begins the loop # * Colon `:` ends the first line of the loop # * We can iterate over any kind of iterable: list, tuple, range, string. In this case, we are iterating over the values in a list # * Block of code indented is executed for each value in the list (hence the name "for" loops, sometimes also called "for each" loops) # * The loop ends after the variable `n` has taken all the values in the list # + slideshow={"slide_type": "fragment"} "abc" + "def" # + slideshow={"slide_type": "fragment"} word = "Python" for letter in word: print("Gimme a " + letter + "!") print("What's that spell?!! " + word + "!") # + [markdown] slideshow={"slide_type": "fragment"} # - A very common pattern is to use `for` with `range`. # - `range` gives you a sequence of integers up to some value. 
# + slideshow={"slide_type": "fragment"} for i in range(0,10): print(i) # + [markdown] slideshow={"slide_type": "fragment"} # We can also specify a start value and a skip-by value with `range`: # + slideshow={"slide_type": "fragment"} for i in range(1,101,10): print(i*2) # + [markdown] slideshow={"slide_type": "fragment"} # We can write a loop inside another loop to iterate over multiple dimensions of data. Consider the following loop as enumerating the coordinates in a 3 by 3 grid of points. # + slideshow={"slide_type": "fragment"} for x in [1,2,3]: for y in ["a","b","c"]: print((x,y)) # + slideshow={"slide_type": "fragment"} list_1 = [1,2,3] list_2 = ["a","b","c"] for i in range(3): print(list_1[i], list_2[i]) # + [markdown] slideshow={"slide_type": "fragment"} # We can loop through key-value pairs of a dictionary using `.items()`: # + slideshow={"slide_type": "fragment"} courses = {521 : "awesome", 551 : "riveting", 511 : "naptime!"} for course_num, description in courses.items(): print("DSCI", course_num, "is", description) # + slideshow={"slide_type": "fragment"} for course_num in courses: print(course_num, courses[course_num]) # + [markdown] slideshow={"slide_type": "fragment"} # Above: the general syntax is `for key, value in dictionary.items():` # + [markdown] slideshow={"slide_type": "subslide"} # #### `while` loops # # - We can also use a [`while` loop](https://docs.python.org/3/reference/compound_stmts.html#while) to excute a block of code several times. # - In reality, I rarely use these. # - Beware! If the conditional expression is always `True`, then you've got an infintite loop! # - (Use the "Stop" button in the toolbar above, or Ctrl-C in the terminal, to kill the program if you get an infinite loop.) 
# + slideshow={"slide_type": "fragment"} n = 10 while n > 0: print(n) n = n - 1 print("Blast off!") # + [markdown] slideshow={"slide_type": "subslide"} # ## Comprehensions (5 min) # # Comprehensions allow us to build lists/tuples/sets/dictionaries in one convenient, compact line of code. # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} words = ["hello", "goodbye", "the", "antidisestablishmentarianism"] y = [word[-1] for word in words] # list comprehension y # + slideshow={"slide_type": "fragment"} y = list() for word in words: y.append(word[-1]) y # + slideshow={"slide_type": "fragment"} y = (word[-1] for word in words) # this is NOT a tuple comprehension - more on generators later print(y) # + slideshow={"slide_type": "fragment"} y = {word[-1] for word in words} # set comprehension print(y) # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} word_lengths = {word : len(word) for word in words} # dictionary comprehension word_lengths # + word_lengths = {} for word in words: word_lengths[word] = len(words) word_lengths # + [markdown] slideshow={"slide_type": "subslide"} # ## Functions intro (5 min) # # - Define a [**function**](https://docs.python.org/3/tutorial/controlflow.html#defining-functions) to re-use a block of code with different input parameters, also known as **arguments**. # - For example, define a function called `square` which takes one input parameter `n` and returns the square `n**2`. 
# + slideshow={"slide_type": "fragment"} def square(n): n_squared = n**2 return n_squared # + slideshow={"slide_type": "fragment"} square(2) # + slideshow={"slide_type": "fragment"} square(100) # + slideshow={"slide_type": "fragment"} square(12345) # + [markdown] slideshow={"slide_type": "fragment"} # * Begins with `def` keyword, function name, input parameters and then colon (`:`) # * Function block defined by indentation # * Output or "return" value of the function is given by the `return` keyword # + [markdown] slideshow={"slide_type": "subslide"} # #### Side effects # # - If a function changes the variables passed into it, then it is said to have **side effects** # - Example: # + slideshow={"slide_type": "fragment"} def silly_sum(sri): sri.append(0) return sum(sri) # + slideshow={"slide_type": "fragment"} silly_sum([1,2,3,4]) # + [markdown] slideshow={"slide_type": "fragment"} # Looks good, like it sums the numbers? But wait... # # + slideshow={"slide_type": "fragment"} lst = [1,2,3,4] silly_sum(lst) # - silly_sum(lst) # + slideshow={"slide_type": "fragment"} lst # + [markdown] slideshow={"slide_type": "fragment"} # - If you function has side effects like this, you must mention it in the documentation (later today). # - In general avoid! # + [markdown] slideshow={"slide_type": "subslide"} # #### Null return type # # If you do not specify a return value, the function returns `None` when it terminates: # + slideshow={"slide_type": "fragment"} def f(x): x + 1 # no return! if x == 999: return print(f(0)) # + [markdown] slideshow={"slide_type": "subslide"} # ## DRY principle, designing good functions (15 min) # # - DRY: **Don't Repeat Yourself** # - See [Wikipedia article](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) # - Consider the task of, for each element of a list, turning it into a palindrome # - e.g. 
"mike" --> "mikeekim" # + slideshow={"slide_type": "fragment"} names = ["milad", "rodolfo", "tiffany","Firas"] # + slideshow={"slide_type": "fragment"} name = "mike" name[::-1] # + slideshow={"slide_type": "fragment"} names_backwards = list() names_backwards.append(names[0] + names[0][::-1]) names_backwards.append(names[1] + names[1][::-1]) names_backwards.append(names[2] + names[2][::-1]) names_backwards.append(names[3] + names[3][::-1]) names_backwards # + [markdown] slideshow={"slide_type": "fragment"} # - Above: this is gross, terrible, yucky code # 1. It only works for a list with 3 elements # 2. It only works for a list named `names` # 3. If we want to change its functionality, we need to change 3 similar lines of code (Don't Repeat Yourself!!) # 4. It is hard to understand what it does just by looking at it # + slideshow={"slide_type": "fragment"} names_backwards = list() for name in names: names_backwards.append(name + name[::-1]) names_backwards # + [markdown] slideshow={"slide_type": "fragment"} # Above: this is slightly better. We have solved problems (1) and (3). # + slideshow={"slide_type": "fragment"} def make_palindromes(names): names_backwards = list() for name in names: names_backwards.append(name + name[::-1]) return names_backwards make_palindromes(names) # + [markdown] slideshow={"slide_type": "fragment"} # - Above: this is even better. We have now also solved problem (2), because you can call the function with any list, not just `names`. 
# - For example, what if we had multiple _lists_: # + slideshow={"slide_type": "fragment"} names1 = ["milad", "rodolfo", "tiffany"] names2 = ["Trudeau", "Scheer", "Singh", "Blanchet", "May"] names3 = ["apple", "orange", "banana"] # + slideshow={"slide_type": "fragment"} names_backwards_1 = list() for name in names1: names_backwards_1.append(name + name[::-1]) names_backwards_1 # + slideshow={"slide_type": "fragment"} names_backwards_2 = list() for name in names2: names_backwards_2.append(name + name[::-1]) names_backwards_2 # + slideshow={"slide_type": "fragment"} names_backwards_3 = list() for name in names3: names_backwards_3.append(name + name[::-1]) names_backwards_3 # + [markdown] slideshow={"slide_type": "fragment"} # Above: this is very bad also (and imagine if it was 20 lines of code instead of 2). This was problem (2). Our function makes it much better: # + slideshow={"slide_type": "fragment"} make_palindromes(names1) # + slideshow={"slide_type": "fragment"} make_palindromes(names2) # + slideshow={"slide_type": "fragment"} make_palindromes(names3) # + [markdown] slideshow={"slide_type": "subslide"} # - You could get even more fancy, and put the lists of names into a list (so you have a list of lists). # - Then you could loop over the list and call the function each time: # + slideshow={"slide_type": "fragment"} for list_of_names in [names1, names2, names3]: print(make_palindromes(list_of_names)) # + [markdown] slideshow={"slide_type": "subslide"} # #### Designing good functions # + [markdown] slideshow={"slide_type": "fragment"} # - How far you go with this is sort of a matter of personal style, and how you choose to apply the DRY principle: DON'T REPEAT YOURSELF! # - These decisions are often ambiguous. For example: # - Should `make_palindromes` be a function if I'm only ever doing it once? Twice? # - Should the loop be inside the function, or outside? # - Or should there be TWO functions, one that loops over the other?? 
# + [markdown] slideshow={"slide_type": "fragment"} # - In my personal opinion, `make_palindromes` does a bit too much to be understandable. # - I prefer this: # + slideshow={"slide_type": "fragment"} def make_palindrome(name): return name + name[::-1] make_palindrome("milad") # + [markdown] slideshow={"slide_type": "fragment"} # - From here, we want to "apply `make_palindrome` to every element of a list" # - It turns out this is an extremely common desire, so Python has built-in functions. # - One of these is `map`, which we'll cover later. But for now, just a comprehension will do: # + slideshow={"slide_type": "fragment"} [make_palindrome(name) for name in names] # + [markdown] slideshow={"slide_type": "subslide"} # Other function design considerations: # # - Should we print output or produce plots inside or outside functions? # - I would usually say outside, because this is a "side effect" of sorts # - Should the function do one thing or many things? # - This is a tough one, hard to answer in general # + [markdown] slideshow={"slide_type": "subslide"} # ## Break (5 min) # + [markdown] slideshow={"slide_type": "subslide"} # ## Optional & keyword arguments (5 min) # # - Sometimes it is convenient to have _default values_ for some arguments in a function. # - Because they have default values, these arguments are optional, hence "optional arguments" # - Example: # + slideshow={"slide_type": "fragment"} def repeat_string(s, n=2): return s*n # + slideshow={"slide_type": "fragment"} repeat_string("mds", 2) # + slideshow={"slide_type": "fragment"} repeat_string("mds-", 5) # + slideshow={"slide_type": "fragment"} repeat_string("mds") # do not specify `n`; it is optional # + [markdown] slideshow={"slide_type": "subslide"} # Sane defaults: # # - Ideally, the default should be carefully chosen. # - Here, the idea of "repeating" something makes me think of having 2 copies, so `n=2` feels like a sane default. 
# + [markdown] slideshow={"slide_type": "fragment"} # Syntax: # # - You can have any number of arguments and any number of optional arguments # - All the optional arguments must come after the regular arguments # - The regular arguments are mapped by the order they appear # - The optional arguments can be specified out of order # + slideshow={"slide_type": "fragment"} def example(a, b, c="DEFAULT", d="DEFAULT"): print(a,b,c,d) example(1,2,3,4) # + [markdown] slideshow={"slide_type": "fragment"} # Using the defaults for `c` and `d`: # + slideshow={"slide_type": "fragment"} example(1,2) # + [markdown] slideshow={"slide_type": "fragment"} # Specifying `c` and `d` as **keyword arguments** (i.e. by name): # + slideshow={"slide_type": "fragment"} example(1,2,c=3,d=4) # + [markdown] slideshow={"slide_type": "subslide"} # Specifying only one of the optional arguments, by keyword: # + slideshow={"slide_type": "fragment"} example(1,2,c=3) # + [markdown] slideshow={"slide_type": "fragment"} # Or the other: # + slideshow={"slide_type": "fragment"} example(1,2,d=4) # + [markdown] slideshow={"slide_type": "fragment"} # Specifying all the arguments as keyword arguments, even though only `c` and `d` are optional: # + slideshow={"slide_type": "fragment"} example(a=1,b=2,c=3,d=4) # + [markdown] slideshow={"slide_type": "fragment"} # Specifying `c` by the fact that it comes 3rd (I do not recommend this because I find it is confusing): # + slideshow={"slide_type": "fragment"} example(1,2,3) # + [markdown] slideshow={"slide_type": "fragment"} # Specifying the optional arguments by keyword, but in the wrong order (this is also somewhat confusing, but not so terrible - I am OK with it): # + slideshow={"slide_type": "fragment"} example(1,2,d=4,c=3) # + [markdown] slideshow={"slide_type": "fragment"} # Specifying the non-optional arguments by keyword (I am fine with this): # + slideshow={"slide_type": "fragment"} example(a=1,b=2) # + [markdown] slideshow={"slide_type": "fragment"} # 
Specifying the non-optional arguments by keyword, but in the wrong order (not recommended, I find it confusing): # + slideshow={"slide_type": "fragment"} example(b=2,a=1) # + [markdown] slideshow={"slide_type": "fragment"} # Specifying keyword arguments before non-keyword arguments (this throws an error): # + slideshow={"slide_type": "fragment"} example(a=2,1) # + [markdown] slideshow={"slide_type": "subslide"} # - In general, I am used to calling non-optional arguments by order, and optional arguments by keyword. # - The language allows us to deviate from this, but it can be unnecessarily confusing sometimes. # + [markdown] slideshow={"slide_type": "fragment"} # #### Advanced stuff (optional): # # - You can also call/define functions with `*args` and `**kwargs`; see, e.g. [here](https://realpython.com/python-kwargs-and-args/) # - Do not instantiate objects in the function definition - see [here](https://docs.python-guide.org/writing/gotchas/) under "Mutable Default Arguments" # + slideshow={"slide_type": "fragment"} def example(a, b=[]): # don't do this! return 0 # + slideshow={"slide_type": "fragment"} def example(a, b=None): # insted, do this if b is None: b = [] return 0 # + [markdown] slideshow={"slide_type": "subslide"} # ## Docstrings (10 min) # + [markdown] slideshow={"slide_type": "fragment"} # - We got pretty far above, but we never solved problem (4): It is hard to understand what it does just by looking at it # - Enter the idea of function documentation (and in particular docstrings) # - The [docstring](https://www.python.org/dev/peps/pep-0257/) goes right after the `def` line. # + slideshow={"slide_type": "fragment"} def make_palindrome(string): """Turns the string into a palindrome by concatenating itself with a reversed version of itself.""" return string + string[::-1] # + [markdown] slideshow={"slide_type": "fragment"} # In IPython/Jupyter, we can use `?` to view the documentation string of any function in our environment. 
# + slideshow={"slide_type": "fragment"} # make_palindrome? # + slideshow={"slide_type": "fragment"} # print? # + [markdown] slideshow={"slide_type": "fragment"} # #### Docstring structure # # 1. **Single-line**: If it's short, then just a single line describing the function will do (as above). # 2. **PEP-8 style** Multi-line description + a list of arguments; see [here](https://www.python.org/dev/peps/pep-0257/). # 3. **Scipy style**: The most elaborate & informative; see [here](https://numpydoc.readthedocs.io/en/latest/format.html) and [here](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html). # # + [markdown] slideshow={"slide_type": "subslide"} # The PEP-8 style: # + slideshow={"slide_type": "fragment"} def make_palindrome(string): """ Turns the string into a palindrome by concatenating itself with a reversed version of itself. Arguments: string - (str) the string to turn into a palindrome """ return string + string[::-1] # + slideshow={"slide_type": "fragment"} # make_palindrome? # + [markdown] slideshow={"slide_type": "subslide"} # The scipy style: # + slideshow={"slide_type": "fragment"} def make_palindrome(string): """ Turn a string into a palindrome. Turns the string into a palindrome by concatenating itself with a reversed version of itself, so that the returned string is twice as long as the original. Parameters ---------- string : str The string to turn into a palindrome. Returns ------- str The new palindrome string. Examples -------- >>> make_palindrome("abc") "abccba" """ return string + string[::-1] # + slideshow={"slide_type": "fragment"} make_palindrome( # press shift-tab HERE to get docstring!! # + [markdown] slideshow={"slide_type": "fragment"} # Below is the general form of the scipy docstring (reproduced from the scipy/numpy docs): # + slideshow={"slide_type": "fragment"} def function_name(param1,param2,param3): """First line is a short description of the function. 
A paragraph describing in a bit more detail what the function does and what algorithms it uses and common use cases. Parameters ---------- param1 : datatype A description of param1. param2 : datatype A description of param2. param3 : datatype A longer description because maybe this requires more explanation and we can use several lines. Returns ------- datatype A description of the output, datatypes and behaviours. Describe special cases and anything the user needs to know to use the function. Examples -------- >>> function_name(3,8,-5) 2.0 """ # + [markdown] slideshow={"slide_type": "subslide"} # #### Docstrings with optional arguments # # When specifying the parameters, we specify the defaults for optional arguments: # + slideshow={"slide_type": "fragment"} # PEP-8 style def repeat_string(s, n=2): """ Repeat the string s, n times. Arguments: s -- (str) the string n -- (int) the number of times (default 2) """ return s*n # + slideshow={"slide_type": "fragment"} # scipy style def repeat_string(s, n=2): """ Repeat the string s, n times. Parameters ---------- s : str the string n : int, optional (default = 2) the number of times Returns ------- str the repeated string Examples -------- >>> repeat_string("Blah", 3) "BlahBlahBlah" """ return s*n # + [markdown] slideshow={"slide_type": "subslide"} # #### Automatically generated documentation # # - By following the docstring conventions, we can _automatically generate documentation_ using libraries like [sphinx](http://www.sphinx-doc.org/en/master/), [pydoc](https://docs.python.org/3.7/library/pydoc.html) or [Doxygen](http://www.doxygen.nl/). # - For example: compare this [documentation](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html) with this [code](https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/neighbors/classification.py#L23). # - Notice the similarities? 
The webpage was automatically generated because the authors used standard conventions for docstrings! # + [markdown] slideshow={"slide_type": "fragment"} # #### What makes good documentation? # # - What do you think about this? # + slideshow={"slide_type": "fragment"} ################################ # # NOT RECOMMENDED TO DO THIS!!! # ################################ def make_palindrome(string): """ Turns the string into a palindrome by concatenating itself with a reversed version of itself. To do this, it uses the Python syntax of `[::-1]` to flip the string, and stores this in a variable called string_reversed. It then uses `+` to concatenate the two strings and return them to the caller. Arguments: string - (str) the string to turn into a palindrome Other variables: string_reversed - (str) the reversed string """ string_reversed = string[::-1] return string + string_reversed # + [markdown] slideshow={"slide_type": "fragment"} # <br><br> # # - This is poor documentation! More is not necessarily better! # - Why? # - # + [markdown] slideshow={"slide_type": "subslide"} # ## [Optional] Unit tests, corner cases (10 min) # + [markdown] slideshow={"slide_type": "fragment"} # #### `assert` statements # # - `assert` statementS cause your program to fail if the condition is `False`. # - They can be used as sanity checks for your program. # - There are more sophisticated way to "test" your programs, which we'll discuss in DSCI 524. # - The syntax is: # # ```python # assert expression , "Error message if expression is False or raises an error." # ``` # + slideshow={"slide_type": "fragment"} assert 1 == 2 , "1 is not equal to 2." # + [markdown] slideshow={"slide_type": "subslide"} # #### Systematic Program Design # + [markdown] slideshow={"slide_type": "fragment"} # A systematic approach to program design is a general set of steps to follow when writing programs. Our approach includes: # # 1. 
Write a stub: a function that does nothing but accept all input parameters and return the correct datatype. # 2. Write tests to satisfy the design specifications. # 3. Outline the program with pseudo-code. # 4. Write code and test frequently. # 5. Write documentation. # # The key point: write tests BEFORE you write code. # # - You do not have to do this in MDS, but you may find it surprisingly helpful. # - Often writing tests helps you think through what you are trying to accomplish. # - It's best to have that clear before you write the actual code. # + [markdown] slideshow={"slide_type": "subslide"} # #### Testing woes - false positives # # - **Just because all your tests pass, this does not mean your program is correct!!** # - This happens all the time. How to deal with it? # - Write a lot of tests! # - Don't be overconfident, even after writing a lot of tests! # + slideshow={"slide_type": "fragment"} def sample_median(x): """Finds the median of a list of numbers.""" x_sorted = sorted(x) return x_sorted[len(x_sorted)//2] assert sample_median([1,2,3,4,5]) == 3 assert sample_median([0,0,0,0]) == 0 # + [markdown] slideshow={"slide_type": "fragment"} # Looks good? ... ? # # <br><br><br><br><br> # + slideshow={"slide_type": "fragment"} assert sample_median([1,2,3,4]) == 2.5 # + [markdown] slideshow={"slide_type": "fragment"} # <br><br><br><br><br> # + slideshow={"slide_type": "fragment"} assert sample_median([1,3,2]) == 2 # + [markdown] slideshow={"slide_type": "subslide"} # #### Testing woes - false negatives # # - It can also happen, though more rarely, that your tests fail but your program is correct. # - This means there is something wrong with your test. # - For example, in the autograding for lab1 this happened to some people, because of tiny roundoff errors. # + [markdown] slideshow={"slide_type": "fragment"} # #### Corner cases # # - A **corner case** is an input that is reasonable but a bit unusual, and may trip up your code. 
# - For example, taking the median of an empty list, or a list with only one element. # - Often it is desirable to add test cases to address corner cases. # + slideshow={"slide_type": "fragment"} assert sample_median([1]) == 1 # + [markdown] slideshow={"slide_type": "fragment"} # - In this case the code worked with no extra effort, but sometimes we need `if` statements to handle the weird cases. # - Sometimes we want the code to throw an error (e.g. median of an empty list); more on this later. # + [markdown] slideshow={"slide_type": "subslide"} # ## Multiple return values (0 min) # # - In most (all?) programming languages I've seen, functions can only return one thing. # - That is technically true in Python, but there is a "workaround", which is to return a tuple. # + slideshow={"slide_type": "fragment"} # not good from a design perspective! def sum_and_product(x, y): return (x+y, x*y) # + slideshow={"slide_type": "fragment"} sum_and_product(5,6) # + [markdown] slideshow={"slide_type": "fragment"} # In some cases in Python, the parentheses can be omitted: # + slideshow={"slide_type": "fragment"} def sum_and_product(x, y): return x+y, x*y # + slideshow={"slide_type": "fragment"} sum_and_product(5,6) # + [markdown] slideshow={"slide_type": "fragment"} # It is common to store these in separate variables, so it really feels like the function is returning multiple values: # + slideshow={"slide_type": "fragment"} s, p = sum_and_product(5, 6) # + slideshow={"slide_type": "fragment"} s # + slideshow={"slide_type": "fragment"} p # + [markdown] slideshow={"slide_type": "fragment"} # - Question: is this good function design. # - Answer: usually not, but sometimes. # - You will encounter this in some Python packages. 
# + [markdown] slideshow={"slide_type": "fragment"} # Advanced / optional: you can ignore return values you don't need with `_`: # + slideshow={"slide_type": "fragment"} s, _ = sum_and_product(5, 6) # + slideshow={"slide_type": "fragment"} s # + [markdown] slideshow={"slide_type": "subslide"} # #### Fun with tuples # # In general, you can do some weird stuff with tuples: # + slideshow={"slide_type": "fragment"} a, b = 5, 6 # + slideshow={"slide_type": "fragment"} a, b = (5, 6) # + slideshow={"slide_type": "fragment"} a, b = b, a # in other languages this requires a "temp" variable # + slideshow={"slide_type": "fragment"} a # + slideshow={"slide_type": "fragment"} b
notes/week04/Class4B/Class2C.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
# -

# ### See Web Page

# +
# Fetch the raw HTML of the live Nepal COVID dashboard.
URL = "https://coronanepal.live/"
html_page = requests.get(URL).text

# +
#can uncomment below text to see output
#html_page
# -

# ### Convert them into html format
#
# i.e. < !DOCTYPE html > type

# Parse the raw HTML into a navigable BeautifulSoup tree.
soup = BeautifulSoup(html_page,'html.parser')
#print(soup)

# ## Data of various districts

# #### Now on the site we want to scrap we must inspect the page i.e. F12 and find out the id of the table we want to scrape

# +
#example is the html id of the table we want to scrape
get_table = soup.find("table",id='example')
#get_table

# +
#geting body under tag <tr> and </tr>
get_table_data = get_table.tbody.find_all("tr")
#get_table_data

# +
#for easiness of converting to dataframe we chose dic
dic = {}
for i in range(len(get_table_data)):
    #all td is one digit list so [0] will bring out the value and string gives the required district name
    key = get_table_data[i].find_all("td")[0].string
    #print(key)
    #gives all the values including some unrequired value but it can be easily modified by pandas
    values = [a.string for a in get_table_data[i].find_all("td")]
    #print(key,values)
    dic[key] = values

# Drop the first three rows of each column, then transpose so districts
# become the row index.
data=pd.DataFrame(dic).iloc[3:,:].T
# BUG FIX: the original wrote `data.index_name = ...`, which just creates an
# unused attribute on the DataFrame object. `index.name` is the real pandas
# attribute that labels the index ("जिल्ला" = "District").
data.index.name = 'जिल्ला'
columns_name = ['कुल संक्रमित','जम्मा मृत्यु','निको भएको']
data.columns = columns_name
data
# -

data.head()

data.tail()

data.info()

data.describe()

# ## Total data of Nepal and World

# The third 'col-lg-3' div holds the national/world summary card.
get_table_1 = soup.findAll('div',attrs={'class':'col-lg-3'})
get_table_1=get_table_1[2]

# +
#get_table_1
# -

# Headings (h4) carry labels; h1 elements carry the country names.
get_table_data_1 = get_table_1.find_all(["h4",'h1'])
get_table_data_1

# Elements 0 and 9 are the country names ("Nepal", "World"); everything else
# alternates label, value, label, value, ...
country = []
data2=[]
for i in range(len(get_table_data_1)):
    if i == 0 or i ==9:
        country.append(get_table_data_1[i].text)
    else:
        value = get_table_data_1[i].text
        data2.append(value)

country

data2

# Pair up label/value entries 0..7 for the first country (Nepal).
dic2={}
for i in range(0,7,2):
    key = data2[i]
    value = data2[i+1]
    dic2[key]=value
dic2

# Pair up label/value entries 8.. for the second country (World).
dic3={}
for i in range(8,len(data2)-1,2):
    key = data2[i]
    value = data2[i+1]
    dic3[key]=value
dic3

data2=pd.DataFrame([dic2,dic3]).T
data2.columns = country
data2.T

# ## Gender Based on Nepal
#

# The third 'col-lg-6' div holds the gender-breakdown card.
get_table_2 = soup.findAll('div',attrs={'class':'col-lg-6'})
get_table_2=get_table_2[2]

get_table_data_2 = get_table_2.find_all("h4")
get_table_data_2

# Collect the headings (skipping the card title), stripping the closing ')'.
l = []
for i in range(1,len(get_table_data_2)):
    l.append(get_table_data_2[i].text.replace(')',""))
print(l)

# Odd entries look like "count (percent"; split them into [count, percent].
for i in range(1,len(l),2):
    l[i]= (l[i].split('('))
l

# CONSISTENCY FIX: use a fresh name here instead of reusing `dic3` (the World
# summary dict above) — the old value was already consumed into `data2`, so
# behavior is unchanged, but shadowing made the notebook confusing to read.
gender_dic={}
for i in range(0,len(l)-1,2):
    key = l[i]
    value = l[i+1]
    gender_dic[key] = value
gender_dic

data3=pd.DataFrame(gender_dic).T

# +
columns = ['जम्मा संक्रमित',"संक्रमित प्रतिशत"]
data3.columns = columns
data3
# -

# ### convert

def convert(a):
    """Convert a Devanagari-digit numeral string to ASCII digits.

    Also strips thousands-separator commas, so the result can be fed
    straight to ``pd.to_numeric``. Returns the converted string.
    """
    c = {'०':'0','१':'1','२':'2','३':'3','४':'4','५':'5','६':'6','७':'7','८':'8','९':'9',',':''}
    for key,value in c.items():
        a = a.replace(key,value)
    return a

def remove_comma(a):
    """Return *a* with every comma removed.

    BUG FIX: the original assigned ``a.replace(...)`` to the local ``a`` and
    returned nothing, so every call returned ``None``.
    """
    return a.replace(',','')

convert('१८,०४५,५६८')

# Convert the district table's Devanagari numerals to numeric dtypes.
for i in data.columns:
    data[i] = data[i].replace(',','')
    data[i]=data[i].apply(lambda x: convert(x))

for i in ['कुल संक्रमित','जम्मा मृत्यु','निको भएको']:
    data[i]=pd.to_numeric(data[i])

data.info()

data

# Same conversion for the Nepal/World summary table; blank column names are
# skipped because they hold no data.
data2.fillna("",inplace=True)
for i in data2.columns:
    if i == '':
        continue
    data2[i]=data2[i].apply(lambda x: convert(x))
data2

data2.info()

for i in data2.columns:
    data2[i]=pd.to_numeric(data2[i])

data2
.ipynb_checkpoints/web scraping of nepal covid data-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # `fmri-10`: Searchlight analysis

# This demo introduces searchlight analysis using the familiar visual object
# recognition dataset from [Haxby et al., 2001](https://doi.org/10.1126/science.1063736).
# The goal of searchlight analysis is to apply spatially localized multivariate
# pattern analysis (MVPA) throughout the brain
# ([Kriegeskorte et al., 2006](https://doi.org/10.1073/pnas.0600244103)). This
# brings us back to the question of *localization*, allowing us to "search"
# throughout the brain for local patterns of activity containing information
# about our experimental manipulation. In searchlight analysis, we apply MVPA
# within local neighborhoods of voxels—typically defined as a sphere or cube in
# volumetric space, or a disk in surface space. These local neighborhoods are
# iteratively centered at each voxel in the brain and the MVPA result for a
# given neighborhood is assigned to the center voxel.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# ### Visual object recognition dataset

# We'll start demo-ing RSA with our familiar visual object recognition fMRI
# dataset from [Haxby et al., 2001](https://doi.org/10.1126/science.1063736).
# Recall that participants were presented with images from 8 object categories
# (bottles, cats, chairs, faces, houses, scissors, scrambled images, and shoes)
# interspersed with periods of fixation (referred to as "rest" here). The TR in
# this study was 2.5 seconds. In a given run, a block of images from each of
# the 8 categories was presented one time. Each block was ~9 TRs long and
# contained multiple rapid presentations of images from a single category. A
# subject received 12 scanning runs. We'll focus on data from one subject for
# the purposes of this demo.

# +
# Load the Haxby 2001 dataset
from nilearn import datasets
from nilearn.image import concat_imgs, index_img, mean_img, new_img_like
from nilearn.input_data import NiftiMasker

# Course-cluster cache directory; the commented-out call below fetches to the
# default nilearn data directory instead.
data_dir = '/jukebox/PNI-classes/students/NEU502/2021/nilearn-data'
#haxby_dataset = datasets.fetch_haxby()
haxby_dataset = datasets.fetch_haxby(data_dir=data_dir)
# First (and only fetched) subject's 4D functional NIfTI file.
func_file = haxby_dataset.func[0]
# -

# Rather than focusing on ventral temporal (VT) cortex, we want to run the
# searchlight analysis throughout the whole brain. For this example, we'll use
# Nilearn's `NiftiMasker` to automatically generate a whole-brain mask based on
# the mean EPI image. (We use this approach because it yields a more
# conservative mask than the whole-brain mask provided with the dataset.)

# +
# Use nilearn to automagically create an EPI brain mask
func_mean = mean_img(func_file)
masker = NiftiMasker(mask_strategy='epi').fit(func_mean)
mask = masker.mask_img_
# -

# ### Runwise GLM

# Prior to the upcoming searchlight analysis, we perform a first-level GLM to
# account for confounds and extract regression coefficients ("betas")
# corresponding to each stimulus category. For searchlight classification
# analysis, we'll perform separate GLMs for each scanning run (runwise GLM),
# yielding 8 betas corresponding to the 8 stimulus categories for each run. We
# perform a runwise GLM so that we can cross-validate across runs in the
# searchlight classification analysis.

# +
# Load in session metadata as pandas DataFrame
session = pd.read_csv(haxby_dataset.session_target[0], sep=" ")

# Extract stimuli and run labels for this subject
stimuli, runs = session['labels'].values, session['chunks'].values

# Get list of unique stimulus categories (excluding rest)
categories = np.array([c for c in np.unique(stimuli) if c != 'rest'])
# -

# Split functional image according to runs
func_runs = []
for run in np.unique(runs):
    func_runs.append(index_img(func_file, runs == run))
    print(f"Indexed run {run + 1} functional image")

# +
# Build first-level GLM for each run
from nilearn.glm.first_level import (make_first_level_design_matrix,
                                     FirstLevelModel)

# Set parameters for your design matrix
tr = 2.5
hrf_model = 'spm'
drift_model = 'Cosine'
high_pass = 1/128  # high-pass cutoff in Hz (128 s period)

# Build a design matrix for each run; onsets are TR-locked since labels are
# given per TR, and rest TRs are dropped so only the 8 categories get
# regressors (drift/confound columns are added by nilearn).
design_matrices = []
for run in np.unique(runs):
    stimuli_run = stimuli[runs == run]
    n_trs = len(stimuli_run)
    onsets = tr * np.arange(n_trs)
    duration = np.full(n_trs, tr)
    events_all = pd.DataFrame(
        {'onset': onsets,
         'trial_type': stimuli_run,
         'duration': duration})
    events = events_all[events_all['trial_type'] != 'rest']
    design_matrix = make_first_level_design_matrix(
        onsets, events, hrf_model=hrf_model,
        drift_model=drift_model, high_pass=high_pass)
    design_matrices.append(design_matrix)

# +
# Fit runwise GLM separately for each runs; collect one contrast (beta) map
# per category per run — 8 categories x 12 runs = 96 maps total, with
# parallel lists recording each map's category and run label.
glm_rw = FirstLevelModel(t_r=tr, mask_img=mask,
                         standardize=True, noise_model='ar1')

maps_rw = []
categories_rw = []
runs_rw = []
for run, (func_run, design_matrix) in enumerate(zip(func_runs, design_matrices)):
    glm_rw.fit(func_run, design_matrices=design_matrix)

    # Collate contrast maps for VT
    for category in categories:
        maps_rw.append(glm_rw.compute_contrast(category))
        categories_rw.append(category)
        runs_rw.append(run)
    print(f"Finished fitting GLM for run {run + 1}")

# Concatenate images for separate runs
maps_rw = concat_imgs(maps_rw)
# -

# ### Searchlight classification analysis

# We now supply the whole-brain beta maps and accompanying category labels to
# a searchlight classification analysis. We first specify a very simple 3-fold
# cross-validation scheme to reduce computation time. We define our searchlight
# with a 5 mm radius (corresponding to two 2.5 mm voxels) and run the
# searchlight within the automated whole-brain mask. Within each searchlight,
# we'll deploy a support vector machine (SVM) classifier with default
# parameters using Nilearn's `'svc'` shortcut. (Note that this will take
# several minutes to run!)

# +
# Set up simple cross-validation scheme
# NOTE(review): KFold here is unshuffled, so with maps ordered run-by-run each
# fold groups contiguous runs — presumably intended as a cheap stand-in for
# leave-runs-out CV; confirm against the lesson plan.
from sklearn.model_selection import KFold
cv = KFold(n_splits=3)

# Set up searchlight parameters
from nilearn.decoding import SearchLight
n_jobs = 8
radius = 5.0  # searchlight sphere radius in mm
searchlight = SearchLight(mask, process_mask_img=mask,
                          radius=radius, estimator='svc',
                          n_jobs=n_jobs, verbose=1, cv=cv)

# Fit the searchlight (this takes time!)
searchlight.fit(maps_rw, categories_rw)

# +
# Convert searchlight output to NIfTI image and save
import nibabel as nib
searchlight_img = new_img_like(func_mean, searchlight.scores_)
nib.save(searchlight_img, 'haxby_searchlight_svc.nii.gz')

# +
# Plot searchlight classification
from nilearn.plotting import plot_stat_map
cut_coords = (30, -30, -5)
plot_stat_map(searchlight_img, func_mean, cut_coords=cut_coords);

# +
# Get coordinate of best-performing searchlight (voxel index of the max
# cross-validated accuracy in the scores volume)
max_sl = np.unravel_index(np.argmax(searchlight_img.get_fdata()),
                          searchlight_img.shape)
print("Best searchlight accuracy: "
      f"{searchlight_img.get_fdata()[max_sl]:.3f}")
# -

# #### References

# * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2001). Distributed
#   and overlapping representations of faces and objects in ventral temporal
#   cortex. *Science*, *293*(5539), 2425–2430.
#   https://doi.org/10.1126/science.1063736
#
# * <NAME>., <NAME>., & <NAME>. (2006). Information-based functional brain
#   mapping. *Proceedings of the National Academy of Sciences of the United
#   States of America*, *103*(10), 3863-3868.
#   https://doi.org/10.1073/pnas.0600244103
fmri-10/fmri-10-searchlight.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>

# + [markdown] papermill={} tags=[]
# # Trello - Get board data
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Trello/Trello_Get_board_data.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>

# + [markdown] papermill={} tags=[]
# **Tags:** #trello #project #board

# + [markdown] papermill={} tags=[]
# ## Input

# + [markdown] papermill={} tags=[]
# ### Import library

# + papermill={} tags=[]
import trello_connector

# + [markdown] papermill={} tags=[]
# ## Model

# + [markdown] papermill={} tags=[]
# ### Variables

# + papermill={} tags=[]
# Fill in your Trello API credentials before running.
token = ""
key = ""
board_id = "VCmIpC16"
export = "xls"

# + [markdown] papermill={} tags=[]
# - `token` and `key` can be obtained from the Trello developer dashboard.
# - `board_id` is the unique ID of a Trello board; it appears in the board's URL.
# - `export` selects the output format: either `csv` or `xls`.

# + [markdown] papermill={} tags=[]
# ## Output

# + [markdown] papermill={} tags=[]
# ### Get board data

# + papermill={} tags=[]
# Download the board's data and export it in the requested format.
df = trello_connector.main(key, token, board_id, export)
Trello/Trello_Get_board_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # dependencies import pandas as pd import numpy as np # load and read in CSV file file_to_load = "purchase_data.csv" # create a variable to reference csv file and store into pandas df purchase_data = pd.read_csv(file_to_load) purchase_data.head() # # Player Count # # * Run basic calculations to obtain number of unique items, average price, etc. # * Display the total number of players # count 'SN' values total_count = purchase_data["SN"].value_counts() total_count = len(total_count) total_count # count 'Item ID' values unique_items = purchase_data["Item ID"].value_counts() unique_items = len(unique_items) unique_items # calculate avg 'price' avg_price = purchase_data["Price"].mean() avg_price # count 'Purchase IDs' total_purchases = purchase_data["Purchase ID"].count() total_purchases # add values in 'Price' total_revenue = purchase_data["Price"].sum() total_revenue total_players_df = pd.DataFrame({"Total Number of Players": [total_count]}) total_players_df # # Purchasing Analysis (Total) # # * Create a summary data frame to hold the results # * Optional: give the displayed data cleaner formatting # * Display the summary data frame # + # create new summary df summary_df = pd.DataFrame({"Number of Unique Items": [unique_items], "Average Price": [avg_price], "Number of Purchases": [total_purchases], "Total Revenue": [total_revenue]}).head() summary_df["Average Price"]=summary_df["Average Price"].map("${:.2f}".format) summary_df["Total Revenue"]=summary_df["Total Revenue"].map("${:.2f}".format) summary_df.head() # - # # Gender Demographics # # * Percentage and Count of Male Players # * Percentage and Count of Female Players # * Percentage and Count of Other / Non-Disclosed # filter by drop duplicates function gender_df = purchase_data.loc[:, ["Gender", 
"SN"]].drop_duplicates() gender = len(gender_df) gender # filter only males only_male = gender_df.loc[gender_df["Gender"] == "Male", :] male = len(only_male) male # filter only females only_female = gender_df.loc[gender_df["Gender"] == "Female", :] female = len(only_female) female # filter only other/ non-disclosed only_other = gender_df.loc[gender_df["Gender"] == "Other / Non-Disclosed", :] other = len(only_other) other # + # create percent calc variables male_perc = (male / gender) * 100 female_perc = (female / gender) * 100 other_perc = (other / gender) * 100 # create tuple variable (tutor suggestion) gender_data = [(male, male_perc) , (female, female_perc) , (other, other_perc)] # create a new dataframe, adding in % .map format gender_demo_df = pd.DataFrame(gender_data, columns=["Total Count", "Percentage of Players"], index=["Male", "Female", "Other"]) gender_demo_df["Percentage of Players"] = gender_demo_df["Percentage of Players"].map("{:.2f}%".format) gender_demo_df # - # # Purchasing Analysis (Gender) # # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
by gender # * Create a summary data frame to hold the results # * Optional: give the displayed data cleaner formatting # * Display the summary data frame # # # group by gender groupby = purchase_data.groupby("Gender") # gender counts (unique players) gender_total = groupby["SN"].nunique() gender_total # count total purchases by gender purchase_count = groupby["Purchase ID"].count() purchase_count # avg purchase price by gender avg_price = groupby["Price"].mean() avg_price # total spend by gender total_price = groupby["Price"].sum() total_price # avg total spend (per person) avg_total_purchase = total_price / gender_total avg_total_purchase # + # create new dataframe for purchasing analysis (gender) calc purchase_analysis_df = pd.DataFrame({"Purchase Count": purchase_count, "Average Purchase Price": avg_price, "Total Purchase Value": total_price, "Average Total Purchase PP": avg_total_purchase}) # reformat dataframe with '$' added purchase_analysis_df["Average Purchase Price"]=purchase_analysis_df["Average Purchase Price"].map("${:.2f}".format) purchase_analysis_df["Total Purchase Value"]=purchase_analysis_df["Total Purchase Value"].map("${:.2f}".format) purchase_analysis_df["Average Total Purchase PP"]=purchase_analysis_df["Average Total Purchase PP"].map("${:.2f}".format) purchase_analysis_df # - # # Age Demographics # # * Establish bins for ages # * Categorize the existing players using the age bins. 
Hint: use pd.cut() # * Calculate the numbers and percentages by age group # * Create a summary data frame to hold the results # * Optional: round the percentage column to two decimal points # * Display Age Demographics Table # # + # using starter to determine age ranges bins = [0, 9, 14.9, 19.9, 24.9, 29.9, 34.9, 39.9, 99.9] # create age ranges for the bins age_bins = ["<10", "10 - 14", "15 - 19", "20 - 24", "25 - 29", "30 - 34","35 - 39", "40+"] # read csv and create a column to categorize existing players by 'age ranges' purchase_data["Age Ranges"] = pd.cut(purchase_data["Age"], bins, labels = age_bins) purchase_data # - # create a groupby 'age ranges' age_ranges = purchase_data.groupby("Age Ranges") # + # count the number of players in each age range age_count = age_ranges["SN"].nunique() # calc percentage of players in each age range age_percent = (age_count / total_count * 100) # create a dataframe breakdown for age demographics of players age_demo_df = pd.DataFrame({"Total Count": age_count, "Percentage of Players": age_percent}) # reformat Percentage of Players using % .map format age_demo_df["Percentage of Players"] = age_demo_df["Percentage of Players"].map("{:.2f}%".format) age_demo_df # - # # Purchasing Analysis (Age) # # * Bin the purchase_data data frame by age # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
in the table below # * Create a summary data frame to hold the results # * Optional: give the displayed data cleaner formatting # * Display the summary data frame # + # age_count = age_ranges["SN"].nunique() *refer back to this groupby variable for actual number of players within each age range # count total purchases by age range purchase_count = age_ranges["SN"].count() # average purchase price by age range avg_purchase_price = age_ranges["Price"].mean() # total purchase values by age range total_purchases = age_ranges["Price"].sum() # average total purchase per person avg_purchase_pp = total_purchases / age_count # + # create a dataframe with above calc by age purchase_analysis_df = pd.DataFrame({"Purchase Counts (Age)": purchase_count, "Avg Purchase Price": avg_purchase_price, "Total Purchases (Age)":total_purchases, "Avg Total Purchase PP": avg_purchase_pp}) # reformat selected columns with '$' using the .map formatting purchase_analysis_df["Avg Purchase Price"]=purchase_analysis_df["Avg Purchase Price"].map("${:.2f}".format) purchase_analysis_df["Total Purchases (Age)"]=purchase_analysis_df["Total Purchases (Age)"].map("${:.2f}".format) purchase_analysis_df["Avg Total Purchase PP"]=purchase_analysis_df["Avg Total Purchase PP"].map("${:.2f}".format) purchase_analysis_df # - # # Top Spenders # # * Run basic calculations to obtain the results in the table below # * Create a summary data frame to hold the results # * Sort the total purchase value column in descending order # * Optional: give the displayed data cleaner formatting # * Display a preview of the summary data frame # + top_spenders = purchase_data.groupby("SN") purchase_count_spend = top_spenders["SN"].count() avg_purchase_price_spend = top_spenders["Price"].mean() total_purchase_spend = top_spenders["Price"].sum() top_spenders_df = pd.DataFrame({"Purchase Count": purchase_count_spend, "Average Purchase Price": avg_purchase_price_spend, "Total Purchase Value":total_purchase_spend}) top_spenders_df # + 
# sort the total purchase value column in descending order * use ascending=False to show descending values top_spenders_df = top_spenders_df.sort_values(["Total Purchase Value"], ascending=False).head() # reformat the columns with '$' using the .map function top_spenders_df["Average Purchase Price"]=top_spenders_df["Average Purchase Price"].map("${:.2f}".format) top_spenders_df["Total Purchase Value"]=top_spenders_df["Total Purchase Value"].map("${:.2f}".format) top_spenders_df.head() # - # # Most Popular Items # # * Retrieve the Item ID, Item Name, and Item Price columns # * Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value # * Create a summary data frame to hold the results # * Sort the purchase count column in descending order # * Optional: give the displayed data cleaner formatting # * Display a preview of the summary data frame # + # retrieve the Item ID, Item Name, and Item Price columns most_popular = purchase_data[["Item ID", "Item Name", "Price"]] # groupby 'Item ID' and 'Item Name' popular_items = most_popular.groupby(["Item ID", "Item Name"]) # count purchases for popular items purchase_count_items = popular_items["Item ID"].count() # prices of popular items item_price = popular_items["Price"].mean() # total purchase value by item total_purchase_value = popular_items["Price"].sum() # - # create dataframe to summarize results most_popular_df = pd.DataFrame({"Purchase Count": purchase_count_items, "Item Price": item_price, "Total Purchase Value": total_purchase_value}) most_popular_df # + # sort the purchase count column by descending order most_popular_df = most_popular_df.sort_values(["Purchase Count"], ascending=False).head() # reformat the columns with '$' using the .map function most_popular_df["Item Price"]=most_popular_df["Item Price"].map("${:.2f}".format) most_popular_df["Total Purchase Value"]=most_popular_df["Total Purchase Value"].map("${:.2f}".format) most_popular_df.head() # - 
# # Most Profitable Items # # * Sort the above table by total purchase value in descending order # * Optional: give the displayed data cleaner formatting # * Display a preview of the data frame # sort the total purchase value column by descending order most_popular_df = most_popular_df.sort_values(["Total Purchase Value"], ascending=False).head() most_popular_df.head() # + # after all of this, I can't seem to get the formatting perfect at the finish line...frustrating!
HeroesOfPymoli/Heroes_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: waf_tutorial_part1 # language: python # name: waf_tutorial_part1 # --- # # Notebook 04: Simple ML Classification # # ### Goal: Basic training a ML using a single feature/predictor/input and a single ML model # # #### Reminder of Problem Statement # # Before we jump into the ML, I want to remind you of the ML task we want to accomplish in the paper. # # 1. Does this image contain a thunderstorm? <-- Classification # 2. How many lightning flashes are in this image? <-- Regression # # #### Background # # For the training of machine learning we will use the [scikit-learn](https://scikit-learn.org/stable/) python package. Scikit-learn (also known as sklearn) has a wealth of resources for learning how to apply many ML methods. Furthermore, their documentation on any one method is extensive and very helpful. I encourage you to use their documentation if you want to use more that what we will show you in this tutorial. # # What is really nice about the sklearn package is that all of its models all work off the same syntax. In general this is how you use any of their models: # # 1. Create or load your input data. It must be of shape ```[n_samples,n_features]```. It is commonly written as ```X``` in all of the sklearn documentation. # 2. Create or load your output data. It must be of shape ```[n_samples]```. It is commonly written as ```y``` in all of the sklearn documentation # 3. Initialize your model. To initialize a model in python usually you just need to add ```()``` at the end of the model name. # 4. Fit your model. To do this all we will need to do is apply the ```.fit(X,y)``` to your initialized model # 5. Evaluate your trained model. To get the model predictions for evaluation we will use the ```model.predict(X_val)``` or the ```model.predict_proba(X_val)```. 
# # Now that we have the basic work flow, lets actually do this with the simple example in the paper, 1 input predictor. We will start with the classification task first # # #### Step 1 & 2: Import packages and load data for Classification # Last notebook, we showed you how to do train/val and test data splits. We will do that all with our pre-made function now. But we only want 1 feature, which is feature 0. So all we need to change is the ```features_to_keep``` keyword to get just the first feature. # + #needed packages import xarray as xr import matplotlib.pyplot as plt import numpy as np import pandas as pd #plot parameters that I personally like, feel free to make these your own. import matplotlib matplotlib.rcParams['axes.facecolor'] = [0.9,0.9,0.9] #makes a grey background to the axis face matplotlib.rcParams['axes.labelsize'] = 14 #fontsize in pts matplotlib.rcParams['axes.titlesize'] = 14 matplotlib.rcParams['xtick.labelsize'] = 12 matplotlib.rcParams['ytick.labelsize'] = 12 matplotlib.rcParams['legend.fontsize'] = 12 matplotlib.rcParams['legend.facecolor'] = 'w' matplotlib.rcParams['savefig.transparent'] = False #make default resolution of figures much higher (i.e., High definition) # %config InlineBackend.figure_format = 'retina' #import some helper functions for our other directory. import sys sys.path.insert(1, '../scripts/') from aux_functions import load_n_combine_df (X_train,y_train),(X_validate,y_validate),(X_test,y_test) = load_n_combine_df(path_to_data='../datasets/sevir/',features_to_keep=np.arange(0,1,1),class_labels=True) # - # Let's print out the shapes of things to make sure they match what we expected, ```[n_samples,n_features]``` for ```X``` and ```[n_samples]``` for ```y``` print('X_train, y_train shapes: {},{}'.format(X_train.shape,y_train.shape)) print('X_val, y_val shapes: {},{}'.format(X_validate.shape,y_validate.shape)) print('X_test, y_test shapes: {},{}'.format(X_test.shape,y_test.shape)) # Okay, things look correct. 
Our ```n_samples``` match of each dataset, and ```X``` is indeed ```[n_samples,n_features]```. Let us reproduce Figure 8 to *gauge* the predictive power of the minimum brightness temperature from the clean infrared channel. To do this, we need to find which samples have lightning flashes and which do not. We will use the ```np.where``` to find the indicies of ```X``` and ```y``` where the label (i.e., ```y```) is equal to 1. When you set ```class_labels=True``` in the ```load_n_combine_df``` it will already label ```y``` as either 0 where there are no flashes, and 1 where there is greater than or equal to 1 flash in the image. #find samples with more at least 1 flash idx_flash = np.where(y_train == 1)[0] #find samples with no flashes idx_noflash = np.where(y_train == 0)[0] # For those who have never used the ```np.where``` function before, we can double check it worked by looking at a few histograms. # + #show us the whole dataset plt.hist(y_train,bins=[-0.01,0.01,0.99,1.01],edgecolor='k') #show us the flash data plt.figure() plt.hist(y_train[idx_flash],bins=[-0.01,0.01,0.99,1.01],edgecolor='k') #show us the no flash data plt.figure() plt.hist(y_train[idx_noflash],bins=[-0.01,0.01,0.99,1.01],edgecolor='k') # - # Now that things look to be working properly, lets make the histogram of brightness temperatures. The expectation is that images with flashes in them will have generally colder minimum brightness temperatures since they will likely have stronger storms (on average). # + #this is something to help make the ticks show up where I want them from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator) #this is an array of bins I wish to do the histogram with, in degC xbins = np.arange(-100,50,2) #some colors I like. 
The order of ratios is [Red,Green,Blue] r = [255/255,127/255,127/255] b = [126/255,131/255,248/255] #make figure fig = plt.figure(figsize=(5,5)) #set background color to white so we can copy paste out of the notebook if we want fig.set_facecolor('w') #get axis for drawing ax = plt.gca() #use matplotlib histogram function ax.hist(X_train[idx_flash,0],density=True,bins=xbins,color=b,alpha=0.5,zorder=0,label='T-storm',edgecolor=b) ax.hist(X_train[idx_noflash,0],density=True,bins=xbins,color=r,alpha=0.5,zorder=0,label='No T-Storm',edgecolor=r) #add title and axis labels ax.set_title('Minimum IR') ax.set_ylabel(r'Normalized count') ax.set_xlabel("$T_{b}$, [$\degree$C]") #set tick locations ax.xaxis.set_minor_locator(MultipleLocator(5)) ax.yaxis.set_minor_locator(MultipleLocator(0.001)) #force xlimit ax.set_xlim([-100,50]) #turn grid on ax.grid('on') #draw legend ax.legend() plt.tight_layout() # - # Great! It follows our expectation. While there is considerable overlap between the two classes, there does seem to be a good decision boundary to separate the two. Let's start out by training a logistic regression first. # # #### Step 3: Initialize model # # Like I mentioned in the background, to initialize we can use the ```()``` after the model name. This is also where you can choose various hyperparameters. For now, just leave it empty, it will use the default parameters, which is a good place to start # + #load model from sklearn from sklearn.linear_model import LogisticRegression #initialize model = LogisticRegression() print(model) # - # #### Step 4: Train your ML model! # # This is it, we have finally made it to actually doing some machine learning, are you ready? I am! So let's go! model = model.fit(X_train,y_train) # <img src="../images/congrats.gif" width="400" height="200" class="center" /> # # Thats it! Congrats, you have officially trained your first ML model. 
I know, it seems a bit underwhelming because there is no fanfare from python, but to make you feel better enjoy the gif. After you are done celebrating, time to move on to evaluating how well it did! # # #### Step 5: Evaluate your ML model # # In order to evaluate your model, we will need the predictions of the model on the validation dataset. We will use the ```model.predict()``` method to get a prediction value for each sample yhat = model.predict(X_validate) # I name the prediction vector yhat (i.e., $\hat{y}$) to match usual math notation. But you can name it ypred if you want. This vector is a label for each sample, so we could look at a histogram of the results, or we can calculate some more informative metrics. # # ##### a). Contingency Table: Accuracy # # To get those more informative metrics, lets get the contingency table which is: # # # | | Yes | No| # | ----------- | :----: | :----: | # | **Yes** | Hits | False Alarms| # | **No** | Misses | Correct Nulls | # # The left column is the model predictions, while the top row is the observations (true labels). Each spot in the table is filled out based on: # - if both the model and observation predict **Yes** (both predict a label of 1; which is a hit or *True Positive*) # - if the model outputs a **Yes** but the observation is **No** (model=1,obs=0; False Alarm or *False Positive*) # - if the model says **No** but the observation says **Yes** (model=0,obs=1; Miss or *False Positive*) # - if the model and the observation say **No** (model=0,obs=0; Correct Null or *True Negative*) # # We have coded this up for you, so all we need to use is the function called ```get_contingency_table``` from ```gewitter_functions.py```. These functions were original written by Dr. Lagerquist during his PhD studies, but have been adapted by me to be able to run out of the box with just the ```gewitter_functions.py``` script. 
# + from gewitter_functions import get_contingency_table #the contingency table calculator expects y_true,y_pred cont_table = get_contingency_table(y_validate,yhat) #the function returns a dictionary (thats what the curly brackets mean) of each entry in the table. print(cont_table) # - # Now that we have the contingency table, we can calculate a simple metric, accuracy. Which is: # # $$ Accuracy = \frac{TruePositives + TrueNegatives}{SumOfAllCategories} \times{100}$$ # # for conciseness, we have made a function to do this named ```get_accuracy``` from gewitter_functions import get_acc accuracy = get_acc(cont_table) print('Accuracy = {}%'.format(np.round(accuracy,0))) # 81% accuracy is good! Given we only use 1 input! But as pointed out in the paper, it is good to always use more than 1 evaluation metric. # # ##### b). Contingency Table: Performance Diagram # # <img src="../images/Blank_Performance_Diagram.png" width="400" height="200" class="center" /> # # One of my favorite diagrams to gauge model skill is a performance diagram (shown above). I have added some hopefully helpful annotations that show you where better performing models (top-right) and worse performing models (bottom left, top left or bottom right) show up on the diagram. # # To make this diagram we need to calculate 2 variables, the Probability of Detection (POD; y-axis) and Success Ratio (SR; x-axis), both that are calculated from the contingency table. # # $$ \mathrm{POD} = \frac{TruePositives}{TruePositives + FalseNegatives} $$ # # $$ \mathrm{SR} = \frac{TruePositives}{TruePositives + FalsePositives} $$ # # One perk of this diagram is that we can glean the *Critical Success Index* (CSI) from the same diagram. CSI is formulated as: # # $$ \mathrm{CSI} = \frac{TruePositives}{TruePositives + FalsePositives + FalseNegatives} $$ # # What is great about the performance diagram is that it helps us evaluate ML tasks that might have unbalanced data. 
By unbalanced data, I mean if the scenario we are trying to predict is rare (e.g., hail, tornadoes). What happens in rare scenarios is that the data naturally have a ton of Null cases, so some metrics that consider TrueNegatives in the numerator (e.g., Accuracy) might be inflated. # # We have included the calculation of these parameters as functions as well ```get_pod```, ```get_sr```, ```csi_from_sr_and_pod```. # + from gewitter_functions import get_pod,get_sr,csi_from_sr_and_pod pod = get_pod(cont_table) sr = get_sr(cont_table) csi = csi_from_sr_and_pod(sr,pod) print('POD:{}, SR:{}, CSI:{}'.format(np.round(pod,2),np.round(sr,2),np.round(csi,2))) # - # Now that we have our calculated values, you could put these in a table, or you could put it on the performance diagram. We made a function to make the performance diagram axis for us, named ```make_performance_diagram``` from gewitter_functions import make_performance_diagram_axis ax = make_performance_diagram_axis() # the function returns the axis (```ax```) so we can plot on it. ax = make_performance_diagram_axis() ax.plot(sr,pod,'o',color='dodgerblue',markerfacecolor='w'); # and BAM. You have plotted your model's performance on the performance diagram. Turns out this model is doing very well, with a large POD and SR and is near the center line. # # #### Step 6: Save your trained model # # Sometimes loading the training dataset and re-training the model each time can be cumbersome. There is a way to save the trained models. We will use the python ```joblib``` package to do this. # # **Notes:** # # 1. Storage efficiency: While most of the models in sklearn can be stored in a decent file size, your random forest model could become large (on the order of 1 GB). I know a GB is not alot of space nowadays, but it is important to note. This is also why the trained random forest from the paper is not found in the ```datasets``` folder. # 2. 
Keep note of your ```sklearn``` version: ```joblib``` will load the model to the syntax of the ```sklearn``` that it is saved with. So if you are getting some odd error with ```sklearn``` while loading the saved model, check to make sure the version hasn’t changed. import joblib name = 'LogisticRegression.pkl' start_path = '../datasets/sklearnmodels/classification/onefeature/' savefile = open(start_path + name,'wb') joblib.dump(model, savefile, 9) #the 9 here is the highest compression. Slows the time of saving/loading but saves disk space # #### Step 7: Load a saved model # # Now that you have it saved, if you need to load it do the following: import joblib name = 'LogisticRegression.pkl' start_path = '../datasets/sklearnmodels/classification/onefeature/' #notice the change from wb to rb savefile = open(start_path + name,'rb') #notice the change from dump to load model = joblib.load(savefile) print(model) # In the next notebook we will look at the Regression problem. If you want to continue on with the classification, check out Notebook 6.
jupyter_notebooks/Notebook04_SimpleMLClassification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # TAKE ALL REVIEWS(POS NEG AND UNSUP) AND MERGE THEM IN A SINGLE TEXT FILE WITH ONE REVIEW PER LINE (merged.txt) # PERFORM TEXT TOKENIZATION # REWRITE ALL THE TOKENIZED CORPUS AS A SINGLE WORDS SEQUENCE SEPARATED BY A BLANK SPACE (new_merged). # This file is used as input for GloVe training. # - from os import listdir from os.path import isfile, join from nltk import RegexpTokenizer tokenizer = RegexpTokenizer(r'\w+') # new_path = "./" old_path = "./dataset/imdb/train/" # support file merged = open("./merged.txt", "w") # + current_directory = old_path + 'pos' files = [f for f in listdir(current_directory) if ( isfile(join(current_directory, f)) and (f[0] != '.') ) ] for f in files: t = open(current_directory+'/'+f, 'r') for line in t: if line: line = line.replace('<br /><br />', ' ') merged.write(line+'\n') t.close() current_directory = old_path + 'neg' files = [f for f in listdir(current_directory) if ( isfile(join(current_directory, f)) and (f[0] != '.') ) ] for f in files: t = open(current_directory+'/'+f, 'r') for line in t: if line: line = line.replace('<br /><br />', ' ') merged.write(line+'\n') t.close() current_directory = old_path + 'unsup' files = [f for f in listdir(current_directory) if ( isfile(join(current_directory, f)) and (f[0] != '.') ) ] for f in files: t = open(current_directory+'/'+f, 'r') for line in t: if line: line = line.replace('<br /><br />', ' ') merged.write(line+'\n') t.close() merged.close() # - corpus = open("./merged.txt", "r") new_corpus = open("./GloVe-1.2/new_merged", "w") # + lines = corpus.readlines() text = '' # put everithing in one line for line in lines: text = text + line corpus.close() # perform TOKENIZATION, returns a vector of words words = tokenizer.tokenize(text.lower()) # rewrite this vector in a 
file in which words are separated by blank spaces for w in words: new_corpus.seek(0, 2) new_corpus.write(w + ' ') new_corpus.close() # -
glove_training_preparation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt dfx = pd.read_csv('xdata.csv') dfy = pd.read_csv('ydata.csv') X = dfx.values Y = dfy.values print(X.shape, Y.shape) dfx.head() X = X[:, 1:] Y = Y[:, 1:] Y = Y.reshape((-1,)) print(X.shape, Y.shape) plt.scatter(X[:, 0], X[:, 1], c=Y) plt.show() plt.style.use('seaborn') plt.scatter(X[:, 0], X[:, 1], c=Y) plt.show() query_x = np.array([0,3]) print(query_x) plt.scatter(X[:, 0], X[:, 1], c=Y) plt.scatter(query_x[0], query_x[1], color = 'red') plt.show() def dist(x1, x2): return np.sqrt(sum((x1-x2)**2)) def knn(X, Y, query_point, k=5): vals = [] m = X.shape[0] for i in range(m): d = dist(query_point, X[i]) vals.append((d, Y[i])) vals = sorted(vals) vals = vals[:k] vals = np.array(vals) print(vals) new_vals = np.unique(vals[:, 1], return_counts=True) print(new_vals) index = new_vals[1].argmax() print(index) pred = new_vals[0][index] print(pred) return pred x = knn(X, Y, query_x) print(x)
Workshop 9th-Sept-18/.ipynb_checkpoints/KNNImplementation-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Values or Pixles import numpy as np # numerical python import cv2 # computer vision import matplotlib.pyplot as plt # %matplotlib inline # 10 x 10 array arr = np.arange(0,100,1) print(arr) len(arr) arr.shape # reshape arr1 = arr.reshape((10,10)) arr1 # m*n = len(arr) arr1.shape plt.imshow(arr1,cmap='gray') # display random number (10x10) arr2 = np.random.randint(0,150,(10,10))# range 0-150 and size (10,10) arr2 plt.imshow(arr2,cmap='gray') # display random number (10x10) in image # read the parrot img = cv2.imread('data/test.jpg') # bgr img.shape # convert into grayscale gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) gray.shape plt.imshow(gray,cmap='gray') slice = gray[0:10,0:10] plt.imshow(slice,cmap='gray')
1 Face Recognition Web App with Machine Learning in Flask/Module-1/Module-01-Values or Pixles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import syft as sy sy.networks # # Initial Network Setup # # The network has to join itself so that it's registered within its own key server and can use the VPN network that it's hosting. NETWORK_PUBLIC_HOST = "http://172.16.31.10:80" network_root = sy.login(email="<EMAIL>", password="<PASSWORD>", url="http://172.16.31.10", port=80) network_root.join_network(host_or_ip=NETWORK_PUBLIC_HOST) vpn_s = network_root.vpn_status() vpn_s network_root.vpn_status() # + from requests import get ip = get('https://api.ipify.org').content.decode('utf8') print('My public IP address is: {}'.format(ip)) # - # # Domain Applies to Network import syft as sy NETWORK_HOST_IP = "localhost:8081" domain = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8082) NETWORK_PUBLIC_HOST = "http://localhost:8081" NETWORK_HOST_IP="localhost" NETWORK_HOST_PORT=8081 NETWORK_PUBLIC_HOST="http://"+NETWORK_HOST_IP+":"+str(80) domain.join_network(host_or_ip=NETWORK_PUBLIC_HOST) domain.vpn_status() domain.apply_to_network(domain_vpn_ip="172.16.31.10", network_vpn_ip="172.16.31.10") # # Network Approves Domain Application network_root.association.pandas() request_id = int(network_root.association.all()[0]["association_id"]) network_root.association[1].accept() # # Domain Owner Uploads Some Data import numpy as np data = sy.Tensor([1,2,3,4,5]).private(min_val=0, max_val=5, entities="bob") domain.load_dataset(assets={"january":data}, name="Temporal Data", description="Some data from january.") domain.datasets # # Data Scientist
notebooks/Experimental/Traskinator/Demo With Network Node.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Priors # # The priors used in our code have been written to exemplify what we believe to be probable values for every parameter. # # load in necessary packages import numpy as np import scipy.stats import matplotlib.pyplot as plt # ## Mean # # Most normalized or detrended magnitudes tend to hover around 0. Our fluxes were normalized around a value of 1, so we make the mean for our normal distribution 1 with a small sigma of 0.5 so that if other data uses 0 as it's mean, it shouldn't have any significant effect on the likelihood. # + mean = np.arange(-4, 5, 0.1) p_mean = scipy.stats.norm(1, 0.5).logpdf(mean) plt.plot(mean, np.exp(p_mean)) plt.vlines(1, 0, 0.8, alpha=0.5, label="Mean=1, sigm=0.5") plt.title("Mean Prior") plt.legend() plt.show() # - # ## Amplitude (long kernel) # # Our kernel modeling long-term changes in the profile of the lightcurve has a hyper-parameter for the amplitude. While we don't expect large changes in the amplitudes over time, we also didn't want to exclude any values (except for negative values) so we chose a prior with a peak at 2, and a large sigma. # # # + log_amp_l = np.arange(-5, 5, 0.1) p_log_amp_k2 = scipy.stats.norm(np.log(2), np.log(10)).logpdf(log_amp_l) plt.plot(np.exp(log_amp_l), np.exp(p_log_amp_k2)) plt.vlines(2, 0, 0.2, alpha=0.5, label="Mean=2, sigma=10") plt.title("Amplitude Prior") plt.legend() plt.show() # - # ## Metric (long kernel) # # The metric hyper-parameter for the long-term kernel is expected to capture any gradual changes to the lightcurve profile over time, meaning that we want the average time to be quite long, as to discourage it from trying to fit for any short term changes, since those should be coming from the periodicity of the asteroid. 
# + log_metric = np.arange(-5, 10, 0.1) p_log_metric = scipy.stats.norm(np.log(100), np.log(10)).logpdf(log_metric) plt.plot(np.exp(log_metric), np.exp(p_log_metric)) plt.vlines(100, 0, 0.2, alpha=0.5, label="Mean=100,, sigma=10") plt.title("Metric Prior") plt.legend() plt.show() # - # ## Amplitude (periodic) # # The amplitude of the periodic kernel is expected to be similar to the difference in magnitude of the asteroid, while the other amplitude is meant to model more of the change in the mean of the amplitude over extended periods of time. The periodic amplitude is thus expected to vary by a few magnitudes potentially, but never anything extensive. # # # + log_amp_p = np.arange(-3, 3, 0.01) p_log_amp_k1 = scipy.stats.norm(np.log(2), np.log(2)).logpdf(log_amp_p) plt.plot(np.exp(log_amp_p), np.exp(p_log_amp_k1)) plt.vlines(2, 0, 0.8, alpha=0.5, label="Mean=2, sigma=2") plt.title("Amplitude (Periodic) Prior") #plt.legend() plt.show() # - # ## Gamma # # Gamma determines the length-scale of the variability of the asteroid profile. The smaller the value, the smoother the lightcurve is expected to look, versus a higher value for gamma indicates a lot of detail within the correlating period. # # If gamma becomes unusually large, it might be because the estimated period is capturing multiple period cycles, and is thus interpreting the lightcurve to be more complex than it actually is. # + gamma = np.arange(0.0001 ,50, 0.01) p_log_gamma = scipy.stats.norm(np.log(10), np.log(2)).logpdf(np.log(gamma)) plt.plot(gamma, np.exp(p_log_gamma)) plt.vlines(10, 0, 0.8, alpha=0.5, label="Mean=10, sigma=2") plt.title("Gamma Prior") plt.legend() plt.show() # - # ## Period # # The period is the most anticipated parameter we are looking to fit. We know from previous detailed studies of asteroids what we would expect the general distribution of asteroid periods to look like, so we are replicating the general distribution here. 
Most asteroids you would expect to have a period within 24-48 hours, with little chance of a period being faster than 2 hours (although not impossible). # + log_period = np.arange(-3, 1, 0.01) p_log_period = scipy.stats.norm(np.log(4.0 / 24.0), (12.0 / 24.0)).logpdf(log_period) plt.plot(np.exp(log_period)*24, np.exp(p_log_period)) plt.vlines(4, 0, 0.8, alpha=0.5, label="Mean=4, sigma=12") plt.title("Period Prior") plt.legend() plt.show() # -
docs/source/Prior_Plotting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/01-vyom/Machine-Learning-Projects/blob/master/Kaggle/iWildCam%202020/iwildcam_2020_demo_kernel.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + _uuid="3a88b781-577f-48f2-85cd-1117a73faee4" _cell_guid="7c5c6f8d-e8bc-4036-b437-7b6e43b5d002" id="Wg10VaYKWMK0" colab_type="code" colab={} # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt from PIL import Image, ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import json import os from IPython.display import FileLink # for dirname, _, filenames in os.walk('/kaggle/input'): # for filename in filenames: # print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. 
# + id="3XpZCRRtWMLA" colab_type="code" colab={}
# Load the iWildCam 2020 train annotations and test image metadata.
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json') as f:
    train_data = json.load(f)
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json') as f:
    test_data = json.load(f)

# + id="C31cwuwsWMLH" colab_type="code" colab={}
train_data.keys()

# + id="3XhWG02xWMLS" colab_type="code" colab={}
train = pd.DataFrame(train_data['annotations'])

# +
train.head()

# +
# 'count' collides with DataFrame.count, so rename the column.
train.rename(columns={'count': 'cnt'}, inplace=True)

# +
train[train.cnt > 1].describe()

# +
train.describe()

# +
train_img = pd.DataFrame(train_data['images'])

# + id="comONRdiWMMA" colab_type="code" colab={}
# Row indices of a handful of known-bad images, plus every image taken at
# location 537, are collected and then dropped from both tables.
_bad_image_ids = [
    '896c1198-21bc-11ea-a13a-137349068a90',
    '8792549a-21bc-11ea-a13a-137349068a90',
    '87022118-21bc-11ea-a13a-137349068a90',
    '98a295ba-21bc-11ea-a13a-137349068a90',
]
indices1 = [train[train['image_id'] == bad_id].index for bad_id in _bad_image_ids]
indices2 = [train_img[train_img['id'] == bad_id].index for bad_id in _bad_image_ids]
for _id in train_img[train_img['location'] == 537]['id'].values:
    indices1.append(train[train['image_id'] == _id].index)
    indices2.append(train_img[train_img['id'] == _id].index)
for idx in indices1:
    train = train.drop(train.index[idx])
for idx in indices2:
    train_img = train_img.drop(train_img.index[idx])

# +
train_img.head()

# +
fig = plt.figure(figsize=(19, 4))
ax = sns.distplot(train['category_id'])
plt.title('distribution of number of data per category')

# +
fig = plt.figure(figsize=(30, 4))
ax = sns.barplot(x="category_id", y="cnt", data=train)
plt.title('distribution of count per id')

# +
fig = plt.figure(figsize=(30, 4))
ax = sns.countplot(train_img['location'])
plt.title('distribution of number of animals by location')

# +
# Month is characters 5:7 of the ISO datetime string.
labels_month = sorted(set(train_img['datetime'].map(lambda s: s[5:7])))
# fig, ax = plt.subplots(1,2, figsize=(20,7)
plt.title('Count of train data per month')
ax = sns.countplot(train_img['datetime'].map(lambda s: s[5:7]), order=labels_month)
ax.set(xlabel='Month', ylabel='count')
# ax.set(ylim=(0,55000))

# +
train_img.describe()

# +
train.describe()

# +
# No-op self-assignments retained from the original notebook.
train_img = train_img
train = train

# +
# Copy the label column onto the image table (aligned by row index).
train_img['category'] = train['category_id']

# +
# Keep only the columns the image generator needs.
train_img.drop(train_img.columns.difference(['file_name', 'category']),
               axis=1, inplace=True)

# + id="NML3SCF4WMMp" colab_type="code" colab={}
# Labels must be strings for flow_from_dataframe's categorical mode.
train_img['category'] = train_img['category'].apply(str)

# +
train_img.head()

# +
train_img[train_img['file_name'] == '883572ba-21bc-11ea-a13a-137349068a90.jpg'].index

# +
# Drop three more unwanted rows by label.
train_img.drop(123658, inplace=True)

# +
train_img.drop(123651, inplace=True)

# +
train_img.drop(123653, inplace=True)

# +
# # !pip install tensorflow-gpu==1.14.0
# # !pip install keras==2.2.4

# +
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.model_selection import train_test_split
# import pickle
import dill
from tqdm import tqdm
from os import makedirs
from os.path import expanduser, exists, join

# +
# Aggressive augmentation for camera-trap imagery.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True,
    zoom_range=0.3,
    width_shift_range=0.3,
    height_shift_range=0.3,
    rotation_range=40,
    shear_range=0.3,
    channel_shift_range=150.0,
    fill_mode='nearest',
    brightness_range=(0.2, 0.9),
)
# (max_rotate=20, max_zoom=1.3, max_lighting=0.4, max_warp=0.4,
#  p_affine=1., p_lighting=1.
# + id="HARvUrj0WMNK" colab_type="code" colab={}
# Stream a 30k-row slice of the training table through the augmenting
# generator; class list is fixed to the full set of categories.
train_generator = train_datagen.flow_from_dataframe(
    dataframe=train_img[90000:120000],
    directory='/kaggle/input/iwildcam-2020-fgvc7/train',
    x_col="file_name",
    y_col="category",
    target_size=(150, 150),
    batch_size=256,
    classes=train_img['category'].unique().tolist(),
    class_mode='categorical')

# +
# Invert class_indices so predicted indices can be mapped back to labels.
labels = (train_generator.class_indices)
labels = dict((v, k) for k, v in labels.items())

# +
print(labels)

# +
# cache_dir = expanduser(join('~', '.keras'))
# if not exists(cache_dir):
#     makedirs(cache_dir)
# models_dir = join(cache_dir, 'models')
# if not exists(models_dir):
#     makedirs(models_dir)
# # !cp ../input/keras-pretrained-models/*notop* ~/.keras/models/
# # !cp ../input/keras-pretrained-models/imagenet_class_index.json ~/.keras/models/
# # !cp ../input/keras-pretrained-models/resnet50* ~/.keras/models/

# +
# !ls ../input/keras-pretrained-models/

# +
# # !git clone https://github.com/qubvel/efficientnet.git

# +
# import efficientnet.efficientnet.tfkeras as efn

# +
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam

# +
# InceptionV3 backbone with locally cached ImageNet weights, no top layer.
pre_trained_model = tf.keras.applications.InceptionV3(
    include_top=False,
    input_shape=(150, 150, 3),
    weights='../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')

# +
# Alternative backbone (disabled — the efficientnet import above is commented out):
# pre_trained_model = efn.EfficientNetB7(weights='imagenet', include_top=False,
#                                        pooling='avg', input_shape=(96, 96, 3))

# +
# Freeze the backbone; only the classification head below is trained.
for layer in pre_trained_model.layers:
    layer.trainable = False

# +
# x = pre_trained_model.output
# predictions = Dense(573, activation="softmax")(x)
# model = Model(inputs=pre_trained_model.input, outputs=predictions)

# +
# Classification head: two FC blocks then softmax over the 216 categories.
model = Sequential()
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(216, activation='softmax'))

# +
# Glue backbone and head into one trainable functional Model.
pretrainedInput = pre_trained_model.input
pretrainedOutput = pre_trained_model.output
output = model(pretrainedOutput)
model = Model(pretrainedInput, output)

# +
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

# +
model.summary()

# +
# BUG FIX: the original called new_model.fit_generator(...), but `new_model`
# is only defined further below (loaded from disk for inference), so training
# raised NameError.  Train the freshly built `model` instead.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.n // train_generator.batch_size + 1,
    epochs=5,
    shuffle=True,
    verbose=1)

# +

# +
import matplotlib.pyplot as plt
acc = history.history['accuracy']
loss = history.history['loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.title('Training accuracy vs epochs')
plt.legend(loc=0)
plt.figure()
plt.show()

# + _kg_hide-output=true
# BUG FIX: save the model that was just trained (was: new_model.save(...),
# with `new_model` still undefined at this point).
model.save('Modeln.h5')

# +
FileLink('Modeln.h5')

# +
test = pd.DataFrame(test_data['images'])

# +
test.head()

# +
test.describe()

# +
test_data.keys()

# +
# Test-time pipeline: rescaling only, no augmentation.
test_datagen = ImageDataGenerator(rescale=1. / 255.)

# +
test_generator = test_datagen.flow_from_dataframe(
    dataframe=test,
    directory='/kaggle/input/iwildcam-2020-fgvc7/test',
    x_col="file_name",
    target_size=(150, 150),
    batch_size=64,
    class_mode=None)

# +
# Inference uses a previously trained model re-loaded from a Kaggle dataset.
new_model = tf.keras.models.load_model('/kaggle/input/model-1/Modeln.h5')

# +
preds = new_model.predict_generator(
    test_generator,
    steps=test_generator.n // test_generator.batch_size + 1,
    verbose=1)

# +

# +
predicted_class_indices = np.argmax(preds, axis=1)

# +
labels = (train_generator.class_indices)
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]

# +
Id = test.id

# +
results = pd.DataFrame({"Id": Id, "Category": predictions})

# +
# Merge predictions onto the sample submission to preserve row order.
submission = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
submission = submission.drop(['Category'], axis=1)
submission = submission.merge(results, on='Id')
submission.to_csv('modeln.csv', index=False)

# +
FileLink('modeln.csv')

# +
# results.to_csv("results.csv",index=False)
Kaggle/iWildCam 2020/iwildcam_2020_demo_kernel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''base'': conda)'
#     name: python3
# ---

import pyspark
from pyspark.sql import SparkSession


def _print_all(rdd_to_show):
    """Collect an RDD to the driver and print one element per line."""
    for element in rdd_to_show.collect():
        print(element)


# Local single-core session for the word-count demo.
spark = (
    SparkSession.builder.master("local[1]")
    .appName('PySparkRddWordCountDemo')
    .getOrCreate()
)

rdd = spark.sparkContext.textFile("../resources/data.txt")
print("RDD count :" + str(rdd.count()))
_print_all(rdd)

# Flatmap: split every line into individual words
rdd2 = rdd.flatMap(lambda line: line.split(" "))
_print_all(rdd2)

# map: pair every word with an initial count of 1
rdd3 = rdd2.map(lambda word: (word, 1))
_print_all(rdd3)

# reduceByKey: sum the counts per word
rdd4 = rdd3.reduceByKey(lambda a, b: a + b)
_print_all(rdd4)

# map + sortByKey: flip to (count, word) and order by count
rdd5 = rdd4.map(lambda pair: (pair[1], pair[0])).sortByKey()
_print_all(rdd5)

# filter: keep only words containing the letter 'a'
rdd6 = rdd5.filter(lambda pair: 'a' in pair[1])
_print_all(rdd6)
pyspark/pyspark-rdd/spark_rdd_wordcount.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # EOPF S2 MSI L1C Product Data Structure Proposal
# (title fixed: this notebook handles the S2 MSI L1C product, not S3 OLCI L1)

import os
import xarray as xr
import glob  # was imported twice; deduplicated
import rasterio
from IPython.core.display import HTML
import re
from utils import display
from EOProductDataStructure import EOProductBuilder, EOVariableBuilder, EOGroupBuilder
from lxml import etree

# Namespace mapping shared by every XPath query below (the original repeated
# this dict inline six times).
S2_TILE_NS = {'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'}

# +
# Dask chunk size to use when opening each band (TCI = true-colour image).
variable_chunks = {
    'B01': 192, 'B02': 1024, 'B03': 1024, 'B04': 1024,
    'B05': 640, 'B06': 640, 'B07': 640, 'B08': 640,
    'B8A': 640, 'B09': 192, 'B10': 192, 'B11': 640,
    'B12': 640, 'TCI': 256
}


def get_jp2_ds(path_to_product, glob_patterns, var_pattern, resolution):
    """Open the JP2 rasters matched by *glob_patterns* into one Dataset.

    *var_pattern* is a regex whose group(1) extracts the band name from the
    file name; *resolution* ('10m'/'20m'/'60m') suffixes the x/y dimensions.
    """
    variables = {}
    coordinates = {}
    attributes = {}
    for glob_pattern in glob_patterns:
        files = glob.glob(path_to_product + '/' + glob_pattern)
        for file in files:
            # Band name is encoded in the file name, e.g. ..._B02.jp2 -> 'B02'.
            var = re.match(var_pattern, file[file.rfind('/') + 1:]).group(1)
            chunks = variable_chunks[var]
            ds1 = xr.open_dataset(file, chunks=chunks, engine='rasterio',
                                  mask_and_scale=False)
            if var == 'TCI':
                # True-colour image carries three bands: split into r/g/b.
                variables['red'] = ds1.get('band_data')[0].drop('band')
                variables['green'] = ds1.get('band_data')[1].drop('band')
                variables['blue'] = ds1.get('band_data')[2].drop('band')
            else:
                variables[var] = ds1.get('band_data')[0].drop('band')
            for attr in ds1.attrs:
                if attr not in attributes:  # first file wins
                    attributes[attr] = ds1.attrs[attr]
    ds = xr.Dataset(data_vars=variables, coords=coordinates, attrs=attributes
                    ).rename({'x': 'x_' + resolution, 'y': 'y_' + resolution}
                    ).drop(['spatial_ref', 'x_' + resolution, 'y_' + resolution])
    return ds


def get_coord_ds(path_to_product, glob_patterns, resolutions):
    """Collect x/y coordinate vectors (and spatial_ref) for each resolution."""
    variables = {}
    coordinates = {}
    attributes = {}
    for glob_pattern, resolution in zip(glob_patterns, resolutions):
        files = glob.glob(path_to_product + '/' + glob_pattern)
        for file in files:
            ds1 = xr.open_dataset(file, engine='rasterio', mask_and_scale=False
                                  ).rename({'x': 'x_' + resolution, 'y': 'y_' + resolution})
            variables['x_' + resolution] = ds1['x_' + resolution]
            variables['y_' + resolution] = ds1['y_' + resolution]
            if 'spatial_ref' in ds1 and 'spatial_ref' not in variables:
                variables['spatial_ref'] = ds1['spatial_ref']
            for attr in ds1.attrs:
                if attr not in attributes:
                    attributes[attr] = ds1.attrs[attr]
    ds = xr.Dataset(data_vars=variables, coords=coordinates, attrs=attributes)
    return ds


# +
band_names = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08',
              'B8A', 'B09', 'B10', 'B11', 'B12']


def get_values(dom, xpath):
    """Read a whitespace-separated 2-D grid of floats from the nodes at *xpath*."""
    nodes = dom.xpath(xpath, namespaces=S2_TILE_NS)
    array = [[float(i) for i in node.text.split()] for node in nodes]
    da = xr.DataArray(array, dims=['y_tiepoints', 'x_tiepoints'])
    return da


def get_shape(dom, xpath):
    """Return [rows, cols] of the grid at *xpath*.

    Local renamed from ``list`` to ``nodes`` — the original shadowed the builtin.
    """
    nodes = dom.xpath(xpath, namespaces=S2_TILE_NS)
    return [len(nodes), len(nodes[0].text.split())]


def parse_xml(path_to_product, glob_pattern):
    """Parse the first file matching *glob_pattern* into an lxml DOM."""
    path = glob.glob(path_to_product + '/' + glob_pattern)[0]
    dom = etree.parse(path)
    return dom


def get_angles_ds(path_to_product, glob_pattern):
    """Sun and per-band/per-detector viewing angles on the tiepoint grid."""
    dom = parse_xml(path_to_product, glob_pattern)
    sza = get_values(dom, 'n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/Values_List/VALUES')
    saa = get_values(dom, 'n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Azimuth/Values_List/VALUES')
    bands = {'sza': sza, 'saa': saa}
    # 13 bands x 6 detectors, detector ids are 1-based in the metadata.
    for band_id in range(13):
        for detector_id in range(1, 7):
            vza = get_values(dom,
                             'n1:Geometric_Info/Tile_Angles/Viewing_Incidence_Angles_Grids[@bandId="{}" and @detectorId="{}"]/Zenith/Values_List/VALUES'
                             .format(band_id, detector_id))
            vaa = get_values(dom,
                             'n1:Geometric_Info/Tile_Angles/Viewing_Incidence_Angles_Grids[@bandId="{}" and @detectorId="{}"]/Azimuth/Values_List/VALUES'
                             .format(band_id, detector_id))
            bands['vza_{}_{}'.format(band_names[band_id], detector_id)] = vza
            bands['vaa_{}_{}'.format(band_names[band_id], detector_id)] = vaa
    ds = xr.Dataset(bands)
    return ds


def get_tiepoints_ds(path_to_product, glob_pattern):
    """Compute tiepoint-grid centre coordinates from the tile geocoding."""
    dom = parse_xml(path_to_product, glob_pattern)
    shape_y_x = get_shape(dom, 'n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/Values_List/VALUES')
    ymax = float(dom.xpath('n1:Geometric_Info/Tile_Geocoding/Geoposition[@resolution="10"]/ULY',
                           namespaces=S2_TILE_NS)[0].text)
    xmin = float(dom.xpath('n1:Geometric_Info/Tile_Geocoding/Geoposition[@resolution="10"]/ULX',
                           namespaces=S2_TILE_NS)[0].text)
    ystep = float(dom.xpath('n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/ROW_STEP',
                            namespaces=S2_TILE_NS)[0].text)
    xstep = float(dom.xpath('n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/COL_STEP',
                            namespaces=S2_TILE_NS)[0].text)
    # Pixel-centre coordinates: offset by half a step from the UL corner.
    y = [ymax - i * ystep - ystep / 2 for i in range(shape_y_x[0])]
    x = [xmin + i * xstep + xstep / 2 for i in range(shape_y_x[1])]
    ds = xr.Dataset({'y_tiepoints': y, 'x_tiepoints': x})
    return ds


# +
path_to_product = glob.glob("data/S2?_MSIL1C*.SAFE")[0]

# Groups definition
groups = {}
# extensional coordinates, metric and geographic
groups['coordinates'] = get_coord_ds(path_to_product,
                                     ["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B02', 'B05', 'B01']],
                                     ['10m', '20m', '60m'])
groups['tiepoints'] = get_tiepoints_ds(path_to_product, "GRANULE/*/MTD_TL.xml")
# groups['crs'] = get_crs_ds(path_to_product, [""])  # utm zone, geographic footprint, metric corners, metric resolutions, parameters to feed proj
groups['measurements_10m'] = get_jp2_ds(path_to_product,
                                        ["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B02', 'B03', 'B04', 'B08']],
                                        '.*_(...).jp2', '10m')
groups['measurements_20m'] = get_jp2_ds(path_to_product,
                                        ["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B05', 'B06', 'B07', 'B8A', 'B11', 'B12']],
                                        '.*_(...).jp2', '20m')
groups['measurements_60m'] = get_jp2_ds(path_to_product,
                                        ["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B01', 'B09', 'B10']],
                                        '.*_(...).jp2', '60m')
groups['quicklook_tci'] = get_jp2_ds(path_to_product,
                                     ["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['TCI']],
                                     '.*_(...).jp2', '10m')
groups['geometry'] = get_angles_ds(path_to_product, "GRANULE/*/MTD_TL.xml")  # angles on tiepoint raster
# groups['instrument'] = get_xml_ds(path_to_product, ["MTD_MSIL1C.xml"])  # band characteristics, gains
# groups['meteo'] = get_ds(path_to_product, ["tie_meteo"])

# Create a new EOProduct instance.  (Removed an unused local that took
# os.path.basename of a literal string.)
product = EOProductBuilder("S2_MSIL1C")
product.metadatas = ["MTD_MSIL1C.xml"]

# ==================== Product groups setting ========================
for group_name, ds in groups.items():
    group = EOGroupBuilder(group_name)
    group.attrs["description"] = f"{group_name} Data Group"
    group.dims = ds.dims
    for v, var in ds.variables.items():
        variable = EOVariableBuilder(v, default_attrs=False)
        variable.dtype = var.dtype
        variable.dimensions = var.dims
        variable.attrs = var.attrs
        group.variables.append(variable)
    product.groups.append(group)

# Fixed typo: was '[xfdumanfist.xml]' — the SAFE manifest is xfdumanifest.xml.
product.attrs['metadata_files'] = '[xfdumanifest.xml]'

print("inputs read")
# -

display(product.compute())
eopf-notebooks/eopf_product_data_structure/EOPF_S2_MSI_v1.2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.13 ('py36') # language: python # name: python3 # --- from turtle import TPen, color import numpy as np import pandas as pd import random import matplotlib.pyplot as plt import seaborn as sns import sklearn.metrics as metrics from keras.models import Sequential from keras.layers import Dense, LSTM, Flatten, Dropout # + def get_ace_values(temp_list): ''' This function lists out all permutations of ace values in the array sum_array For example, if you have 2 aces, there are 4 permutations: [[1,1], [1,11], [11,1], [11,11]] These permutations lead to 3 unique sums: [2, 12, 22] of these 3, only 2 are <=21 so they are returned: [2, 12] ''' sum_array = np.zeros((2**len(temp_list), len(temp_list))) # This loop gets the permutations for i in range(len(temp_list)): n = len(temp_list) - i half_len = int(2**n * 0.5) for rep in range(int(sum_array.shape[0]/half_len/2)): #⭐️ shape[0] 返回 numpy 数组的行数 sum_array[rep*2**n : rep*2**n+half_len, i] = 1 sum_array[rep*2**n+half_len : rep*2**n+half_len*2, i] = 11 # Only return values that are valid (<=21) # return list(set([int(s) for s in np.sum(sum_array, axis=1) if s<=21])) #⭐️ 将所有 'A' 能组成总和不超过 21 的值返回 return [int(s) for s in np.sum(sum_array, axis=1)] #⭐️ 将所有 'A' 能组成的点数以 int 类型返回(有重复和超过 21 点的值) def ace_values(num_aces): ''' Convert num_aces, an int to a list of lists For example, if num_aces=2, the output should be [[1,11],[1,11]] I require this format for the get_ace_values function ''' temp_list = [] for i in range(num_aces): temp_list.append([1,11]) return get_ace_values(temp_list) # - def func(x): ''' 判断玩家起手是否为 21 点 ''' if x == 21: return 1 else: return 0 def make_decks(num_decks, card_types): ''' Make a deck -- 根据给定副数洗好牌 input: num_decks -> 牌副数 card_types -> 单副牌单个花色对应的牌值 output: new_deck -> 一副牌对应牌值 ''' new_deck = [] for i in range(num_decks): for j in 
range(4): # 代表黑红梅方 new_deck.extend(card_types) #⭐️ extend() 函数用于在列表末尾一次性追加另一个序列中的多个值 random.shuffle(new_deck) return new_deck def total_up(hand): ''' Total up value of hand input: <list> hand -> 当前手牌组合 output: <int> -> 计算当前手牌的合法值 ''' aces = 0 # 记录 ‘A’ 的数目 total = 0 # 记录除 ‘A’ 以外数字之和 for card in hand: if card != 'A': total += card else: aces += 1 # Call function ace_values to produce list of possible values for aces in hand ace_value_list = ace_values(aces) final_totals = [i+total for i in ace_value_list if i+total<=21] # ‘A’ 可以是 1 也可以是 11,当前牌值不超过 21 时,取最大值 -- 规则❗️ if final_totals == []: return min(ace_value_list) + total else: return max(final_totals) def model_decision_old(model, player_sum, has_ace, dealer_card_num, hit=0, card_count=None): ''' Given the relevant inputs, the function below uses the neural net to make a prediction and then based on that prediction, decides whether to hit or stay —— 将玩家各参数传入神经网络模型,如果预测结果大于 0.52, 则 hit, 否则 stand input: model -> 模型(一般指 NN 模型) player_sum -> 玩家当前手牌和 has_ace -> 玩家发牌是否有 'A' dealer_card_num -> 庄家发牌(明牌)值 hit -> 玩家是否‘要牌’ card_count -> 记牌器 return: 1 -> hit 0 -> stand ''' # 将需要进入神经网络模型的数据统一格式 # [[18 0 0 6]] input_array = np.array([player_sum, hit, has_ace, dealer_card_num]).reshape(1, -1) # 二维数组变成一行 (1, n) cc_array = pd.DataFrame.from_dict([card_count]) input_array = np.concatenate([input_array, cc_array], axis=1) # print(input_array) # input_array 作为输入传入神经网络,使用预测函数后存入 predict_correct # [[0.10379896]] predict_correct = model.predict(input_array) # print(predict_correct) if predict_correct >= 0.52: return 1 else: return 0 def model_decision(model, card_count, dealer_card_num): ''' Given the relevant inputs, the function below uses the neural net to make a prediction and then based on that prediction, decides whether to hit or stay —— 将玩家各参数传入神经网络模型,如果预测结果大于 0.52, 则 hit, 否则 stand input: model -> 模型(一般指 NN 模型) card_count -> 记牌器 dealer_card_num -> 庄家发牌(明牌)值 return: 1 -> hit 0 -> stand ''' # # 将需要进入神经网络模型的数据统一格式 # input_array = 
np.array([player_sum, 0, has_ace, dealer_card_num, # new_stack, games_played]).reshape(1, -1) # 二维数组变成一行 (1, n) # cc_array = pd.DataFrame.from_dict([card_count]) # input_array = np.concatenate([input_array, cc_array], axis=1) cc_array_bust = pd.DataFrame.from_dict([card_count]) # print(cc_array_bust, dealer_card_num) input_array = np.concatenate([cc_array_bust, np.array(dealer_card_num).reshape(1, -1)], axis=1) # print(input_array) # input_array 作为输入传入神经网络,使用预测函数后存入 predict_correct # [[0.10379896]] predict_correct = model.predict(input_array) # print(predict_correct) if predict_correct >= 0.52: return 1 else: return 0 def bust_Z_score(pred, pred_mean, pred_std): '''(模型预测值 - 模型预测值的平均数) / 模型预测值的标准差''' return (pred - pred_mean) / pred_std def bust_score(model, cc_array_bust, dealer_face_up_card, pred_Y_train_bust): input_array = np.concatenate([cc_array_bust, np.array(dealer_face_up_card).reshape(1, -1)], axis=1) bust_pred = model.predict(input_array) # 将封装好的数据集放入 model_nn_bj 模型中得出预测值 pred_mean = pred_Y_train_bust.mean() # model_nn_bj 的预测值平均数 pred_std = pred_Y_train_bust.std() # model_nn_bj 的预测值标准差 bust_Z = bust_Z_score(bust_pred, pred_mean, pred_std) # print(bust_pred, pred_mean, pred_std, bust_Z) if bust_Z >= 0: return 100 * (1 + bust_Z) else: return 100 def create_data(type, dealer_card_feature, player_card_feature, player_results, action_results=None, new_stack=None, games_played=None, card_count_list=None, dealer_bust=None): ''' input: type -> 0: naive 版本 1: random 版本 2: NN 版本 dealer_card_feature -> 庄家手牌 player_card_feature -> 玩家手牌 player_results -> 玩家输赢结果 action_results -> 玩家是否要牌 new_stack -> 是否是第一轮游戏 games_played -> 本局第几轮游戏 card_count_list -> 记牌器 dealer_bust -> 庄家是否爆牌 return: model_df -> dealer_card: 庄家发牌(明牌) player_total_initial: 玩家一发牌手牌和 Y: 玩家一“输”、“平”、“赢”结果(-1, 0, 1) lose: 玩家一“输”、“不输”结果(1, 0) has_ace: 玩家一发牌是否有'A' dealer_card_num: 庄家发牌(明牌)牌值 correct_action: 判断是否是正确的决定 hit?: 玩家一发牌后是否要牌 new_stack: 是否是第一轮游戏 games_played_with_stack: 本局第几轮游戏 dealer_bust: 庄家是否爆牌 
blackjack?: 玩家起手是否 21 点 2 ~ 'A': 本轮游戏记牌 ''' model_df = pd.DataFrame() # 构造数据集 model_df['dealer_card'] = dealer_card_feature # 所有游戏庄家的第一张牌 model_df['player_total_initial'] = [total_up(i[0][0:2]) for i in player_card_feature] # 所有游戏第一个玩家前两张牌的点数和(第一个玩家 -- 作为数据分析对象❗️) model_df['Y'] = [i[0] for i in player_results] # 所有游戏第一个玩家输赢结果(第一个玩家 -- 作为数据分析对象❗️) if type == 1 or type == 2: player_live_action = [i[0] for i in action_results] model_df['hit?'] = player_live_action # 玩家在发牌后是否要牌 has_ace = [] for i in player_card_feature: if ('A' in i[0][0:2]): # 玩家一发牌有 ‘A’,has_ace 列表追加一个 1 has_ace.append(1) else: # 玩家一发牌无 ‘A’,has_ace 列表追加一个 0 has_ace.append(0) model_df['has_ace'] = has_ace dealer_card_num = [] for i in model_df['dealer_card']: if i == 'A': # 庄家第一张牌是 ‘A’,dealer_card_num 列表追加一个 11 dealer_card_num.append(11) else: # 庄家第一张牌不是 ‘A’,dealer_card_num 列表追加该值 dealer_card_num.append(i) model_df['dealer_card_num'] = dealer_card_num lose = [] for i in model_df['Y']: if i == -1: # 玩家输,lose 列表追加一个 1,e.g. [1, 1, ...] lose.append(1) else: # 玩家平局或赢,lose 列表追加一个 0,e.g. [0, 0, ...] 
lose.append(0) model_df['lose'] = lose if type == 1: # 如果玩家要牌且输了,那么不要是正确的决定; # 如果玩家不动且输了,那么要牌是正确的决定; # 如果玩家要牌且未输,那么要牌是正确的决定; # 如果玩家不动且未输,那么不要是正确的决定。 correct = [] for i, val in enumerate(model_df['lose']): if val == 1: # 玩家输 if player_live_action[i] == 1: # 玩家采取要牌动作(玩家一输了 val = 1,玩家二采取了要牌动作 action = 1 有什么关系❓) correct.append(0) else: correct.append(1) else: if player_live_action[i] == 1: correct.append(1) else: correct.append(0) model_df['correct_action'] = correct # Make a new version of model_df that has card counts ❗️ card_count_df = pd.concat([ pd.DataFrame(new_stack, columns=['new_stack']), # 所有游戏是否是开局第一轮游戏 pd.DataFrame(games_played, columns=['games_played_with_stack']), # 所有游戏是本局内的第几轮 pd.DataFrame.from_dict(card_count_list), # 所有游戏记牌后结果 pd.DataFrame(dealer_bust, columns=['dealer_bust'])], axis=1) # 所有游戏庄家是否爆牌 model_df = pd.concat([model_df, card_count_df], axis=1) model_df['blackjack?'] = model_df['player_total_initial'].apply(func) # 将各模型数据保存至 data 文件夹下 # model_df.to_csv('./data/data' + str(type) + '.csv', sep=' ') # 统计玩家一的所有输、赢、平的次数 # -1.0 199610 # 1.0 99685 # 0.0 13289 # Name: 0, dtype: int64 # 312584 count = pd.DataFrame(player_results)[0].value_counts() print(count, sum(count)) return model_df def play_game(type, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, player_results, action_results, hit_stay=0, temp_new_stack=0, games_played=0, multiplier=0, card_count=None, dealer_bust=None, model=None): ''' Play a game of blackjack (after the cards are dealt) input: type -> 0: naive 版本 1: random 版本 2: NN 版本 players -> 玩家人数 live_total -> 玩家发牌手牌和 dealer_hand -> 庄家发牌(明牌 + 暗牌) player_hands -> 玩家发牌(两张) blackjack -> set(['A', 10]) dealer_cards -> 牌盒中的牌 player_results -> np.zeros((1, players)) action_results -> np.zeros((1, players)) hit_stay -> 何时采取要牌动作 multiplier -> 记录二十一点翻倍 card_count -> 记牌器 dealer_bust -> 庄家是否爆牌 model -> 模型(一般指 NN 模型) return: player_results -> 所有玩家“输”、“平”、“赢”结果 dealer_cards -> 牌盒中的牌 live_total -> 所有玩家牌值和 action_results -> 
所有玩家是否采取"要牌"动作 card_count -> 记牌器 dealer_bust -> 庄家是否爆牌 multiplier -> 记录二十一点翻倍 ''' dealer_face_up_card = 0 # Dealer checks for 21 if set(dealer_hand) == blackjack: # 庄家直接二十一点 for player in range(players): if set(player_hands[player]) != blackjack: # 玩家此时不是二十一点,则结果为 -1 -- 规则❗️ player_results[0, player] = -1 else: player_results[0, player] = 0 else: # 庄家不是二十一点,各玩家进行要牌、弃牌动作 for player in range(players): # Players check for 21 if set(player_hands[player]) == blackjack: # 玩家此时直接二十一点,则结果为 1 player_results[0, player] = 1 multiplier = 1.25 else: # 玩家也不是二十一点 if type == 0: # Hit only when we know we will not bust -- 在玩家当前手牌点数不超过 11 时,才决定拿牌 while total_up(player_hands[player]) <= 11: player_hands[player].append(dealer_cards.pop(0)) card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌 if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1 player_results[0, player] = -1 break elif type == 1: # Hit randomly, check for busts -- 以 hit_stay 是否大于 0.5 的方式决定拿牌 if (hit_stay >= 0.5) and (total_up(player_hands[player]) != 21): player_hands[player].append(dealer_cards.pop(0)) card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌 action_results[0, player] = 1 live_total.append(total_up(player_hands[player])) # 玩家要牌后,将点数和记录到 live_total if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1 player_results[0, player] = -1 elif type == 2: # Neural net decides whether to hit or stay # -- 通过 model_decision 方法给神经网络计算后,决定是否继续拿牌 if 'A' in player_hands[player][0:2]: # 玩家起手有 ‘A’ ace_in_hand = 1 else: ace_in_hand = 0 if dealer_hand[0] == 'A': # 庄家起手有 ‘A’ dealer_face_up_card = 11 else: dealer_face_up_card = dealer_hand[0] while (model_decision_old(model, total_up(player_hands[player]), ace_in_hand, dealer_face_up_card, hit=action_results[0, player], card_count=card_count) == 1) and (total_up(player_hands[player]) != 21): # while (model_decision(model, total_up(player_hands[player]), ace_in_hand, # dealer_face_up_card, temp_new_stack, games_played, card_count) == 1 # ) and 
(total_up(player_hands[player]) != 21):
                    # while (model_decision(model, card_count, dealer_face_up_card)
                    # == 1) and (total_up(player_hands[player]) != 21):
                        player_hands[player].append(dealer_cards.pop(0))
                        card_count[player_hands[player][-1]] += 1  # count the card the player just drew
                        action_results[0, player] = 1
                        # Record the running hand total after every hit.
                        live_total.append(total_up(player_hands[player]))
                        if total_up(player_hands[player]) > 21:
                            # Player busted after drawing: result is -1.
                            player_results[0, player] = -1
                            break

    card_count[dealer_hand[-1]] += 1  # count the dealer's second dealt card

    # Dealer hits based on the rules: draw until the hand totals at least 17.
    while total_up(dealer_hand) < 17:
        dealer_hand.append(dealer_cards.pop(0))
        card_count[dealer_hand[-1]] += 1  # count each card the dealer draws

    # Compare dealer hand to players hand but first check if dealer busted.
    if total_up(dealer_hand) > 21:  # dealer busted
        if type == 1:
            dealer_bust.append(1)  # record the dealer bust
        for player in range(players):
            # Every player whose result is not already -1 wins against a busted dealer.
            if player_results[0, player] != -1:
                player_results[0, player] = 1
    else:  # dealer did not bust
        if type == 1:
            dealer_bust.append(0)  # record that the dealer did not bust
        for player in range(players):
            # Higher (non-bust) total than the dealer wins; equal ties; otherwise loses.
            if total_up(player_hands[player]) > total_up(dealer_hand):
                if total_up(player_hands[player]) <= 21:
                    player_results[0, player] = 1
            elif total_up(player_hands[player]) == total_up(dealer_hand):
                player_results[0, player] = 0
            else:
                player_results[0, player] = -1

    if type == 0:
        return player_results, dealer_cards, live_total, action_results, card_count
    elif type == 1:
        return player_results, dealer_cards, live_total, action_results, card_count, dealer_bust
    elif type == 2:
        return player_results, dealer_cards, live_total, action_results, multiplier, card_count


def play_stack(type, stacks, num_decks, card_types, players, model=None, pred_Y_train_bust=None):
    '''
    Play `stacks` shoes of blackjack and collect per-game features.

    input:
        type       -> 0: naive version  1: random version  2: NN version
        stacks     -> number of shoes (stacks) to play
        num_decks  -> number of decks per shoe
        card_types -> the card face values making up one suit
        players    -> number of players
        model      -> a previously trained model (normally the NN model)
    output:
        dealer_card_feature     -> dealer's first card for every game
        player_card_feature     -> every player's full hand for every game
        player_results          -> lose/tie/win result for every player
        action_results          -> whether each player took the "hit" action
        new_stack               -> whether a game was the first round of a new shoe
        games_played_with_stack -> round number within the current shoe
        card_count_list         -> card-counter snapshot per game
        dealer_bust             -> whether the dealer busted
        bankroll                -> remaining funds at the end of the run
    '''
    bankroll = []
    dollars = 10000  # starting funds are 10000

    dealer_card_feature = []
    player_card_feature = []
    player_live_total = []
    player_results = []
    action_results = []
    dealer_bust = []

    first_game = True
    prev_stack = 0
    stack_num_list = []
    new_stack = []
    card_count_list = []
    games_played_with_stack = []

    for stack in range(stacks):
        games_played = 0  # rounds played so far within this shoe

        # Make a dict for keeping track of the count for a stack
        card_count = {
            2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 'A': 0
        }

        # temp_new_stack is 1 on the first round of a freshly opened shoe,
        # 0 for later rounds of the same shoe, and 0 for the very first
        # round of the very first shoe.
        if stack != prev_stack:
            temp_new_stack = 1
        else:
            temp_new_stack = 0

        blackjack = set(['A', 10])
        dealer_cards = make_decks(num_decks, card_types)  # shuffle the requested number of decks
        # Rule: stop using the shoe once 20 or fewer cards remain.
        while len(dealer_cards) > 20:
            curr_player_results = np.zeros((1, players))
            curr_action_results = np.zeros((1, players))

            dealer_hand = []
            player_hands = [[] for player in range(players)]
            live_total = []
            multiplier = 1

            # Record card count
            cc_array_bust = pd.DataFrame.from_dict([card_count])  # build the DataFrame straight from the dict

            # Deal FIRST card
            for player, hand in enumerate(player_hands):  # first card to every player
                player_hands[player].append(dealer_cards.pop(0))
                card_count[player_hands[player][-1]] += 1  # count every player's first card
            dealer_hand.append(dealer_cards.pop(0))  # then the dealer's first card
            card_count[dealer_hand[-1]] += 1  # count the dealer's first card
            dealer_face_up_card = dealer_hand[0]  # remember the dealer's up card

            # Deal SECOND card
            for player, hand in enumerate(player_hands):  # second card to every player
                player_hands[player].append(dealer_cards.pop(0))
                card_count[player_hands[player][-1]] += 1  # count every player's second card
            dealer_hand.append(dealer_cards.pop(0))  # then the dealer's second card

            if type == 0:
                curr_player_results, dealer_cards, live_total, curr_action_results, card_count = play_game(
                    0, players, live_total, dealer_hand, player_hands, blackjack,
                    dealer_cards, curr_player_results, curr_action_results,
                    card_count=card_count)
            elif type == 1:
                # Record the player's live total after cards are dealt
                # NOTE(review): `player` is the last index left over from the deal
                # loop — only correct for players == 1; confirm for more players.
                live_total.append(total_up(player_hands[player]))

                # First stacks/2 shoes: keep hitting when not at 21 after the deal;
                # last stacks/2 shoes: never hit after the deal.
                if stack < stacks/2:
                    hit = 1
                else:
                    hit = 0

                curr_player_results, dealer_cards, live_total, curr_action_results, card_count, \
                    dealer_bust = play_game(1, players, live_total, dealer_hand,
                                            player_hands, blackjack, dealer_cards,
                                            curr_player_results, curr_action_results,
                                            hit_stay=hit, card_count=card_count,
                                            dealer_bust=dealer_bust)
            elif type == 2:
                # Record the player's live total after cards are dealt
                live_total.append(total_up(player_hands[player]))

                curr_player_results, dealer_cards, live_total, curr_action_results, multiplier, \
                    card_count = play_game(2, players, live_total, dealer_hand,
                                           player_hands, blackjack, dealer_cards,
                                           curr_player_results, curr_action_results,
                                           temp_new_stack=temp_new_stack,
                                           games_played=games_played,
                                           multiplier=multiplier,
                                           card_count=card_count, model=model)

                # bet = bust_score(model, cc_array_bust, dealer_face_up_card, pred_Y_train_bust)
                # dollars += curr_player_results[0, player] * bet * multiplier  # result * bet probability * blackjack multiplier
                # bankroll.append(dollars)  # record remaining funds for this round

            # Track features
            dealer_card_feature.append(dealer_hand[0])  # store the dealer's first card
            player_card_feature.append(player_hands)  # store every player's current hand
            player_results.append(list(curr_player_results[0]))  # store each player's result

            if type == 1 or type == 2:
                # Store post-deal totals plus totals for every hit taken.
                player_live_total.append(live_total)
                # Store whether any player hit this round (action = 1 if so).
                action_results.append(list(curr_action_results[0]))

                # Update card count list with most recent game's card count
                # new_stack gets 1 on the first round of a new shoe, else 0
                # (and 0 on the very first round of the very first shoe).
                if stack != prev_stack:
                    new_stack.append(1)
                else:  # this round belongs to the same shoe as the previous one
                    new_stack.append(0)
                    if first_game == True:
                        first_game = False
                    else:
                        games_played += 1

                stack_num_list.append(stack)  # record which shoe this round belongs to
                games_played_with_stack.append(games_played)  # record the round number within the shoe
                card_count_list.append(card_count.copy())  # record the card count after this round
                prev_stack = stack  # remember the shoe index of the previous round

    if type == 0:
        return dealer_card_feature, player_card_feature, player_results
    elif type == 1:
        return dealer_card_feature, player_card_feature, player_results, action_results, new_stack, games_played_with_stack, card_count_list, dealer_bust
    elif type == 2:
        return dealer_card_feature, player_card_feature, player_results, action_results, bankroll


def step(type, model=None, pred_Y_train_bust=None):
    '''
    Play `stacks` shoes of games and collect the results into model_df.

    input:
        type  -> 0: naive version  1: random version  2: NN version
        model -> a previously trained model (normally the NN model)
    return:
        model_df -> DataFrame holding the collected game data
    '''
    if type == 0 or type == 1:
        nights = 1
        stacks = 50000  # number of shoes
    elif type == 2:
        nights = 201
        stacks = 201  # number of shoes
        bankrolls = []

    players = 1  # number of players
    num_decks = 1  # number of decks
    card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]

    for night in range(nights):
        if type == 0:
            dealer_card_feature, player_card_feature, player_results = play_stack(
                0, stacks, num_decks, card_types, players)
            model_df = create_data(
                0, dealer_card_feature, player_card_feature, player_results)
        elif type == 1:
            dealer_card_feature, player_card_feature, player_results, action_results, new_stack, \
                games_played_with_stack, card_count_list, dealer_bust = play_stack(
                    1, stacks, num_decks, card_types, players)
            model_df = create_data(
                1, dealer_card_feature, player_card_feature, player_results,
                action_results, new_stack, games_played_with_stack,
                card_count_list, dealer_bust)
        elif type == 2:
            dealer_card_feature, player_card_feature, player_results, action_results, bankroll = play_stack(
                2, stacks, num_decks, card_types, players, model, pred_Y_train_bust)
            model_df = create_data(
                2, dealer_card_feature, player_card_feature, player_results,
                action_results)

        # bankrolls.append(bankroll)  # record the remaining funds of each ~700-game run

    # if type == 2:
    #     bet_results = 
pd.DataFrame(bankrolls).T.fillna(method='ffill', axis=0) # bet_results.to_csv('./data/dumb_bet.csv') return model_df def train_nn_ca(model_df): ''' Train a neural net to play blackjack input: model_df -> 模型(一般指 random 模型) return: model -> NN 模型(预测是否是正确决定) pred_Y_train -> correct_action 的预测值 actuals -> correct_action 的实际值 ''' # Set up variables for neural net feature_list = [i for i in model_df.columns if i not in [ 'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred', 'new_stack', 'games_played_with_stack', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'blackjack?']] print(feature_list) # 将模型里的数据按矩阵形式存储 train_X = np.array(model_df[feature_list]) train_Y = np.array(model_df['correct_action']).reshape(-1, 1) # 二维数组变成一列 (n, 1) # print(train_X, train_Y) # Set up a neural net with 5 layers model = Sequential() # model.add(Dense(train_X.shape[1]+1)) model.add(Dense(16)) model.add(Dense(128)) model.add(Dense(32)) model.add(Dense(8)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='sgd') model.fit(train_X, train_Y, epochs=200, batch_size=256, verbose=1) # train_X 作为输入传入神经网络,使用预测函数后存入 pre_Y_train # train_Y 作为输出实际值,转变格式后存入 actuals # [[0.4260913 ] # [0.3595919 ] # [0.24476886] # ... # [0.2946579 ] # [0.39343864] # [0.27353495]] # [1 0 0 ... 0 1 0] pred_Y_train = model.predict(train_X) actuals = train_Y[:, -1] # 将二维数组将为一维 # print(pred_Y_train, actuals) return model, pred_Y_train, actuals def train_nn_ca2(model_df): ''' Train a neural net to PREDICT BLACKJACK Apologize for the name, it started as a model to predict dealer busts Then I decided to predict blackjacks instead but neglected to rename it input: model_df -> 模型(一般指 random 模型) return: model_bust -> NN 模型(预测玩家初始是否 21 点) pred_Y_train_bust -> blackjack? 的预测值 actuals -> blackjack? 
的实际值 ''' # bust_features = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'dealer_card_num'] feature_list = [i for i in model_df.columns if i not in [ 'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred','new_stack', 'games_played_with_stack', 'blackjack?']] # Set up variables for neural net # feature_list_bust = [i for i in bust_features if i not in ['dealer_bust']] print(feature_list) train_X_bust = np.array(model_df[feature_list]) train_Y_bust = np.array(model_df['correct_action']).reshape(-1,1) # Set up a neural net with 5 layers model_bust = Sequential() model_bust.add(Dense(train_X_bust.shape[1])) model_bust.add(Dense(128)) model_bust.add(Dense(32, activation='relu')) model_bust.add(Dense(8)) model_bust.add(Dense(1, activation='sigmoid')) model_bust.compile(loss='binary_crossentropy', optimizer='sgd') model_bust.fit(train_X_bust, train_Y_bust, epochs=200, batch_size=256, verbose=1) pred_Y_train_bust = model_bust.predict(train_X_bust) actuals = train_Y_bust[:, -1] return model_bust, pred_Y_train_bust, actuals # + def comparison_chart(data, position): ''' 绘制多模型数据分析图 input: data -> 数据集 position -> dealer / player ''' fig, ax = plt.subplots(figsize=(12,6)) ax.bar(x=data.index-0.3, height=data['random'].values, color='blue', width=0.3, label='Random') ax.bar(x=data.index, height=data['naive'].values, color='orange', width=0.3, label='Naive') ax.bar(x=data.index+0.3, height=data['smart'].values, color='red', width=0.3, label='Smart') ax.set_ylabel('Probability of Tie or Win', fontsize=16) if position == 'dealer': ax.set_xlabel("Dealer's Card", fontsize=16) plt.xticks(np.arange(2, 12, 1.0)) elif position == 'player': ax.set_xlabel("Player's Hand Value", fontsize=16) plt.xticks(np.arange(4, 21, 1.0)) plt.legend() plt.tight_layout() # plt.savefig(fname= './img/' + position + '_card_probs_smart', dpi=150) def comparison(model_df_naive, model_df_random, model_df_smart): ''' 多个模型数据分析 input: model_df_naive -> naive 模型 model_df_random -> random 模型 
model_df_smart -> NN 模型 output: ./img/dealer_card_probs_smart -> 模型对比:按庄家发牌(明牌)分组,分析玩家“不输”的概率 ./img/player_card_probs_smart -> 模型对比:按玩家发牌分组,分析玩家“不输”的概率 ./img/hit_frequency -> 模型对比:按玩家发牌分组,对比 naive 模型与 NN 模型玩家“要牌”的频率 ./img/hit_frequency2 -> 针对玩家发牌为 12, 13, 14, 15, 16 的数据,按庄家发牌分组,分析玩家“要牌”的频率 ''' # 模型对比:按庄家发牌(明牌)分组,分析玩家“不输”的概率 # 保守模型 data_naive = 1 - (model_df_naive.groupby(by='dealer_card_num').sum()['lose'] / model_df_naive.groupby(by='dealer_card_num').count()['lose']) # 随机模型 data_random = 1 - (model_df_random.groupby(by='dealer_card_num').sum()['lose'] / model_df_random.groupby(by='dealer_card_num').count()['lose']) # 新模型 data_smart = 1 - (model_df_smart.groupby(by='dealer_card_num').sum()['lose'] / model_df_smart.groupby(by='dealer_card_num').count()['lose']) data = pd.DataFrame() data['naive'] = data_naive data['random'] = data_random data['smart'] = data_smart comparison_chart(data, 'dealer') # 模型对比:按玩家发牌分组,分析玩家“不输”的概率 # 保守模型 data_naive = 1 - (model_df_naive.groupby(by='player_total_initial').sum()['lose'] / model_df_naive.groupby(by='player_total_initial').count()['lose']) # 随机模型 data_random = 1 - (model_df_random.groupby(by='player_total_initial').sum()['lose'] / model_df_random.groupby(by='player_total_initial').count()['lose']) # 新模型 data_smart = 1 - (model_df_smart.groupby(by='player_total_initial').sum()['lose'] / model_df_smart.groupby(by='player_total_initial').count()['lose']) data = pd.DataFrame() data['naive'] = data_naive[:-1] data['random'] = data_random[:-1] data['smart'] = data_smart[:-1] comparison_chart(data, 'player') # 各模型玩家赢的概率 # Random: 0.3515 # Random: 0.4165 # Random: 0.4194 # round(x, [, n]) 返回浮点数 x 四舍五入值,小数点后保留 n 位 print('Random: ' + str(round(model_df_random[model_df_random['Y']==1].shape[0]/model_df_random.shape[0], 4))) print('Random: ' + str(round(model_df_naive[model_df_naive['Y']==1].shape[0]/model_df_naive.shape[0], 4))) print('Random: ' + str(round(model_df_smart[model_df_smart['Y']==1].shape[0]/model_df_smart.shape[0], 4))) # 
Check out the probability of hitting for various player hand totals # -- 模型对比:按玩家发牌分组,对比 naive 模型与 NN 模型玩家“要牌”的频率 # 以玩家前两张牌点数和分组,统计玩家总局数 # [ 1364 3602 5267 7495 8782 11244 12565 14971 28538 29678 27366 25614 23379 22017 19519 18205 31118 14567] game_count_df = model_df_smart.groupby(by=['player_total_initial']).count()['lose'] game_count = np.array(game_count_df) # print(game_count) # 以玩家前两张牌点数和分组,统计玩家要牌的局数 # [1291, 3424, 4985, 7127, 8328, 10678, 11929, 14203, 27117, 28251, 23643, 16234, 9307] hit_count = list(model_df_smart[model_df_smart['hit?']==1].groupby(by=['player_total_initial']).count()['lose']) # print(hit_count) hit_count.extend([0 for i in range(len(game_count) - len(hit_count))]) hit_rate_df = pd.DataFrame(np.array(hit_count) / np.array(game_count), index=game_count_df.index, columns=['neural net hit frequency']) hit_rate_df.reset_index(inplace=True) # print(hit_rate_df) # 保守模型在玩家手牌小于 12 时,以 1.0 的概率要牌 naive_hit_rate = [] for i in range(4, 22): if i < 12: naive_hit_rate.append(1.0) else: naive_hit_rate.append(0.0) hit_rate_df['naive strategy hit frequency'] = naive_hit_rate # 输出 hit_rate_df 模型中 'neural net hit frequency', 'naive strategy hit frequency' 两列值 # neural net hit frequency naive strategy hit frequency # 0 0.946481 1.0 # 1 0.950583 1.0 # 2 0.946459 1.0 # 3 0.950901 1.0 # 4 0.948303 1.0 # 5 0.949662 1.0 # 6 0.949383 1.0 # 7 0.948701 1.0 # 8 0.950207 0.0 # 9 0.951917 0.0 # 10 0.863955 0.0 # 11 0.633794 0.0 # 12 0.398092 0.0 # 13 0.000000 0.0 # 14 0.000000 0.0 # 15 0.000000 0.0 # 16 0.000000 0.0 # 17 0.000000 0.0 data = hit_rate_df[['neural net hit frequency', 'naive strategy hit frequency']] print(data) fig, ax = plt.subplots(figsize=(12, 6)) ax.bar(x=hit_rate_df['player_total_initial']-0.2, height=data['neural net hit frequency'].values, color='blue', width=0.4, label='Neural Network') ax.bar(x=hit_rate_df['player_total_initial']+0.2, height=data['naive strategy hit frequency'].values, color='orange', width=0.4, label='Naive') 
ax.set_xlabel("Player's Hand Value", fontsize=16) ax.set_ylabel("Frequency of Hitting", fontsize=16) plt.xticks(np.arange(4, 21, 1.0)) plt.legend() plt.tight_layout() # plt.savefig(fname='./img/hit_frequency', dpi=150) # Calculate and graph the neural net's hit frequency vs. dealer card for # player hands in [12, 13, 14, 15, 16] # -- 针对玩家发牌为 12, 13, 14, 15, 16 的数据,按庄家发牌分组,分析玩家“要牌”的频率 # 筛选出玩家点数和为 12, 13, 14, 15, 16 的数据,以庄家第一张手牌和是否要牌按次序分组,统计玩家总局数 # dealer_card_num hit? lose # 0 2 0.0 6138 # 1 2 1.0 4374 # 2 3 0.0 4199 # 3 3 1.0 6230 # 4 4 0.0 3834 # 5 4 1.0 6413 # 6 5 0.0 3621 # 7 5 1.0 6772 # 8 6 0.0 1842 # 9 6 1.0 8291 # 10 7 0.0 1842 # 11 7 1.0 8754 # 12 8 0.0 1750 # 13 8 1.0 8515 # 14 9 0.0 303 # 15 9 1.0 10113 # 16 10 0.0 3245 # 17 10 1.0 37921 # 18 11 0.0 3249 # 19 11 1.0 7169 player_despair_df = model_df_smart[(model_df_smart['player_total_initial']==12) | (model_df_smart['player_total_initial']==13) | (model_df_smart['player_total_initial']==14) | (model_df_smart['player_total_initial']==15) | (model_df_smart['player_total_initial']==16)] \ .groupby(by=['dealer_card_num', 'hit?']).count()['lose'] player_despair_df2 = player_despair_df.reset_index().copy() print(player_despair_df2) # 会有 ['hit?'] != 1 的情况 -- 数据量不充足❓ # 在筛选的基础上,统计玩家要牌的频率 # [0.41609589 0.59737271 0.62584171 0.65159242 0.8182177 0.82616082 0.82951778 0.97091014 0.92117281 0.68813592] hit_rate_despair = np.array(player_despair_df2[player_despair_df2['hit?']==1])[:, -1] / \ np.array(player_despair_df2.groupby(by='dealer_card_num').sum())[:, -1] # print(hit_rate_despair) # 按庄家发牌分类,统计玩家在初始手牌为 12, 13, 14, 15, 16 下要牌的频率 # dealer_card_num hit_rate # 2 0.416096 # 3 0.597373 # 4 0.625842 # 5 0.651592 # 6 0.818218 # 7 0.826161 # 8 0.829518 # 9 0.970910 # 10 0.921173 # 11 0.688136 data = pd.DataFrame(hit_rate_despair, index=player_despair_df2.groupby(by='dealer_card_num').sum().index, columns=['hit_rate']) print(data) fig, ax = plt.subplots(figsize=(12, 6)) ax.bar(x=data.index, height=data['hit_rate'].values) 
ax.set_xlabel("Dealer's Card", fontsize=16) ax.set_ylabel("Frequency of Hitting", fontsize=16) plt.xticks(np.arange(2, 12, 1.0)) plt.tight_layout() # plt.savefig(fname='./img/hit_frequency2', dpi=150) # 模型中所有局数要牌的频率 print('Total hit frequency: ' + str(round(model_df_smart[model_df_smart['hit?']==1].shape[0] / np.sum(model_df_smart.shape[0]), 4))) # - def presentation(model_df): ''' 单个模型数据分析 input: model_df -> 待分析模型 output: ./img/dealer_card_probs -> 按庄家发牌(明牌)分组,分析玩家“不输”的概率 ./img/player_hand_probs -> 按玩家发牌分组,分析玩家“不输”的概率 ./img/heat_map -> 去掉玩家初始 21 点,按玩家发牌与庄家发牌分组分析玩家“不输”的概率 ''' # 按庄家发牌(明牌)分组,分析玩家“不输”的概率 data = 1 - (model_df.groupby(by='dealer_card').sum()['lose'] / model_df.groupby(by='dealer_card').count()['lose']) fig, ax = plt.subplots(figsize=(10, 6)) ax = sns.barplot(x=data.index, y=data.values) ax.set_xlabel("Dealer's Card", fontsize=16) ax.set_ylabel("Probability of Tie or Win", fontsize=16) plt.tight_layout() # plt.savefig(fname='./img/dealer_card_probs' + str(type), dpi=150) # 按玩家发牌分组,分析玩家“不输”的概率 data = 1 - (model_df.groupby(by='player_total_initial').sum()['lose'] / model_df.groupby(by='player_total_initial').count()['lose']) fig, ax = plt.subplots(figsize=(10, 6)) ax = sns.barplot(x=data[:-1].index, y=data[:-1].values) ax.set_xlabel("Player's Hand Value", fontsize=16) ax.set_ylabel("Probability of Tie or Win", fontsize=16) plt.tight_layout() # plt.savefig(fname='./img/player_hand_probs' + str(type), dpi=150) # 玩家有‘A’对玩家“输”的影响 # has_ace # 0 0.683229 # 1 0.384232 # Name: lose, dtype: float64 # print(model_df.groupby(by='has_ace').sum()['lose'] / # model_df.groupby(by='has_ace').count()['lose']) # print(model_df.groupby(by='Y').count()['lose'] / model_df.shape[0]) # print('Win or Tie Rate:', 1-model_df.sum()['lose'] / model_df.count()['lose']) # 去掉玩家初始 21 点,按玩家发牌与庄家发牌分组分析玩家“不输”的概率 pivot_data = model_df[model_df['player_total_initial'] != 21] # 去掉玩家一初始手牌点数为 21 的数据 losses_pivot = pd.pivot_table(pivot_data, values='lose', index=['dealer_card_num'], 
columns=['player_total_initial'], aggfunc=np.sum) games_pivot = pd.pivot_table(pivot_data, values='lose', index=['dealer_card_num'], columns=['player_total_initial'], aggfunc='count') heat_data = 1 - losses_pivot.sort_index(ascending=False) / games_pivot.sort_index(ascending=False) fig, ax = plt.subplots(figsize=(16, 8)) sns.heatmap(heat_data, square=False, cmap="PiYG") ax.set_xlabel("Player's Hand Value", fontsize=16) ax.set_ylabel("Dealer's Card", fontsize=16) # plt.savefig(fname='./img/heat_map' + str(type), dpi=150) plt.show() def plot_roc(pred_Y_train, actuals): ''' Plot ROC Curve —— 查看预测值与实际值之间的拟合程度 input: pred_Y_train -> 预测值 actuals -> 实际值 output: ./img/roc_curve_blackjack -> 【NN】预测值与真实值的拟合程度 ''' # Plot ROC Curve fpr, tpr, threshold = metrics.roc_curve(actuals, pred_Y_train) roc_auc = metrics.auc(fpr, tpr) fig, ax = plt.subplots(figsize=(10, 8)) plt.plot(fpr, tpr, label = ('ROC AUC = %0.3f' % roc_auc)) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) ax.set_xlabel("False Positive Rate", fontsize=16) ax.set_ylabel("True Positive Rate", fontsize=16) plt.setp(ax.get_legend().get_texts(), fontsize=16) plt.tight_layout() # plt.savefig(fname='./img/roc_curve_blackjack', dpi=150) plt.show() def plot_mean(): # static_smart_bets = pd.read_csv('./data/static_smart_bet.csv') print("plot_mean!") smart_bets = pd.read_csv('./data/smart_bet.csv') dumb_bets = pd.read_csv('./data/dumb_bet.csv') plt.subplots(figsize=(10, 6)) plt.plot(smart_bets.mean(axis=1), label='Mean Banktoll - Neural Net 2 Sizing Bets') plt.plot(dumb_bets.mean(axis=1), label='Mean Bankroll - Always Bet $10') plt.xlabel('Games Played', fontsize=16) plt.ylabel('Bankroll, Starts with $10,000', fontsize=16) plt.legend() plt.tight_layout() # plt.savefig('./img/gamble_results', bpi=150) plt.show() plt.subplots(figsize=(10, 6)) plt.hist(smart_bets.iloc[-1], lable='Mean Bankroll - Neural Net Sizing Bets', bins=20) plt.hist(dumb_bets.iloc[-1], label='Mean Bankroll - 
Always Bet $10', bins=20, alpha=0.6) plt.xlabel('Games Played', fontsize=16) plt.ylabel('Bankroll, Starts with $10,000', fontsize=16) plt.legend() plt.tight_layout() # plt.savefig('./img/gamble_hist', bpi=150) plt.show() model_df_naive = step(0) # 生成 naive 模型各项数据 model_df_random = step(1) # 生成 random 模型各项数据 presentation(model_df_random) # 分析 random 模型数据 model_nn, pred_Y_train, actuals = train_nn_ca(model_df_random) # 用 random 模型训练是否是正确决策 plot_roc(pred_Y_train, actuals) model_nn_bj, pred_Y_train_bust, actuals_bust = train_nn_ca2(model_df_random) # 用 random 模型训练玩家起手是否有 21 点 plot_roc(pred_Y_train_bust, actuals_bust) # + model_df_nn = step(2, model_nn_bj, pred_Y_train_bust) # 生成 NN 模型各项数据 presentation(model_df_nn) # 分析 NN 模型数据 # plot_mean() # presentation(2, model_df_nn) # 分析 NN 模型数据 comparison(model_df_naive, model_df_random, model_df_nn) # 将 naive, random, NN 三个模型的数据对比分析 # print(model_df_naive, model_df_random, model_df_nn)
blackjack_with_NN_v4.ipynb
/ ---
/ jupyter:
/   jupytext:
/     text_representation:
/       extension: .q
/       format_name: light
/       format_version: '1.5'
/     jupytext_version: 1.14.4
/   kernelspec:
/     display_name: SQL
/     language: sql
/     name: SQL
/ ---

/ + [markdown] azdata_cell_guid="d25a34e0-7c52-4112-bf09-8feafe21a142" extensions={"azuredatastudio": {"views": []}}
/ # Troubleshoot a Full Transaction Log (SQL Server Error 9002)
/
/ **Applies to:** ![yes](https://docs.microsoft.com/en-us/sql/includes/media/yes-icon.png?view=sql-server-ver15) SQL Server (all supported versions)
/
/
/ This topic discusses possible responses to a full transaction log and suggests how to avoid it in the future.
/
/ When the transaction log becomes full, the SQL Server Database Engine issues a 9002 error. The log can fill when the database is online, or in recovery. If the log fills while the database is online, the database remains online but can only be read, not updated. If the log fills during recovery, the Database Engine marks the database as RESOURCE PENDING. In either case, user action is required to make log space available.
/
/
/ <mark> Note
/
/ This article is focused on SQL Server. For more specific information on this error in Azure SQL Database and Azure SQL Managed Instance, see Troubleshooting transaction log errors with Azure SQL Database and Azure SQL Managed Instance. 
Azure SQL Database and Azure SQL Managed Instance are based on the latest stable version of the Microsoft SQL Server database engine, so much of the content is similar though troubleshooting options and tools may differ.</mark> / / / ## Common reasons for a full transaction log / / The appropriate response to a full transaction log depends on what conditions caused the log to fill. Common causes include: / / Log not being truncated / Disk volume is full / Log size is set to a fixed maximum value (autogrow is disabled) / Replication or availability group synchronization that is unable to complete / If no recent transaction log history is indicated for the database with a full transaction log, the solution to the problem is straightforward: resume regular transaction log backups of the database. For more information and a script to review backup history, see the section Backing up the log in this article. / / / ## Resolving a full transaction log / The following specific steps will help you find the reason for a full transaction log and resolve the issue. / / ## Truncate the Log / There is a difference between truncating a transaction log and shrinking a transaction log. Log Truncation occurs normally during a transaction log backup, and is a logical operation which removes committed records inside the log, whereas log shrinking reclaims physical space on the file system by reducing the file size. Log truncation occurs on a virtual-log-file (VLF) boundary, and a log file may contain many VLFs. A log file can be shrunk only if there is empty space inside the log file to reclaim. Shrinking a log file alone cannot solve the problem of a full log file, instead, you must discover why the log file is full and cannot be truncated. / / <mark> Warning / / Data that is moved to shrink a file can be scattered to any available location in the file. This causes index fragmentation and might slow the performance of queries that search a range of the index. 
To eliminate the fragmentation, consider rebuilding the indexes on the file after shrinking. For more information, see Shrink a database.</mark> / / / To discover what is preventing log truncation in a given case, use the log_reuse_wait and log_reuse_wait_desc columns of the sys.databases catalog view. For more information, see [sys.databases (Transact-SQL)](https://docs.microsoft.com/sql/relational-databases/system-catalog-views/sys-databases-transact-sql). For descriptions of factors that can delay log truncation, see [The Transaction Log (SQL Server)](https://docs.microsoft.com/sql/relational-databases/logs/the-transaction-log-sql-server). / / The following set of T-SQL commands will help you identify if a database transaction log is not truncated and the reason for it. The following script will also recommend steps to resolve the issue: / + azdata_cell_guid="9056554c-ad23-446e-9225-daa19154731a" extensions={"azuredatastudio": {"views": []}} SET NOCOUNT ON DECLARE @SQL VARCHAR (8000), @log_reuse_wait tinyint, @log_reuse_wait_desc nvarchar(120), @dbname sysname, @database_id int, @recovery_model_desc varchar (24) IF ( OBJECT_id (N'tempdb..#CannotTruncateLog_Db') is not null) BEGIN DROP TABLE #CannotTruncateLog_Db END --get info about transaction logs in each db. 
Use a DMV which supports all supported versions IF ( OBJECT_id (N'tempdb..#dm_db_log_space_usage') is not null) BEGIN DROP TABLE #dm_db_log_space_usage END SELECT * INTO #dm_db_log_space_usage FROM sys.dm_db_log_space_usage where 1=0 DECLARE log_space CURSOR FOR SELECT NAME FROM sys.databases OPEN log_space FETCH NEXT FROM log_space into @dbname WHILE @@FETCH_STATUS = 0 BEGIN set @SQL = ' insert into #dm_db_log_space_usage ( database_id, total_log_size_in_bytes, used_log_space_in_bytes, used_log_space_in_percent, log_space_in_bytes_since_last_backup ) select database_id, total_log_size_in_bytes, used_log_space_in_bytes, used_log_space_in_percent, log_space_in_bytes_since_last_backup from ' + @dbname +'.sys.dm_db_log_space_usage' BEGIN TRY exec (@SQL) END TRY BEGIN CATCH SELECT ERROR_MESSAGE() AS ErrorMessage; END CATCH; FETCH NEXT FROM log_space into @dbname END CLOSE log_space DEALLOCATE log_space --select the affected databases SELECT sdb.name as DbName, sdb.log_reuse_wait, sdb.log_reuse_wait_desc, log_reuse_wait_explanation = CASE WHEN log_reuse_wait = 1 THEN 'No checkpoint has occurred since the last log truncation, or the head of the log has not yet moved beyond' WHEN log_reuse_wait = 2 THEN 'A log backup is required before the transaction log can be truncated.' WHEN log_reuse_wait = 3 THEN 'A data backup or a restore is in progress (all recovery models). Please wait or cancel backup' WHEN log_reuse_wait = 4 THEN 'A long-running active transaction or a defferred transaction is keeping log from being truncated. You can attempt a log backup to free space or complete/rollback long transaction' WHEN log_reuse_wait = 5 THEN 'Database mirroring is paused, or under high-performance mode, the mirror database is significantly behind the principal database. (Full recovery model only)' WHEN log_reuse_wait = 6 THEN 'During transactional replication, transactions relevant to the publications are still undelivered to the distribution database. 
Investigate the status of agents involved in replication or Changed Data Capture (CDC). (Full recovery model only.)' WHEN log_reuse_wait = 7 THEN 'A database snapshot is being created. This is a routine, and typically brief, cause of delayed log truncation.' WHEN log_reuse_wait = 8 THEN 'A transaction log scan is occurring. This is a routine, and typically a brief cause of delayed log truncation.' WHEN log_reuse_wait = 9 THEN 'A secondary replica of an availability group is applying transaction log records of this database to a corresponding secondary database. (Full recovery model only.)' WHEN log_reuse_wait = 13 THEN 'If a database is configured to use indirect checkpoints, the oldest page on the database might be older than the checkpoint log sequence number (LSN).' WHEN log_reuse_wait = 16 THEN 'An In-Memory OLTP checkpoint has not occurred since the last log truncation, or the head of the log has not yet moved beyond a VLF.' ELSE 'None' END, sdb.database_id, sdb.recovery_model_desc, lsu.used_log_space_in_bytes/1024 as Used_log_size_MB, lsu.total_log_size_in_bytes /1024 as Total_log_size_MB, 100 - lsu.used_log_space_in_percent as Percent_Free_Space INTO #CannotTruncateLog_Db FROM sys.databases AS sdb INNER JOIN #dm_db_log_space_usage lsu ON sdb.database_id = lsu.database_id WHERE log_reuse_wait > 0 SELECT * FROM #CannotTruncateLog_Db DECLARE no_truncate_db CURSOR FOR SELECT log_reuse_wait, log_reuse_wait_desc, dbname, database_id, recovery_model_desc FROM #CannotTruncateLog_Db; OPEN no_truncate_db FETCH NEXT FROM no_truncate_db into @log_reuse_wait, @log_reuse_wait_desc, @dbname, @database_id, @recovery_model_desc WHILE @@FETCH_STATUS = 0 BEGIN if (@log_reuse_wait > 0) select '-- ''' + @dbname + ''' database has log_reuse_wait = ' + @log_reuse_wait_desc + ' --' as 'Individual Database Report' if (@log_reuse_wait = 1) BEGIN select 'Consider running the checkpoint command to attempt resolving this issue or further t-shooting may be required on the checkpoint 
process. Also, examine the log for active VLFs at the end of file' as Recommendation select 'USE ''' + @dbname+ '''; CHECKPOINT' as CheckpointCommand select 'select * from sys.dm_db_log_info(' + convert(varchar,@database_id)+ ')' as VLF_LogInfo END else if (@log_reuse_wait = 2) BEGIN select 'Is '+ @recovery_model_desc +' recovery model the intended choice for ''' + @dbname+ ''' database? Review recovery models and determine if you need to change it. https://docs.microsoft.com/sql/relational-databases/backup-restore/recovery-models-sql-server' as RecoveryModelChoice select 'To truncate the log consider performing a transaction log backup on database ''' + @dbname+ ''' which is in ' + @recovery_model_desc +' recovery model. Be mindful of any existing log backup chains that could be broken' as Recommendation select 'BACKUP LOG [' + @dbname + '] TO DISK = ''some_volume:\some_folder\' + @dbname + '_LOG.trn ''' as BackupLogCommand END else if (@log_reuse_wait = 3) BEGIN select 'Either wait for or cancel any active backups currently running for database ''' +@dbname+ '''. To check for backups, run this command:' as Recommendation select 'select * from sys.dm_exec_requests where command like ''backup%'' or command like ''restore%''' as FindBackupOrRestore END else if (@log_reuse_wait = 4) BEGIN select 'Active transactions currently running for database ''' +@dbname+ '''. 
To check for active transactions, run these commands:' as Recommendation select 'DBCC OPENTRAN (''' +@dbname+ ''')' as FindOpenTran select 'select database_id, db_name(database_id) dbname, database_transaction_begin_time, database_transaction_state, database_transaction_log_record_count, database_transaction_log_bytes_used, database_transaction_begin_lsn, stran.session_id from sys.dm_tran_database_transactions dbtran left outer join sys.dm_tran_session_transactions stran on dbtran.transaction_id = stran.transaction_id where database_id = ' + convert(varchar, @database_id) as FindOpenTransAndSession END else if (@log_reuse_wait = 5) BEGIN select 'Database Mirroring for database ''' +@dbname+ ''' is behind on synchronization. To check the state of DBM, run the commands below:' as Recommendation select 'select db_name(database_id), mirroring_state_desc, mirroring_role_desc, mirroring_safety_level_desc from sys.database_mirroring where mirroring_guid is not null and mirroring_state <> 4 and database_id = ' + convert(sysname, @database_id) as CheckMirroringStatus select 'Database Mirroring for database ''' +@dbname+ ''' may be behind: check unsent_log, send_rate, unrestored_log, recovery_rate, average_delay in this output' as Recommendation select 'exec msdb.sys.sp_dbmmonitoraddmonitoring 1; exec msdb.sys.sp_dbmmonitorresults ''' + @dbname+ ''', 5, 0; waitfor delay ''00:01:01''; exec msdb.sys.sp_dbmmonitorresults ''' + @dbname+ '''; exec msdb.sys.sp_dbmmonitordropmonitoring' as CheckMirroringStatusAnd END else if (@log_reuse_wait = 6) BEGIN select 'Replication transactions still undelivered from publisher database ''' +@dbname+ ''' to Distribution database. Check the oldest non-distributed replication transaction. 
Also check if the Log Reader Agent is running and if it has encoutered any errors' as Recommendation select 'DBCC OPENTRAN (''' + @dbname + ''')' as CheckOldestNonDistributedTran select 'select top 5 * from distribution..MSlogreader_history where runstatus in (6, 5) or error_id <> 0 and agent_id = find_in_mslogreader_agents_table order by time desc ' as LogReaderAgentState END else if (@log_reuse_wait = 9) BEGIN select 'Always On transactions still undelivered from primary database ''' +@dbname+ ''' to Secondary replicas. Check the Health of AG nodes and if there is latency is Log block movement to Secondaries' as Recommendation select 'select availability_group=cast(ag.name as varchar(30)), primary_replica=cast(ags.primary_replica as varchar(30)),primary_recovery_health_desc=cast(ags.primary_recovery_health_desc as varchar(30)), synchronization_health_desc=cast(ags.synchronization_health_desc as varchar(30)),ag.failure_condition_level, ag.health_check_timeout, automated_backup_preference_desc=cast(ag.automated_backup_preference_desc as varchar(10)) from sys.availability_groups ag join sys.dm_hadr_availability_group_states ags on ag.group_id=ags.group_id' as CheckAGHealth select 'SELECT group_name=cast(arc.group_name as varchar(30)), replica_server_name=cast(arc.replica_server_name as varchar(30)), node_name=cast(arc.node_name as varchar(30)),role_desc=cast(ars.role_desc as varchar(30)), ar.availability_mode_Desc, operational_state_desc=cast(ars.operational_state_desc as varchar(30)), connected_state_desc=cast(ars.connected_state_desc as varchar(30)), recovery_health_desc=cast(ars.recovery_health_desc as varchar(30)), synhcronization_health_desc=cast(ars.synchronization_health_desc as varchar(30)), ars.last_connect_error_number, last_connect_error_description=cast(ars.last_connect_error_description as varchar(30)), ars.last_connect_error_timestamp, primary_role_allow_connections_desc=cast(ar.primary_role_allow_connections_desc as varchar(30)) from 
sys.dm_hadr_availability_replica_cluster_nodes arc join sys.dm_hadr_availability_replica_cluster_states arcs on arc.replica_server_name=arcs.replica_server_name join sys.dm_hadr_availability_replica_states ars on arcs.replica_id=ars.replica_id join sys.availability_replicas ar on ars.replica_id=ar.replica_id join sys.availability_groups ag on ag.group_id = arcs.group_id and ag.name = arc.group_name ORDER BY cast(arc.group_name as varchar(30)), cast(ars.role_desc as varchar(30))' as CheckReplicaHealth select 'select database_name=cast(drcs.database_name as varchar(30)), drs.database_id, drs.group_id, drs.replica_id, drs.is_local,drcs.is_failover_ready,drcs.is_pending_secondary_suspend, drcs.is_database_joined, drs.is_suspended, drs.is_commit_participant, suspend_reason_desc=cast(drs.suspend_reason_desc as varchar(30)), synchronization_state_desc=cast(drs.synchronization_state_desc as varchar(30)), synchronization_health_desc=cast(drs.synchronization_health_desc as varchar(30)), database_state_desc=cast(drs.database_state_desc as varchar(30)), drs.last_sent_lsn, drs.last_sent_time, drs.last_received_lsn, drs.last_received_time, drs.last_hardened_lsn, drs.last_hardened_time,drs.last_redone_lsn, drs.last_redone_time, drs.log_send_queue_size, drs.log_send_rate, drs.redo_queue_size, drs.redo_rate, drs.filestream_send_rate, drs.end_of_log_lsn, drs.last_commit_lsn, drs.last_commit_time, drs.low_water_mark_for_ghosts, drs.recovery_lsn, drs.truncation_lsn, pr.file_id, pr.error_type, pr.page_id, pr.page_status, pr.modification_time from sys.dm_hadr_database_replica_cluster_states drcs join sys.dm_hadr_database_replica_states drs on drcs.replica_id=drs.replica_id and drcs.group_database_id=drs.group_database_id left outer join sys.dm_hadr_auto_page_repair pr on drs.database_id=pr.database_id order by drs.database_id' as LogMovementHealth select 'For more information see https://docs.microsoft.com/en-us/troubleshoot/sql/availability-groups/error-9002-transaction-log-large' as 
OnlineDOCResource END else if (@log_reuse_wait in (10, 11, 12, 14)) BEGIN select 'This state is not documented and is expected to be rare and short-lived' as Recommendation END else if (@log_reuse_wait = 13) BEGIN select 'The oldest page on the database might be older than the checkpoint log sequence number (LSN). In this case, the oldest page can delay log truncation.' as Finding select 'This state should be short-lived, but if you find it is taking a long time, you can consider disabling Indirect Checkpoint temporarily' as Recommendation select 'ALTER DATABASE [' +@dbname+ '] SET TARGET_RECOVERY_TIME = 0' as DisableIndirectCheckpointTemporarily END else if (@log_reuse_wait = 16) BEGIN select 'For memory-optimized tables, an automatic checkpoint is taken when transaction log file becomes bigger than 1.5 GB since the last checkpoint (includes both disk-based and memory-optimized tables)' as Finding select 'Review https://blogs.msdn.microsoft.com/sqlcat/2016/05/20/logging-and-checkpoint-process-for-memory-optimized-tables-2/' as ReviewBlog select 'use ' +@dbname+ ' CHECKPOINT' as RunCheckpoint END FETCH NEXT FROM no_truncate_db into @log_reuse_wait, @log_reuse_wait_desc, @dbname, @database_id, @recovery_model_desc END CLOSE no_truncate_db DEALLOCATE no_truncate_db / + [markdown] azdata_cell_guid="c9efd682-4795-49c2-96aa-86f064b9ee47" extensions={"azuredatastudio": {"views": []}} / <mark> Important / / If the database was in recovery when the 9002 error occurred, after resolving the problem, recover the database by using ALTER DATABASE database_name SET ONLINE.</mark> / / More information about the following two actions is provided below: / / - Backing up the log / - Completing or killing a long-running transaction / / ### Backing up the log / Under the FULL or BULK_LOGGED recovery model, if the transaction log has not been backed up recently, backup might be what is preventing log truncation. 
If the log has never been backed up, you must create two log backups to permit the Database Engine to truncate the log to the point of the last backup. Truncating the log frees logical space for new log records. To keep the log from filling up again, take log backups regularly and more frequently. For more information, see Recovery Models. /
/ To review the complete backup history of a database, use the following sample script: /
+ azdata_cell_guid="f351810c-19e0-4b39-aa1b-01fced961829" extensions={"azuredatastudio": {"views": []}}
-- Backup history report: every backup recorded in msdb over the last two
-- months for databases in FULL or BULK-LOGGED recovery, newest first.
-- Use it to verify log backups are frequent enough to allow log truncation.
SELECT bs.database_name
    -- Decode backupset.type (plus the copy-only flag) into a readable label
    , backuptype = CASE WHEN bs.type = 'D' and bs.is_copy_only = 0 THEN 'Full Database'
                        WHEN bs.type = 'D' and bs.is_copy_only = 1 THEN 'Full Copy-Only Database'
                        WHEN bs.type = 'I' THEN 'Differential database backup'
                        WHEN bs.type = 'L' THEN 'Transaction Log'
                        WHEN bs.type = 'F' THEN 'File or filegroup'
                        WHEN bs.type = 'G' THEN 'Differential file'
                        WHEN bs.type = 'P' THEN 'Partial'
                        WHEN bs.type = 'Q' THEN 'Differential partial' END + ' Backup'
    , bs.recovery_model
    , BackupStartDate = bs.Backup_Start_Date
    , BackupFinishDate = bs.Backup_Finish_Date
    , LatestBackupLocation = bf.physical_device_name
    -- backup_size is stored in bytes; convert to MB
    , backup_size_mb = bs.backup_size/1024./1024.
    , compressed_backup_size_mb = bs.compressed_backup_size/1024./1024.
    , database_backup_lsn -- For tlog and differential backups, this is the checkpoint_lsn of the FULL backup it is based on.
    , checkpoint_lsn
    , begins_log_chain
FROM msdb.dbo.backupset bs
LEFT OUTER JOIN msdb.dbo.backupmediafamily bf ON bs.[media_set_id] = bf.[media_set_id]
WHERE recovery_model in ('FULL', 'BULK-LOGGED')
    AND bs.backup_start_date > DATEADD(month, -2, sysdatetime()) --only look at last two months
ORDER BY bs.database_name asc, bs.Backup_Start_Date desc;
/ + [markdown] azdata_cell_guid="7fdd7c75-6904-4ec1-89c8-459aef29b1bb" extensions={"azuredatastudio": {"views": []}}
/ **To create a transaction log backup** / / <mark> Important / / If the database is damaged, see Tail-Log Backups (SQL Server).</mark> / / - [Back Up a Transaction Log (SQL Server)](https://docs.microsoft.com/sql/relational-databases/backup-restore/back-up-a-transaction-log-sql-server) / / - [SqlBackup](https://docs.microsoft.com/en-us/dotnet/api/microsoft.sqlserver.management.smo.backup.sqlbackup?view=sql-smo-160) (SMO) / / / / ### Discovering long-running transactions / / A very long-running transaction can cause the transaction log to fill. To look for long-running transactions, use one of the following: / / - **[sys.dm_tran_database_transactions](https://docs.microsoft.com/sql/relational-databases//system-dynamic-management-views/sys-dm-tran-database-transactions-transact-sql.md).** / / This dynamic management view returns information about transactions at the database level.
For a long-running transaction, columns of particular interest include the time of the first log record [(database_transaction_begin_time)](../system-dynamic-management-views/sys-dm-tran-database-transactions-transact-sql.md), the current state of the transaction [(database_transaction_state)](../system-dynamic-management-views/sys-dm-tran-database-transactions-transact-sql.md), and the [log sequence number (LSN)](../backup-restore/recover-to-a-log-sequence-number-sql-server.md) of the begin record in the transaction log [(database_transaction_begin_lsn)](../system-dynamic-management-views/sys-dm-tran-database-transactions-transact-sql.md). / / - **[DBCC OPENTRAN](https://docs.microsoft.com/sql//t-sql/database-console-commands/dbcc-opentran-transact-sql.md).** / This statement lets you identify the user ID of the owner of the transaction, so you can potentially track down the source of the transaction for a more orderly termination (committing it rather than rolling it back). / / ### Kill a transaction / / Sometimes you just have to end the process; you may have to use the [KILL](https://docs.microsoft.com/sql/t-sql/language-elements/kill-transact-sql.md) statement. Please use this statement very carefully, especially when critical processes are running that you don't want to kill. For more information, see [KILL (Transact-SQL)](https://docs.microsoft.com/sql/t-sql/language-elements/kill-transact-sql.md) / / ## Disk volume is full / / In some situations the disk volume that hosts the transaction log file may fill up. You can take one of the following actions to resolve the log-full scenario that results from a full disk: / / ### Free disk space / / You might be able to free disk space on the disk drive that contains the transaction log file for the database by deleting or moving other files. The freed disk space allows the recovery system to enlarge the log file automatically. 
/ / ### Move the log file to a different disk / / If you cannot free enough disk space on the drive that currently contains the log file, consider moving the file to another drive with sufficient space. / / <mark>IMPORTANT / Log files should never be placed on compressed file systems. </mark> / / See [Move Database Files](https://docs.microsoft.com/sql/relational-databases/databases/move-database-files.md) for information on how to change the location of a log file. / / ### Add a log file on a different disk / / Add a new log file to the database on a different disk that has sufficient space by using `ALTER DATABASE <database_name> ADD LOG FILE`. Multiple log files for a single database should be considered a temporary condition to resolve a space issue, not a long-term condition. Most databases should only have one transaction log file. Continue to investigate the reason why the transaction log is full and cannot be truncated. Consider adding temporary additional transaction log files as an advanced troubleshooting step. / / / For more information see [Add Data or Log Files to a Database](https://docs.microsoft.com/sql/relational-databases/databases/add-data-or-log-files-to-a-database.md). 
/ / / ### Utility script for recommended actions / / / These steps can be partly-automated by running this T-SQL script, which will identify log files that are using a large percentage of disk space and suggest actions: / / + azdata_cell_guid="1c9389f3-6389-4bfa-8a96-b57a5fd3a834" extensions={"azuredatastudio": {"views": []}}
-- Detect transaction log files occupying more than 90% of the disk volume
-- they reside on, and print recommended remediation commands for each one.
DECLARE @log_reached_disk_size BIT = 0

-- Report: log files whose size exceeds 90% of their volume's capacity.
SELECT name LogName, physical_name,
    convert(bigint, size)*8/1024 LogFile_Size_MB,  -- size is in 8-KB pages
    volume_mount_point,
    available_bytes/1024/1024 Available_Disk_space_MB,
    (convert(bigint, size)*8.0/1024)/(available_bytes/1024/1024 )*100 file_size_as_percentage_of_disk_space,
    db_name(mf.database_id) DbName
FROM sys.master_files mf
CROSS APPLY sys.dm_os_volume_stats (mf.database_id, file_id)
WHERE mf.[type_desc] = 'LOG'
    and (convert(bigint, size)*8.0/1024)/(available_bytes/1024/1024 )*100 > 90 --log is 90% of disk drive
ORDER BY size DESC

if @@ROWCOUNT > 0
BEGIN
    set @log_reached_disk_size = 1
    -- Discover whether any logs are close to, or have completely filled, the disk volume they reside on.
    -- Either Add A New File To A New Drive, Or Shrink Existing File.
    -- If Cannot Shrink, Go To Cannot Truncate Section.
    DECLARE @db_name_filled_disk sysname, @log_name_filled_disk sysname, @go_beyond_size bigint

    -- Cursor over the same >90% log files so each one gets its own advice block.
    DECLARE log_filled_disk CURSOR FOR
        SELECT db_name(mf.database_id), name
        FROM sys.master_files mf
        CROSS APPLY sys.dm_os_volume_stats (mf.database_id, file_id)
        WHERE mf.[type_desc] = 'LOG'
            and (convert(bigint, size)*8.0/1024)/(available_bytes/1024/1024 )*100 > 90 --log is 90% of disk drive
        ORDER BY size desc
    OPEN log_filled_disk
    FETCH NEXT FROM log_filled_disk into @db_name_filled_disk , @log_name_filled_disk
    WHILE @@FETCH_STATUS = 0
    BEGIN
        -- Fixed typo in user-facing message: 'comletely' -> 'completely'.
        SELECT 'Transaction log for database "' + @db_name_filled_disk + '" has nearly or completely filled disk volume it resides on!' AS Finding
        SELECT 'Consider using one of the below commands to shrink the "' + @log_name_filled_disk +'" transaction log file size or add a new file to a NEW volume' AS Recommendation
        SELECT 'DBCC SHRINKFILE(''' + @log_name_filled_disk + ''')' AS Shrinkfile_Command
        SELECT 'ALTER DATABASE ' + @db_name_filled_disk + ' ADD LOG FILE ( NAME = N''' + @log_name_filled_disk + '_new'', FILENAME = N''NEW_VOLUME_AND_FOLDER_LOCATION\' + @log_name_filled_disk + '_NEW.LDF'', SIZE = 81920KB , FILEGROWTH = 65536KB )' AS AddNewFile
        SELECT 'If shrink does not reduce the file size, likely it is because it has not been truncated. Please review next section below. See https://docs.microsoft.com/sql/t-sql/database-console-commands/dbcc-shrinkfile-transact-sql' AS TruncateFirst
        SELECT 'Can you free some disk space on this volume? If so, do this to allow for the log to continue growing when needed.' AS FreeDiskSpace
        FETCH NEXT FROM log_filled_disk into @db_name_filled_disk , @log_name_filled_disk
    END
    CLOSE log_filled_disk
    DEALLOCATE log_filled_disk
END
/ + [markdown] azdata_cell_guid="fc76db02-a5b3-406c-8a4c-63aba78b5053" extensions={"azuredatastudio": {"views": []}}
/ ## Log size is set to a fixed maximum value / / Error 9002 can be generated if the transaction log size has been set to an upper limit and autogrow is not allowed. In this case, enabling autogrow or increasing the log size manually can help resolve the issue.
Use this T-SQL command to find such log files and follow the recommendations provided: / + azdata_cell_guid="2b57ea05-f10d-46a0-9c4e-ef93e48b5222" extensions={"azuredatastudio": {"views": []}}
-- Detect transaction log files (file_id = 2) that have a fixed MAXSIZE cap
-- (neither unlimited (-1) nor the 2 TB default of 268435456 pages) and whose
-- current size is above 90% of that cap, then print ALTER DATABASE
-- recommendations for each.
SELECT db_name(database_id) DbName, name LogName, physical_name, type_desc
    , convert(bigint, SIZE)*8/1024 LogFile_Size_MB       -- size is in 8-KB pages
    , convert(bigint,max_size)*8/1024 LogFile_MaxSize_MB
    , (SIZE*8.0/1024)/(max_size*8.0/1024)*100 percent_full_of_max_size
FROM sys.master_files
WHERE file_id = 2
    AND max_size not in (-1, 268435456)
    AND (SIZE*8.0/1024)/(max_size*8.0/1024)*100 > 90

if @@ROWCOUNT > 0
BEGIN
    DECLARE @db_name_max_size sysname, @log_name_max_size sysname, @configured_max_log_boundary bigint
    DECLARE reached_max_size CURSOR FOR
        -- BUG FIX: this cursor previously selected the CURRENT size
        -- (convert(bigint, SIZE)*8/1024) into @configured_max_log_boundary,
        -- while the messages below report that value as the configured max
        -- limit; select max_size instead so the reported limit is correct.
        SELECT db_name(database_id), name, convert(bigint, max_size)*8/1024
        FROM sys.master_files
        WHERE file_id = 2
            AND max_size not in (-1, 268435456)
            AND (SIZE*8.0/1024)/(max_size*8.0/1024)*100 > 90
    OPEN reached_max_size
    FETCH NEXT FROM reached_max_size into @db_name_max_size , @log_name_max_size, @configured_max_log_boundary
    WHILE @@FETCH_STATUS = 0
    BEGIN
        SELECT 'The database "' + @db_name_max_size+'" contains a log file "' + @log_name_max_size + '" whose max limit is set to ' + convert(varchar(24), @configured_max_log_boundary) + ' MB and this limit has been reached!' as Finding
        SELECT 'Consider using one of the below ALTER DATABASE commands to either change the log file size or add a new file' as Recommendation
        SELECT 'ALTER DATABASE ' + @db_name_max_size + ' MODIFY FILE ( NAME = N''' + @log_name_max_size + ''', MAXSIZE = UNLIMITED)' as UnlimitedSize
        SELECT 'ALTER DATABASE ' + @db_name_max_size + ' MODIFY FILE ( NAME = N''' + @log_name_max_size + ''', MAXSIZE = something_larger_than_' + CONVERT(varchar(24), @configured_max_log_boundary) +'MB )' as IncreasedSize
        SELECT 'ALTER DATABASE ' + @db_name_max_size + ' ADD LOG FILE ( NAME = N''' + @log_name_max_size + '_new'', FILENAME = N''SOME_FOLDER_LOCATION\' + @log_name_max_size + '_NEW.LDF'', SIZE = 81920KB , FILEGROWTH = 65536KB )' as AddNewFile
        FETCH NEXT FROM reached_max_size into @db_name_max_size , @log_name_max_size, @configured_max_log_boundary
    END
    CLOSE reached_max_size
    DEALLOCATE reached_max_size
END
ELSE
    SELECT 'Found no files that have reached max log file size' as Findings
/ + [markdown] azdata_cell_guid="46cf127c-ff3b-48e4-ba77-5fd04cb7a146" extensions={"azuredatastudio": {"views": []}}
/ ### Increase log file size / / If space is available on the log disk, you can increase the size of the log file. The maximum size for log files is two terabytes (TB) per log file. / / If autogrow is disabled, the database is online, and sufficient space is available on the disk, do either of these: / / Manually increase the file size to produce a single growth increment. / Turn on autogrow by using the ALTER DATABASE statement to set a non-zero growth increment for the FILEGROWTH option. / / <mark>Note / / In either case, if the current size limit has been reached, increase the MAXSIZE value.</mark>
Troubleshooting-Notebooks/DOCs-to-Notebooks/T-Shooting_LogFull_9002.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # ---
# # cdo_nco_draft2018a.ipynb
#
# ## Purpose
# Use CDO and NCO for post-processing of CESM output data, prior to production of figures using [figures_draft2018a.ipynb](https://github.com/grandey/p17d-sulphur-eas-eqm/blob/master/analysis_draft2018a/figures_draft2018a.ipynb)
#
# ## Dependencies
# - Climate Data Operators (CDO)
# - NetCDF Operators (NCO)
# - Python modules mentioned in next cell, including [lib_draft2018a.py](https://github.com/grandey/p17d-sulphur-eas-eqm/blob/master/analysis_draft2018a/lib_draft2018a.py) and its dependencies (including [climapy](https://github.com/grandey/climapy)).
#
# ## Data requirements
# CESM output data, post-processed to time-series format. These data are archived at https://doi.org/10.6084/m9.figshare.6072887. The data files must be downloaded, and the variable `output_dir` in [lib_draft2018a.py](https://github.com/grandey/p17d-sulphur-eas-eqm/blob/master/analysis_draft2018a/lib_draft2018a.py) must point to the correct local folder.
#
# ## Author
# <NAME>, 2018

# +
# ! date

import lib_draft2018a
import os

print('lib_draft2018a.py is using {}'.format(lib_draft2018a.dependency_versions()))
# -

# Record the CDO/NCO tool versions for reproducibility (IPython shell magics).
# ! cdo --version
# ! ncks --version

# Mapping of scenario identifiers used in the filenames below — see lib_draft2018a.
scenario_name_dict = lib_draft2018a.load_scenario_name_dict()
in_dir = lib_draft2018a.output_dir # location of output time-series
out_dir = in_dir # write files in same directory

# ## Extract data on specific model levels
variable_ml_forb_list = [('OMEGA', '19', 'b'), # variable, model level, 'f' or 'b'
                         ('U', '27', 'b')] # ml 19 is ~525hPa; ml 27 is ~936hPa
for variable, ml, f_or_b in variable_ml_forb_list:
    new_variable = '{}_ml{}'.format(variable, ml) # new variable name
    for scenario in scenario_name_dict.keys():
        print('{}, {}, {}:'.format(new_variable, f_or_b, scenario))
        # Filenames: input time-series, final output, and an intermediate temp file
        in_filename = '{}/p17d_{}_{}.cam.h0.{}.nc'.format(in_dir, f_or_b, scenario, variable)
        out_filename = '{}/p17d_{}_{}.cam.h0.{}.nc'.format(out_dir, f_or_b, scenario, new_variable)
        temp_filename = '{}/temp_{}_{}.cam.h0.{}.nc'.format(out_dir, f_or_b, scenario, new_variable)
        # Remove previous out file
        if os.path.exists(out_filename):
            print(' Removing {}'.format(out_filename.split('/')[-1]))
            os.remove(out_filename)
        # Select the single model level with CDO. Note: sellevidx subsets by
        # level *index*; no pressure interpolation is performed here.
        # The {ml}/{in_filename}/{temp_filename} below are IPython shell-magic
        # variable substitutions, not Python format fields.
        print(' Selecting data for model level {}'.format(ml))
        # ! cdo -s sellevidx,{ml} {in_filename} {temp_filename}
        # Rename variable using NCO
        print(' Renaming variable to {}'.format(new_variable))
        # ! ncrename -v {variable},{new_variable} {temp_filename} {out_filename} >/dev/null 2>/dev/null
        print(' Written {}'.format(out_filename.split('/')[-1]))
        # Remove temporary file
        for filename in [temp_filename, ]:
            print(' Removing {}'.format(filename.split('/')[-1]))
            os.remove(filename)

# ! date
analysis_draft2018a/cdo_nco_draft2018a.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# Demonstration of class-imbalance handling on the credit-card fraud dataset:
# baseline models, manual under/over-sampling with pandas, and resamplers
# from imbalanced-learn (RandomUnderSampler, RandomOverSampler, TomekLinks).

# +
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score
from sklearn.model_selection import train_test_split
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
# -

# 'Class' is the target column (1 = fraud) and is heavily imbalanced.
data = pd.read_csv('creditcard.csv')
data.head()
data.shape
data.Class.value_counts()
data.isnull().sum()

x = data.iloc[:, :-1]  # all feature columns
y = data.iloc[:, -1]   # 'Class' target

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# +
# create the object
# NOTE(review): this baseline is fitted AND evaluated on the full dataset, so
# the accuracy below is in-sample (optimistic); kept for illustration only.
model = LogisticRegression()
model.fit(x, y)

y_predict = model.predict(x)
# -
# sklearn metrics take (y_true, y_pred) in that order.
accuracy_score(y, y_predict)

# +
# import library
from xgboost import XGBClassifier

xgb_model = XGBClassifier().fit(x_train, y_train)

# predict on the held-out test set
xgb_y_predict = xgb_model.predict(x_test)

# Fixed argument order: sklearn metric signatures are (y_true, y_pred);
# the order matters for roc_auc_score and confusion_matrix.
xgb_score = accuracy_score(y_test, xgb_y_predict)

print('Accuracy score is:', xgb_score)
print('Roc auc score:', roc_auc_score(y_test, xgb_y_predict))
print('F1 score:', f1_score(y_test, xgb_y_predict))
# -
confusion_matrix(y_test, xgb_y_predict)

# +
# class count
class_count_0, class_count_1 = data['Class'].value_counts()

# divide by class
class_0 = data[data['Class'] == 0]
class_1 = data[data['Class'] == 1]
# -
# print the shape of the class
print('class 0:', class_0.shape)
print('\nclass 1:', class_1.shape)

# +
# Manual random under-sampling: shrink the majority class to the minority size.
class_0_under = class_0.sample(class_count_1)
test_under = pd.concat([class_0_under, class_1], axis=0)

print("total class of 1 and 0:\n", test_under['Class'].value_counts())
test_under['Class'].value_counts().plot(kind='bar', title='Count (target)')
plt.show()
# +
# Manual random over-sampling: replicate minority rows (with replacement)
# up to the majority-class size.
class_1_over = class_1.sample(class_count_0, replace=True)
test_under = pd.concat([class_1_over, class_0], axis=0)

# print the number of class count
print('class count of 1 and 0:\n', test_under['Class'].value_counts())
# plot the count
test_under['Class'].value_counts().plot(kind='bar', title='Count (target)')
plt.show()

# +
# NOTE: removed unused `from sklearn.datasets import load_boston` — load_boston
# was never used here and was removed from scikit-learn 1.2, so the import
# itself raises on current versions.
import imblearn
# +
# import library
from imblearn.under_sampling import RandomUnderSampler

rus = RandomUnderSampler(random_state=42, replacement=True)
# fit predictor and target variable
x_rus, y_rus = rus.fit_resample(x, y)

print('original dataset shape:', Counter(y))
print('Resample dataset shape', Counter(y_rus))
# +
# import library
from imblearn.over_sampling import RandomOverSampler

ros = RandomOverSampler(random_state=42)
# fit predictor and target variable
x_ros, y_ros = ros.fit_resample(x, y)

print('Original dataset shape', Counter(y))
print('Resample dataset shape', Counter(y_ros))
# +
# load library
from imblearn.under_sampling import TomekLinks

tl = TomekLinks(sampling_strategy='majority')
# fit predictor and target variable
# Fixed: fit_sample() was removed in imbalanced-learn 0.6+; use fit_resample().
x_tl, y_tl = tl.fit_resample(x, y)

print('Original dataset shape:', Counter(y))
print('Resample dataset shape:', Counter(y_tl))
# -
Handling Class Imbalance/Handling Class Imbalance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from app import get_downloaded_base156 import pandas as pd csv_file, csv_encoding = get_downloaded_base156() # + field_names = ['SOLICITACAO', 'TIPO', 'ORGAO', 'DATA', 'HORARIO', 'ASSUNTO', 'SUBDIVISAO', 'DESCRICAO', 'LOGRADOURO_ASS', 'BAIRRO_ASS', 'REGIONAL_ASS', 'MEIO_RESPOSTA', 'OBSERVACAO', 'SEXO', 'BAIRRO_CIDADAO', 'REGIONAL_CIDADAO', 'DATA_NASC', 'TIPO_CIDADAO', 'ORGAO_RESP', 'RESPOSTA_FINAL', 'RESPOSTA_FINAL_DETALHE'] data = pd.read_csv(csv_file, sep=';', encoding=csv_encoding, error_bad_lines=False, skiprows=[0,1], names=field_names) # - data data[['ORGAO']].groupby(['ORGAO']).size().reset_index(name='counts')
research.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# # DataFlow
# ## LungStage

# +
# Render the LungStage data-flow diagram (sources -> merge -> screening ->
# annotation -> full patient data) as an SVG and display it inline.
from my_happy_graphviz import pydot
from my_happy_jupyter_utils import (
    image_utils
)

G = pydot.Dot(graph_type='digraph')

# Raw nuclear-medicine record sources, aggregated into one node.
nm_data_node_xml = G.make_node('Nuclear Medicine Records (XML)', 'tab')
nm_data_node_xls = G.make_node('Nuclear Medicine Records (XLS)', 'tab')
nm_data_node_adb = G.make_node('Nuclear Medicine Records (AccessDB)', 'tab')
nm_data_node = G.make_node('Aggregate NM Data', 'trapezium')
G.make_link(nm_data_node_xml, nm_data_node, style='dashed')
G.make_link(nm_data_node_xls, nm_data_node, style='dashed')
G.make_link(nm_data_node_adb, nm_data_node, style='dashed')

# Oncology records merged with the filtered NM data.
onco_data_node = G.make_node('Oncology Records (XLS)', 'tab')
join_node = G.make_node('Merge Records', 'trapezium')
G.make_link(onco_data_node, join_node)
G.make_link(nm_data_node, join_node, 'Filter on disease\ncase and type')

# PACS imaging records, indexed via the crawler/SOLR database, matched to merged records.
# NOTE(review): the edge label 'pypacscrawler\nmetd' looks like a truncated word
# ('metadata'?) — confirm before relying on it.
pacs_node = G.make_node('PACS Records', 'cds')
solr_node = G.make_node('PACSCrawler/SOLR Database', 'box')
pacs_join_node = G.make_node('Match PET Scans', 'trapezium')
G.make_link(pacs_node, solr_node, 'pypacscrawler\nmetd', style='dashed')
G.make_link(solr_node, pacs_join_node, style='dashed')
G.make_link(join_node, pacs_join_node, style='dashed')

master_list_node = G.make_node('Save as full_list.json', 'tab')
G.make_link(pacs_join_node, master_list_node, 'Master Patient List')

# Manual screening step feeding the annotation download.
screened_list_node = G.make_node('Screened List', 'box')
G.make_link(
    pacs_join_node,
    screened_list_node,
    'LungStage Screen\nGoogle Doc\n(Gregor)\n5min/patient')
download_node = G.make_node('Download for Annotation', 'box3d')
G.make_link(screened_list_node, download_node, style='dashed')

# Region annotation loop: annotated cases are filtered back out of the screened list.
# NOTE(review): label text 'Slicer-based To ol' looks like a typo for 'Tool' — confirm.
pet_download_node = G.make_node('Region Annotations', 'box3d')
G.make_link(
    download_node,
    pet_download_node,
    'LungStage Annotation\nSlicer-based To ol\n(<NAME> Thomas)\n30min/patient',
    style='dashed'
)
G.make_link(
    pet_download_node,
    screened_list_node,
    'Filter already\nannotated cases',
    style='dashed'
)
region_list_node = G.make_node('Region Data\nlsa.npz\nlsa.json', 'tab')
G.make_link(
    pet_download_node,
    region_list_node,
    'Save Region Data',
    style='dashed'
)

# Oncology review combines region data and the master list into the final dataset.
onco_list_node = G.make_node('Oncology List', 'box')
G.make_link(region_list_node, onco_list_node, style='dashed')
G.make_link(master_list_node, onco_list_node, style='dashed')
full_list_node = G.make_node('Full Patient Data', 'box')
G.make_link(
    onco_list_node,
    full_list_node,
    'LungStage Oncology Tool\nGoogle Docs\n(Audrey)\n30min/patient'
)

# Layout, render to SVG, and show inline.
G.set_overlap(False)
G.set_rankdir('UD')
print(G.to_string())
file_name = '/'.join([
    'data/output/images',
    '0502_0101_lungstage.svg'
])
G.draw(file_name, prog='dot')
image_utils.show_image_with_title_by_url({
    'file_path': file_name,
    'title': 'LungStage',
})
# -

# # mri

# +
# Render the MRI model pipeline diagram (sorter/position estimator feeding
# staging and outcome models) as an SVG and display it inline.
from my_happy_graphviz import pydot
from my_happy_jupyter_utils import (
    image_utils
)

G = pydot.Dot(graph_type='digraph')

# Input data node.
mri_node = G.make_node('mri', 'folder')
mri_node.set_label('MRI Image Data\n(T2, DWI Scout)')

# Auxiliary models: image-category sorter and slice-position estimator
# (record-shaped nodes; '{...|...}' is Graphviz record-label syntax).
sort_node = G.make_node('sort', 'record')
sort_node.set_label('{{Neural Image Sorter}|{DWI|T1|T2|Other}}')
G.make_link(mri_node, sort_node)
pos_node = G.make_node('position', 'record')
pos_node.set_label('{{Neural Position\nEstimator}|{z}}')
G.make_link(mri_node, pos_node)

# Staging model consumes images plus both auxiliary outputs.
stage_node = G.make_node('t2_stage', 'record')
stage_node.set_label(
    '{{Staging}|{T0|T1|T2|T3|T4}|{N0|N1|N2}|{M0|M1}}'
)
G.make_link(mri_node, stage_node, 'MRI Images')
G.make_link(sort_node, stage_node, 'MRI Category', style='dashed')
G.make_link(pos_node, stage_node, 'Position\nEstimate', style='dashed')

# Outcome model consumes images, staging estimate, and category.
outcome_node = G.make_node('outcome', 'record')
outcome_node.set_label(
    '{{Outcome}|{Recurrence|Remission}}'
)
G.make_link(
    mri_node,
    outcome_node,
    'MRI Images'
)
G.make_link(stage_node, outcome_node, 'Stage\nEstimation', style='dashed')
G.make_link(sort_node, outcome_node, style='dashed')

# Color-code: auxiliary models blue, end models green.
for inode in [pos_node, sort_node]:
    inode.set_style('filled')
    inode.set_fillcolor('lightblue')
for inode in [stage_node, outcome_node]:
    inode.set_style('filled')
    inode.set_fillcolor('lightgreen')

G.set_rankdir('UD')
print(G.to_string())
file_name = '/'.join([
    'data/output/images',
    '0502_0201_mri.svg'
])
G.draw(file_name)
image_utils.show_image_with_title_by_url({
    'file_path': file_name,
    'title': 'mri',
})
# -
examples/0502_0101_dataflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # ---

# Grid-based validation experiment for hyperspectral classification (PaviaU):
# extract training grids (or load pre-extracted patches), train the 3D Keras
# model with early stopping + checkpointing, and report test-set accuracy.
# NOTE(review): kernelspec says Python 2, but os.makedirs(..., exist_ok=True)
# is Python 3 only — confirm the intended interpreter.

# +
import os
from random import randint
from matplotlib import pyplot as plt
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from python_research.experiments.multiple_feature_learning.utils.utils import load_patches, combine_patches
from python_research.experiments.multiple_feature_learning.utils.dataset import Dataset
from python_research.experiments.multiple_feature_learning.builders.keras_builders import build_model, build_settings_for_dataset
from python_research.preprocessing.grids.grid_extraction import extract_grids

# %matplotlib inline

# Experiment configuration.
PATCHES_DIRECTORY = ""  # if non-empty, load pre-extracted patches instead of extracting grids
DATASET_PATH = "C:\\Users\mmyller.KPLABS\Documents\datasets\pavia\PaviaU_corrected.npy"
DATASET_GT_PATH = "C:\\Users\mmyller.KPLABS\Documents\datasets\pavia\PaviaU_gt.npy"
OUTPUT_PATH = "grids_validation_3D\\artifact"
PATCH_SIZE = (17, 30)
PIXEL_NEIGHBOURHOOD = (7, 7)
TOTAL_NUMBER_OF_TRAIN_SAMPLES = 2700
CLASSES_COUNT = 9
PATIENCE = 15   # early-stopping patience (epochs)
EPOCHS = 200
BATCH_SIZE = 64

os.makedirs("grids_validation_3D", exist_ok=True)

# Load data if path to the folder with patches is specified
if PATCHES_DIRECTORY != "":
    train_val_data, test_data = load_patches(PATCHES_DIRECTORY, PIXEL_NEIGHBOURHOOD, CLASSES_COUNT)
    # Pick a random spectral band for visualisation.
    # Fixed off-by-one: random.randint is inclusive on BOTH ends, so the old
    # upper bound of shape[-1] could index one past the last band.
    dataset_image = test_data.x[:, :, randint(0, test_data.x.shape[-1] - 1)]
# Extract grids from provided dataset
else:
    patches, test_set, dataset_image = extract_grids(DATASET_PATH, DATASET_GT_PATH, PATCH_SIZE, TOTAL_NUMBER_OF_TRAIN_SAMPLES)
    train_val_data, test_data = combine_patches(patches[0], patches[1], test_set[0], test_set[1], PIXEL_NEIGHBOURHOOD, CLASSES_COUNT)

# Show location of extracted grids
plt.imshow(dataset_image)
plt.show()
# +
# Normalize data
train_val_data.normalize_data(CLASSES_COUNT)
# Normalize test data using min and max from train data
test_data.x_test = (test_data.x_test - train_val_data.min) / (train_val_data.max - train_val_data.min)

bands_count = test_data.x.shape[-1]

# Build model
settings = build_settings_for_dataset(PIXEL_NEIGHBOURHOOD)
model = build_model(settings, CLASSES_COUNT, bands_count)
print(model.summary())
# +
# Callbacks: stop when validation stops improving; keep only the best weights.
early = EarlyStopping(patience=PATIENCE)
checkpoint = ModelCheckpoint(OUTPUT_PATH + "_model", save_best_only=True)

model.fit(x=train_val_data.x_train,
          y=train_val_data.y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          verbose=False,
          callbacks=[early, checkpoint],
          validation_data=[train_val_data.x_val, train_val_data.y_val])

# Reload the checkpointed best model before evaluating.
best_model = load_model(OUTPUT_PATH + "_model")

# Evaluate test set score ([1] is the accuracy metric from evaluate()).
accuracy = best_model.evaluate(x=test_data.x_test, y=test_data.y_test)[1]
print("Test set accuracy: {}".format(accuracy))
# -
grids_validation_3D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pickle import pandas as pd # + with open('Q_stag.pickle', 'rb') as fp: Q = pickle.load(fp) with open('inven_stag.pickle', 'rb') as fp: inven = pickle.load(fp) # - sns.set_theme(style="whitegrid") # part of Q and inven # pQ = Q[:, ::10000, :, :] # pI = inven[:, ::10000, :] pQ = Q pI = inven l = pI.shape[1] pI.shape df = [] for ins in range(10): for act in range(4): val = pQ[ins, :, :, act].mean(1) df.append(pd.DataFrame({'Q':val, 'instance': ins*np.ones(l), 'Actions':[str(act+1),]*l, 'step': np.arange(l)})) Qdf = pd.concat(df) # figsize=(8,6) plt.figure() palette = sns.color_palette("husl", 4) g = sns.lineplot(data=Qdf, x='step', y='Q', hue='Actions', palette=palette) plt.xlabel(r'Steps ($10^4$)', fontsize=16) plt.ylabel('Q-values', fontsize=16) g.tick_params(axis = 'both', which = 'major', labelsize = 16) plt.legend(bbox_to_anchor=(1.05, 1), title='Actions', fontsize=14, title_fontsize=14) plt.savefig('QConverge.pdf', format='pdf', dpi=1000, bbox_inches='tight', pad_inches=0.1) df_temp = [] for ins in range(10): for agent in range(2): val = pI[ins, :, agent] df_temp.append(pd.DataFrame({'Inventory':val, 'instance': ins*np.ones(l), 'Agent':[str(agent+1),]*l, 'step': np.arange(l)})) Idf = pd.concat(df_temp) # figsize=(8,6) plt.figure() palette = sns.color_palette("husl", 4) g = sns.lineplot(data=Idf, x='step', y='Inventory', hue='Agent') plt.xlabel(r'Steps ($10^4$)', fontsize=16) plt.ylabel('Inventory', fontsize=16) g.tick_params(axis = 'both', which = 'major', labelsize = 16) plt.legend(bbox_to_anchor=(1.25, 1), title='Agents', fontsize=14, title_fontsize=14) plt.savefig('Inventory.pdf', format='pdf', dpi=1000, bbox_inches='tight', pad_inches=0.1)
Figs/Stag_Fig2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:sasmodels2] *
#     language: python
#     name: conda-env-sasmodels2-py
# ---

# # Plotting SANS Data with Fits
#
# This notebook will plot the SANS/USANS data overlaid with the final model
# fit for each sample. Each fit includes the appropriate constant polystyrene
# background contribution (Guinier-Porod model). Models for the conjugated
# polymer phase are either sphere, sphere+cylinder, ellipsoid, or
# ellipsoid+cylinder.

# +
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe

from bumps.names import *
from bumps.fitters import fit
import bumps

import sasmodels
from sasmodels.core import load_model
from sasmodels.bumps_model import Model, Experiment
from sasmodels.data import load_data, plot_data, empty_data1D
from sasmodels.direct_model import DirectModel

import sas
# -

# ## Loading data and relevant meta-data

# +
# sample meta-data
sample_info = pd.read_csv('../data/sans/Sample_Info.csv')

# helpful meta-data dictionaries keyed by sample number
names = dict(zip(sample_info['Sample'], sample_info['Name']))
cps = dict(zip(sample_info['Sample'], sample_info['Conjugated Polymer']))
matrix = dict(zip(sample_info['Sample'], sample_info['Commodity Polymer']))
solvent_names = dict(zip(sample_info['Sample'], sample_info['Solvent']))

# target weight percents of conjugated polymer
target = dict(zip(sample_info['Sample'], sample_info['Target Fraction'] * 100))

# fixing 401/402 and 403/404 sample pair target values for plotting colors only
target[401] = 0.5
target[402] = 0.1
target[403] = 5
target[404] = 1

# actual weight percentages (columns: key, target, actual, stdev, vol, vol stdev)
data = np.loadtxt('../data/uv_vis/Corrected_wtPercents.csv', delimiter=',', skiprows=1)
actual = {}
actual_stdev = {}
actual_vol = {}
actual_stdev_vol = {}
for key, tar, act, stdev, act_vol, stdev_vol in data:
    actual[key] = act
    actual_stdev[key] = stdev
    actual_vol[key] = act_vol
    actual_stdev_vol[key] = stdev_vol

# Scattering length densities (1e-6 / A^2) per polymer.
slds = {'RRe-P3HT': 0.676,
        'RRa-P3HT': 0.676,
        'P3DDT': 0.316,
        'PQT-12': 0.676,
        'Polystyrene-D8': 6.464,  # density 1.13
        'Polystyrene-H8': 1.426}
# -

# Load SANS and USANS curves; sample key is parsed from the file name.
data_dir = '../data/sans/Smeared_Data_20200629/'
files = os.listdir(data_dir)
sans_data = {}
usans_data = {}
for file in files:
    if 'USANS' in file:
        key = int(file.split('_')[0][3:])
        usans_data[key] = load_data(data_dir + file)
    elif 'SANS' in file:
        key = int(file.split('_')[0][3:])
        sans_data[key] = load_data(data_dir + file)

# +
# useful dictionaries with labels and colors for the plots and their legends
wt_names = {}
full_names = {}
wt_colors = {}
solvent_colors = {}
cp_colors = {}
rep_colors = {}
rep_names = {}

temp_wt_colors = {
    0.1: 'firebrick',
    0.5: 'darkorange',
    1.0: 'darkcyan',
    5.0: 'mediumblue',
    10.0: 'deeppink',
    25.0: 'darkorchid',
    50.0: 'forestgreen',
    0.0: 'black'
}

temp_solvent_colors = {
    'Chloroform': 'firebrick',
    'Bromobenzene': 'darkorange',
    'Toluene': 'darkcyan',
    'Slow Dry Chloroform': 'darkorchid'
}

temp_cp_colors = {
    'RRe-P3HT': 'firebrick',
    'RRa-P3HT': 'darkorange',
    'P3DDT': 'darkorchid',
    'PQT-12': 'darkcyan',
    'None': 'black'
}

for key in names.keys():
    # prefer the measured fraction when available, fall back to the target
    if key in actual.keys():
        frac = actual[key]
    else:
        frac = target[key]
    frac = np.round(frac, 2)
    if cps[key] == 'None':
        wt_names[key] = matrix[key] + ' Control'
        full_names[key] = matrix[key] + ' Control'
    else:
        wt_names[key] = str(frac) + ' wt% ' + cps[key]
        full_names[key] = str(frac) + ' wt% ' + cps[key] + ' in ' + matrix[key]

for key in cps.keys():
    wt_colors[key] = temp_wt_colors[target[key]]
    solvent_colors[key] = temp_solvent_colors[solvent_names[key]]
    cp_colors[key] = temp_cp_colors[cps[key]]
# -

# ## Sorting the sample list by the final combined form factor specified

# +
# Read the final-model assignment once and select keys per form factor
# (the original re-read the same CSV four times).
true_reads = pd.read_csv('../data/sans/Fit_Finals.csv').to_numpy()

final_keys_spheres = true_reads[true_reads[:, 1] == 'Sphere', 0]
final_keys_ellipsoids = true_reads[true_reads[:, 1] == 'Ellipsoid', 0]
final_keys_sphere_cylinders = true_reads[true_reads[:, 1] == 'Sphere+Cylinder', 0]
final_keys_ellipsoid_cylinders = true_reads[true_reads[:, 1] == 'Ellipsoid+Cylinder', 0]
# -

# ### Loading polystyrene fit information

background_files = [file for file in os.listdir('../data/sans/PS_Fitting/ps_fit_results/power_law_background') if 'json' in file]
backgrounds = {}  # key is sample key, value is ('best', '95% confidence interval')
for file in background_files:
    data_read = pd.read_json('../data/sans/PS_Fitting/ps_fit_results/power_law_background/' + file)
    key = int(file.split('_')[0][3:])
    p95 = data_read.loc['p95', str(key) + ' background']
    backgrounds[key] = (data_read.loc['best', str(key) + ' background'], p95)

# +
power_law_fit_info = pd.read_json('../data/sans/PS_Fitting/ps_fit_results/power_law_porod_exp_scale/PS_porod_exp_scale-err.json')
ps_scales = {}
for key, value in power_law_fit_info.items():
    if 'porod_exp' in key:
        # shared Porod exponent for all polystyrene fits
        ps_porod_exp = value['best']
        ps_porod_exp_95 = value['p95']
    else:
        key = int(key.split()[0])
        ps_scales[key] = (value['best'], value['p95'])
# -

# +
guinier_porod_fit = pd.read_json('../data/sans/PS_Fitting/ps_fit_results/guinier_porod_s_scale/PS_s_scale-err.json')
rgs = {}
adjusted_scales = {}
for key, value in guinier_porod_fit.items():
    if key == 'ps s':
        ps_s = value['best']
        ps_s_95 = value['p95']
    elif 'rg' in key:
        key = int(key.split()[0])
        rgs[key] = (value['best'], value['p95'])
    elif 'scale' in key:
        key = int(key.split()[0])
        ps_scales[key] = (value['best'], value['p95'])

# Convert Guinier-Porod scales from the Porod regime to the Guinier regime
# via the crossover point q1 (Hammouda's Guinier-Porod formulation).
for key in rgs.keys():
    q1 = (1 / rgs[key][0]) * np.sqrt((ps_porod_exp - ps_s) * (3 - ps_s) / 2)
    new_scale = ps_scales[key][0] * np.exp(-1 * q1**2 * rgs[key][0]**2 / (3 - ps_s)) * q1**(ps_porod_exp - ps_s)
    new_95p = np.array(ps_scales[key][1]) * np.exp(-1 * q1**2 * rgs[key][0]**2 / (3 - ps_s)) * q1**(ps_porod_exp - ps_s)
    adjusted_scales[key] = (new_scale, list(new_95p))
# -

avg_rg = np.average([x[0] for x in rgs.values()])
max_rg = np.max([x[1][1] for x in rgs.values()])
min_rg = np.min([x[1][0] for x in rgs.values()])
avg_scale = np.average([x[0] for y, x in ps_scales.items() if y in rgs.keys()])
# NOTE(review): max_scale/min_scale use np.average, not np.max/np.min — looks
# like a copy-paste slip, but both are unused below so behavior is preserved.
max_scale = np.average([x[1][1] for y, x in ps_scales.items() if y in rgs.keys()])
min_scale = np.average([x[1][0] for y, x in ps_scales.items() if y in rgs.keys()])

# ### Loading the Porod analysis results, we will only utilize the previously
# determined background values to minimize the fitting here.

# +
porod_files = [file for file in os.listdir('../data/sans/Porod_analysis/porod_results') if 'json' in file]
for file in porod_files:
    data_read = pd.read_json('../data/sans/Porod_analysis/porod_results/' + file)
    key = int(file.split('_')[0][3:])
    for column, value in data_read.items():
        if 'background' in column:
            # Porod-derived backgrounds supersede the power-law ones
            backgrounds[key] = (value['best'], value['p95'])
# -

# ### Define plotting functions for each combined model and load the final
# fitting results


def _overlay_theory(key, sans_model, color_choice, zorder, label):
    """Plot the model theory curve over the SANS and (slit-smeared) USANS data
    for sample *key*.  Shared tail of all four plot_*_fits functions below."""
    sans = sans_data[key]
    sans.dx = sans.dx - sans.dx  # zero out dQ so no extra smearing is applied
    usans = usans_data[key]
    sans_experiment = Experiment(data=sans, model=sans_model)
    usans_experiment = Experiment(data=usans, model=sans_model)
    # USANS uses slit smearing with a fixed slit height of 0.117 1/A
    usans_experiment.resolution = sasmodels.resolution.Slit1D(usans.x, 0.117)
    sans_problem = FitProblem(sans_experiment)
    usans_problem = FitProblem(usans_experiment)
    outline = [pe.Stroke(linewidth=5, foreground='white'), pe.Normal()]
    plt.errorbar(sans.x, sans_problem.fitness.theory(), c=color_choice, linewidth=2,
                 zorder=zorder, fmt='-', label=label, path_effects=outline)
    plt.errorbar(usans.x, usans_problem.fitness.theory(), c=color_choice, linewidth=2,
                 zorder=zorder, fmt='-', path_effects=outline)


# #### Spheres

# +
results_direct = '../data/sans/Sample_Fitting/fitting_results/ps_sphere/'
fit_keys = []
fit_sphere_radius = {}
fit_sphere_scale = {}
for file in [file for file in os.listdir(results_direct) if '.json' in file]:
    data_read = pd.read_json(results_direct + file)
    key = int(file.split('_')[0][3:])
    fit_keys.append(key)
    for pd_key, value in data_read.items():
        if 'radius' in pd_key:
            fit_sphere_radius[key] = (value['best'], value['p95'])
        elif 'sphere scale' in pd_key:
            fit_sphere_scale[key] = (value['best'], value['p95'])
# -


def plot_sphere_fits(key, sans, usans, color_choice, zorder):
    """Overlay the Guinier-Porod + sphere fit for sample *key*."""
    kernel = load_model('guinier_porod+sphere')
    vol = actual_vol[key] / 100          # cp volume fraction from uv-vis
    vol_stdev = actual_stdev_vol[key] / 100

    # model parameters (A = polystyrene Guinier-Porod, B = sphere)
    scale = Parameter(1, name=str(key) + 'scale')
    background = Parameter(backgrounds[key][0], name=str(key) + 'background')
    A_scale = Parameter(avg_scale * (1 - vol), name=str(key) + ' PS scale')
    A_rg = Parameter(avg_rg, name=str(key) + ' PS rg')
    A_s = Parameter(ps_s, name=str(key) + ' PS s')
    A_porod_exp = Parameter(ps_porod_exp, name=str(key) + ' PS porod_exp')
    B_scale = Parameter(fit_sphere_scale[key][0], name=str(key) + ' sphere scale')
    B_sld = Parameter(slds[cps[key]], name=str(key) + ' PS sld')
    B_sld_solvent = Parameter(slds[matrix[key]], name=str(key) + ' PS solvent')
    B_radius = Parameter(fit_sphere_radius[key][0], limits=[0, inf], name=str(key) + ' sphere radius')
    B_radius_pd = Parameter(0.5, name=str(key) + ' sphere radius pd')
    B_radius_pd_n = Parameter(200, name=str(key) + ' sphere radius pd n')
    B_radius_pd_nsigma = Parameter(8, name=str(key) + ' sphere radius pd nsigma')

    # setting up the combined model for plotting
    sans_model = Model(
        model=kernel,
        scale=scale, background=background,
        A_scale=A_scale, A_rg=A_rg, A_s=A_s, A_porod_exp=A_porod_exp,
        B_scale=B_scale, B_sld=B_sld, B_sld_solvent=B_sld_solvent,
        B_radius=B_radius, B_radius_pd_type='lognormal', B_radius_pd=B_radius_pd,
        B_radius_pd_n=B_radius_pd_n, B_radius_pd_nsigma=B_radius_pd_nsigma,
    )
    _overlay_theory(key, sans_model, color_choice, zorder, 'Sphere Fit')


# #### Ellipsoids

# +
results_direct = '../data/sans/Sample_Fitting/fitting_results/ps_ellipsoid/'
fit_ellipsoid_polar_radius = {}
fit_ellipsoid_equatorial_radius = {}
fit_ellipsoid_scale = {}
for file in [file for file in os.listdir(results_direct) if '.json' in file]:
    data_read = pd.read_json(results_direct + file)
    key = int(file.split('_')[0][3:])
    fit_keys.append(key)
    for pd_key, value in data_read.items():
        if 'polar radius' in pd_key:
            fit_ellipsoid_polar_radius[key] = (value['best'], value['p95'])
        elif 'equatorial radius' in pd_key:
            fit_ellipsoid_equatorial_radius[key] = (value['best'], value['p95'])
        elif 'scale' in pd_key:
            fit_ellipsoid_scale[key] = (value['best'], value['p95'])
# -


def plot_ellipsoid_fits(key, sans, usans, color_choice, zorder):
    """Overlay the Guinier-Porod + ellipsoid fit for sample *key*."""
    kernel = load_model('guinier_porod+ellipsoid')
    vol = actual_vol[key] / 100          # cp volume fraction from uv-vis
    vol_stdev = actual_stdev_vol[key] / 100

    # model parameters (A = polystyrene Guinier-Porod, B = ellipsoid)
    scale = Parameter(1, name=str(key) + 'scale')
    background = Parameter(backgrounds[key][0], name=str(key) + 'background')
    A_scale = Parameter(avg_scale * (1 - vol), name=str(key) + ' PS scale')
    A_rg = Parameter(avg_rg, name=str(key) + ' PS rg')
    A_s = Parameter(ps_s, name=str(key) + ' PS s')
    A_porod_exp = Parameter(ps_porod_exp, name=str(key) + ' PS porod_exp')
    B_scale = Parameter(fit_ellipsoid_scale[key][0], name=str(key) + ' sphere scale')
    B_sld = Parameter(slds[cps[key]], name=str(key) + ' PS sld')
    B_sld_solvent = Parameter(slds[matrix[key]], name=str(key) + ' PS solvent')
    B_radius_polar = Parameter(fit_ellipsoid_polar_radius[key][0], limits=[0, inf],
                               name=str(key) + ' ellipsoid polar radius')
    B_radius_polar_pd = Parameter(0.5, name=str(key) + ' ellipsoid polar radius pd')
    B_radius_polar_pd_n = Parameter(200, name=str(key) + ' ellipsoid polar radius pd n')
    B_radius_polar_pd_nsigma = Parameter(8, name=str(key) + ' ellipsoid polar radius pd nsigma')
    B_radius_equatorial = Parameter(fit_ellipsoid_equatorial_radius[key][0], limits=[0, inf],
                                    name=str(key) + ' ellipsoid equatorial radius')
    B_radius_equatorial_pd = Parameter(0.5, name=str(key) + ' ellipsoid equatorial radius pd')
    B_radius_equatorial_pd_n = Parameter(200, name=str(key) + ' ellipsoid equatorial radius pd n')
    B_radius_equatorial_pd_nsigma = Parameter(8, name=str(key) + ' ellipsoid equatorial radius pd nsigma')

    # setting up the combined model for plotting
    sans_model = Model(
        model=kernel,
        scale=scale, background=background,
        A_scale=A_scale, A_rg=A_rg, A_s=A_s, A_porod_exp=A_porod_exp,
        B_scale=B_scale, B_sld=B_sld, B_sld_solvent=B_sld_solvent,
        B_radius_polar=B_radius_polar, B_radius_polar_pd_type='lognormal',
        B_radius_polar_pd=B_radius_polar_pd, B_radius_polar_pd_n=B_radius_polar_pd_n,
        B_radius_polar_pd_nsigma=B_radius_polar_pd_nsigma,
        B_radius_equatorial=B_radius_equatorial, B_radius_equatorial_pd_type='lognormal',
        B_radius_equatorial_pd=B_radius_equatorial_pd, B_radius_equatorial_pd_n=B_radius_equatorial_pd_n,
        B_radius_equatorial_pd_nsigma=B_radius_equatorial_pd_nsigma,
    )
    _overlay_theory(key, sans_model, color_choice, zorder, 'Ellipsoid Fit')


# #### Sphere + Cylinder

# +
results_direct = '../data/sans/Sample_Fitting/fitting_results/ps_sphere_cylinder_lm/'
fit_sphcyl_sphere_radius = {}
fit_sphcyl_cylinder_radius = {}
fit_sphcyl_scale_ratio = {}
fit_sphcyl_cp_scale = {}
fit_sphcyl_cylinder_length = {}
for file in [file for file in os.listdir(results_direct) if '.csv' in file]:
    data_read = np.loadtxt(results_direct + file, delimiter=',', dtype='str')
    key = int(file.split('_')[0][3:])
    fit_keys.append(key)
    for label, x, dx in data_read:
        x = float(x)
        dx = float(dx)
        if 'radius' in label:
            if 'sphere' in label:
                fit_sphcyl_sphere_radius[key] = (x, dx)
            elif 'cylinder' in label:
                fit_sphcyl_cylinder_radius[key] = (x, dx)
        elif 'cp scale' in label:
            fit_sphcyl_cp_scale[key] = (x, dx)
        elif 'scale ratio' in label:
            fit_sphcyl_scale_ratio[key] = (x, dx)
        elif 'length' in label:
            fit_sphcyl_cylinder_length[key] = (x, dx)
# -


def plot_sphere_cylinder_fits(key, sans, usans, color_choice, zorder):
    """Overlay the Guinier-Porod + sphere + cylinder fit for sample *key*.

    The conjugated-polymer scale is split between sphere (B) and cylinder (C)
    via scale_ratio; cp_scale carries a Normal prior centred on the uv-vis
    volume fraction.
    """
    kernel = load_model('guinier_porod+sphere+cylinder')
    vol = actual_vol[key] / 100          # cp volume fraction from uv-vis
    vol_stdev = actual_stdev_vol[key] / 100

    # model parameters (A = polystyrene, B = sphere, C = cylinder)
    scale = Parameter(1, name=str(key) + 'scale')
    background = Parameter(backgrounds[key][0], name=str(key) + 'background')
    A_scale = Parameter(avg_scale * (1 - vol), name=str(key) + ' PS scale')
    A_rg = Parameter(avg_rg, name=str(key) + ' PS rg')
    A_s = Parameter(ps_s, name=str(key) + ' PS s')
    A_porod_exp = Parameter(ps_porod_exp, name=str(key) + ' PS porod_exp')
    scale_ratio = Parameter(fit_sphcyl_scale_ratio[key][0], name=str(key) + ' B scale ratio').range(0, 1)
    scale_normal = bumps.bounds.Normal(mean=vol, std=vol_stdev)
    cp_scale = Parameter(fit_sphcyl_cp_scale[key][0], name=str(key) + ' cp scale', bounds=scale_normal)
    B_scale = scale_ratio * cp_scale
    B_sld = Parameter(slds[cps[key]], name=str(key) + ' PS sld')
    B_sld_solvent = Parameter(slds[matrix[key]], name=str(key) + ' PS solvent')
    B_radius = Parameter(fit_sphcyl_sphere_radius[key][0], limits=[0, inf],
                         name=str(key) + ' sphere radius').range(100, 200000)
    B_radius_pd = Parameter(0.5, name=str(key) + ' sphere radius pd')
    B_radius_pd_n = Parameter(200, name=str(key) + ' sphere radius pd n')
    B_radius_pd_nsigma = Parameter(8, name=str(key) + ' sphere radius pd nsigma')
    C_scale = (1 - scale_ratio) * cp_scale
    C_sld = Parameter(slds[cps[key]], name=str(key) + ' PS sld')
    C_sld_solvent = Parameter(slds[matrix[key]], name=str(key) + ' PS solvent')
    C_radius = Parameter(fit_sphcyl_cylinder_radius[key][0], limits=[0, inf],
                         name=str(key) + ' cylinder radius').range(10, 1000)
    C_radius_pd = Parameter(0.2, name=str(key) + ' cylinder radius pd')
    C_radius_pd_n = Parameter(200, name=str(key) + ' cylinder radius pd n')
    C_radius_pd_nsigma = Parameter(8, name=str(key) + ' cylinder radius pd nsigma')
    C_length = Parameter(fit_sphcyl_cylinder_length[key][0], limits=[0, inf],
                         name=str(key) + ' length').range(10000, 300000)
    C_length_pd = Parameter(0, name=str(key) + ' length pd')
    C_length_pd_n = Parameter(200, name=str(key) + ' length pd n')
    C_length_pd_nsigma = Parameter(8, name=str(key) + ' length pd nsigma')

    # setting up the combined model for fitting
    sans_model = Model(
        model=kernel,
        scale=scale, background=background,
        A_scale=A_scale, A_rg=A_rg, A_s=A_s, A_porod_exp=A_porod_exp,
        B_scale=B_scale, B_sld=B_sld, B_sld_solvent=B_sld_solvent,
        B_radius=B_radius, B_radius_pd_type='lognormal', B_radius_pd=B_radius_pd,
        B_radius_pd_n=B_radius_pd_n, B_radius_pd_nsigma=B_radius_pd_nsigma,
        C_scale=C_scale, C_sld=C_sld, C_sld_solvent=C_sld_solvent,
        C_radius=C_radius, C_radius_pd_type='lognormal', C_radius_pd=C_radius_pd,
        C_radius_pd_n=C_radius_pd_n, C_radius_pd_nsigma=C_radius_pd_nsigma,
        C_length=C_length, C_length_pd_type='lognormal', C_length_pd=C_length_pd,
        C_length_pd_n=C_length_pd_n, C_length_pd_nsigma=C_length_pd_nsigma,
    )
    _overlay_theory(key, sans_model, color_choice, zorder, 'Sphere+Cylinder Fit')


# #### Ellipsoid + Cylinder Fits

# +
results_direct = '../data/sans/Sample_Fitting/fitting_results/ps_ellipsoid_cylinder_lm/'
fit_ellcyl_polar_radius = {}
fit_ellcyl_equatorial_radius = {}
fit_ellcyl_cylinder_radius = {}
fit_ellcyl_scale_ratio = {}
fit_ellcyl_cp_scale = {}
fit_ellcyl_cylinder_length = {}
for file in [file for file in os.listdir(results_direct) if '.csv' in file]:
    data_read = np.loadtxt(results_direct + file, delimiter=',', dtype='str')
    key = int(file.split('_')[0][3:])
    fit_keys.append(key)
    for label, x, dx in data_read:
        x = float(x)
        dx = float(dx)
        if 'radius' in label:
            # 'cylinder' must be tested first: polar/equatorial also contain 'radius'
            if 'cylinder' in label:
                fit_ellcyl_cylinder_radius[key] = (x, dx)
            elif 'polar' in label:
                fit_ellcyl_polar_radius[key] = (x, dx)
            elif 'equatorial' in label:
                fit_ellcyl_equatorial_radius[key] = (x, dx)
        elif 'cp scale' in label:
            fit_ellcyl_cp_scale[key] = (x, dx)
        elif 'scale ratio' in label:
            fit_ellcyl_scale_ratio[key] = (x, dx)
        elif 'length' in label:
            fit_ellcyl_cylinder_length[key] = (x, dx)
# -


def plot_ellipsoid_cylinder_fits(key, sans, usans, color_choice, zorder):
    """Overlay the Guinier-Porod + ellipsoid + cylinder fit for sample *key*."""
    kernel = load_model('guinier_porod+ellipsoid+cylinder')
    vol = actual_vol[key] / 100          # cp volume fraction from uv-vis
    vol_stdev = actual_stdev_vol[key] / 100

    # model parameters (A = polystyrene, B = ellipsoid, C = cylinder)
    scale = Parameter(1, name=str(key) + 'scale')
    background = Parameter(backgrounds[key][0], name=str(key) + 'background')
    A_scale = Parameter(avg_scale * (1 - vol), name=str(key) + ' PS scale')
    A_rg = Parameter(avg_rg, name=str(key) + ' PS rg')
    A_s = Parameter(ps_s, name=str(key) + ' PS s')
    A_porod_exp = Parameter(ps_porod_exp, name=str(key) + ' PS porod_exp')
    scale_ratio = Parameter(fit_ellcyl_scale_ratio[key][0], name=str(key) + ' B scale ratio').range(0, 1)
    scale_normal = bumps.bounds.Normal(mean=vol, std=vol_stdev)
    # some samples have no fitted cp scale; fall back to the uv-vis volume fraction
    if key in fit_ellcyl_cp_scale.keys():
        cp_scale = Parameter(fit_ellcyl_cp_scale[key][0], name=str(key) + ' cp scale', bounds=scale_normal)
    else:
        cp_scale = Parameter(vol, name=str(key) + ' cp scale', bounds=scale_normal)
    B_scale = scale_ratio * cp_scale
    B_sld = Parameter(slds[cps[key]], name=str(key) + ' PS sld')
    B_sld_solvent = Parameter(slds[matrix[key]], name=str(key) + ' PS solvent')
    B_radius_polar = Parameter(fit_ellcyl_polar_radius[key][0], limits=[0, inf],
                               name=str(key) + ' ellipsoid polar radius').range(100, 200000)
    B_radius_polar_pd = Parameter(0.5, name=str(key) + ' ellipsoid polar radius pd')
    B_radius_polar_pd_n = Parameter(200, name=str(key) + ' ellipsoid polar radius pd n')
    B_radius_polar_pd_nsigma = Parameter(8, name=str(key) + ' ellipsoid polar radius pd nsigma')
    B_radius_equatorial = Parameter(fit_ellcyl_equatorial_radius[key][0], limits=[0, inf],
                                    name=str(key) + ' ellipsoid equatorial radius').range(100, 200000)
    B_radius_equatorial_pd = Parameter(0.5, name=str(key) + ' ellipsoid equatorial radius pd')
    B_radius_equatorial_pd_n = Parameter(200, name=str(key) + ' ellipsoid equatorial radius pd n')
    B_radius_equatorial_pd_nsigma = Parameter(8, name=str(key) + ' ellipsoid equatorial radius pd nsigma')
    C_scale = (1 - scale_ratio) * cp_scale
    C_sld = Parameter(slds[cps[key]], name=str(key) + ' PS sld')
    C_sld_solvent = Parameter(slds[matrix[key]], name=str(key) + ' PS solvent')
    C_radius = Parameter(fit_ellcyl_cylinder_radius[key][0], limits=[0, inf],
                         name=str(key) + ' cylinder radius').range(10, 1000)
    C_radius_pd = Parameter(0.2, name=str(key) + ' cylinder radius pd')
    C_radius_pd_n = Parameter(200, name=str(key) + ' cylinder radius pd n')
    C_radius_pd_nsigma = Parameter(8, name=str(key) + ' cylinder radius pd nsigma')
    C_length = Parameter(fit_ellcyl_cylinder_length[key][0], limits=[0, inf],
                         name=str(key) + ' cylinder length').range(10000, 300000)
    # C_length = Parameter(200000, limits=[0,inf], name = str(key) + ' length')
    C_length_pd = Parameter(0, name=str(key) + ' length pd')
    C_length_pd_n = Parameter(200, name=str(key) + ' length pd n')
    C_length_pd_nsigma = Parameter(8, name=str(key) + ' length pd nsigma')

    # setting up the combined model for fitting
    sans_model = Model(
        model=kernel,
        scale=scale, background=background,
        A_scale=A_scale, A_rg=A_rg, A_s=A_s, A_porod_exp=A_porod_exp,
        B_scale=B_scale, B_sld=B_sld, B_sld_solvent=B_sld_solvent,
        B_radius_polar=B_radius_polar, B_radius_polar_pd_type='lognormal',
        B_radius_polar_pd=B_radius_polar_pd, B_radius_polar_pd_n=B_radius_polar_pd_n,
        B_radius_polar_pd_nsigma=B_radius_polar_pd_nsigma,
        B_radius_equatorial=B_radius_equatorial, B_radius_equatorial_pd_type='lognormal',
        B_radius_equatorial_pd=B_radius_equatorial_pd, B_radius_equatorial_pd_n=B_radius_equatorial_pd_n,
        B_radius_equatorial_pd_nsigma=B_radius_equatorial_pd_nsigma,
        C_scale=C_scale, C_sld=C_sld, C_sld_solvent=C_sld_solvent,
        C_radius=C_radius, C_radius_pd_type='lognormal', C_radius_pd=C_radius_pd,
        C_radius_pd_n=C_radius_pd_n, C_radius_pd_nsigma=C_radius_pd_nsigma,
        C_length=C_length, C_length_pd_type='lognormal', C_length_pd=C_length_pd,
        C_length_pd_n=C_length_pd_n, C_length_pd_nsigma=C_length_pd_nsigma,
    )
    _overlay_theory(key, sans_model, color_choice, zorder, 'Ellipsoid+Cylinder Fit')


# #### <NAME>


def plot_ps_fits(key, sans, usans, color_choice, zorder):
    """Overlay the polystyrene-only fit (Guinier-Porod when an Rg was fitted,
    otherwise power law) for control sample *key*.  *usans* may be None."""
    if key in rgs.keys():
        kernel = load_model('guinier_porod')
        model = Model(kernel, scale=ps_scales[key][0], rg=rgs[key][0], s=ps_s,
                      porod_exp=ps_porod_exp, background=backgrounds[key][0])
    else:
        kernel = load_model('power_law')
        model = Model(kernel, scale=ps_scales[key][0], power=ps_porod_exp,
                      background=backgrounds[key][0])
    sans.dx = sans.dx - sans.dx
    sans_experiment = Experiment(data=sans, model=model)
    sans_problem = FitProblem(sans_experiment)
    plt.errorbar(sans.x, sans_problem.fitness.theory(), c=color_choice, linewidth=2,
                 zorder=zorder, fmt='-', label='Guinier-Porod Fit')
    if usans is not None:
        # FIX: the USANS experiment was built on the SANS data (data=sans);
        # use the USANS data, consistent with the four model-fit functions.
        usans_experiment = Experiment(data=usans, model=model)
        usans_smearing = sasmodels.resolution.Slit1D(usans.x, 0.117)
        usans_experiment.resolution = usans_smearing
        usans_problem = FitProblem(usans_experiment)
        plt.errorbar(usans.x, usans_problem.fitness.theory(), c=color_choice,
                     linewidth=2, zorder=zorder, fmt='-')


# polystyrene control samples (target fraction 0, deuterated PS matrix)
ps_keys = [key for key, value in target.items()
           if value == 0 and matrix[key] == 'Polystyrene-D8' and key in sans_data.keys()]
ps_keys


# ### Wrapping function for plots


def plot_sans(keys, sans_data_dict, usans_data_dict, zorders, label_dict, color_dict,
              filename, title=None, legend_title=None, size=6):
    """Plot SANS/USANS data for *keys* with the final model fit overlaid,
    and save the figure to *filename*.

    label_dict/color_dict may be dicts keyed by sample, or lists parallel to
    *keys* (converted to dicts here).
    """
    zorders = np.array(zorders) * 10
    make_colors = {}
    if type(color_dict) is list:
        print('converting color list')
        for i, key in enumerate(keys):
            make_colors[key] = color_dict[i]
        color_dict = make_colors
    make_labels = {}
    if type(label_dict) is list:
        print('converting label list')
        for i, key in enumerate(keys):
            make_labels[key] = label_dict[i]
        label_dict = make_labels

    plt.figure(figsize=(6, 6))
    # FIX: the original initialised i = 0 but never incremented it, so every
    # curve used zorders[0]; enumerate applies the intended per-key z-order.
    for i, key in enumerate(keys):
        s_data = sans_data_dict[key]
        plt.errorbar(s_data.x, s_data.y, yerr=s_data.dy, zorder=zorders[i],
                     label=label_dict[key], c=color_dict[key], mec=color_dict[key],
                     ms=size, mfc='white', fmt='o')
        if key in ps_keys:
            if key in usans_data_dict.keys():
                u_data = usans_data_dict[key]
                plt.errorbar(u_data.x, u_data.y, yerr=u_data.dy, zorder=zorders[i],
                             label=None, c=color_dict[key], mec=color_dict[key],
                             ms=size, mfc='white', fmt='o')
                plot_ps_fits(key, s_data, u_data, color_dict[key], zorders[i])
            else:
                plot_ps_fits(key, s_data, None, color_dict[key], zorders[i])
        elif key in usans_data_dict.keys():
            u_data = usans_data_dict[key]
            plt.errorbar(u_data.x, u_data.y, yerr=u_data.dy, zorder=zorders[i],
                         label=None, c=color_dict[key], mec=color_dict[key],
                         ms=size, mfc='white', fmt='o')
            # dispatch on the final form factor assigned to this sample
            if key in final_keys_spheres:
                plot_sphere_fits(key, s_data, u_data, color_dict[key], zorders[i])
            elif key in final_keys_ellipsoids:
                plot_ellipsoid_fits(key, s_data, u_data, color_dict[key], zorders[i])
            elif key in final_keys_sphere_cylinders:
                plot_sphere_cylinder_fits(key, s_data, u_data, color_dict[key], zorders[i])
            elif key in final_keys_ellipsoid_cylinders:
                plot_ellipsoid_cylinder_fits(key, s_data, u_data, color_dict[key], zorders[i])

    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'Q ($\AA^{-1}$)', fontsize=16)
    plt.ylabel(r'I(Q) (cm$^{-1}$)', fontsize=16)
    legend = plt.legend(fontsize=10, title=legend_title, ncol=2)
    legend.get_title().set_fontsize('10')
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.ylim(1e-03, 1e11)
    plt.xlim(0.00002, 0.3)
    if title is not None:
        plt.title(title, fontsize=16)
    plt.tight_layout()
    plt.savefig(filename, dpi=600)
    plt.close()


# #### Plotting concentration series with final model fits

# +
save_loc = '../data/sans/SANS_Figures_with_Fits/'
os.makedirs(save_loc, exist_ok=True)

plot_sans([21, 1, 2, 3, 4, 5], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_CF_1.png', title='RRe-P3HT in PS-D8 from Chloroform')
plot_sans([21, 6, 7, 8, 9, 10], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RRaP3HT_CF_1.png', title='RRa-P3HT in PS-D8 from Chloroform')
plot_sans([21, 11, 12, 13, 14, 15], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'P3DDT_CF.png', title='P3DDT in PS-D8 from Chloroform')
plot_sans([21, 16, 17, 18, 19, 20], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'PQT12_CF.png', title='PQT-12 in PS-D8 from Chloroform')
plot_sans([113, 101, 102, 103, 104, 105, 106, 107], sans_data, usans_data, [1, 8, 7, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_CF_2.png', title='RRe-P3HT in PS-D8 from Chloroform')
plot_sans([114, 108, 109, 110, 111, 112], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_CF_3.png', title='RRe-P3HT in PS-D8 from Chloroform')
plot_sans([28, 22, 23, 24], sans_data, usans_data, [1, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_BB_1.png', title='RRe-P3HT in PS-D8 from Bromobenzene')
plot_sans([28, 25, 26, 27], sans_data, usans_data, [1, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'P3DDT_BB.png', title='P3DDT in PS-D8 from Bromobenzene')
plot_sans([35, 29, 30, 31], sans_data, usans_data, [1, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_Tol_1.png', title='RRe-P3HT in PS-D8 from Toluene')
plot_sans([35, 32, 33, 34], sans_data, usans_data, [1, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'P3DDT_Tol.png', title='P3DDT in PS-D8 from Toluene')
plot_sans([113, 203], sans_data, usans_data, [1, 2], wt_names, wt_colors,
          save_loc + 'RRaP3HT_CF_2.png', title='RRa-P3HT in PS-D8 from Chloroform')
plot_sans([114, 205, 206], sans_data, usans_data, [1, 3, 2], wt_names, wt_colors,
          save_loc + 'RRaP3HT_CF_3.png', title='RRa-P3HT in PS-D8 from Chloroform')
plot_sans([310, 301, 302, 303, 304, 305], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_BB_2.png', title='RRe-P3HT in PS-D8 from Bromobenzene')
plot_sans([309, 306, 307], sans_data, usans_data, [1, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_BB_3.png', title='RRe-P3HT in PS-D8 from Bromobenzene')
plot_sans([409, 402, 401, 404, 403, 405], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_Tol_2.png', title='RRe-P3HT in PS-D8 from Toluene')
plot_sans([409, 406, 407, 408], sans_data, usans_data, [1, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_Tol_3.png', title='RRe-P3HT in PS-D8 from Toluene')
plot_sans([506, 501, 502, 503, 504, 505], sans_data, usans_data, [1, 6, 5, 4, 3, 2], wt_names, wt_colors,
          save_loc + 'RReP3HT_CF_SLOW.png', title='RRe-P3HT in PS-D8 from Chloroform - Slow Dry')
# -
sans/Plotting_SANS_Data_with_Fits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # **Automatic Account Detection**

# Dataset From - https://www.kaggle.com/c/twitter-bot-classification | https://www.kaggle.com/charvijain27/detecting-twitter-bot-data

# **Libraries**

# +
# Basic libraries
import numpy as np
import pandas as pd
import pickle

# Data Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# %config InlineBackend.figure_format = 'retina'
# %matplotlib inline

# Natural Language Processing
# FIX: `sklearn.feature_extraction.stop_words` was a private module removed in
# scikit-learn 0.24; ENGLISH_STOP_WORDS is the public replacement.  The name is
# shadowed below by the NLTK stop-word set anyway, so only the import changes.
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import re
import string
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()

# Modeling
import sklearn.ensemble
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import metrics
from sklearn.metrics import accuracy_score, recall_score, precision_score, confusion_matrix, roc_curve, auc
from imblearn.over_sampling import SMOTE
# -

url_bot_train = 'data/Bot/training_data_2_csv_UTF.csv'
df_bot_train = pd.read_csv(url_bot_train)
training_data = df_bot_train

# Class balance of the target column.
sns.countplot(training_data['bot'])

# Null counts per column.
training_data.apply(lambda x: sum(x.isnull()))

# FIX: take an explicit copy so the renames/assignments below do not operate on
# a view of `training_data` (the test-set twin `text_cols2` already uses .copy()).
text_cols = training_data[['screen_name', 'location', 'description', 'url', 'created_at', 'lang', 'status', 'has_extended_profile', 'name']].copy()
text_cols.head()

text_cols.rename(columns={'screen_name': 'screen_name_processed'}, inplace=True)
text_cols.rename(columns={'name': 'name_processed'}, inplace=True)

# +
stop_words = set(stopwords.words('english'))

# Normalise the screen name and count how many digits it contained.
# FIX: `iterrows()` yields row *copies*; the original mutated the copy and never
# wrote back, so `screen_name_processed_num_count` stayed "" for every row
# (which also made impute_na() crash on int("")).  Results are now written back
# with .at, matching the text_cols2 processing later in this notebook.
text_cols['screen_name_processed_num_count'] = ""
for index, row in text_cols.iterrows():
    cleaned = row['screen_name_processed'].lower()                        # Convert text to lowercase
    num_count = sum(c.isdigit() for c in cleaned)                         # number of digits in the handle
    cleaned = re.sub(r'\d+', '', cleaned)                                 # Remove numbers
    cleaned = cleaned.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
    cleaned = cleaned.strip()                                             # Remove whitespaces
    tokens = [t for t in word_tokenize(cleaned) if t not in stop_words]   # Tokenization - REMOVE STOP WORDS
    # FIX: lemmatize every token; the original loop overwrote the list with the
    # lemma of the *last* token only.
    tokens = [lemmatizer.lemmatize(t) for t in tokens]
    text_cols.at[index, 'screen_name_processed'] = tokens
    text_cols.at[index, 'screen_name_processed_num_count'] = num_count
# -

# Same normalisation for the display name (no lemmatization here, as before).
text_cols['name_processed_num_count'] = ""
for index, row in text_cols.iterrows():
    cleaned = row['name_processed'].lower()                               # Convert text to lowercase
    num_count = sum(c.isdigit() for c in cleaned)                         # number of digits in the name
    cleaned = re.sub(r'\d+', '', cleaned)                                 # Remove numbers
    cleaned = cleaned.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
    cleaned = cleaned.strip()                                             # Remove whitespaces
    tokens = [t for t in word_tokenize(cleaned) if t not in stop_words]   # Tokenization - REMOVE STOP WORDS
    text_cols.at[index, 'name_processed'] = tokens
    text_cols.at[index, 'name_processed_num_count'] = num_count

text_cols.head()


def impute_na(variable):
    """Fill NaNs of `variable` in text_cols by sampling an observed value,
    seeded per-row (by the row's digit count) for reproducibility."""
    for i, row in text_cols[text_cols[variable].isnull()].iterrows():
        obs_sample = text_cols[variable].dropna().sample(
            1, random_state=int(row.screen_name_processed_num_count))
        obs_sample.index = [i]
        # NOTE(review): this stores a 1-element Series in the cell; pandas
        # resolves it to the aligned value — confirm if upgrading pandas.
        text_cols.at[i, variable] = obs_sample


impute_na('has_extended_profile')

# Suspicious-token regex.
# FIX: the original adjacent raw-string literals concatenated without a '|',
# silently merging alternatives at each line break ('forget'+'expos',
# 'jargon'+'nerd', 'bbb'+'ffd'); a '|' is now present at every join.
word_list = r'bot|b0t|cannabis|tweet me|mishear|follow me|updates every|gorilla|yes_ofc|forget|' \
            r'expos|kill|clit|bbb|butt|fuck|XXX|sex|truthe|fake|anony|free|virus|funky|RNA|kuck|jargon|' \
            r'nerd|swag|jack|bang|bonsai|chick|prison|paper|pokem|xx|freak|ffd|dunia|clone|genie|bbb|' \
            r'ffd|onlyman|emoji|joke|troll|droop|free|every|wow|cheese|yeah|bio|magic|wizard|face'

listofwords = pickle.load(open("model/words_in_not_credible.pickle", 'rb'))
listofwords2 = pickle.load(open("model/word_couples_in_not_credible.pickle", 'rb'))
list_ofwords = list(listofwords)
str1 = '|'.join(str(e) for e in list_ofwords)
list_ofwords2 = list(listofwords2)
str2 = '|'.join(str(e) for e in list_ofwords2)

# Binary "contains a suspicious token" flags, computed with the base word_list.
text_cols['screen_name_binary'] = training_data.screen_name.str.contains(word_list, case=False, na=False)
text_cols['name_binary'] = training_data.name.str.contains(word_list, case=False, na=False)
text_cols['description_binary'] = training_data.description.str.contains(word_list, case=False, na=False)
text_cols['status_binary'] = training_data.status.str.contains(word_list, case=False, na=False)

# NOTE(review): the pickled word lists are appended only *after* the *_binary
# flags above were computed; preserved as in the original.
word_list = str1 + '|' + str2 + '|' + word_list

text_cols['listed_count_binary'] = (training_data.listed_count > 20000) == False

# One 0/1 missingness indicator per original column.
for column in training_data:
    text_cols[column + '_NA'] = np.where(training_data[column].isnull(), 1, 0)

NA_cols = [col for col in text_cols.columns if 'NA' in col]
NA_cols

text_cols.has_extended_profile = text_cols.has_extended_profile.astype(int)
text_cols.rename(columns={'has_extended_profile': 'has_extended_profile_processed'}, inplace=True)

# +
# Simple count-based features from the free-text columns.
training_data['des_hashtags'] = training_data['description'].str.count('#')
training_data['des_mentions'] = training_data['description'].str.count('@')
training_data['des_length'] = training_data['description'].str.len()
training_data['status_hashtags'] = training_data['status'].str.count('#')
training_data['status_mentions'] = training_data['status'].str.count('@')
training_data['status_length'] = training_data['status'].str.len()
training_data['des_link_count'] = training_data['description'].str.count(':')
# Punctuation/quote counts (FIX: raw strings for the regex '\.' — the bare
# '\.' escape is invalid in a normal string literal and warns on Python 3.6+).
training_data['status_punctuation'] = training_data['status'].str.count(r'\.')
training_data['des_punctuation'] = training_data['description'].str.count(r'\.')
training_data['status_quote'] = training_data['status'].str.count('"')
training_data['des_quote'] = training_data['description'].str.count('"')
# -

# Assemble the three feature groups into one frame and persist it.
text_cols_features = text_cols[['has_extended_profile_processed', 'name_processed_num_count', 'screen_name_processed_num_count', 'screen_name_binary', 'name_binary', 'description_binary', 'status_binary', 'listed_count_binary', 'location_NA', 'description_NA', 'url_NA', 'status_NA', 'has_extended_profile_NA']].copy()

training_data_features = training_data[['verified', 'followers_count', 'friends_count', 'statuses_count', 'bot']].copy()

feature_set = training_data[['status_punctuation', 'des_punctuation', 'status_quote', 'des_quote', 'des_link_count', 'des_hashtags', 'des_mentions', 'des_length', 'status_hashtags', 'status_mentions', 'status_length']].copy().fillna(0)
feature_set.head(100)

result = pd.concat([feature_set, text_cols_features, training_data_features], axis=1, sort=False)
result.to_csv('data/Bot/processed.csv', index=False)

# +
# Bag-of-words over bot descriptions, for the top-10 word plot below.
mask_on = training_data['bot'] == 1
training_data_bot_description = training_data[mask_on]['description']

# Instantiate a CountVectorizer
cv1 = CountVectorizer(stop_words='english')

# Fit and transform the vectorizer on our corpus
bot_cvec = cv1.fit_transform(training_data_bot_description.values.astype('U'))

# Convert bot_cvec into a DataFrame
bot_cvec_df = pd.DataFrame(bot_cvec.toarray(), columns=cv1.get_feature_names())

print(bot_cvec_df.shape)
# -


def bar_plot(x, y, title, color):
    """Horizontal bar plot of x vs y with each bar annotated with its value."""
    # Set up barplot
    plt.figure(figsize=(9, 5))
    g = sns.barplot(x, y, color=color)
    ax = g
    # Label the graph
    plt.title(title, fontsize=15)
    plt.xticks(fontsize=10)
    # Enable bar values
    # Code modified from http://robertmitchellv.com/blog-bar-chart-annotations-pandas-mpl.html
    totals = [p.get_width() for p in ax.patches]
    total = sum(totals)  # kept for parity with the original (unused)
    # set individual bar labels using above list
    for p in ax.patches:
        # get_width pulls left or right; get_y pushes up or down
        ax.text(p.get_width() + .3, p.get_y() + .38, int(p.get_width()), fontsize=10)


# +
bot_wc = bot_cvec_df.sum(axis=0)
bot_top_10 = bot_wc.sort_values(ascending=False).head(10)

# Call function
bar_plot(bot_top_10.values, bot_top_10.index, 'Top 10 words on Description of bots', 'r')
# -

# +
# Baseline: random forest on the four raw numeric features only.
result_no_f = training_data_features
training_result_no_f = result_no_f.drop('bot', axis=1)
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
X = training_result_no_f.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X, result_no_f.bot, test_size=0.3, random_state=0)
# FIX: imbalanced-learn renamed `ratio` -> `sampling_strategy` and
# `fit_sample` -> `fit_resample` (the old names were removed in 0.6/0.8).
ovrsmple = SMOTE(sampling_strategy='minority')
X_train, y_train = ovrsmple.fit_resample(X_train, y_train)
rfc = sklearn.ensemble.RandomForestClassifier(n_estimators=500, min_samples_split=5, criterion='gini', max_features='auto', max_depth=8, oob_score=True, random_state=42, n_jobs=-1)
rfc.fit(X_train, y_train)
print('Random Forest Classifier Train Accuracy Score :', rfc.score(X_train, y_train))
print('Random Forest Classifier Test Score :', rfc.score(X_test, y_test))
# -

# Full feature set.
training = result.drop('bot', axis=1)
X = training.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X, result.bot, test_size=0.3, random_state=0)
X_train.shape, X_test.shape

# +
ovrsmple = SMOTE(sampling_strategy='minority')
X_train, y_train = ovrsmple.fit_resample(X_train, y_train)
# -

np.bincount(y_train)

rfc = sklearn.ensemble.RandomForestClassifier(n_estimators=500, min_samples_split=5, criterion='gini', max_features='auto', max_depth=8, oob_score=True, random_state=42, n_jobs=-1)

# +
rfc.fit(X_train, y_train)
print('Random Forest Classifier Train Accuracy Score :', rfc.score(X_train, y_train))
print('Random Forest Classifier Test Score :', rfc.score(X_test, y_test))
# -

from sklearn.metrics import classification_report
predt = rfc.predict(X_test)
report = classification_report(y_test, predt)
print(report)

# Top-30 feature importances.
plt.figure(figsize=(10, 9))
feat_importances = pd.Series(rfc.feature_importances_, index=training.columns)
feat_importances.nlargest(30).plot(kind='barh')

from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier(criterion='gini', max_depth=10, max_features='auto', random_state=1, splitter='best')
dtc.fit(X_train, y_train)
print("Train Score :", dtc.score(X_train, y_train))
print("Test Score :", dtc.score(X_test, y_test))

# +
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression

ss = StandardScaler()
X_train_s = ss.fit_transform(X_train)  # saved the mean and std from X_train
X_test_s = ss.transform(X_test)

# Create regularization penalty space
penalty = ['l1', 'l2']
# Create regularization hyperparameter space
C = np.logspace(0, 4, 10)
# Create hyperparameter options
hyperparameters = dict(C=C, penalty=penalty)

# initialize grid search and Logistic regression
lr = LogisticRegression(n_jobs=-1)
# clf = GridSearchCV(lr, hyperparameters, cv=5, verbose=0)
# # Train the model using the training sets
# clf.fit(X_train_s, y_train)
lr.fit(X_train_s, y_train)
lr_y_pred = lr.predict(X_test_s)
print("LR train score :")
print(lr.score(X_train_s, y_train))
print('')
print("LR test score :")
print(lr.score(X_test_s, y_test))
# -

# +
# ROC curves for the random forest on train and test splits.
sns.set_style("whitegrid", {'axes.grid': False})
scores_train = rfc.predict_proba(X_train)
scores_test = rfc.predict_proba(X_test)

y_scores_train = [s[1] for s in scores_train]  # probability of the positive class
y_scores_test = [s[1] for s in scores_test]

fpr_rf_train, tpr_rf_train, _ = roc_curve(y_train, y_scores_train, pos_label=1)
fpr_rf_test, tpr_rf_test, _ = roc_curve(y_test, y_scores_test, pos_label=1)

plt.plot(fpr_rf_train, tpr_rf_train, color='darkblue', label='Train AUC: %5f' % auc(fpr_rf_train, tpr_rf_train))
plt.plot(fpr_rf_test, tpr_rf_test, color='red', ls='--', label='Test AUC: %5f' %auc(fpr_rf_test, tpr_rf_test)) plt.title("Random Forest ROC Curve for Engineered Features") plt.xlabel("False Positive Rate (FPR)") plt.ylabel("True Positive Rate (TPR)") plt.legend(loc='lower right') # - labels=['bot','human'] def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.YlOrRd): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar(shrink=0.7) tick_marks = np.arange(len(labels)) plt.xticks(tick_marks, labels, rotation=45, ha='right', fontsize=12) plt.yticks(tick_marks, labels , fontsize=12) plt.tight_layout() plt.ylabel('True label', fontsize=12) plt.xlabel('Predicted label', fontsize=12) # Create the predictions for Y training data preds = rfc.predict(X_test) cm = confusion_matrix(y_test, preds) np.set_printoptions(precision=2) print('Confusion matrix') print(cm) plt.figure() plot_confusion_matrix(cm) # + # Assign True Neg, False Pos, False Neg, True Pos variables cnf_matrix = np.array(cm).tolist() tn_fp, fn_tp = cm tn, fp = tn_fp fn, tp = fn_tp # - # Print Scores print("Accuracy:",round(metrics.accuracy_score(y_test, preds)*100, 2),'%') print("Precision:",round(metrics.precision_score(y_test, preds)*100, 2), '%') print("Recall:",round(metrics.recall_score(y_test, preds)*100, 2), '%') print("Specificity:", round((tn/(tn+fp))*100, 2), '%') print("Misclassification Rate:", round((fp+fn)/(tn+fp+fn+tn)*100, 2), '%') pickle.dump(rfc, open("model/model_bot.pickle", "wb")) model = pickle.load(open("model/model_bot.pickle", 'rb')) result.head(1) url_bot_test = 'data/Bot/test_data_4_students.csv' df_bot_test = pd.read_csv(url_bot_test,sep='\t', encoding = "ISO-8859-1",nrows=4) text_cols2=df_bot_test[['screen_name','location','description','url','created_at','lang','status','has_extended_profile','name']].copy() text_cols2.head() screen_name = "Nishan_CW" location = "Western Sri Lanka" description = "Social Innovator | System Designer | Realist 
| Humanitarian" url = "https://t.co/zW9UUFzeWa" created_at = "Mon Sep 19 04:59:39 +0000 2011" lang = "" status = "{ 'created_at': 'Wed Jan 08 12:34:56 +0000 2020', 'id': 1214888214726496300, 'id_str': '1214888214726496256', 'text': 'Advances in neural networks and other techniques promise to transform health care while raising profound questions… https://t.co/AWcRAgOXKM', 'truncated': true, 'entities': { 'hashtags': [], 'symbols': [], 'user_mentions': [], 'urls': [ { 'url': 'https://t.co/AWcRAgOXKM', 'expanded_url': 'https://twitter.com/i/web/status/1214888214726496256', 'display_url': 'twitter.com/i/web/status/1…', 'indices': [ 116, 139 ] } ] }, 'source': '<a href='http://twitter.com/download/android' rel='nofollow'>Twitter for Android</a>', 'in_reply_to_status_id': null, 'in_reply_to_status_id_str': null, 'in_reply_to_user_id': null, 'in_reply_to_user_id_str': null, 'in_reply_to_screen_name': null, 'geo': null, 'coordinates': null, 'place': { 'id': '173c2bb9d42baaa5', 'url': 'https://api.twitter.com/1.1/geo/id/173c2bb9d42baaa5.json', 'place_type': 'country', 'name': 'Sri Lanka', 'full_name': '<NAME>', 'country_code': 'LK', 'country': 'Sri Lanka', 'contained_within': [], 'bounding_box': { 'type': 'Polygon', 'coordinates': [ [ [ 79.6505263, 5.9213829 ], [ 81.8787816, 5.9213829 ], [ 81.8787816, 9.8359375 ], [ 79.6505263, 9.8359375 ] ] ] }, 'attributes': {} }, 'contributors': null, 'is_quote_status': false, 'retweet_count': 0, 'favorite_count': 0, 'favorited': false, 'retweeted': false, 'possibly_sensitive': false, 'lang': 'en' }""{ 'created_at': 'Wed Jan 08 12:34:56 +0000 2020', 'id': 1214888214726496300, 'id_str': '1214888214726496256', 'text': 'Advances in neural networks and other techniques promise to transform health care while raising profound questions… https://t.co/AWcRAgOXKM', 'truncated': true, 'entities': { 'hashtags': [], 'symbols': [], 'user_mentions': [], 'urls': [ { 'url': 'https://t.co/AWcRAgOXKM', 'expanded_url': 
'https://twitter.com/i/web/status/1214888214726496256', 'display_url': 'twitter.com/i/web/status/1…', 'indices': [ 116, 139 ] } ] }, 'source': '<a href='http://twitter.com/download/android' rel='nofollow'>Twitter for Android</a>', 'in_reply_to_status_id': null, 'in_reply_to_status_id_str': null, 'in_reply_to_user_id': null, 'in_reply_to_user_id_str': null, 'in_reply_to_screen_name': null, 'geo': null, 'coordinates': null, 'place': { 'id': '173c2bb9d42baaa5', 'url': 'https://api.twitter.com/1.1/geo/id/173c2bb9d42baaa5.json', 'place_type': 'country', 'name': '<NAME>', 'full_name': '<NAME>', 'country_code': 'LK', 'country': 'Sri Lanka', 'contained_within': [], 'bounding_box': { 'type': 'Polygon', 'coordinates': [ [ [ 79.6505263, 5.9213829 ], [ 81.8787816, 5.9213829 ], [ 81.8787816, 9.8359375 ], [ 79.6505263, 9.8359375 ] ] ] }, 'attributes': {} }, 'contributors': null, 'is_quote_status': false, 'retweet_count': 0, 'favorite_count': 0, 'favorited': false, 'retweeted': false, 'possibly_sensitive': false, 'lang': 'en' }" has_extended_profile = True name = "<NAME>" verified = False followers_count = 248 friends_count = 167 statuses_count = 2560 listed_count = 6 df_bot_test = pd.DataFrame(columns=["screen_name", "location", "description","url","created_at","lang","status","has_extended_profile","name","verified","followers_count","friends_count","statuses_count","listed_count"], data=[[screen_name, location, description, url, created_at,lang,status,has_extended_profile,name,verified,followers_count,friends_count, statuses_count,listed_count]]) df_bot_test text_cols2=df_bot_test[['screen_name','location','description','url','created_at','lang','status','has_extended_profile','name']].copy() text_cols2.head() point = [] text_cols2.rename(columns={'screen_name':'screen_name_processed'}, inplace=True) text_cols2.rename(columns={'name':'name_processed'}, inplace=True) def convert1(variable): stop_words = set(stopwords.words('english')) text_cols2[variable+'_processed_num_count'] 
= "" for i, row1 in text_cols2.iterrows(): row1[variable+'_processed'] = row1[variable+'_processed'].lower() #Convert text to lowercase row1[variable+'_processed_num_count'] = sum(ch.isdigit() for ch in row1[variable+'_processed']) #create new column to get number of numbers row1[variable+'_processed'] = re.sub(r'\d+','', row1[variable+'_processed']) #Remove numbers row1[variable+'_processed']= row1[variable+'_processed'].translate(str.maketrans('','',string.punctuation)) #Remove punctuation row1[variable+'_processed'] = row1[variable+'_processed'].strip() #Remove whitespaces row1[variable+'_processed'] = [i for i in word_tokenize(row1[variable+'_processed']) if not i in stop_words] #Tokenization - REMOVE STOP WORDS for word in row1[variable+'_processed']: row1[variable+'_processed'] = lemmatizer.lemmatize(word) text_cols2.at[i, 'screen_name_processed'] = row1['screen_name_processed'] text_cols2.at[i, 'screen_name_processed_num_count'] = row1['screen_name_processed_num_count'] text_cols2.head(4) convert1('screen_name') text_cols2['name_processed_num_count'] = "" for index, row in text_cols2.iterrows(): row['name_processed'] = row['name_processed'].lower() #Convert text to lowercase row['name_processed_num_count'] = (sum(c.isdigit() for c in row['name_processed'])) #create new column to get number of numbers row['name_processed'] = re.sub(r'\d+','', row['name_processed']) #Remove numbers row['name_processed']= row['name_processed'].translate(str.maketrans('','',string.punctuation)) #Remove punctuation row['name_processed'] = row['name_processed'].strip() #Remove whitespaces row['name_processed'] = [i for i in word_tokenize(row['name_processed']) if not i in stop_words] #Tokenization - REMOVE STOP WORDS text_cols2.at[index, 'name_processed'] = row['name_processed'] text_cols2.at[index, 'name_processed_num_count'] = row['name_processed_num_count'] text_cols2.head() word_list = r'bot|b0t|cannabis|tweet me|mishear|follow me|updates every|gorilla|yes_ofc|forget' \ 
r'expos|kill|clit|bbb|butt|fuck|XXX|sex|truthe|fake|anony|free|virus|funky|RNA|kuck|jargon' \ r'nerd|swag|jack|bang|bonsai|chick|prison|paper|pokem|xx|freak|ffd|dunia|clone|genie|bbb' \ r'ffd|onlyman|emoji|joke|troll|droop|free|every|wow|cheese|yeah|bio|magic|wizard|face' listofwords = pickle.load(open("model/words_in_not_credible.pickle", 'rb')) listofwords2 = pickle.load(open("model/word_couples_in_not_credible.pickle", 'rb')) list_ofwords=list(listofwords) str1 = '|'.join(str(e) for e in list_ofwords) list_ofwords2=list(listofwords2) str2 = '|'.join(str(e) for e in list_ofwords2) word_list = str1 + '|' + str2 + '|' + word_list word_list text_cols2['screen_name_binary'] = df_bot_test.screen_name.str.contains(word_list, case=False, na=False) text_cols2['name_binary'] = df_bot_test.name.str.contains(word_list, case=False, na=False) text_cols2['description_binary'] = df_bot_test.description.str.contains(word_list, case=False, na=False) text_cols2['status_binary'] = df_bot_test.status.str.contains(word_list, case=False, na=False) text_cols2['listed_count_binary'] = (df_bot_test.listed_count>20000)==False for column in df_bot_test: text_cols2[column+'_NA'] = np.where(df_bot_test[column].isnull(), 1, 0) # + def impute_na(variable): for i, row in text_cols2[text_cols2[variable].isnull()].iterrows(): obs_sample = text_cols2[variable].dropna().sample(1, random_state=int(row.screen_name_processed_num_count)) obs_sample.index = [i] text_cols2.at[i, variable] = obs_sample impute_na('has_extended_profile') # - text_cols2['has_extended_profile'].head(20) text_cols2.has_extended_profile = text_cols2.has_extended_profile.astype(int) # + df_bot_test['des_hashtags'] = df_bot_test['description'].str.count('#') df_bot_test['des_mentions'] = df_bot_test['description'].str.count('@') df_bot_test['des_length'] = df_bot_test['description'].str.len() df_bot_test['status_hashtags'] = df_bot_test['status'].str.count('#') df_bot_test['status_mentions'] = df_bot_test['status'].str.count('@') 
df_bot_test['status_length'] = df_bot_test['status'].str.len() df_bot_test['des_link_count'] = df_bot_test['description'].str.count(':') df_bot_test['status_punctuation'] = df_bot_test['status'].str.count('\.') df_bot_test['des_punctuation'] = df_bot_test['description'].str.count('\.') df_bot_test['status_quote'] = df_bot_test['status'].str.count('"') df_bot_test['des_quote'] = df_bot_test['description'].str.count('"') # - feature_set = df_bot_test[['status_punctuation','des_punctuation','status_quote','des_quote','des_link_count','des_hashtags', 'des_mentions', 'des_length', 'status_hashtags','status_mentions','status_length']].copy().fillna(0) # + text_cols2.rename(columns={'has_extended_profile':'has_extended_profile_processed'}, inplace=True) text_cols_features = text_cols2[['has_extended_profile_processed','name_processed_num_count','screen_name_processed_num_count','screen_name_binary', 'name_binary', 'description_binary', 'status_binary', 'listed_count_binary','location_NA','description_NA','url_NA','status_NA','has_extended_profile_NA']].copy() test_data_features = df_bot_test[['verified', 'followers_count', 'friends_count', 'statuses_count']].copy() result = pd.concat([feature_set,text_cols_features, test_data_features], axis=1, sort=False) # - point = result.head(1).to_numpy() # + # point = [1, 0, 0, False, False, True, False, True, 0, 0, 0, 0, 0, True, # 1541514, 68, 14230] # - point # pred = model.predict(np.asarray(point).reshape(1, -1)) # pred[0] pred = model.predict(point.reshape(1, -1)) pred[0]
Automated_Account_Detection_Twitter_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2

import sys, os
sys.path.insert(0, '..')  # make the project-local `lib` package importable

from lib import models, graph, coarsening, utils

import tensorflow as tf  # NOTE(review): TF1 API (tf.app.flags, tf.placeholder)
import numpy as np
import time

# %matplotlib inline
# -

# +
# Hyper-parameters are declared once as TF1 flags and read as FLAGS.* below.
flags = tf.app.flags
FLAGS = flags.FLAGS

# Graphs.
flags.DEFINE_integer('number_edges', 8, 'Graph: minimum number of edges per vertex.')
flags.DEFINE_string('metric', 'euclidean', 'Graph: similarity measure (between features).')
# TODO: change cgcnn for combinatorial Laplacians.
flags.DEFINE_bool('normalized_laplacian', True, 'Graph Laplacian: normalized.')
flags.DEFINE_integer('coarsening_levels', 4, 'Number of coarsened graphs.')

# Directories.
flags.DEFINE_string('dir_data', os.path.join('..', 'data', 'mnist'), 'Directory to store data.')
# -

# # Feature graph

# +
def grid_graph(m, corners=False):
    """Build a k-nearest-neighbour similarity graph over the m-by-m pixel grid.

    Returns a sparse adjacency matrix; with corners=True, weak edges are
    thresholded away so grid vertices keep only axis-aligned neighbours.
    """
    z = graph.grid(m)
    dist, idx = graph.distance_sklearn_metrics(z, k=FLAGS.number_edges, metric=FLAGS.metric)
    A = graph.adjacency(dist, idx)

    # Connections are only vertical or horizontal on the grid.
    # Corner vertices are connected to 2 neighbors only.
    if corners:
        import scipy.sparse
        A = A.toarray()
        A[A < A.max()/1.5] = 0  # drop diagonal (weaker) k-NN edges
        A = scipy.sparse.csr_matrix(A)
        print('{} edges'.format(A.nnz))

    print("{} > {} edges".format(A.nnz//2, FLAGS.number_edges*m**2//2))
    return A

# Build the 28x28 pixel graph, coarsen it, and precompute one Laplacian per level.
t_start = time.process_time()
A = grid_graph(28, corners=False)
A = graph.replace_random_edges(A, 0)
graphs, perm = coarsening.coarsen(A, levels=FLAGS.coarsening_levels, self_connections=False)
L = [graph.laplacian(A, normalized=True) for A in graphs]
print('Execution time: {:.2f}s'.format(time.process_time() - t_start))
graph.plot_spectrum(L)
del A
# -

# # Data

# +
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(FLAGS.dir_data, one_hot=False)

train_data = mnist.train.images.astype(np.float32)
val_data = mnist.validation.images.astype(np.float32)
test_data = mnist.test.images.astype(np.float32)
train_labels = mnist.train.labels
val_labels = mnist.validation.labels
test_labels = mnist.test.labels

# Reorder pixels to match the coarsened graph's vertex permutation.
t_start = time.process_time()
train_data = coarsening.perm_data(train_data, perm)
val_data = coarsening.perm_data(val_data, perm)
test_data = coarsening.perm_data(test_data, perm)
print('Execution time: {:.2f}s'.format(time.process_time() - t_start))
del perm
# -

# # Neural networks

# +
# Earlier model variants kept for reference; cgcnn (below) supersedes them.
#model = fc1()
#model = fc2(nhiddens=100)
#model = cnn2(K=5, F=10)  # K=28 is equivalent to filtering with fgcnn.
#model = fcnn2(F=10)
#model = fgcnn2(L[0], F=10)
#model = lgcnn2_2(L[0], F=10, K=10)
#model = cgcnn2_3(L[0], F=10, K=5)
#model = cgcnn2_4(L[0], F=10, K=5)
#model = cgcnn2_5(L[0], F=10, K=5)

# Disabled path: pre-project the data onto a Lanczos basis of size K.
if False:
    K = 5  # 5 or 5^2
    t_start = time.process_time()
    mnist.test._images = graph.lanczos(L, mnist.test._images.T, K).T
    mnist.train._images = graph.lanczos(L, mnist.train._images.T, K).T
    model = lgcnn2_1(L, F=10, K=K)
    print('Execution time: {:.2f}s'.format(time.process_time() - t_start))
    ph_data = tf.placeholder(tf.float32, (FLAGS.batch_size, mnist.train.images.shape[1], K), 'data')
# -

# +
# Hyper-parameters shared by every experiment; each experiment copies and
# overrides this dict before calling model_perf.test().
common = {}
common['dir_name'] = 'mnist/'
common['num_epochs'] = 20
common['batch_size'] = 100
common['decay_steps'] = mnist.train.num_examples / common['batch_size']
common['eval_frequency'] = 30 * common['num_epochs']
common['brelu'] = 'b1relu'
common['pool'] = 'mpool1'
C = max(mnist.train.labels) + 1  # number of classes
model_perf = utils.model_perf()
# -

# Baseline: plain softmax classifier (no graph-convolutional layers).
if True:
    name = 'softmax'
    params = common.copy()
    params['dir_name'] += name
    params['regularization'] = 5e-4
    params['dropout'] = 1
    params['learning_rate'] = 0.02
    params['decay_rate'] = 0.95
    params['momentum'] = 0.9
    params['F'] = []
    params['K'] = []
    params['p'] = []
    params['M'] = [C]
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

# Common hyper-parameters for networks with one convolutional layer.
common['regularization'] = 0
common['dropout'] = 1
common['learning_rate'] = 0.02
common['decay_rate'] = 0.95
common['momentum'] = 0.9
common['F'] = [10]
common['K'] = [20]
common['p'] = [1]
common['M'] = [C]

# One conv layer with non-parametric (full Fourier) filters.
if True:
    name = 'fgconv_softmax'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'fourier'
    params['K'] = [L[0].shape[0]]
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

# One conv layer with spline-parametrised spectral filters.
if True:
    name = 'sgconv_softmax'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'spline'
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

# With 'chebyshev2' and 'b2relu', it corresponds to cgcnn2_2(L[0], F=10, K=20).
if True:
    name = 'cgconv_softmax'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'chebyshev5'
#    params['filter'] = 'chebyshev2'
#    params['brelu'] = 'b2relu'
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

# Common hyper-parameters for LeNet5-like networks.
common['regularization'] = 5e-4
common['dropout'] = 0.5
common['learning_rate'] = 0.02  # 0.03 in the paper but sgconv_sgconv_fc_softmax has difficulty to converge
common['decay_rate'] = 0.95
common['momentum'] = 0.9
common['F'] = [32, 64]
common['K'] = [25, 25]
common['p'] = [4, 4]
common['M'] = [512, C]

# Architecture of TF MNIST conv model (LeNet-5-like).
# Changes: regularization, dropout, decaying learning rate, momentum optimizer, stopping condition, size of biases.
# Differences: training data randomization, init conv1 biases at 0.
if True:
    name = 'fgconv_fgconv_fc_softmax'  # 'Non-Param'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'fourier'
    params['K'] = [L[0].shape[0], L[2].shape[0]]
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

if True:
    name = 'sgconv_sgconv_fc_softmax'  # 'Spline'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'spline'
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

if True:
    name = 'cgconv_cgconv_fc_softmax'  # 'Chebyshev'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'chebyshev5'
    model_perf.test(models.cgcnn(L, **params), name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels)

# Summary table of all experiments run above.
model_perf.show()

# Disabled: hyper-parameter grid search over the same model family.
if False:
    grid_params = {}
    data = (train_data, train_labels, val_data, val_labels, test_data, test_labels)
    utils.grid_search(params, grid_params, *data, model=lambda x: models.cgcnn(L, **x))
nips2016/mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''base'': conda)'
#     name: python3
# ---

from bs4 import BeautifulSoup as bs
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
import requests
import os
import pandas as pd

# NOTE(review): this local file is read but never used afterwards — presumably
# a leftover from offline development; kept for parity.
filepath = os.path.join('NewsMarsExplorationProgram.html')
with open(filepath, encoding='utf-8') as file:
    html = file.read()

chrome_driver = "/Users/patba/Desktop/ChromeDriver/chromedriver"
executable_path = {'executable_path': chrome_driver}
browser = Browser('chrome', **executable_path, headless=False)

# ---- LATEST MARS NEWS ----
mars_url = 'https://redplanetscience.com/'
browser.visit(mars_url)
mars_html = browser.html
mars_soup = bs(mars_html, 'html.parser')

# get latest news title
news_title = mars_soup.find('div', class_='content_title').get_text()
print('The latest news title is: ' + news_title)

# get paragraph text from the latest news article
news_p = mars_soup.find('div', class_='article_teaser_body').get_text()
print(news_p)

browser.quit()

# SPACE IMAGES FROM MARS
executable_path = {'executable_path': chrome_driver}
browser = Browser('chrome', **executable_path, headless=False)
space_url = 'https://spaceimages-mars.com'
browser.visit(space_url)
space_html = browser.html
space_soup = bs(space_html, 'html.parser')

# find featured image
featured_img = space_soup.find('img', class_='headerimage fade-in').get('src')
print(featured_img)

# assign featured img to url string
# FIX: the site serves a relative src with no leading slash, so plain
# concatenation produced "https://spaceimages-mars.comimage/..." — insert the
# path separator explicitly (lstrip keeps this safe if src ever starts with '/').
featured_img_url = f"{space_url}/{featured_img.lstrip('/')}"
featured_img_url

browser.quit()

# MARS FACTS
executable_path = {'executable_path': chrome_driver}
browser = Browser('chrome', **executable_path, headless=False)
mars_facts_url = 'https://galaxyfacts-mars.com/'
browser.visit(mars_facts_url)

# + tags=[]
# not needed
#mars_facts_html = browser.html
#mars_facts_soup = bs(mars_facts_html, 'html.parser')
# -

# scrape mars facts table
planet_facts_dfs = pd.read_html(mars_facts_url)
planet_facts_dfs

# mars facts df
mars_df = planet_facts_dfs[1]
mars_df.rename(columns={0: "Properties", 1: "Mars Facts"}).set_index("Properties")

# mars earth comparison df
earth_mars_df = planet_facts_dfs[0]
earth_mars_df = earth_mars_df.rename(columns={0: "Properties", 1: "", 2: ""}).set_index("Properties")
earth_mars_df

browser.quit()

# MARS HEMISPHERE
executable_path = {'executable_path': chrome_driver}
browser = Browser('chrome', **executable_path, headless=False)
mars_hemi_url = 'https://marshemispheres.com/'
browser.visit(mars_hemi_url)
mars_hemi_html = browser.html
mars_hemi_soup = bs(mars_hemi_html, 'html.parser')

# get links to enhanced images
# FIX: the original tested `if h not in links`, comparing a bs4 Tag against a
# list of href strings — always true, so every hemisphere's href was appended
# twice (thumbnail anchor + title anchor) and the code compensated by indexing
# 0/2/4/6.  Deduplicate on the href itself so each hemisphere appears once.
links = []
for h in mars_hemi_soup.find_all('a', class_='itemLink product-item'):
    href = h.get('href')
    if href not in links:
        links.append(href)

# get titles of imgs (same Tag-vs-string fix as above)
titles = []
for h in mars_hemi_soup.find_all('h3'):
    text = h.get_text()
    if text not in titles:
        titles.append(text)

# check link list
links[0]

# +
# assign hemisphere urls — one entry per hemisphere now that `links` is
# de-duplicated, so consecutive indices are used.
mars_img_A = f'{mars_hemi_url + links[0]}'
mars_img_B = f'{mars_hemi_url + links[1]}'
mars_img_C = f'{mars_hemi_url + links[2]}'
mars_img_D = f'{mars_hemi_url + links[3]}'
print(mars_img_A + "\n" + mars_img_B + "\n" + mars_img_C + "\n" + mars_img_D)
# -

# mars hemisphere img titles
mars_title_A = titles[0]
mars_title_B = titles[1]
mars_title_C = titles[2]
mars_title_D = titles[3]
print(mars_title_A + "\n" + mars_title_B + "\n" + mars_title_C + "\n" + mars_title_D)

# mars hemi img list
hemi_img_urls = [
    {"title": mars_title_A, "url": mars_img_A},
    {"title": mars_title_B, "url": mars_img_B},
    {"title": mars_title_C, "url": mars_img_C},
    {"title": mars_title_D, "url": mars_img_D},
]
hemi_img_urls

# create dict for mars hemisphere images
hemi_img_dict = {'title': [mars_title_A, mars_title_B, mars_title_C, mars_title_D],
                 'url': [mars_img_A, mars_img_B, mars_img_C, mars_img_D]}
hemi_img_dict

browser.quit()

# create dictionaries - earth mars comparison
earth_mars_dict = earth_mars_df.T.to_dict('list')
earth_mars_dict

# created feature img dict
feature_img_dict = {"title": "Featured Mars Image", "url": featured_img_url}

# create latest news dict
new_news_dict = {'title': [news_title], 'summary': [news_p]}
new_news_dict

# compile dictionaries to one dictionary - dictionary of dictionaries
mars_dict = {'news': new_news_dict, 'feat_img': feature_img_dict, 'mars_facts': earth_mars_dict, 'hemi_img': hemi_img_dict}
mission_to_mars.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# seaborn_qqplot gallery: probability plots of iris measurements against a
# fitted gamma distribution.
import os
import sys

from scipy.stats import gamma
from seaborn import load_dataset

# Make the package importable from the repository checkout one level up.
sys.path.append(os.path.abspath('..'))
from seaborn_qqplot import pplot

# %matplotlib inline

iris_data = load_dataset('iris')

# P-P plot of sepal length vs gamma, with the identity line drawn.
pplot(iris_data, x="sepal_length", kind='pp', y=gamma, height=5, aspect=3,
      display_kws={"identity": True})

# Same P-P plot, split by species.
pplot(iris_data, x="sepal_length", y=gamma, kind='pp', hue="species",
      height=5, aspect=3)

# Probability plot of petal length per species.
pplot(iris_data, x="petal_length", y=gamma, kind='p', hue="species",
      height=5, aspect=3)

# Q-Q plot of petal length per species.
pplot(iris_data, x="petal_length", y=gamma, kind='qq', hue="species",
      height=5, aspect=3)
examples/example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# + [markdown] nbsphinx="hidden" slideshow={"slide_type": "skip"}
# This notebook is part of the $\omega radlib$ documentation: https://docs.wradlib.org.
#
# Copyright (c) $\omega radlib$ developers.
# Distributed under the MIT License. See LICENSE.txt for more info.

# + [markdown] slideshow={"slide_type": "slide"}
# # Import wradlib and check its version

# + [markdown] slideshow={"slide_type": "fragment"}
# This simple example shows the $\omega radlib$ version at rendering time.

# + slideshow={"slide_type": "fragment"}
# Import the library and report which release rendered this page.
import wradlib

print(wradlib.__version__)
notebooks/basics/wradlib_introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# We have implemented base models for MNIST with keras. How can we
# implement a more sophisticated version?

# ## Setup
# What we need is tensorflow/keras mainly, and a bit of other libraries.
import tensorflow as tf
from tensorflow import keras
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Check that a GPU is available; everything below trains on it.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))

# ## Load data, EDA, and preprocess
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Hold out 10% of the training images for validation.
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, test_size=0.1, random_state=42)

x_train.shape, y_train.shape
x_val.shape, y_val.shape
x_test.shape, y_test.shape

# Scale pixel values to [0, 1].
x_train, x_val, x_test = x_train / 255.0, x_val / 255.0, x_test / 255.0

# One-hot encode the labels to match CategoricalCrossentropy.
y_train, y_val, y_test = (tf.keras.utils.to_categorical(y_train),
                          tf.keras.utils.to_categorical(y_val),
                          tf.keras.utils.to_categorical(y_test))
y_train[0]

# Peek at a few sample digits.
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.imshow(x_train[i], cmap="gray")
plt.show()

# ## Models
# Hyperparameters shared by every experiment below.
BATCH_SIZE = 64
EPOCHS = 100
loss = keras.losses.CategoricalCrossentropy()
metrics = ["accuracy"]
optimizer = keras.optimizers.SGD(learning_rate=1e-03)

# ### MLP
# Our first approach is a Multi-Layer Perceptron (MLP): hidden layers with
# non-linear activations. This combination is a universal approximator —
# theoretically it can approximate any function!
MLL_model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(256, activation="sigmoid"),
    keras.layers.Dense(128, activation="sigmoid"),
    keras.layers.Dense(10, activation="softmax"),
])
MLL_model.summary()

# Keep the weights of the best validation-accuracy epoch.
ckpt = tf.keras.callbacks.ModelCheckpoint(
    filepath="./best.ckpt",
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)

MLL_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

with tf.device(device_name=device_name):
    # BUG FIX: `ckpt` was created but never passed to fit(), so the
    # checkpoint file was never written; wire it in via callbacks=.
    hist = MLL_model.fit(x_train, y_train,
                         batch_size=BATCH_SIZE, epochs=EPOCHS,
                         shuffle=True, verbose=2,
                         callbacks=[ckpt],
                         validation_data=(x_val, y_val))

with tf.device(device_name=device_name):
    MLL_model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=2)

# ### CNN
# Convolutional neural networks (CNNs) perform great on most image tasks.
# Conv layers want a channel axis: (28, 28) -> (28, 28, 1).
x_train, x_val, x_test = (np.expand_dims(x_train, -1),
                          np.expand_dims(x_val, -1),
                          np.expand_dims(x_test, -1))
x_train.shape, x_val.shape, x_test.shape

CNN_model = keras.models.Sequential([
    keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation="relu"),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax"),
])
CNN_model.summary()
CNN_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

with tf.device(device_name=device_name):
    print("training start!")
    hist = CNN_model.fit(x_train, y_train, batch_size=BATCH_SIZE,
                         epochs=EPOCHS, shuffle=True, verbose=2,
                         validation_data=(x_val, y_val))
    print("evaluation start!")
    CNN_model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=2)

# ### Deeper CNN
# What if we go deeper with CNN layers?
Deeper_CNN_model = keras.models.Sequential([
    keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(64, (3, 3), activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(128, (3, 3), activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation="relu"),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax"),
])
Deeper_CNN_model.summary()
Deeper_CNN_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

with tf.device(device_name=device_name):
    print("training start!")
    hist = Deeper_CNN_model.fit(x_train, y_train, batch_size=BATCH_SIZE,
                                epochs=EPOCHS, shuffle=True, verbose=2,
                                validation_data=(x_val, y_val))
    print("evaluation start!")
    Deeper_CNN_model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=2)

# ### Deeper CNN with a better optimizer and BatchNorm
# Batch normalization was introduced to stabilize the training process,
# and Adam usually converges faster than plain SGD. Let's try both!
adam_optimizer = tf.optimizers.Adam(learning_rate=1e-03)

Deeper_CNN_model = keras.models.Sequential([
    # (dropped the redundant input_shape on the second Conv2D)
    keras.layers.Conv2D(32, (3, 3), padding="same", input_shape=(28, 28, 1)),
    keras.layers.Conv2D(32, (3, 3), padding="same"),
    keras.layers.ReLU(),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(64, (3, 3), padding="same"),
    keras.layers.Conv2D(64, (3, 3), padding="same"),
    keras.layers.ReLU(),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(128, (3, 3), padding="same"),
    keras.layers.Conv2D(128, (3, 3), padding="same"),
    # BUG FIX: this block was missing its ReLU, unlike the two blocks
    # above, leaving the 128-filter convolutions purely linear.
    keras.layers.ReLU(),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Flatten(),
    # BUG FIX: the Dense layers had no activations, so the three stacked
    # layers collapsed into a single linear map before the softmax.
    keras.layers.Dense(256, activation="relu"),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10),
    keras.layers.Softmax(),
])
Deeper_CNN_model.summary()
Deeper_CNN_model.compile(loss=loss, optimizer=adam_optimizer, metrics=metrics)

with tf.device(device_name=device_name):
    print("training start!")
    hist = Deeper_CNN_model.fit(x_train, y_train, batch_size=BATCH_SIZE,
                                epochs=EPOCHS, shuffle=True, verbose=2,
                                validation_data=(x_val, y_val))
    print("evaluation start!")
    Deeper_CNN_model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=2)
_posts/CV/MNIST_models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fill-mask experiments with AlephBERT plus an encoder/decoder scratchpad.
from transformers import pipeline
from transformers.tokenization_utils import TruncationStrategy
import tokenizers
import pandas as pd
import requests

# Registry of the mask-filling models the demo can load.
models = {
    "AlephBERT-base": {
        "name_or_path": "onlplab/alephbert-base",
        "description": "AlephBERT base model",
    },
    "HeBERT-base-TAU": {
        "name_or_path": "avichr/heBERT",
        "description": "HeBERT model created by TAU",
    },
    "mBERT-base-multilingual-cased": {
        "name_or_path": "bert-base-multilingual-cased",
        "description": "Multilingual BERT model",
    },
}


def get_json_from_url(url):
    """Return the model registry (currently the local stub).

    BUG FIX: the original body was ``return models`` followed by an
    unreachable ``return requests.get(url).json()``; the remote fetch is
    kept as a comment for when the local stub is retired.
    """
    # return requests.get(url).json()
    return models


# models = get_json_from_url('https://huggingface.co/spaces/biu-nlp/AlephBERT/raw/main/models.json')


def load_model(model):
    """Build a fill-mask pipeline whose tokenize step can be bypassed.

    Returns the pipeline plus a standalone tokenizer callable, so callers
    can pass pre-tokenized input through ``_parse_and_tokenize``.
    """
    pipe = pipeline('fill-mask', models[model]['name_or_path'])

    def do_tokenize(inputs):
        return pipe.tokenizer(
            inputs,
            add_special_tokens=True,
            return_tensors=pipe.framework,
            padding=True,
            truncation=TruncationStrategy.DO_NOT_TRUNCATE,
        )

    def _parse_and_tokenize(inputs, tokenized=False, **kwargs):
        # Skip tokenization when the caller already tokenized the input.
        if not tokenized:
            inputs = do_tokenize(inputs)
        return inputs

    pipe._parse_and_tokenize = _parse_and_tokenize
    return pipe, do_tokenize


mode = 'Models'
if mode == 'Models':
    model = "AlephBERT-base"
    masking_level = 'Tokens'
    model_tags = model.split('-')
    model_tags[0] = 'Model:' + model_tags[0]
    unmasker, tokenize = load_model(model)
    input_text = " [MASK] אתה טיפש "
    input_masked = None
    tokenized = tokenize(input_text)
    ids = tokenized['input_ids'].tolist()[0]
    subwords = unmasker.tokenizer.convert_ids_to_tokens(ids)
    if masking_level == 'Tokens':
        tokens = str(input_text).split()
        mask_idx = '[MASK]'
        if mask_idx is not None:
            # BUG FIX: the original compared the integer enumerate index
            # against the '[MASK]' string (never equal); compare the token
            # itself. Output is unchanged for inputs that already contain
            # the mask token.
            input_masked = ' '.join(
                '[MASK]' if token == mask_idx else token for token in tokens)
        display_input = input_masked
    if input_masked:
        ids = tokenized['input_ids'].tolist()[0]
        subwords = unmasker.tokenizer.convert_ids_to_tokens(ids)
        res = unmasker(input_masked, top_k=5)
        if res:
            print(res)
            # res = [{'Prediction':r['token_str'], 'Completed Sentence':r['sequence'].replace('[SEP]', '').replace('[CLS]', ''), 'Score':r['score']} for r in res]
            # res_table = pd.DataFrame(res)
            # st.table(res_table)

res[0]

# --- encoder / decoder scratchpad -------------------------------------
import numpy as np
import transformers
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from transformers import BertModel, BertTokenizerFast
# BUG FIX: `summary` is called a few cells below but was only imported at
# the very bottom of the notebook, raising NameError when run in order.
from torchsummary import summary

alephbert_tokenizer = BertTokenizerFast.from_pretrained('onlplab/alephbert-base')


class Encoder(nn.Module):
    """Thin wrapper that runs token ids through AlephBERT."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.alephbert_model = BertModel.from_pretrained("onlplab/alephbert-base")

    def forward(self, ids):
        # Attention mask / token-type ids intentionally omitted for now:
        # _, o2 = self.alephbert_model(ids, attention_mask=mask,
        #                              token_type_ids=token_type_ids,
        #                              return_dict=False)
        return self.alephbert_model(ids)


model = Encoder()
loss_fn = nn.BCEWithLogitsLoss()
# Initialize optimizer.
optimizer = optim.Adam(model.parameters(), lr=0.0001)


class Decoder(nn.Module):
    """Identical AlephBERT wrapper kept as the decoder half of the experiment."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.alephbert_model = BertModel.from_pretrained("onlplab/alephbert-base")

    def forward(self, ids):
        return self.alephbert_model(ids)


model_dec = Decoder()

summary(BertModel.from_pretrained("onlplab/alephbert-base"))
summary(BertModel.from_pretrained("avichr/heBERT"))


class BertDataset(Dataset):
    """SST2 sentences + binary labels, tokenized on the fly."""

    def __init__(self, tokenizer, max_length):
        super(BertDataset, self).__init__()
        # BUG FIX: the original assigned `self.root_dir = root_dir`, but no
        # `root_dir` existed in any scope, so constructing the dataset
        # raised NameError; the attribute was never used, so it is dropped.
        self.train_csv = pd.read_csv(
            'https://github.com/clairett/pytorch-sentiment-classification/raw/master/data/SST2/train.tsv',
            delimiter='\t', header=None)
        self.tokenizer = tokenizer
        self.target = self.train_csv.iloc[:, 1]
        self.max_length = max_length

    def __len__(self):
        return len(self.train_csv)

    def __getitem__(self, index):
        text1 = self.train_csv.iloc[index, 0]
        inputs = self.tokenizer.encode_plus(
            text1,
            None,
            pad_to_max_length=True,  # NOTE: deprecated; padding='max_length' in newer transformers
            add_special_tokens=True,
            return_attention_mask=True,
            max_length=self.max_length,
        )
        ids = inputs["input_ids"]
        token_type_ids = inputs["token_type_ids"]
        mask = inputs["attention_mask"]
        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(mask, dtype=torch.long),
            'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
            'target': torch.tensor(self.train_csv.iloc[index, 1], dtype=torch.long),
        }


tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-uncased")
dataset = BertDataset(tokenizer, max_length=100)
dataloader = DataLoader(dataset=dataset, batch_size=32)

encoded_review = alephbert_tokenizer.encode_plus(
    "[MASK] הלכתי אתמול",
    return_tensors='pt',
)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
input_ids = encoded_review['input_ids'].to(device)
output = model(input_ids)

_, prediction = torch.max(output[1], dim=1)
print(prediction)

from transformers import modeling_utils
# BUG FIX: the original called get_extended_attention_mask() with no
# arguments, which raises TypeError; left commented as a reminder of the
# API being explored.
# modeling_utils.ModuleUtilsMixin.get_extended_attention_mask()
models/alephbert-base/SecondTry.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Clustering with k-Means Model
# We use cluster analysis to generate a big-picture model of the weather
# at a local station using minute-granularity data. Goal: create 12
# clusters (with k = 5 as a comparison).
#
# NOTE: the dataset is minute_weather.csv:
# https://drive.google.com/open?id=0B8iiZ7pSaSFZb3ItQ1l4LWRMTjg

# ## Importing the libraries
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import os
import utils
from sklearn import metrics
from scipy.spatial.distance import cdist
import pandas as pd
import numpy as np
from itertools import cycle, islice
import matplotlib.pyplot as plt
from pandas.plotting import parallel_coordinates
# %matplotlib inline

# ## Minute Weather Data
# Raw sensor measurements captured at one-minute intervals from a San
# Diego, CA weather station (Sep 2011 - Sep 2014). Per row: rowID,
# hpwren_timestamp, air_pressure (hPa), air_temp (F), avg/max/min wind
# direction (deg, 0 = North, clockwise) and speed (m/s),
# rain_accumulation (mm), rain_duration (s), relative_humidity (%).
data = pd.read_csv('./weather/minute_weather.csv')
data.head()

# check missing data
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
dataMissing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
dataMissing.head(15)

data.shape

# ## Data Sampling — keep every 10th row to make clustering tractable
dfTen = data[data['rowID'] % 10 == 0]
dfTen.shape
dfTen.head()

# ## Statistics
dfTen.describe().transpose()
dfTen[dfTen['rain_accumulation'] == 0].shape
dfTen[dfTen['rain_duration'] == 0].shape

# ## Drop the rain columns (almost always zero), then rows with NaNs
del dfTen['rain_accumulation']
del dfTen['rain_duration']

print('Rows before: ' + str(dfTen.shape[0]))
dfTen = dfTen.dropna()
print('Rows after: ' + str(dfTen.shape[0]))
# Lost 0.3% of the dataframe.

dfTen.columns

# ## Select features of interest for clustering
features = ['air_pressure', 'air_temp', 'avg_wind_direction', 'avg_wind_speed',
            'max_wind_direction', 'max_wind_speed', 'relative_humidity']
df = dfTen[features]
df.head()

# ## Scaling the features using StandardScaler
# (so every feature contributes equally to the Euclidean distances)
X = StandardScaler().fit_transform(df)
X


# ## The Elbow Method
def elbowMethod(data, maxK):
    """Plot mean distortion for k = 1 .. maxK-1 to help choose k.

    Distortion is the mean distance of each sample to its nearest
    cluster center.
    """
    distortions = []
    K = range(1, maxK)
    for k in K:
        # BUG FIX: the original fit each model twice (once via the
        # constructor chain and again on the next line), doubling the
        # runtime of the sweep for no benefit.
        model = KMeans(n_clusters=k).fit(data)
        distortions.append(
            sum(np.min(cdist(data, model.cluster_centers_, 'euclidean'), axis=1))
            / data.shape[0])
    plt.plot(K, distortions, 'bx-')
    plt.xlabel('k')
    plt.ylabel('Distortion')
    plt.title('The Elbow Method showing the optimal k')
    plt.show()


elbowMethod(X, 20)
# k = 5 seems to be a good choice.

# ## Using k-Means Clustering
# For k = 12
kmeans12 = KMeans(n_clusters=12)
model12 = kmeans12.fit(X)
centers12 = model12.cluster_centers_
print('model\n', model12)

# For k = 5
kmeans5 = KMeans(n_clusters=5)
model5 = kmeans5.fit(X)
centers5 = model5.cluster_centers_
print('model\n', model5)


# ## Plots
def pd_centers(features, centers):
    """Return a DataFrame of cluster centers plus a 'prediction' column."""
    colNames = list(features)
    colNames.append('prediction')
    Z = [np.append(A, index) for index, A in enumerate(centers)]
    P = pd.DataFrame(Z, columns=colNames)
    P['prediction'] = P['prediction'].astype(int)
    return P


def parallel_plot(data, k):
    """Parallel-coordinates plot of the given (standardized) centers."""
    myColors = list(islice(cycle(['b', 'r', 'g', 'y', 'k']), None, len(data)))
    plt.figure(figsize=(10, 5)).gca().axes.set_ylim([-3, +3])
    parallel_coordinates(data, 'prediction', color=myColors, marker='o')
    plt.title('For k = ' + str(k))


plot5 = pd_centers(features, centers5)
plot12 = pd_centers(features, centers12)

# ## Dry Days
parallel_plot(plot5[plot5['relative_humidity'] < -0.5], 5)
parallel_plot(plot12[plot12['relative_humidity'] < -0.5], 12)

# ## Warm Days
parallel_plot(plot5[plot5['air_temp'] > 0.5], 5)
parallel_plot(plot12[plot12['air_temp'] > 0.5], 12)

# ## Cool Days
parallel_plot(plot5[(plot5['relative_humidity'] > 0.5) & (plot5['air_temp'] < 0.5)], 5)
parallel_plot(plot12[(plot12['relative_humidity'] > 0.5) & (plot12['air_temp'] < 0.5)], 12)
Python for Data Science/Week 7 - Intro to Machine Learning/Clustering Model - Weather Data Clustering with k-Means.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Multiple Linear Regression
#
# Import relevant libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.linear_model import LinearRegression

# # Load the data
data = pd.read_csv("1.02. Multiple linear regression.csv")
data.head()

# # Create the regression
# Declare the dependent and independent variables.
x = data[["SAT", "Rand 1,2,3"]]
y = data["GPA"]
x.shape
y.shape

# Column vector of SAT scores only, for the 2-D plot further down.
# BUG FIX: the original reshaped the whole two-feature matrix with
# reshape(-1, 1), stacking both features into a single column of twice
# the length.
x_matrix = x["SAT"].values.reshape(-1, 1)
x_matrix.shape

# # Regression itself
reg = LinearRegression()
reg.fit(x, y)

# # R-squared
reg.score(x, y)

# # Coefficients
reg.coef_

# # Intercept
reg.intercept_

# # Making predictions
# BUG FIX: the model was trained on two features, so prediction input
# must carry both columns; a SAT-only frame raises a feature-count
# mismatch. A neutral value of 1 is used for the random feature.
new_data = pd.DataFrame(data=[[1740, 1], [1760, 1]],
                        columns=["SAT", "Rand 1,2,3"])
new_data
reg.predict(new_data)

new_data["Predicted GPA"] = reg.predict(new_data[["SAT", "Rand 1,2,3"]])
new_data

# Scatter the data and overlay the fitted SAT-GPA line.
# BUG FIX: plt.scatter(x, y) with the two-column frame raised a size
# mismatch, and yhat used a hard-coded 0.275 intercept with the full
# coef_ vector; plot against SAT and use the fitted intercept and SAT
# coefficient instead.
plt.scatter(x["SAT"], y)
yhat = reg.intercept_ + reg.coef_[0] * x_matrix
fig = plt.plot(x["SAT"], yhat, lw=4, c="blue", label="regression label")
plt.xlabel("SAT", fontsize=20)
plt.ylabel("GPA", fontsize=20)
plt.show()

# # Calculating the R-squared
reg.score(x, y)

# ### Formula for Adjusted R-squared
# R^2_adj = 1 - (1 - R^2) * (n - 1) / (n - p - 1)
# GENERALIZED: n and p are taken from the design matrix instead of
# hard-coding the number of predictors (was 2).
n, p = x.shape
adj_R_squared = 1 - (1 - reg.score(x, y)) * ((n - 1) / (n - p - 1))
adj_R_squared

x.shape

# # Feature selection — univariate F-test p-values
from sklearn.feature_selection import f_regression
f_regression(x, y)
p_values = f_regression(x, y)[1]
p_values.round(3)

# # Creating a summary table
reg_summary = pd.DataFrame(data=x.columns.values, columns=["Features"])
reg_summary
reg_summary["Coefficient"] = reg.coef_
reg_summary["P-values"] = p_values.round(3)
reg_summary

# # Standardization
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x)
x_scaled = scaler.transform(x)
x_scaled

# # Regression with scaled features
reg = LinearRegression()
reg.fit(x_scaled, y)
reg.coef_
reg.intercept_

reg_summary = pd.DataFrame([["Bias"], ["SAT"], ["Rand 1,2,3"]], columns=["Features"])
reg_summary["Weights"] = reg.intercept_, reg.coef_[0], reg.coef_[1]
reg_summary

# # Making predictions with the standardized coefficients (weights)
new_data = pd.DataFrame(data=[[1700, 2], [1800, 1]], columns=["SAT", "Rand 1,2,3"])
new_data.head()
# NOTE: the original also predicted on the *raw* values here; the model
# was fit on scaled features, so only the scaled prediction below is
# meaningful — scale first, then predict.
new_data_scaled = scaler.transform(new_data)
new_data_scaled
reg.predict(new_data_scaled)

# # What if we removed the "Rand 1,2,3" feature?
reg_simple = LinearRegression()
x_simple_matrix = x_scaled[:, 0].reshape(-1, 1)
reg_simple.fit(x_simple_matrix, y)
reg_simple.predict(new_data_scaled[:, 0].reshape(-1, 1))
Advanced Statistical Methods in Python - ScikitLearn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # SOGComp is SOG run for comparison with SS2DSOG5x5 # - compare starting values to check initialization of IC's # + import numpy as np import pandas as pd import datetime as dtm import matplotlib.pyplot as plt import matplotlib.dates as dts import netCDF4 as nc import os import re import pytz # %matplotlib inline # - # # read in SOG data: filename='/data/eolson/SOG/SOG-runs/SOGCompSink/profiles/hoff-SOG.dat' file_obj = open(filename, 'rt') for index, line in enumerate(file_obj): line = line.strip() if line.startswith('*FieldNames:'): field_names = line.split(': ', 1)[1].split(', ') elif line.startswith('*FieldUnits:'): field_units = line.split(': ', 1)[1].split(', ') elif line.startswith('*HoffmuellerStartYr:'): year_start = line.split(': ', 1)[1] elif line.startswith('*HoffmuellerStartDay:'): day_start = line.split(': ', 1)[1] elif line.startswith('*HoffmuellerStartSec:'): sec_start = line.split(': ', 1)[1] elif line.startswith('*HoffmuellerInterval:'): interval = line.split(': ', 1)[1] elif line.startswith('*EndOfHeader'): break data = pd.read_csv(filename, delim_whitespace=True, header=0, names=field_names, skiprows=index, chunksize=102) # Timestamp in matplotlib time dt_num = dts.date2num(dtm.datetime.strptime(year_start + ' ' + day_start, '%Y %j')) + float(sec_start)/86400 interval=float(interval) # Extract dataframe chunks into dictionary for index, chunk in enumerate(data): if index==0: da=chunk else: da=np.dstack((da,chunk)) z=da[:,0,0] t=np.arange(da.shape[2]) t=(t+1.0)*3600 tt,zz=np.meshgrid(t,-z) print field_names #print t #print day_start #print dts.num2date(dt_num) #print z # Load SS2DSOG nuts & bio data: # + resultsDir='/data/eolson/MEOPAR/SS2DSOGruns/run5x5_15/' fname='SalishSea_1h_20041019_20041020_ptrc_T.nc' 
f=nc.Dataset(os.path.join(resultsDir,fname)) fkeys=f.variables.keys() lons=f.variables['nav_lon'][1,:] lats=f.variables['nav_lat'][:,1] for ik in fkeys: match = re.search(r'depth.',ik) if match: zkey=match.group(0) zSS=f.variables[zkey][:] xxSS,zzSS=np.meshgrid(lons,-z[:]) xtSS,ytSS=np.meshgrid(lons,lats) print fkeys f2name='/data/eolson/MEOPAR/SS2DSOGruns/nuts_SOG5x5_S3-2014-10-19-WithMRubraMicroZooRemin.nc' f2=nc.Dataset(f2name) # - # Load IC's prepared for SS2DSOG5x5: # Plot 3 phyto from SOG, PHY and PHY2 from SS2DSOG (L) and NO3 (R): # + active="" # fig, axs = plt.subplots(2,2,figsize=(12,8)) # # # Phyto # iii=16 # pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='r') # iii=17 # pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='b') # iii=19 # pl0=axs[0,0].plot(da[:,iii,0],-z,'--',color='g') # #pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k') # # axs[0,0].set_ylabel('z (m)') # axs[1,0].set_xlabel('DOC/POC/bSi') # #pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z) # ivar1=f.variables['DOC'][:,:,:,:]/7.6 # pl3=axs[0,0].plot(ivar1[0,0:101,2,2],-zSS[0:101],'.',color='m') # pl3=axs[1,0].plot(ivar1[0,0:101,2,2],-zSS[0:101],'--',color='m') # ivar2=f.variables['POC'][:,:,:,:]/7.6 # pl3=axs[0,0].plot(ivar2[0,0:101,2,2],-zSS[0:101],'.',color='c') # pl3=axs[1,0].plot(ivar2[0,0:101,2,2],-zSS[0:101],'-',color='c') # ivar3=f.variables['DSi'][:,:,:,:] # pl3=axs[0,0].plot(ivar3[0,0:101,2,2],-zSS[0:101],'.',color='y') # pl3=axs[1,0].plot(ivar3[0,0:101,2,2],-zSS[0:101],'--',color='y') # # # #axs[0,0].set_xlim([0,1]) # #print da[0:101,5,0]/ivar2[0,0:101,2,2] # #print da[0:101,6,0]/ivar1[0,0:101,2,2] # # # NO # iii=8 # pl0=axs[0,1].plot(da[:,iii,0],-z,'-',color='r') # pl0=axs[0,1].plot(f2.variables['NO3'][0,0:101,2,2],-zSS[0:101],'-',color='k') # axs[0,1].set_ylabel('z (m)') # axs[1,1].set_xlabel(field_names[iii]) # #pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z) # ivar=f.variables['NO3'][:,:,:,:] # pl3=axs[1,1].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='r') # 
pl3=axs[0,1].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='k') # #print ivar[0,0:100,2,2] # #print da[0:101,iii,0]/ivar[0,0:101,2,2] # + active="" # fig, axs = plt.subplots(2,2,figsize=(12,8)) # # # Phyto # iii=4 # pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='r') # iii=5 # pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='b') # iii=6 # pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='g') # #pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k') # iii=7 # pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='k') # #pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k') # axs[0,0].set_ylabel('z (m)') # axs[1,0].set_xlabel('Phyto') # #pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z) # ivar1=f.variables['PHY'][:,:,:,:] # pl3=axs[1,0].plot(ivar1[0,0:101,2,2],-zSS[0:101],'-',color='c') # pl3=axs[0,0].plot(ivar1[0,0:101,2,2],-zSS[0:101],'--',color='c') # ivar2=f.variables['PHY2'][:,:,:,:] # pl3=axs[1,0].plot(ivar2[0,0:101,2,2],-zSS[0:101],'--',color='m') # pl3=axs[0,0].plot(ivar2[0,0:101,2,2],-zSS[0:101],'--',color='m') # ivar3=f.variables['ZOO2'][:,:,:,:] # pl3=axs[1,0].plot(ivar3[0,0:101,2,2],-zSS[0:101],'.',color='y') # pl3=axs[0,0].plot(ivar3[0,0:101,2,2],-zSS[0:101],'--',color='y') # ivar4=f.variables['ZOO'][:,:,:,:] # pl4=axs[1,0].plot(ivar4[0,0:101,2,2],-zSS[0:101],'.',color='k') # pl4=axs[0,0].plot(ivar4[0,0:101,2,2],-zSS[0:101],'--',color='k') # # #axs[0,0].set_xlim([0,1]) # #print da[0:101,5,0]/ivar2[0,0:101,2,2] # #print da[0:101,6,0]/ivar1[0,0:101,2,2] # # # NO # iii=8 # pl0=axs[0,1].plot(da[:,iii,0],-z,'-',color='r') # pl0=axs[0,1].plot(f2.variables['NO3'][0,0:101,2,2],-zSS[0:101],'-',color='k') # axs[0,1].set_ylabel('z (m)') # axs[1,1].set_xlabel(field_names[iii]) # #pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z) # ivar=f.variables['NO3'][:,:,:,:] # pl3=axs[1,1].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='r') # pl3=axs[0,1].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='k') # #print ivar[0,0:100,2,2] # #print 
da[0:101,iii,0]/ivar[0,0:101,2,2]
# -

# Plot Si (L) and NH4 (R)

# + active=""
# fig, axs = plt.subplots(2,2,figsize=(12,8))
#
# # Si
# iii=10
# pl0=axs[0,0].plot(da[:,iii,0],-z,'-',color='r')
# axs[0,0].set_ylabel('z (m)')
# axs[0,0].set_xlabel('Si')
# pl0=axs[0,0].plot(f2.variables['Si'][0,0:101,2,2],-zSS[0:101],'-',color='k')
# #pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
# ivar=f.variables['Si'][:,:,:,:]
# pl3=axs[1,0].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='r')
# pl4=axs[0,0].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='g')
# #print da[0:100,iii,0]/ivar[0,0:100,2,2]
#
# # NH4
# iii=9
# pl0=axs[0,1].plot(da[:,iii,0],-z,'-',color='b')
# pl0=axs[0,1].plot(f2.variables['NH4'][0,0:101,2,2],-zSS[0:101],'-',color='k')
# axs[0,1].set_ylabel('z (m)')
# axs[0,1].set_xlabel(field_names[iii])
# #pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
# ivar=f.variables['NH4'][:,:,:,:]
# pl3=axs[1,1].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='b')
# pl4=axs[0,1].plot(ivar[0,0:100,2,2],-zSS[0:100],'--',color='g')
# axs[0,1].set_xlim([.8,1.2])
# #print da[0:100,iii,0]/ivar[0,0:100,2,2]
# -

# REPEAT WITH LATER TIME FOR NEMO:

# +
# Compare SOG profiles (da, indexed [depth, field, time]; see da[:,iii,ti]
# plotted against -z) with NEMO output at a later time.
# da, z, field_names are defined earlier in this notebook (above this chunk).
fig, axs = plt.subplots(2,2,figsize=(12,8))
ti=47 # hrs since start
# NEMO variables below are indexed at 360*ti, i.e. 360 output records per
# hour is assumed here -- TODO confirm against the run's output frequency.
# Phyto: SOG fields 4-7 vs NEMO PHY/PHY2/ZOO/ZOO2 at the 5x5 centre (2,2)
iii=4
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='r')
iii=5
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='g')
iii=6
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='b')
iii=7
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='k')
#pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,0].set_ylabel('z (m)')
axs[1,0].set_xlabel('Phyto')
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar1=f.variables['PHY2'][:,:,:,:] # diatoms
pl3=axs[1,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
pl3=axs[0,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
ivar2=f.variables['PHY'][:,:,:,:]
pl3=axs[1,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
pl3=axs[0,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
ivar3=f.variables['ZOO2'][:,:,:,:]
pl3=axs[1,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
pl3=axs[0,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
ivar4=f.variables['ZOO'][:,:,:,:]
pl4=axs[1,0].plot(ivar4[360*ti,0:101,2,2],-zSS[0:101],'.',color='k')
pl4=axs[0,0].plot(ivar4[360*ti,0:101,2,2],-zSS[0:101],'.',color='k')
axs[0,0].set_ylim([-60,0])
# NO: SOG field 8 vs NEMO NO3 (note the 360*ti-1 index on NO3 below)
iii=8
pl0=axs[0,1].plot(da[:,iii,ti],-z,'-',color='r')
#pl0=axs[0,1].plot(f2.variables['NO3'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,1].set_ylabel('z (m)')
axs[0,1].set_ylim([-20,0])
axs[1,1].set_xlabel(field_names[iii])
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['NO3'][:,:,:,:]
pl3=axs[1,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
pl3=axs[0,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
#print ivar[0,0:100,2,2]

# +
# Same comparison for the dissolved/particulate pools: SOG fields 16/17/19
# vs NEMO DOC/POC (converted by /7.6 -- presumably a C:N Redfield-style
# ratio; confirm against model units) and DSi.
fig, axs = plt.subplots(2,2,figsize=(12,8))
# Phyto
iii=16
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='r')
iii=17
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='b')
iii=19
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='g')
#pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,0].set_ylabel('z (m)')
axs[1,0].set_xlabel('DON/PON/bSi')
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar1=f.variables['DOC'][:,:,:,:]/7.6
pl3=axs[1,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
pl3=axs[0,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
ivar2=f.variables['POC'][:,:,:,:]/7.6
pl3=axs[1,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
pl3=axs[0,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
ivar3=f.variables['DSi'][:,:,:,:]
pl3=axs[1,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
pl3=axs[0,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
axs[0,0].set_ylim([-50,0])
# NO
iii=8
pl0=axs[0,1].plot(da[:,iii,ti],-z,'-',color='r')
#pl0=axs[0,1].plot(f2.variables['NO3'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,1].set_ylabel('z (m)')
axs[0,1].set_ylim([-20,0])
axs[1,1].set_xlabel(field_names[iii])
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['NO3'][:,:,:,:]
pl3=axs[1,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
pl3=axs[0,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
#print ivar[0,0:100,2,2]

# +
# Si (left column) and NH4 (right column) at the later time ti.
fig, axs = plt.subplots(2,2,figsize=(12,8))
# Si
iii=10
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='r')
axs[0,0].set_ylabel('z (m)')
axs[0,0].set_xlabel('Si')
axs[0,0].set_ylim([-20,0])
#pl0=axs[0,0].plot(f2.variables['Si'][0,0:101,2,2],-zSS[0:101],'-',color='k')
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['Si'][:,:,:,:]
pl3=axs[1,0].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
pl4=axs[0,0].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
# NH4
iii=9
pl0=axs[0,1].plot(da[:,iii,ti],-z,'.',color='b')
#pl0=axs[0,1].plot(f2.variables['NH4'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,1].set_ylabel('z (m)')
axs[0,1].set_xlabel(field_names[iii])
axs[0,1].set_ylim([-20,0])
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['NH4'][:,:,:,:]
pl3=axs[1,1].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
pl4=axs[0,1].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
#axs[0,1].set_xlim([.8,1.2])
# -

# Spot-check one diatom profile at record 182.
ivar1=f.variables['PHY2'][:,:,:,:] # diatoms
print ivar1[182,0:101,2,2]

# Done with the ptrc and reference files; switch to the diad_T output.
f.close()
f2.close()

fname='SalishSea_1h_20041019_20041020_diad_T.nc'
f=nc.Dataset(os.path.join(resultsDir,fname))
fkeys=f.variables.keys()
lons=f.variables['nav_lon'][1,:]
lats=f.variables['nav_lat'][:,1]
# time_counter is in seconds; convert to hours for plotting.
times=np.copy(f.variables['time_counter'])/3600
# Find the depth coordinate variable (name starts with 'depth').
for ik in fkeys:
    match = re.search(r'depth.',ik)
    if match:
        zkey=match.group(0)
zSS=f.variables[zkey][:]
xxSS,zzSS=np.meshgrid(lons,-zSS)
xtSS,ytSS=np.meshgrid(lons,lats)
# time/depth mesh for pcolormesh panels below
tt2,zz2=np.meshgrid(times,-zSS)
print fkeys
#print f.variables['PAR'].shape
#print f.variables['time_counter']

# +
# Compare SOG field 23 (PAR, per field_names use below) against the NEMO PAR
# diagnostic: SOG section, NEMO section, their difference, and time series.
iii=23
fig, axs = plt.subplots(1,4,figsize=(15,5))
mesh0=axs[0].pcolormesh(tt[:,0:(48*360/2)]/3600,zz[:,0:(48*360/2)],da[:,iii,0:(48*360/2)])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
#0:48*360:360
mesh1=axs[1].pcolormesh(tt2,zz2,np.transpose(f.variables['PAR'][:,:,2,2]))
fig.colorbar(mesh1,ax=axs[1])
# Difference panel: NEMO PAR subsampled every 2nd record to line up with the
# SOG time axis -- assumes a 2:1 output-frequency ratio; TODO confirm.
mesh2=axs[2].pcolormesh(tt[0:100,0:(48*360/2)]/3600,zz[0:100,0:(48*360/2)],da[1:101,iii,0:(48*360/2)]-np.transpose(f.variables['PAR'][0:(48*360):2,0:100,2,2]))
fig.colorbar(mesh2,ax=axs[2])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[3].plot(tt[5,0:(47*360)]/3600,da[5,iii,0:(47*360)],'-',color='b')
pl2=axs[3].plot(tt2[4,0:(47*360)],f.variables['PAR'][0:(47*360),4,2,2],'--',color='g')
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel('Difference')
axs[3].set_title(field_names[iii])
# -

# Diagnostics: shapes, extrema and sample values for the PAR comparison.
iii=23
print np.shape(da[:,iii,:])
print np.shape(np.transpose(f.variables['PAR'][:,0:100,2,2]))
print zz[0:5,0]
print zz2[0:5,0]
print np.max(f.variables['PAR'])
print np.max(da[:,iii,:])
print '--'
print f.variables['time_counter'][15*360]/3600
print '--'
print f.variables['PAR'][15*360,0:5,2,2]
print da[0:5,iii,15*360/2]
print '+++'
print zzSS.shape
print f.variables['PAR'].shape
print tt[0,:]
print da[5,iii,5850:5858]

# +
# Standard 4-panel SOG field summary (repeated for successive iii below):
# section, end-start difference, first-timestep profile, last-timestep profile.
iii=4
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii=5
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
# Repeated 4-panel summary cells: one per SOG field index iii
# (section, end-start difference, first profile, last profile).
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii=6
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii=7
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii=8
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# The following cells step through the remaining field indices via iii+=1.

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(d,-z)
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')
# -

# These bare increments skip fields whose plotting cells below were
# deactivated (active="" raw cells) while keeping iii in step.
iii+=1
print iii

# + active=""
# fig, axs = plt.subplots(1,4,figsize=(20,5))
# mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
# fig.colorbar(mesh0,ax=axs[0])
# axs[0].set_ylabel('z (m)')
# axs[0].set_xlabel('time steps')
# axs[0].set_title(field_names[iii])
#
# d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
# pl1=axs[1].plot(d,-z)
# axs[1].set_ylabel('z (m)')
# axs[1].set_xlabel('Difference')
# axs[1].set_title(field_names[iii])
#
# pl2=axs[2].plot(da[:,iii,0],-z)
# axs[2].set_ylabel('z (m)')
# axs[2].set_xlabel(field_names[iii])
# axs[2].set_title('timestep=1')
#
# pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
# axs[3].set_ylabel('z (m)')
# axs[3].set_xlabel(field_names[iii])
# axs[3].set_title('timestep=end')
# -

iii+=1
print iii

# + active=""
# fig, axs = plt.subplots(1,4,figsize=(20,5))
# mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
# fig.colorbar(mesh0,ax=axs[0])
# axs[0].set_ylabel('z (m)')
# axs[0].set_xlabel('time steps')
# axs[0].set_title(field_names[iii])
#
# d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
# pl1=axs[1].plot(d,-z)
# axs[1].set_ylabel('z (m)')
# axs[1].set_xlabel('Difference')
# axs[1].set_title(field_names[iii])
#
# pl2=axs[2].plot(da[:,iii,0],-z)
# axs[2].set_ylabel('z (m)')
# axs[2].set_xlabel(field_names[iii])
# axs[2].set_title('timestep=1')
#
# pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
# axs[3].set_ylabel('z (m)')
# axs[3].set_xlabel(field_names[iii])
# axs[3].set_title('timestep=end')
# -

iii+=1
print iii

# + active=""
# fig, axs = plt.subplots(1,4,figsize=(20,5))
# mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
# fig.colorbar(mesh0,ax=axs[0])
# axs[0].set_ylabel('z (m)')
# axs[0].set_xlabel('time steps')
# axs[0].set_title(field_names[iii])
#
# d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
# pl1=axs[1].plot(d,-z)
# axs[1].set_ylabel('z (m)')
# axs[1].set_xlabel('Difference')
# axs[1].set_title(field_names[iii])
#
# pl2=axs[2].plot(da[:,iii,0],-z)
# axs[2].set_ylabel('z (m)')
# axs[2].set_xlabel(field_names[iii])
# axs[2].set_title('timestep=1')
#
# pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
# axs[3].set_ylabel('z (m)')
# axs[3].set_xlabel(field_names[iii])
# axs[3].set_title('timestep=end')

# +
iii+=1
print iii
fig, axs = plt.subplots(1,2,figsize=(50,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
# Wide section-only panel for this field.
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
# -

iii+=1
print iii

# +
# Variant of the 4-panel summary: the second panel is a surface time series
# (da[0,iii,:] vs tt[0,:]) rather than the end-start difference profile,
# though the axis labels still say 'Difference' (kept as in the original).
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
d=da[:,iii,da.shape[2]-1]-da[:,iii,0]
pl1=axs[1].plot(tt[0,:],da[0,iii,:])
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')
# -

iii=4
print iii

# +
# Same variant repeated for field 4.
fig, axs = plt.subplots(1,4,figsize=(20,5))
mesh0=axs[0].pcolormesh(tt,zz,da[:,iii,:])
fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)')
axs[0].set_xlabel('time steps')
axs[0].set_title(field_names[iii])
pl1=axs[1].plot(tt[0,:],da[0,iii,:])
axs[1].set_ylabel('z (m)')
axs[1].set_xlabel('Difference')
axs[1].set_title(field_names[iii])
pl2=axs[2].plot(da[:,iii,0],-z)
axs[2].set_ylabel('z (m)')
axs[2].set_xlabel(field_names[iii])
axs[2].set_title('timestep=1')
pl3=axs[3].plot(da[:,iii,da.shape[2]-1],-z)
axs[3].set_ylabel('z (m)')
axs[3].set_xlabel(field_names[iii])
axs[3].set_title('timestep=end')
# -
# Source notebook: Elise/plotResults/plot_SOGComp_SS2DSOG_p4zprod-Sink.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="a03b5ce8775039db4af7d6a541860380ea262bc1" # ## Exploring the Where it Pays to Attend College Dataset # # **<NAME>** - **19 November 2018** <a id="0"></a> # # * [Introduction](#1) # * [Import Modules](#2) # * [Exploring the First Data](#3) # * [Change Columns Name](#4) # * [Convert Data Type](#5) # * [Visualization](#6) # * [Undergraduate Major Salary](#7) # * [Start Career to Middle Career](#8) # * [Import Second Data](#9) # * [Visualize the Empty Data](#10) # * [Formulas for null values](#11) # * [Change Data Type](#12) # * [Fill null values with Formula](#13) # * [Visualization](#14) # * [Compare Schools](#15) # * [School Types](#16) # * [Salary Statistics](#17) # * [Import Third Data](#18) # * [Change Columns Name](#19) # * [Convert Data Type](#20) # * [Fill null values with Formula](#21) # * [Visualization](#22) # * [Region & School Visualization](#23) # * [Average starting salary for the states](#24) # * [Compare the pay for the schools](#25) # * [Conclusion](#26) # # ## INTRODUCTION <a id="1"></a> # <mark>[Return Contents](#0) # <hr> # # In this notebook, let us try and explore the data given for About college. # # What do we do in this notebook? # * we will fill the null data with a formula. # * we're going to plotly visualize the data. # # and **more**. # + [markdown] _uuid="91244daeafeae7af8bac3271f3d6b06a766943d7" # ### **Import Modules** <a id="2"></a> # <mark>[Return Contents](#0) # <hr> # # Let us first import the necessary modules. # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)
import matplotlib.pyplot as plt

# plotly
import plotly.plotly as py
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
# import graph objects as "go"
import plotly.graph_objs as go

# import missingno library (null-value visualization)
import missingno as msno

import os
#print(os.listdir("../input"))

# + [markdown] _uuid="bae13521ed57ba22768a49bb2e642618a0fb8041"
# let's list the files in the input folder.

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))

# + [markdown] _uuid="4b10d5ffee75b6956447e19d94d7efdc3f4c4d06"
# ### **Exploring the First Data** <a id="3"></a>
# <mark>[Return Contents](#0)
# <hr>
# First, let's explore the degrees that pay back.

# + _uuid="47767f6610fc46edc4e57646164ddb4a83f69418"
# Load the first dataset: salary statistics by undergraduate major.
degrees_df = pd.read_csv('../input/degrees-that-pay-back.csv')
degrees_df.shape

# + _uuid="6025ebaa795d7b18afa7a5a9582f9c81970bfa29"
degrees_df.head()

# + [markdown] _uuid="d2a89c949a2a0c5e48244e36af69b4ca555c7fb0"
# ### **Change Columns Name** <a id="4"></a>
# <mark>[Return Contents](#0)
# <hr>
# I need to change the column names because the original names are very long.

# + _uuid="5970e95385904027957dbe7eda8fffc3abd55bfe"
# Short names: starting/mid-career medians plus the 10th/25th/75th/90th
# mid-career percentiles; starttomid is the percent change column.
degrees_df.columns = ['major','starting','midcareer','starttomid','mid_p10','mid_p25','mid_p75','mid_p90']

# + [markdown] _uuid="a0f79957903c171a702485378782a38bafb2dc69"
# let's see the columns datatype.

# + _uuid="b658b3009d8257859878e181260730f88db1cd25"
degrees_df.info()

# + [markdown] _uuid="65c87e23e86fb382f4cc3fa5d00baf809fb912ef"
# ### Convert Data Type <a id="5"></a>
# <mark>[Return Contents](#0)
# <hr>
# Some columns need a data-type change. For example, the starting column has
# object dtype (currency strings), so it must be converted to numeric, and
# the other salary columns with it.
# + _uuid="a4f09ae4d96bb5fbe46da27ed68dd9c7da17a445"
# Strip currency formatting ("$" and thousands ",") from the salary columns
# and convert them to numeric.  regex=False makes the replacement literal on
# every pandas version: with the old regex default, the pattern "$" matches
# end-of-string and would strip nothing.
someColumns = ['starting', 'midcareer', 'mid_p10', 'mid_p25', 'mid_p75', 'mid_p90']

for column in someColumns:
    degrees_df[column] = degrees_df[column].str.replace("$","",regex=False)
    degrees_df[column] = degrees_df[column].str.replace(",","",regex=False)
    degrees_df[column] = pd.to_numeric(degrees_df[column])

# + _uuid="1a6f574fbf919a218befd8cd2a45a405ae289c1d"
degrees_df.info()

# + _uuid="04a22a6d2f6ae527d3084aaa15ee0086a051a38a"
degrees_df.head()

# + [markdown] _uuid="d7434508d2748a29be394470d1acf1a41261dfb1"
# Now, I'm going to sort the majors by starting salary.

# + _uuid="6f13e5637f20b54361565734b0486c41d54158c7"
# Renamed from `sorted`, which shadowed the built-in sorted().
degrees_sorted = degrees_df.sort_values('starting', ascending=False)
degrees_sorted.head()

# + [markdown] _uuid="022b1f7767c63d7f4eb218283ad45244e67c1936"
# ### **Visualization** <a id="6"></a>
# <mark>[Return Contents](#0)
# <hr>
# ### Undergraduate Major Salaries <a id="7"></a>
#
#
# Now, I converted the data types and names for the columns, so I can
# visualize the data. First, I'm going to show the salaries for each
# undergraduate major.

# + _uuid="f1f07715abbd96906d784786e04c3db9e6dc22be"
# Stacked bar chart: one bar per major, one stacked segment per salary statistic.
trace1 = go.Bar(
                x=degrees_sorted.major,
                y=degrees_sorted.starting,
                name='Starting Median Salary'
)
trace2 = go.Bar(
                x=degrees_sorted.major,
                y=degrees_sorted.midcareer,
                name='Mid-Career Median Salary'
)
trace3 = go.Bar(
                x=degrees_sorted.major,
                y=degrees_sorted.mid_p10,
                name='Mid-Career 10th Percentile Salary'
)
trace4 = go.Bar(
                x=degrees_sorted.major,
                y=degrees_sorted.mid_p25,
                name='Mid-Career 25th Percentile Salary'
)
trace5 = go.Bar(
                x=degrees_sorted.major,
                y=degrees_sorted.mid_p75,
                name='Mid-Career 75th Percentile Salary'
)
trace6 = go.Bar(
                x=degrees_sorted.major,
                y=degrees_sorted.mid_p90,
                name='Mid-Career 90th Percentile Salary'
)

data = [trace1,trace2,trace3,trace4,trace5,trace6]
layout = go.Layout(
    barmode='stack'
)

fig = go.Figure(data=data, layout=layout)
iplot(fig)

# + [markdown] _uuid="28e9d7eed6c9fb759f14d01fa334e5d31c912d6f"
# ### Start Career to Middle Career <a id="8"></a>
# <mark>[Return Contents](#0)
# <hr>
# For each undergraduate major, I will list the salary change from start
# career to middle career.

# + _uuid="7892a1bc476d5a06d00bbe249529ad12298ae2ea"
sorted_starttomid = degrees_df.sort_values('starttomid', ascending=False)
sorted_starttomid.head()

# + [markdown] _uuid="f3b0dcf89125899d372c802b9f389e90192c2ab9"
# Now I'm going to visualize this sequence.

# + _uuid="1ee0259e1cdb2d09022adb7587e32aac36b0e577"
# Line chart of the percent change, highest to lowest.
trace = go.Scatter(
    x = sorted_starttomid.major,
    y = sorted_starttomid.starttomid
)
layout = dict(title = 'Percent change from Starting to Mid-Career Salary',
              xaxis= dict(title= '',ticklen= 5,zeroline= False)
             )
data = [trace]
fig = go.Figure(data=data, layout=layout)
iplot(fig)

# + [markdown] _uuid="765c91de5cc79a519819f72141f5a0fac3b581f0"
# ### **Import Second Data** <a id="9"></a>
# <mark>[Return Contents](#0)
# <hr>
# let's explore our second set of data.
# + _uuid="bde235b369085534ffb808aacf815bae5eac1b66" salariesByCollege_df = pd.read_csv('../input/salaries-by-college-type.csv') salariesByCollege_df.shape # + _uuid="3a5ca28865ee83f80ab44d91d6017f96ae9007f3" salariesByCollege_df.head() # + [markdown] _uuid="b11c88d97254b8070180ee24720f60fd4164c14e" # ### **Visualize the Empty Data** <a id="10"></a> # <mark>[Return Contents](#0) # <hr> # Let's visualize the empty data. # + _uuid="dcc592b9a34a9af87ad72c6734b698c07737ee14" msno.matrix(salariesByCollege_df) plt.show() # Other visualization type #msno.bar(salariesByCollege_df) #plt.show() # + _uuid="7c850459a2d14f63b4764b0b7a2fb2a02e2c40af" salariesByCollege_df.info() # + [markdown] _uuid="65a8e9853fa70e218c7a681cf82bc02e3792f8ff" # ### **Formulas for null values** <a id="11"></a> # <mark>[Return Contents](#0) # <hr> # Totaly 38 data is null. Null data is only available in Mid-Career 10th and in Mid-Career 90th. Now, I will make a formula for null data; # # **For Mid-Career 10th Formula = (midcareer + mid_p25) / 3 = mid_p10** # # **For Mid-Career 90th Formula = (mid_p75 - mid_p25) + mid_p75** # # These formulas give more or less results. # # firstly I am changing some column names. # + _uuid="a016fe014d78d7fb90f1ee9c615ab2bf52cb9c29" salariesByCollege_df.columns = ['schoolname', 'schooltype', 'starting', 'midcareer', 'mid_p10', 'mid_p25', 'mid_p75', 'mid_p90'] # + _uuid="3b05021c1176826a665500b53768d8a134412057" salariesByCollege_df.info() # + [markdown] _uuid="58e10ba4cdb35c2b25fba26e3a95c1ad0a2d9f91" # ### Change Data Type <a id="12"></a> # <mark>[Return Contents](#0) # <hr> # let's change the data type. 
# + _uuid="0369cb359ef669b873ba106e17d06f3f7207e537"
# Strip currency formatting and convert the salary columns to numeric.
# regex=False makes the "$" replacement literal on every pandas version
# (as a regex, "$" matches end-of-string and strips nothing).
columns = ['starting', 'midcareer', 'mid_p10', 'mid_p25', 'mid_p75', 'mid_p90']

for column in columns:
    salariesByCollege_df[column] = salariesByCollege_df[column].str.replace("$","",regex=False)
    salariesByCollege_df[column] = salariesByCollege_df[column].str.replace(",","",regex=False)
    salariesByCollege_df[column] = pd.to_numeric(salariesByCollege_df[column])

# + _uuid="1d06d0eccaf3bc63162584280595c5e6096a5746"
# some universities appear twice (listed under two school types).
salariesByCollege_df.schoolname.value_counts().head(25)

# + [markdown] _uuid="b2f1ea040b4047513b7b269e93c2ac1d3873b556"
# let's see the data info.

# + _uuid="9910e36174e9896b3bb65b39b131ce2ffd08a8ac"
salariesByCollege_df.info()

# + [markdown] _uuid="ac89cd7ba5d9c6bddc7c6ebcadc512f975ebe696"
# ### **Fill null values with Formula** <a id="13"></a>
# <mark>[Return Contents](#0)
# <hr>
# let's fix some null columns.

# + _uuid="ef55b55f447164a83d3c5632fac47f5bbd3ae679"
# this code checks to see if the cell is empty.
if salariesByCollege_df.iloc[[1],[7]].isnull().any().any():
    print('This column is null')

# + _uuid="07dd19bd11e7fef5ae5c1dd22688179e23399fb7"
# Fill the null cells in mid_p10 (column 4) and mid_p90 (column 7) row by
# row, using the formulas stated above.  Changes from the original cell:
# iterate over the actual row count instead of the hard-coded 269, and drop
# the redundant inner `for column ... if column ==` loop (the two branches
# always both ran once per row; behavior is identical).
columns = ['mid_p10','mid_p90']

for i in range(len(salariesByCollege_df)):
    # mid_p10 estimate: (midcareer + mid_p25) / 3  (columns 3 and 5)
    if salariesByCollege_df.iloc[[i],[4]].isnull().any().any():
        salariesByCollege_df.iloc[[i],[4]] = (salariesByCollege_df.iloc[[i],[3]].values + salariesByCollege_df.iloc[[i],[5]].values) / 3
    # mid_p90 estimate: (mid_p75 - mid_p25) + mid_p75  (columns 6 and 5)
    if salariesByCollege_df.iloc[[i],[7]].isnull().any().any():
        salariesByCollege_df.iloc[[i],[7]] = (salariesByCollege_df.iloc[[i],[6]].values - salariesByCollege_df.iloc[[i],[5]].values) + salariesByCollege_df.iloc[[i],[6]].values

# + [markdown] _uuid="033ccfa03152e6d6c426644ef2a82d8e642d0d4e"
# let's control it.

# + _uuid="f9c60710fd3fffe05d24fc6320aa5478a164cd6b"
salariesByCollege_df.head()
# Empty data, not empty.
# it worked.
# + [markdown] _uuid="24ae4f0fd0f1e565107355aacf48c92569f6686b" # ### Visualization <a id="14"></a> # <mark>[Return Contents](#0) # <hr> # # ### Compare Schools <a id="15"></a> # # Now, let's compare the pay for the schools. # + _uuid="113e2b2a213e341e349bd7a4b98a65fafd80f1f8" # Line Charts # prepare data frame df = salariesByCollege_df.sort_values('starting', ascending=False) df = df.iloc[:55,:] new_df = df.reset_index() # Creating trace1 trace1 = go.Scatter( x = new_df.index.values, y = df.starting, mode = "lines", name = "Starting Median Salary", marker = dict(color = 'rgba(324, 97, 98, 0.8)'), text= df.schoolname) # Creating trace2 trace2 = go.Scatter( x = new_df.index.values, y = df.midcareer, mode = "lines+markers", name = "Mid-Career Median Salary", marker = dict(color = 'rgba(61, 100, 94, 0.8)'), text= df.schoolname) data = [trace1, trace2] layout = dict(title = 'Starting Median Salary and Mid-Career Median Salary', xaxis= dict(title= 'Universities Rank',ticklen= 5,zeroline= False) ) fig = dict(data = data, layout = layout) iplot(fig) # + [markdown] _uuid="5a2df0513f6129bac237968590f67797896e48e0" # ### School Types <a id="16"></a> # <mark>[Return Contents](#0) # <hr> # Let's visualize how many school types there are. 
# + _uuid="2a7a7d37b597a6e1852729e9f82e528394fd1c44"
# Basic Bar Chart: number of schools per school type.
type_counts = salariesByCollege_df.schooltype.value_counts()
data = [go.Bar(x=type_counts.index, y=type_counts.values)]
layout = {"title": "How many school types are there?",
          "xaxis": {"title": "School Type", },
          "yaxis": {"title": "Value"}}
iplot(go.Figure(data=data, layout=layout))

# + [markdown] _uuid="b594a1db83938de835252d775e934648adbc9ea7"
# ### Salary Statistics <a id="17"></a>
# <mark>[Return Contents](#0)
# <hr>

# + _uuid="e3802ccf1b203fa0751849f771f473c22e714e94"
# Colored Box Plot: distribution of each salary column across all schools.
# prepare data frame
df1 = salariesByCollege_df.sort_values('starting', ascending=False)

# One spec per box: (column, legend label, box colour).
box_specs = [
    ('starting', 'Starting Career Median Salary', 'rgb(214, 12, 140)'),
    ('midcareer', 'Mid-Career Median Salary', 'rgb(0, 128, 128)'),
    ('mid_p10', 'Mid-Career 10th Percentile Salary', 'rgb(12, 15, 75)'),
    ('mid_p25', 'Mid-Career 25th Percentile Salary', 'rgb(12, 100, 190)'),
    ('mid_p75', 'Mid-Career 75th Percentile Salary', 'rgb(12, 128, 128)'),
    ('mid_p90', 'Mid-Career 90th Percentile Salary', 'rgb(12, 12, 140)'),
]
data = [
    go.Box(y=df1[column], name=label, marker=dict(color=color))
    for column, label, color in box_specs
]
iplot(data)

# + [markdown] _uuid="4bcee95d3b1a31ba1e07a2ba9a774019f7666cfa"
# ### **Import Third Data** <a id="18"></a>
# <mark>[Return Contents](#0)
# <hr>
# let's import last data set.
# + _uuid="36a2aa8a51804e974d8430d235fb0f31dae293d3"
salary_region_df = pd.read_csv('../input/salaries-by-region.csv')
salary_region_df.shape

# + _uuid="1a69f5e295e58440d1370987ab2d934c7daf9cb6"
salary_region_df.head()

# + [markdown] _uuid="fe1abba98579914db359105c26188585aec6ed9a"
# ### **Change Columns Name** <a id="19"></a>
# <mark>[Return Contents](#0)
# <hr>
# There are empty data as above. But first we have to change the name and data type. Let's the start.

# + _uuid="c0d5024b690e03a2e4274f608739fd97cef436e7"
# Rename columns to the same short names used for the colleges frame.
salary_region_df.columns = ['schoolname','region','starting','midcareer','mid_p10','mid_p25','mid_p75','mid_p90']
salary_region_df.info()

# + [markdown] _uuid="35ed4ff3e6befed09cc96577364e9329219322d8"
# ### **Convert Data Type** <a id="20"></a>
# <mark>[Return Contents](#0)
# <hr>
# let's the change data type.

# + _uuid="9cce0a5e20a241e67b32ae342f4eabc5203833de"
# Strip "$" and thousands separators, then convert to numeric.
# NOTE: regex=False is required -- with regex interpretation "$" is the
# end-of-string anchor, so the dollar sign would never be removed and
# pd.to_numeric would fail.
regionColumns = ['starting', 'midcareer', 'mid_p10', 'mid_p25', 'mid_p75', 'mid_p90']
for column in regionColumns:
    salary_region_df[column] = salary_region_df[column].str.replace("$", "", regex=False)
    salary_region_df[column] = salary_region_df[column].str.replace(",", "", regex=False)
    salary_region_df[column] = pd.to_numeric(salary_region_df[column])

# + _uuid="9d7f78aa494c3ed8e67d6f2fea6dadf02bbf6a0b"
salary_region_df.info()

# + _uuid="9fc6f1dfedbaebc35838f256c60cc0263d5fb8f8"
salary_region_df.head()

# + [markdown] _uuid="e81f0f6500fee15b42676d1e17734fd90504bed8"
# ### **Fill null values with Formula** <a id="21"></a>
# <mark>[Return Contents](#0)
# <hr>
# I will use the same formula we used above here.
# + _uuid="65d943caa1a64389bc7d8bd7f0238478cd5a0324"
# Impute the missing percentile salaries with the same formulas as for the
# colleges frame:
#   mid_p10 <- (midcareer + mid_p25) / 3
#   mid_p90 <- mid_p75 + (mid_p75 - mid_p25)
# BUG FIX: the old loop iterated a hard-coded range(269), but this frame has
# 320 rows (the per-region counts used below sum to 100+79+71+42+28 = 320),
# so the last 51 rows were never imputed.  fillna covers every row.
salary_region_df['mid_p10'] = salary_region_df['mid_p10'].fillna(
    (salary_region_df['midcareer'] + salary_region_df['mid_p25']) / 3)
salary_region_df['mid_p90'] = salary_region_df['mid_p90'].fillna(
    2 * salary_region_df['mid_p75'] - salary_region_df['mid_p25'])

# + _uuid="a2bf0ac5f9f8c5e26552c1c321cc98a4a9590f20"
salary_region_df.head()

# + [markdown] _uuid="1b27faaeecb8db4f5bf3df8d3b20e9054b03e170"
# ### Visualization <a id="22"></a>
# <mark>[Return Contents](#0)
# <hr>
#
# ### Region & School Visualization <a id="23"></a>
#
# Let's the visualization.

# + _uuid="278a37863fdeb23d7252cb59e1fbe762ce0a1e74"
# Bubble Charts: one bubble per region, y = number of schools in that region.
# Bubble sizes are positional (largest count gets the largest bubble); they
# are not proportional to the counts themselves.
trace0 = go.Scatter(
    x=salary_region_df.region.value_counts().index,
    y=salary_region_df.region.value_counts().values,
    mode='markers',
    marker=dict(
        color=['rgb(93, 100, 210)', 'rgb(255, 144, 14)',
               'rgb(44, 160, 101)', 'rgb(255, 65, 54)'],
        opacity=[1, 0.8, 0.6, 0.4],
        size=[100, 80, 60, 40, 20],
    )
)
layout = {"title": "How many region are there and how many schools are there?",
          "xaxis": {"title": "Region", },
          "yaxis": {"title": "Number of Schools"}}
data = [trace0]
fig = go.Figure(data=data, layout=layout)
iplot(fig)

# + [markdown] _uuid="ab96f55ef202af110d26d4987616e690fd78ba16"
# ### **Average starting salary for the states** <a id="24"></a>
# <mark>[Return Contents](#0)
# <hr>
# let's find the average starting salary for the states.
# + _uuid="b061fde61bc8163bed00d5201c85c41cd6911902"
# Average starting salary per region.
# BUG FIX: the old version accumulated sums with a hard-coded range(269) --
# the frame has 320 rows, so 51 rows were silently dropped -- and then
# divided by hard-coded per-region counts (100, 79, 71, 42, 28).  groupby
# derives both the sums and the counts from the data itself, so the result
# stays correct if the dataset changes.
region_order = ['Northeastern', 'Southern', 'Midwestern', 'Western', 'California']
region_means = salary_region_df.groupby('region')['starting'].mean()

# Keep the original output shape: one row per region, integer-truncated mean.
data = {'region': region_order,
        'startingmed': [int(region_means[r]) for r in region_order]}
datas = pd.DataFrame(data, columns=['region', 'startingmed'])

# + _uuid="f5b4e5a03e225f14602b102cab838721e53a8195"
datas

# + [markdown] _uuid="bcd5527fbbcd0b7b7006a1ee46a46dac65028c6c"
# Let's visualize this datas.
# + _uuid="d4d13923c9c8e9d83791158b2b062afe9a3ea474" # Customizing Individual Bar Colors trace0 = go.Bar( x=datas.region.values, y=datas.startingmed.values, marker=dict( color=['rgba(24,158,242,1)', 'rgba(44,167,244,1)', 'rgba(134,185,216,1)', 'rgba(50,104,137,1)', 'rgba(47,72,88,1)']), ) data = [trace0] layout = go.Layout( title='According to the state, average starting salary', xaxis= {"title": "Region", }, yaxis= {"title": "Average Starting Salary"} ) fig = go.Figure(data=data, layout=layout) iplot(fig) # + [markdown] _uuid="bc608fde5a2b017ec1961604847515380a0c2beb" # ### **Compare the pay for the schools** <a id="25"></a> # <mark>[Return Contents](#0) # <hr> # Now, let's compare the pay for the schools. # + _uuid="fe6221ad17355ff8303bedaef4509312f4954831" # Line Charts # prepare data frame df = salary_region_df.sort_values('starting', ascending=False) df = df.iloc[:55,:] new_df = df.reset_index() # Creating trace1 trace1 = go.Scatter( x = new_df.index.values, y = df.starting, mode = "lines", name = "Starting Median Salary", marker = dict(color = 'rgba(16, 112, 2, 0.8)'), text= df.schoolname) # Creating trace2 trace2 = go.Scatter( x = new_df.index.values, y = df.midcareer, mode = "lines+markers", name = "Mid-Career Median Salary", marker = dict(color = 'rgba(80, 26, 80, 0.8)'), text= df.schoolname) data = [trace1, trace2] layout = dict(title = 'Starting Median Salary and Mid-Career Median Salary', xaxis= dict(title= 'Universities Rank',ticklen= 5,zeroline= False) ) fig = dict(data = data, layout = layout) iplot(fig) # + [markdown] _uuid="f68c7cb232b4fdc6f6b41777c7d297515785a6f6" # ## Conclusion <a id="26"></a> # <mark>[Return Contents](#0) # <hr> # * If you like it, thank you for you upvotes. # * If you have any question, I will happy to hear it
EDA of Salaries by college, region, and major.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: da-conda
#     language: python
#     name: auto_da-conda
# ---

# %matplotlib inline
from os.path import join
import pandas as pd
import skbio
import matplotlib.pyplot as plt
from skbio.stats import ordination
import seaborn as sns
from skbio.stats.distance import permanova

# Shared 100-step colour ramp; plots below index into it by position.
sns.palplot(sns.color_palette("YlGnBu", 100))
colors = sns.color_palette("YlGnBu", 100)

def filter_dm_and_map(dm, map_df):
    """Restrict a distance matrix and a sample-metadata frame to their
    shared set of sample ids, returning (filtered_dm, filtered_map)."""
    ids_to_exclude = set(dm.ids) - set(map_df.index.values)
    ids_to_keep = set(dm.ids) - ids_to_exclude
    filtered_dm = dm.filter(ids_to_keep)
    filtered_map = map_df.loc[ids_to_keep]
    return filtered_dm, filtered_map

home = '/home/office-microbe-files'
map_fp = join(home, 'master_map_150908.txt')

# Sample metadata: keep only 16S office samples, and derive a 'Meta' key
# from the first three dot-separated fields of the description.
sample_md = pd.read_csv(map_fp, sep='\t', index_col=0, dtype=str)
sample_md = sample_md[sample_md['16SITS'] == '16S']
sample_md = sample_md[sample_md['OfficeSample'] == 'yes']
sample_md['Meta'] = sample_md.Description.apply(lambda x: '.'.join(x.split('.')[:3]))

# Beta-diversity distance matrices; the weighted matrix is used below.
uw_dm = skbio.DistanceMatrix.read(join(home, 'core_div_97/bdiv_even1000/unweighted_unifrac_dm.txt'))
w_dm = skbio.DistanceMatrix.read(join(home, 'core_div_97/bdiv_even1000/weighted_unifrac_dm.txt'))
dist_mat = w_dm

# Alpha diversity (PD whole tree), averaged over the rarefaction iterations
# at depth 1000 into a single 'MeanAlpha' column.
alpha_div_fp = '/home/office-microbe-files/core_div_out/arare_max1000/alpha_div_collated/PD_whole_tree.txt'
alpha_div = pd.read_csv(alpha_div_fp, sep='\t', index_col=0)
alpha_div = alpha_div.T.drop(['sequences per sample', 'iteration'])
alpha_cols = [e for e in alpha_div.columns if '1000' in e]
alpha_div = alpha_div[alpha_cols]

sample_md = pd.concat([sample_md, alpha_div], axis=1, join='inner')
sample_md['MeanAlpha'] = sample_md[alpha_cols].mean(axis=1)

# #Plot all samples colored by run and season

# ##Filter out all replicates and blanks
#

replicate_ids = '''F2F.2.Ce.021 F2F.2.Ce.022
F2F.3.Ce.021 F2F.3.Ce.022
F2W.2.Ca.021 F2W.2.Ca.022
F2W.2.Ce.021 F2W.2.Ce.022
F3W.2.Ce.021 F3W.2.Ce.022
F1F.3.Ca.021 F1F.3.Ca.022
F1C.3.Ca.021 F1C.3.Ca.022
F1W.2.Ce.021 F1W.2.Ce.022
F1W.3.Dr.021 F1W.3.Dr.022
F1C.3.Dr.021 F1C.3.Dr.022
F2W.3.Dr.059 F3F.2.Ce.078'''.split('\n')

# Keep one copy of each replicated description, drop the rest.
reps = sample_md[sample_md['Description'].isin(replicate_ids)]
reps = reps.drop(reps.drop_duplicates('Description').index).index
filt_md = sample_md.drop(reps, inplace=False)

# Filter distance matrix and map to match each other
filt_dm, filt_md = filter_dm_and_map(dist_mat, filt_md)

# Calculate pcoa
filt_pcoa = ordination.pcoa(filt_dm)

# ###add pcoa to mapping file
filt_md.loc[:, 'PC1'] = filt_pcoa.samples.loc[filt_md.index, 'PC1']
filt_md.loc[:, 'PC2'] = filt_pcoa.samples.loc[filt_md.index, 'PC2']
filt_md.loc[:, 'PC3'] = filt_pcoa.samples.loc[filt_md.index, 'PC3']

# PCoA scatter of all samples, coloured by sequencing run.
with plt.rc_context(dict(sns.axes_style("darkgrid"),
                         **sns.plotting_context("notebook", font_scale=2.5))):
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(12, 10)
    groups = filt_md.groupby('Run')
    # Indices into `colors`, consumed in group order.
    vals = [99, 0, 66, 33]
    c = 0
    x = 'PC1'
    y = 'PC2'
    for name, group in groups:
        ax.scatter(group[x], group[y], label=name, c=colors[vals[c]], s=100)
        c += 1
    ax.legend(['Run 1', 'Run 2', 'Run 3'], loc='best')
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)
    # Axis labels carry the % variance explained by each coordinate.
    ax.set_xlabel('{pc} ({num:.{digits}f}%)'.format(pc=x, num=filt_pcoa.proportion_explained[x]*100, digits=3))
    ax.set_ylabel('{pc} ({num:.{digits}f}%)'.format(pc=y, num=filt_pcoa.proportion_explained[y]*100, digits=3))
    plt.savefig('all_samples_run_weighted.svg', dpi=300)

# Same PCoA, coloured by season ('Period').
with plt.rc_context(dict(sns.axes_style("darkgrid"),
                         **sns.plotting_context("notebook", font_scale=2.5))):
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(12, 10)
    groups = filt_md.groupby('Period')
    vals = [99, 66, 0, 33]
    c = 0
    x = 'PC1'
    y = 'PC2'
    # NOTE(review): `alphas` is defined but never used below -- verify intent.
    alphas = [1, 1, .3, 1]
    for name, group in groups:
        ax.scatter(group[x], group[y], label=name, c=colors[vals[c]], s=100)
        c += 1
    ax.legend(['Summer', 'Fall', 'Winter', 'Spring'], loc='best')
    plt.setp(ax.get_xticklabels(),
visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)
    ax.set_xlabel('{pc} ({num:.{digits}f}%)'.format(pc=x, num=filt_pcoa.proportion_explained[x]*100, digits=3))
    ax.set_ylabel('{pc} ({num:.{digits}f}%)'.format(pc=y, num=filt_pcoa.proportion_explained[y]*100, digits=3))
    plt.savefig('all_samples_season_weighted.svg', dpi=300)

# We need 4 pcoa plots
#
# 1. Plot all samples except for replicates and blanks colored by season
# 2. Plot all samples except for replicates and blanks colored by Run
# 3. Plot samples that were from the same season, but different runs.
# 4. Plot different seasons but on the same runs.

# Run-4 subset: keep only samples whose 'Meta' key appears more than once
# (i.e. the same physical sample sequenced in two seasons).
run_4_md = sample_md[(sample_md['Run'] == '4')].copy()
run_4_md = run_4_md[(run_4_md.Meta.duplicated(keep='first')) | run_4_md.Meta.duplicated(keep='last')]
run_4_dm, run_4_md = filter_dm_and_map(dist_mat, run_4_md)
run_4_pcoa = ordination.pcoa(run_4_dm)

run_4_md.loc[:, 'PC1'] = run_4_pcoa.samples.loc[run_4_md.index, 'PC1']
run_4_md.loc[:, 'PC2'] = run_4_pcoa.samples.loc[run_4_md.index, 'PC2']
run_4_md.loc[:, 'PC3'] = run_4_pcoa.samples.loc[run_4_md.index, 'PC3']

def midpoint(x, y):
    """Midpoint of the segment between (x[0], y[0]) and (x[1], y[1])."""
    return ((x[0] + x[1])/2, (y[0] + y[1])/2)

# PCoA of the run-4 replicates, coloured by season, with two-colour
# connector lines joining the two measurements of each sample.
with plt.rc_context(dict(sns.axes_style("darkgrid"),
                         **sns.plotting_context("notebook", font_scale=2.5))):
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(12, 10)
    groups = run_4_md.groupby('Period')
    vals = [99, 0, 0, 33]
    c = 0
    x = 'PC1'
    y = 'PC2'
    for name, group in groups:
        ax.scatter(group[x], group[y], label=name, c=colors[vals[c]], s=150)
        c += 1
    ax.legend(['Summer', 'Fall'], loc='best')
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)
    ax.set_xlabel('{pc} ({num:.{digits}f}%)'.format(pc=x, num=run_4_pcoa.proportion_explained[x]*100, digits=3))
    ax.set_ylabel('{pc} ({num:.{digits}f}%)'.format(pc=y, num=run_4_pcoa.proportion_explained[y]*100, digits=3))
    #To get rid of lines just comment the next few lines out
    for id_ in run_4_md.Description.apply(lambda x: '.'.join(x.split('.')[:3])).unique():
        x3, y3 = run_4_md[(run_4_md.Meta == id_) & (run_4_md.Period == '1')][[x, y]].values[0]
        x1, y1 = run_4_md[(run_4_md.Meta == id_) & (run_4_md.Period == '2')][[x, y]].values[0]
        x2, y2 = midpoint([x1, x3], [y1, y3])
        # Each half of the connector takes the colour of its endpoint's group.
        ax.plot([x1, x2], [y1, y2], c=colors[0], linewidth=3.0)
        ax.plot([x2, x3], [y2, y3], c=colors[99], linewidth=3.0)
    plt.savefig('reps_samples_season_weighted.svg', dpi=300)

# Period-1 subset: replicate samples re-sequenced on multiple runs; restrict
# to Meta keys that also appear on run 3 so every sample has a run-3 partner.
per_1_md = sample_md[(sample_md['ProjectID'].isin(replicate_ids)) & (sample_md['Period'] == '1')].copy()
per_1_md = per_1_md[per_1_md.Meta.isin(per_1_md[per_1_md.Run =='3']['Meta'])]
per_1_dm, per_1_md = filter_dm_and_map(dist_mat, per_1_md)
per_1_pcoa = ordination.pcoa(per_1_dm)

per_1_md.loc[:, 'PC1'] = per_1_pcoa.samples.loc[per_1_md.index, 'PC1']
per_1_md.loc[:, 'PC2'] = per_1_pcoa.samples.loc[per_1_md.index, 'PC2']
per_1_md.loc[:, 'PC3'] = per_1_pcoa.samples.loc[per_1_md.index, 'PC3']

# +
# per_1_md = per_1_md[per_1_md['PlateLocation'] !='floor']

# +
# PCoA of the period-1 replicates, coloured by run, with connector lines
# between the run-1/run-3, run-1/run-4 and run-3/run-4 pairs of each sample.
with plt.rc_context(dict(sns.axes_style("darkgrid"),
                         **sns.plotting_context("notebook", font_scale=2.5))):
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(12, 10)
    groups = per_1_md.groupby('Run')
    vals = [99, 0, 50, 33]
    c = 0
    x = 'PC1'
    y = 'PC2'
    for name, group in groups:
        ax.scatter(group[x], group[y], label=name, c=colors[vals[c]], s=200)
        c += 1
    ax.legend(['Run 1', 'Run 3', 'Run 4'], loc='best')
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)
    ax.set_xlabel('{pc} ({num:.{digits}f}%)'.format(pc=x, num=per_1_pcoa.proportion_explained[x]*100, digits=3))
    ax.set_ylabel('{pc} ({num:.{digits}f}%)'.format(pc=y, num=per_1_pcoa.proportion_explained[y]*100, digits=3))
    for id_ in per_1_md.Description.apply(lambda x: '.'.join(x.split('.')[:3])).unique():
        x3, y3 = per_1_md[(per_1_md.Meta == id_) & (per_1_md.Run == '1')][[x, y]].values[0]
        x1, y1 = per_1_md[(per_1_md.Meta == id_) & (per_1_md.Run == '3')][[x, y]].values[0]
        x2, y2 = midpoint([x1, x3], [y1, y3])
        ax.plot([x1, x2], [y1, y2], c=colors[0],
linewidth=3.0) ax.plot([x2, x3], [y2, y3], c=colors[99], linewidth=3.0) for id_ in per_1_md.Description.apply(lambda x: '.'.join(x.split('.')[:3])).unique(): x3, y3 = per_1_md[(per_1_md.Meta == id_) & (per_1_md.Run == '1')][[x, y]].values[0] x1, y1 = per_1_md[(per_1_md.Meta == id_) & (per_1_md.Run == '4')][[x, y]].values[0] x2, y2 = midpoint([x1, x3], [y1, y3]) ax.plot([x1, x2], [y1, y2], c=colors[50], linewidth=3.0) ax.plot([x2, x3], [y2, y3], c=colors[99], linewidth=3.0) for id_ in per_1_md.Description.apply(lambda x: '.'.join(x.split('.')[:3])).unique(): x3, y3 = per_1_md[(per_1_md.Meta == id_) & (per_1_md.Run == '3')][[x, y]].values[0] x1, y1 = per_1_md[(per_1_md.Meta == id_) & (per_1_md.Run == '4')][[x, y]].values[0] x2, y2 = midpoint([x1, x3], [y1, y3]) ax.plot([x1, x2], [y1, y2], c=colors[50], linewidth=3.0) ax.plot([x2, x3], [y2, y3], c=colors[0], linewidth=3.0) plt.savefig('reps_samples_run_weighted.svg', dpi=300) # - # #Alpha diversity plots # + with plt.rc_context(dict(sns.axes_style("darkgrid"), **sns.plotting_context("notebook", font_scale=2.5))): plt.figure(figsize=(12, 8)) ax = sns.violinplot(x="Period", y="MeanAlpha", data=filt_md.sort("Period"), palette= [colors[e] for e in [99, 66, 0, 33]]) ax.set_xticklabels(['Summer', 'Fall', 'Winter', 'Spring']) ax.set_xlabel('Season') ax.set_ylabel('Phylogentic Diversity') plt.savefig('all_samples_season_alpha.svg', dpi=300) # - with plt.rc_context(dict(sns.axes_style("darkgrid"), **sns.plotting_context("notebook", font_scale=2.5))): plt.figure(figsize=(12, 8)) ax = sns.violinplot(x="Run", y="MeanAlpha", data=filt_md.sort("Run"), palette= [colors[e] for e in [99, 0, 66]]) ax.set_xticklabels(['Run 1', 'Run 2', 'Run 3']) ax.set_ylabel('Phylogentic Diversity') plt.savefig('all_samples_run_alpha.svg', dpi=300) with plt.rc_context(dict(sns.axes_style("darkgrid"), **sns.plotting_context("notebook", font_scale=2.5))): plt.figure(figsize=(12, 8)) ax = sns.violinplot(x="Period", y="MeanAlpha", 
data=run_4_md.sort("Period"), palette= [colors[e] for e in [99, 0]]) ax.set_xticklabels(['Summer', 'Fall']) ax.set_ylabel('Phylogenetic Diversity') ax.set_xlabel('Season') plt.savefig('reps_samples_season_alpha.svg', dpi=300) with plt.rc_context(dict(sns.axes_style("darkgrid"), **sns.plotting_context("notebook", font_scale=2.5))): plt.figure(figsize=(12, 8)) ax = sns.violinplot(x="Run", y="MeanAlpha", data=per_1_md.sort_values("Run"), palette= [colors[e] for e in [99, 0, 50]]) ax.set_xticklabels(['Run 1', 'Run 3', 'Run 4']) ax.set_ylabel('Phylogenetic Diversity') plt.savefig('reps_samples_run_alpha.svg', dpi=300) # #Permanova on replicates uw_dm = skbio.DistanceMatrix.read(join(home, 'core_div_97/bdiv_even1000/unweighted_unifrac_dm.txt')) w_dm = skbio.DistanceMatrix.read(join(home, 'core_div_97/bdiv_even1000/weighted_unifrac_dm.txt')) run_md = per_1_md[per_1_md['Run'] != '4'] dm, run = filter_dm_and_map(uw_dm, run_md) permanova(dm, run, column='Run', permutations=9999) run_md = per_1_md[per_1_md['Run'] != '3'] dm, run = filter_dm_and_map(uw_dm, run_md) permanova(dm, run, column='Run', permutations=9999) run_md = per_1_md[per_1_md['Run'] != '1'] dm, run = filter_dm_and_map(uw_dm, run_md) permanova(dm, run, column='Run', permutations=9999)
Final/Figure-5/figure-5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Xarxa Neuronal Bàsica amb MNIST # # Aquest document ha estat creat per l'[<NAME>](https://github.com/EricAlcaide). Crèdits a [Hvass-Labs](https://github.com/Hvass-Labs) pel codi en versió original i la implementació amb Tensorflow que ha estat modificat i traduït per als propòsits d'aquest treball. # ## Introducció # # Aquest arxiu ha estat creat amb l'objectiu de demostrar que els ordinadors poden aprendre. En aquest cas, entrenarem una xarxa neuronal bàsica per tal de reconèixer caràcters escrits a mà. # # Primerament, carregarem el set de dades MNIST, un conjunt de 70.000 imatges de números escrits a mà amb la seva corresponent classificació. Aquestes 70.000 imatges es divideixen en: 55.000 imatges per entrenament, 5.000 imatges per a la validació i 10.000 més per a l'avaluament. # Posteriorment crearem el **model** (part del programa que serà l'encarregada "d'aprendre") amb Numpy, l'entrenarem per tal d'optimitzar la seva eficàcia i l'avaluarem. Finalment comentarem els resultats obtinguts i extreurem conclusions. # # Les llibreries/dependències que usarem seran les següents: # * **Numpy:** per tal de fer càlculs matemàtics # * **Matplotlib:** per tal d'imprimir els gràfics i visualitzar les dades # # ## Definició del problema # # Es tracta clarament d'un **problema de classificació** ja que a partir d'unes dades d'entrenament hem de ser capaços de classificar correctament unes dades que desconeixem. El nostre set d'imatges conté números que van del 0 al 9, per tant, són 10 números diferents. Això ens obliga a crear 10 classes diferents, una per a cada número. 
# # Com el nostre propòsit és crear un model el més basic possible, crearem una xarxa neuronal amb 10 neurones, amb l'esperançca que cadascuna s'activi al reconèixer un dígit diferent diferent. Per tant, podem dir que la xarxa neuronal que anem a crear és de capa única. Per a aplicacions reals existeixen xarxes neuronals molt més sofisticades que aconsegueixen molt millors resultats com ara xarxes multicapa, recurrents, convolucionals, etc. import numpy as np import matplotlib.pyplot as plt # For reproducibility np.random.seed(7) from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) print("Training:", len(mnist.train.labels), "Eval:", len(mnist.validation.labels), "Test:", len(mnist.test.labels)) # ## Codificació "One-Hot" # # El set de dades MNIST ha estat carregat amb codificació "One-Hot", com podem veure aquí: ```one_hot=True```. Això significa que les etiquetes dels nombres han estat convertides d'un únic dígit (ex. 7), a un vector de 10 dimensions en el qual una d'elles és un 1 i les altres 0. Aquest tipus de codificació representa un avantatge per al càlcul de la funció de cost del nostre model. print(mnist.test.labels[0:1, :]) # One-hot encoding example: 7 mnist.test.labels_mod = np.array([onehot.argmax() for onehot in mnist.test.labels]) size_img = 28 # MNIST images are 28*28 px img_flatten_size = size_img**2 # Flatten images into 28*28-dimensional vector shape_img = (size_img, size_img) num_classes = 10 # Number of different classes def display_imgs(imgs, pred_class = None, true_class = None): # Create a 3*3 figure to display images. fig, axes = plt.subplots(1, 4) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Print image. ax.imshow(imgs[i].reshape(shape_img), cmap='binary') # Display labels if pred_class is not None: xlabel = "Pred class: {0}, \n True: {1}".format(pred_class[i], true_class[i]) ax.set_xlabel(xlabel) # Remove x and y axis. 
ax.set_xticks([]) ax.set_yticks([]) plt.show() # Some MNIST examples display_imgs(mnist.test.images[0:4]) # ## Variables del model # # El nostre model serà senzill: una multiplicació de matrius i una suma que després passarem per la funció softmax. Aquesta funció retornarà els valors en una matriu de les mateixes dimensions de manera que tots els valors sumin 1. # # La funció softmax es defineix com a: # # <img src="softmax.png" style="width:200px;"> # # La forma del nostre model tindrà la següent estructura **softmax(w·x.T + b)**. Bàsicament es tracta d'un classificador linear (per tant amb la forma d'equació de la recta y = mx + n) on **w** serà la matriu dels pendents i **b** serà la matriu de desviacions de les rectes. El resultat que retornarà el nostre model serà un vector de valors on cada valor serà la probabilitat de pertànyer a aquella classe. # Initialize weights to small random values (noise) and biases to 0 weights = np.random.randn(num_classes, img_flatten_size)*0.02 biases = np.zeros((num_classes, 1)) def softmax(x): """ Compute softmax values for each sets of scores in x. All values add up to 1. Added - np.max(x) for numerical stability. """ e_x = np.exp(x) - np.max(x) return e_x / e_x.sum() # ## Funció de Loss i funció de Cost # # Aquestes dues funcions ens serviran per guiar l'optimització del nostre model. És un fet comú que mesurem com de malament ho està fent el nostre model per tal de minimitzar aquest valor i millorar el model. # # La funció de cost serà l'anomenada **cross-entropy**, que és la mitjana de les diferents funcions de loss entre el número d'exemples i la funció de loss serà l'error en la classificació per a un exemple. # # La intuïció darrera de la funció de loss és que tractem de fer el màxim semblant possible el resultat del nostre model a la codificació `one_hot`. Per tant, intentarem que quan y<sub>i</sub> és 1, y<sub>pred<sub>i</sub></sub> sigui el més gran possible. 
# Com tots els valors de la funció softmax sumaran 1, el fet que intentem augmentar y<sub>pred<sub>i</sub></sub> significa que estem disminuïnt els altres valors (fent-los propers a 0). El signe negatiu es deu a que volem una funció convexa per tal que ens sigui més fàcil l'optimització. Per tant, si la predicció és semblant al resultat esperat, las loss serà petita, i si és diferent, serà gran. El nostre objectiu és trobar els valors dels pesos que minimitzin la funció de loss i cost i, per tant, generar aprenentatge.
#
# Les respectives equacions són:
#
# Cost | Loss
# :-------------------------:|:-------------------------:
# ![cost](cost.png) | ![loss](loss.png)

# +
def loss(y, y_hat):
    """Negative log-likelihood terms: -sum(y * log(y_hat)) along axis 0,
    for one-hot targets y and predicted probabilities y_hat."""
    targets = np.asarray(y)
    predictions = np.asarray(y_hat)
    return -np.sum(targets * np.log(predictions), axis=0)

def cost(Y, Y_HAT):
    """Cross-entropy cost: total loss over the batch divided by the
    number of examples."""
    total = np.sum(loss(Y, Y_HAT), axis=0)
    return total / len(Y)
# -

# ### Mètode d'optimització
#
# L'optimitzador que usarem serà el Gradient Descent, que serà explicat posteriorment amb la Regressió lineal. Bàsicament es tracta de prendre petits passos en la direcció que marquen els gradients (derivades parcials per a cada valor que volem optimitzar). Declararem el coeficient d'aprenentatge (alpha) de manera arbitrària a un valor petit entre 0 i 1.
# +
# Forward propagation of values
def forward_prop(weights, biases, X):
    """Linear classifier forward pass: scores z = w·X.T + b, converted to
    class probabilities with softmax.  Returns one row per example."""
    z = np.dot(weights, X.T)+biases
    return softmax(z.T)

# Backward propagation of errors
def backprop(w, b, x, y, y_pred, batch_size, alpha):
    """One gradient-descent step for the softmax classifier; returns the
    updated (weights, biases).  NOTE: updates w and b in place as well,
    since -= mutates the arrays passed in."""
    # Calculate cost
    cost_ = cost(y, y_pred)
    # Backpropagation - Calculate derivatives
    # dz is the softmax/cross-entropy gradient w.r.t. the scores.
    dz = y_pred - y
    dw = (1/batch_size) * np.dot(x.T,dz)
    db = (1/batch_size) * np.sum(dz, axis=0, keepdims=True)
    # Update weights and biases - Gradient descent
    w -= alpha*dw.T
    b -= alpha*db.T
    return w,b
# -

def manager(alpha, batches, batch_size, weights, biases):
    """Train for `batches` mini-batches of size `batch_size` with learning
    rate `alpha`; returns the updated (weights, biases)."""
    for i in range(batches):
        # Get the training batch
        x,y = mnist.train.next_batch(batch_size)
        # Forward pass - make predictions
        y_pred = forward_prop(weights, biases, x)
        # Backprop - update weights and biases
        weights, biases = backprop(weights, biases, x, y, y_pred, batch_size, alpha)
    return weights, biases

def eval_model(w,b,x,y):
    """Classification accuracy of the model (w, b) on images x with one-hot
    labels y, formatted as a percentage string."""
    pred = np.argmax(forward_prop(w,b,x), axis=1)
    true_vals = np.array([onehot.argmax() for onehot in y])
    # Compare predicted and true values
    acc = np.sum(np.array([1 if pred[i]==true_vals[i] else 0 for i in range(len(pred))]), axis=0)/len(x)
    return "Accuracy: {0}%".format(acc*100)

# Initial test with all training images
val_size = len(mnist.test.labels)
x,y = mnist.test.next_batch(val_size)
eval_model(weights, biases, x, y)

def display_w(w):
    """Render the 10 per-class weight vectors as 28x28 images on a shared
    diverging colour scale."""
    # Take min and max values to create a colormap
    p_min, p_max = np.min(w), np.max(w)
    # Create a figure
    fig, axes = plt.subplots(3,4)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for i, ax in enumerate(axes.flat):
        # Only 10 of the 12 grid cells hold a weight image.
        if i<10:
            # Take weights and reshape them to display
            image = w[i, :].reshape(size_img, size_img)
            # Declare the label
            ax.set_xlabel("Weights: {0}".format(i))
            # Show image. Blue points are positive, red ones are negative
            ax.imshow(image, vmin=p_min, vmax=p_max, cmap='seismic_r')
        # Remove x and y axis
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()

display_w(weights)

# Display the evolution: train in progressively longer bursts and re-plot
# the weights after each one.
for i in range(10):
    weights, biases = manager(0.05, int((i+5)/2), 128, weights, biases)
    display_w(weights)

# Evaluate again after training, on all test images.
val_size = len(mnist.test.labels)
x,y = mnist.test.next_batch(val_size)
eval_model(weights, biases, x, y)

# Some examples to visualize predictions
display_imgs(mnist.test.images[0:4],
             np.argmax(forward_prop(weights, biases, mnist.test.images[0:4]), axis=1),
             np.argmax(mnist.test.labels[0:4], axis=1))

# ## Conclusions
#
# Com podem veure a dalt, el nostre simple model ha estat capaç d'aprendre a reconèixer els diferents dígits escrits a mà amb una precisió del 68%. El fet és que 68% d'eficàcia a MNIST no és gaire bo, però el propòsit d'aquest "experiment" no era aconseguir una precisió envejable, sinó demostrar que un ordinador pot aprendre, en aquest cas, a reconèixer dígits escrits a mà.
#
# Si mirem l'evolució dels diferents pesos per a cada píxel de cada número, podem veure clarament com aquests van prenent la forma del número indicat en cada cas. Malgrat això, no aconseguim una precisió gaire alta, i això es deu principalment a que no hem entrenat gaire el nostre model. Donada la seva estructura i les característiques del set de dades MNIST, un classificador lineal unicapa no pot superar el 94% d'eficàcia sense tècniques de Data Augmentation.
#
# Si seguim entrenant el nostre model (com podeu veure <a href="https://github.com/EricAlcaide/tr_files/blob/master/TensorFlow/xarxa_neuronal_cat.ipynb">aquí</a>) la xarxa neuronal acaba arribant a un 92%, però la representació interna que utilitza per tal de classificar els diferents dígits no s'assembla tant a allò que esperaríem.
# # La conclusió més important que podem extreure d'aquest "experiment" és que els ordinadors NO aprenen com nosaltres, ja que no són persones. El seu aprenentatge es pot semblar al nostre, però mai serà igual. No obstant, són capaços d'aprendre, i això ja és molt.
NN/neural_network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Robot Juggling # # # In this assignment you will learn how to control a robot to juggle a ball. To achieve this goal, you will program a velocity controlled robot, such that it causes the ball to bounce with some desired periodic motion. Specifically, you will implement a hybrid controller that uses a *mirror control law* within the framework of Box 2D, a 2D physics simulator. # # This assignment will be broken into the following components: # # 1. Robot Juggling Demo # # a. Box 2D Simulator # # 2. PD-Control of a robot # # a. Reference and Error signals # # b. Implementing P-controller # # c. Implementing a PD-controller # # 3. Robot Juggling # # a. Ball Dynamics (projectile) # # b. Ball dynamics with collision # # c. Mirror control law # # # Robot Juggling Demo # # The following code will run the instructor's solution of the juggling demo. Try not to look at the instructor's code until after completing the assignment! # # To run the code, first hit the cell below. Then hit the play button from the toolbar. # <img src="media/play.png" width="70%"/> # from tutorial import * play_full_solution() # If a window pops up like this picture then you have successfully run the demo. # # <img width="50%" src="media/robo_juggle.gif"/> # # So what exactly are you looking at? As mentioned previously we are simulating the physics using Box2D. # However, the window that popped up is merely a visulazation of the simulator that uses pygame. Pygame is a software package used to create games in python. It allows the use of keyboard and mouse input. For example, if you hit 'q' or 'esc' the window should close. *Note: in OS X the window may not close, however the simulation will stop.* # # # The visulazation is showing an instance of robot juggling. 
# * The **robot** is a dark rectangular paddle. # * The **ball** is a magenta circle bouncing up and down # * The yellow and red horizontal bars show the desired heights. # * The upper yellow bar shows the desired peak height of the ball # * The lower red bar shows the desired impact location for the ball and the paddle. # * The ground is the green rectangle. It is a static object. Ideally your ball will never touch the ground! # # ### Reference frames # # All of the objects has a local reference frame located in the center of its body. # # Our fixed frame is an interial reference frame with $\hat{x}$ increasing to the right and $\hat{y}$ increasing upwards. <!--The origin is located in the bottom left corner.--> # # # # ### Simulation # # All of the objects listed above are *rigid bodies* that (approximately) obey the laws of physics $\vec{F} = m \vec{a}$. By definition, rigid bodies are not able to penetrate one another. # # For every discrete time step, Box2D will attempt to tell us the state (position, velocity, and acceleration) of each body in our domain. During the time step, Box2D will integrate the state of the objects (based on the forces applied). Then, if the state of the objects are in conflict (e.g: overlapping geometry) it will try to resolve the penetration error. The following article goes into more depth about the simulation. # # For our purpose, it's important to know that the physics simulator will approximate the state of the world and can have errors in numerical accuracy. # # # -------------------------------------------------------------- #
code/robot_juggle/Introduction to Box 2D.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Practice on Numpy and Pandas
#
# Here are some task for you, to refresh what you saw in the notebooks before.
#
# It will help you strengthen the **learning objectives** from the notebook before. You should be able to
# - create numpy arrays
# - manipulate them with basic mathematics operators
# - extract rows, columns and items by indexing
# - use aggregation methods (like sum, min, max, std) on numpy arrays
#
#
# **Part 1**: Creating numpy arrays.
#
# - Please create a numpy array from the list:
# ```Python
# my_list = [1, 2, 5, 6, 8]
# ````
#
# - Please create a numpy array containing the values from 1 to 10.
# - Please create a numpy array from 0 to 5 cwith a stepsize of 0.2
# - Create a numpy array with the shape 2,3 with random values

# 1 -- integers 1 through 10 (arange excludes the stop value, hence 11)
import numpy as np

my_list_1 = np.arange(1, 11)
my_list_1

# 2 -- values from 0 up to (but excluding) 5, spaced 0.2 apart
my_list_2 = np.arange(0, 5, 0.2)
my_list_2

# 3 -- a 2x3 array of uniform random values in [0, 1)
my_list_3 = np.random.rand(2, 3)
my_list_3

# **Part 2**: Manipulate numpy arrays.
#
# 1. Please create a numpy array with (1, 3, '4', 7, 12, '0'). Define the content as integer.
# 2. Check the data type of the objects and the shape of the array
# 3. Update the 4th value to 30.
# 4. Reshape the array to a 2x3 matrix.
# 5. Please add 8 to the first row and 12 to the second row.
# 6. Mulitply the first column with 2, the second with 3 and the third with 4.
# 7. Please summ up all numbers in the first row, and all numbers in the second row.
# 8. Similarly, search for the largest number for each column.
# 9. Extract the number in the second column and the first row.
# 10. Check at which index the value is exactly 48.
#1 my_list_4 = np.array([1, 3, '4', 7, 12, '0'], np.int32) my_list_4 #2 print(type(my_list_4)) print(my_list_4.shape) #3 my_list_4 = np.where(my_list_4 == 4, 30, my_list_4) my_list_4 #4 my_list_4 = np.array(my_list_4).reshape(2, 3) print(my_list_4) #5 my_list_4 = my_list_4 + [[8], [12]] my_list_4 #6 my_list_4 = my_list_4 * [2, 3, 4] my_list_4 #7 my_list_4 .sum(axis=1) #8 my_list_4.max(axis=0) #9 my_list_4[:1, 1] #10 print(np.where(my_list_4 == 48))
08_numpy_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import urllib import urllib.request ## for Python 3 users import pandas as pd # ### read in an online google doc csv # tb_existing_url_csv = 'https://docs.google.com/spreadsheets/d/1X5Jp7Q8pTs3KLJ5JBWKhncVACGsg5v4xu6badNs4C7I/pub?gid=0&output=csv' local_tb_existing_file = 'tb_existing_100.csv' existing_f = urllib.request.urlretrieve(tb_existing_url_csv, local_tb_existing_file) ##Python 2 users should specify urllib.urlretrieve # ### read in csv into data frame # + existing_df = pd.read_csv( local_tb_existing_file, index_col = 0, thousands = ',') existing_df.index.names = ['country'] existing_df.columns.names = ['year'] ##specified index_col to be 0 since we want the country names to be the ##row labels ## seperator is a comma # - existing_df.head() from sklearn.decomposition import KernelPCA
section2/online_csv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Analyse des tâches # Le but de ce chapitre est d'approfondir [l'analyse des tâches](https://docs.computecanada.ca/wiki/Running_jobs/fr#Suivi_des_t.C3.A2ches) en attente, en cours et les tâches terminées. # # Remplissons la liste de tâches gérées par l'ordonnanceur : # ``` # # cat scripts/sleep-300s.sh # sbatch scripts/sleep-300s.sh # ``` # ### Tâches en attente # Pour afficher les tâches en attente, on utilise la commande : # * [`squeue -t pending`](https://slurm.schedmd.com/squeue.html) pour toutes les tâches en attente # * `squeue -t pending -u $USER` pour seulement **vos** tâches en attente # * ou bien la commande suivante pour toutes les tâches triées par priorité descendante : # ``` # squeue -t pending --sort=-p --format="%.16i %.9p %.8u %.16a %.18P %.10l %.4C %.7m" # ``` # * `%i` : identifiant numérique de la tâche # * `%p` : priorité de la tâche (entre 0.0 et 1.0) # * `%u` : nom d'utilisateur # * `%a` : compte de calcul utilisé # * `%P` : [partition de la grappe](https://docs.computecanada.ca/wiki/Job_scheduling_policies/fr#Pourcentages_des_n.C5.93uds_disponibles) # * `%l` : temps demandé # * `%C` : nombre de processeurs demandé # * `%m` : mémoire demandée # # Dans tous les cas, l'utilisation de `squeue` crée une certaine charge sur l'ordonnanceur (à cause du grand nombre de tâches à gérer), alors il vaut mieux **se limiter à un appel par minute** sur les grappes nationales # * Si jamais il vous venait l'idée d'utiliser la commande `watch`, **ne le faites pas!** # Une chose à savoir : vos tâches sont généralement en compétition avec des [tâches de la même partition](https://docs.computecanada.ca/wiki/Job_scheduling_policies/fr#Pourcentages_des_n.C5.93uds_disponibles). 
# Ainsi, **une tâche CPU n'a aucun impact sur le temps d'attente d'une tâche GPU**. # Vous pouvez utiliser la commande suivante pour obtenir le détail de votre tâche, incluant la partition à laquelle elle est assignée : # ``` # scontrol show job -dd <jobid> # ``` # Ce qu'il faut aussi comprendre de la [politique d'ordonnancement](https://docs.computecanada.ca/wiki/Job_scheduling_policies/fr) : # * **La priorité de votre tâche dépend majoritairement de l'utilisation récente** de votre groupe de recherche # * Pour le reste, la priorité dépend aussi du temps d'attente, mais dans une très faible proportion # * L'information concernant **l'utilisation du groupe** est donnée par la commande [`sshare`](https://docs.computecanada.ca/wiki/Job_scheduling_policies/fr#Priorisation_selon_la_juste_part) : # ``` # sshare -l -A def-prof1_cpu -u prof1,grad2,postdoc3 # sshare -l # Tous les comptes # ``` # * Plus le `LevelFS` du groupe est élevé, plus la priorité est élevée # * L'utilisation est considérée normale si `LevelFS` est autour de 1.0 # * Si `LevelFS` est proche de 0.0, la priorité du groupe sera basse # * **L'utilisation récente perd de son importance avec le temps qui passe.** # Ainsi, son importance est progressivement réduite avec un ratio réduisant l'impact de 50% à chaque 7 jours # * Par conséquent, le `LevelFS` et la priorité augmentent à nouveau lorsque le groupe reste en attente # ### Tâches actives # Pour afficher les tâches actives, on utilise la commande : # * [`squeue -t running`](https://slurm.schedmd.com/squeue.html) pour toutes les tâches actives # * `squeue -t running -u $USER` pour seulement **vos** tâches actives # * ou bien la commande suivante pour toutes les tâches actives triées selon le temps qui leur reste : # ``` # squeue -t running --sort=+M --format="%.14i %.18P %.10M %.4C %.7m %R" # ``` # * `%M` : temps de calcul écoulé # * `%R` : liste des noeuds alloués # Pendant que vos tâches sont actives, vous pouvez vous connecter par SSH aux noeuds de 
calcul correspondants afin de valider que l'exécution se passe bien : # # Tâche CPU : # ``` # sbatch --cpus-per-task=2 scripts/nbody-openmp.sh # ``` # Tâche GPU : # ``` # sbatch scripts/cuda-matmul.sh # ``` # # * Identifier le ou les noeud(s) avec : `squeue -u $USER` # * Connexion avec : `ssh NOEUD` # * Inspection avec `top`, `htop`, `nvtop` et/ou `nvidia-smi` : # * Est-ce que vos processus s'exécutent avec un pourcentage de 100%? # * Est-ce que vos processus parallèles s'exécutent avec un pourcentage de `n` * 100%, où `n` est le nombre de processeurs par tâche Slurm? # * Est-ce que les GPUs sont utilisés à leur plein potentiel? # * Note : il est préférable de regarder la consommation en Watts plutôt que le pourcentage affiché # * Est-ce que le noeud de calcul semble pleinement utilisé? # # Toutes ces informations devraient vous permettre d'ajuster votre script de soumission et d'optimiser l'exécution parallèle de votre programme. # ### Tâches terminées # Lorsqu'une [tâche est complétée](https://docs.computecanada.ca/wiki/Running_jobs/fr#T.C3.A2ches_termin.C3.A9es), la commande la plus simple pour obtenir des informations sur cette tâche est la commande : # ``` # seff <jobid> # ``` # Autrement, c'est aussi possible d'utiliser la commande Slurm `sacct` : # ``` # sacct -j <jobid> --format=JobID,JobName,MaxRSS,Elapsed # ``` # Encore une fois, ces informations permettent d'ajuster vos scripts et de mieux déterminer vos besoins en ressources. # Ultimement, l'utilisation totale du groupe est disponible sur CCDB : # * https://ccdb.computecanada.ca/me/group_usage # # En tenant compte de l'utilisation passée, il devient possible d'estimer ses besoins pour le [Concours d'allocation de ressources](https://www.computecanada.ca/page-daccueil-du-portail-de-recherche/acces-aux-ressources/concours-dallocation-des-ressources/?lang=fr). 
# #### Exercice - Analyse d'exécution d'un pipeline # * Lancer à nouveau le pipeline avec la commande : # ``` # bash scripts/nbody-pipeline.sh # ``` # * Au besoin, relancer le pipeline si vos tâches se sont exécutées trop rapidement # * Surveiller vos tâches : # ``` # squeue -t pending -u $USER # squeue -t running -u $USER # ``` # * Surveiller toutes les tâches : # ``` # squeue -t pending # squeue -t running # ``` # * Trier les tâches selon un critère : # ``` # squeue -t pending --sort=-p --format="%.16i %.9p %.8u %.16a %.18P %.10l %.4C %.7m" # squeue -t running --sort=+M --format="%.14i %.18P %.10M %.4C %.7m %R" # ``` # * Trouver vos tâches récentes avec : # ``` # sacct # ``` # * Analyser quelques tâches complétées avec : # ``` # seff <jobid> # sacct -j <jobid> --format=JobID,JobName,MaxRSS,Elapsed # ```
analyse-taches.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## SQL - part 3 # + from sqlite3 import connect ''' Establish a connection to the database. This statement creates the file iat the given path if it does not exist. The file was provided so the statement should just establish the connection. ''' connection = connect('../datasets/org.Hs.eg.sqlite') cursor = connection.cursor() # - # #### Major SQL commands: SELECT, INSERT, DELETE, UPDATE # #### SELECT - Retrieves data from one or more tables and doesn’t change the data at all # # * SELECT * (means all columns), or the comma separated names of the columns of data you wish to return # * Returns columns (left to right) in the order received. # * '*' selects ALL rows and ALL columns and returns them by column order and row_id # * FROM is the table source or sources (comma separated) # * WHERE (optional) is the predicate clause: conditions for the query # * Evaluates to True or False for each row # * This clause almost always includes Column-Value pairs. # * Omitting the Where clause returns ALL the records in that table. # * Note: the match is case sensitive # * ORDER BY (optional) indicates a sort order for the output data # * default is row_id, which can be very non-intuitive # * ASCending or DESCending can be appended to change the sort order. (ASC is default) # * GROUP BY (optional) groups by a column and creates summary data for a different column # * HAVING (optional) allows restrictions on the rows selected # * a GROUP BY clause is required before HAVING # * LIMIT (optional) reduces the number of rows retrieved to the number provided after this clause # * In most SQL clients, the ";" indicates the end of a statement and requests execution # # + def get_header(cursor): ''' Makes a tab delimited header row from the cursor description. 
Arguments: cursor: a cursor after a select query Returns: string: A string consisting of the column names separated by tabs, no new line ''' return '\t'.join([row[0] for row in cursor.description]) def get_results(cursor): ''' Makes a tab delimited table from the cursor results. Arguments: cursor: a cursor after a select query Returns: string: A string consisting of the column names separated by tabs, no new line ''' res = list() for row in cursor.fetchall(): res.append('\t'.join(list(map(str,row)))) return "\n".join(res) # + # In every SQLite database, there is a special table: sqlite_master # sqlite_master - describes the contents of the database sql = ''' SELECT type, name FROM sqlite_master WHERE type = "table"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # - # #### A `PRIMARY KEY` is a very important concept to understand. # * It is the designation for a column or a set of columns from a table. # * It is recommended to be a serial value and not something related to the business needs of the data in the table. # # * A primary key is used to uniquely identify a row of data; combined with a column name, uniquely locates a data entry # * A primary key by definition must be `UNIQUE` and `NOT NULL` # * The primary key of a table, should be a (sequential) non-repeating and not null value # * Primary keys are generally identified at time of table creation # * A common method for generating a primary key, is to set the datatype to `INTEGER` and declare `AUTOINCREMENT` which will function when data is inserted into the table # * Primary keys can be a composite of 2 or more columns that uniquely identify the data in the table # # # #### A `FOREIGN KEY` is a column(s) that points to the `PRIMARY KEY` of another table # # * The purpose of the foreign key is to ensure referential integrity of the data. 
# In other words, only values that are supposed to appear in the database are permitted.<br> # Only the values that exist in the `PRIMARY KEY` column are allowed to be present in the FOREIGN KEY column. # # They are also the underpinning of how tables are joined and relationships portrayed in the database # # #### JOIN tables # # * Multiple tables contain different data that we want to retrieve from a single query # * In order to assemble data as part of a query, a JOIN between tables is needed # * This is a very common practice, since it’s rare for all the data you want to be in a single table # # # * INNER JOIN - return only those rows where there is matching content in BOTH tables (is the default when JOIN is used) # * OUTER JOIN - returns all rows from both tables even if one of the tables is blank # * SELF JOIN - can be used to join a table to itself (through aliasing), to compare data internal to the table # # ```sql # SELECT ... FROM table1 [INNER] JOIN table2 ON conditional_expression # ``` # sql = ''' SELECT * FROM gene_info AS gi INNER JOIN go_bp AS go ON gi._id = go._id --WHERE evidence = "ND" LIMIT 5;''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # gene information for gene with the max number of associated go terms sql = ''' SELECT gi.symbol, go_term_no FROM gene_info AS gi INNER JOIN (SELECT _id, count(go_id) AS go_term_no FROM go_bp GROUP BY _id) AS go ON gi._id == go._id WHERE go_term_no IN (SELECT max(go_term_no) FROM (SELECT _id, count(go_id) AS go_term_no FROM go_bp GROUP BY _id)); ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # gene information for gene with the max number of associated go terms sql = ''' SELECT _id, count(go_id) AS go_term_no1 FROM go_bp GROUP BY _id HAVING go_term_no1 IN ( SELECT max(go_term_no) FROM (SELECT _id, count(go_id) AS go_term_no FROM go_bp GROUP BY _id ) ); ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # gene information for 
gene with the max number of associated go terms sql = ''' SELECT * FROM gene_info WHERE _id IN ( SELECT _id FROM ( SELECT _id, count(go_id) AS go_term_no1 FROM go_bp GROUP BY _id HAVING go_term_no1 IN ( SELECT max(go_term_no) FROM (SELECT _id, count(go_id) AS go_term_no FROM go_bp GROUP BY _id ) ))); ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### See the create table statement # + # sql column in the sqlite_master table sql = ''' SELECT sql FROM sqlite_master WHERE type= "table" and name == "go_bp"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # - # #### Guidelines for database design: # # * Normalization is the process of creating or re-arranging data relationships so that it will be easy to store and retrieve data efficiently. Data is normalized to achieve the following goals: # * Eliminate data redundancies and save space # * Make it easier to change data # * Simplify the enforcement of referential integrity constraints # * Produce a design that is a 'good' representation of the real world (one that is intuitively easy to understand and a good base for further growth) # # * Make it easier to change data by avoiding to provide multiple values separated by commas in a column # * All columns in a table should depend on the primary key, all extra related information should be in other tables linked by foreign keys # # https://support.microsoft.com/en-us/help/283878/description-of-the-database-normalization-basics # ### CREATE TABLE - statement # https://www.sqlitetutorial.net/sqlite-create-table/ # # ```sql # CREATE TABLE [IF NOT EXISTS] [schema_name].table_name ( # column_1 data_type PRIMARY KEY, # column_2 data_type NOT NULL, # column_3 data_type DEFAULT 0, # table_constraints # ) [WITHOUT ROWID]; # ``` # # In this syntax: # # * First, specify the name of the table that you want to create after the CREATE TABLE keywords. 
The name of the table cannot start with sqlite_ because it is reserved for the internal use of SQLite. # * Second, use `IF NOT EXISTS` option to create a new table if it does not exist. Attempting to create a table that already exists without using the IF NOT EXISTS option will result in an error. # * Third, optionally specify the schema_name to which the new table belongs. The schema can be the main database, temp database or any attached database. # * Fourth, specify the column list of the table. Each column has a name, data type, and the column constraint. SQLite supports `PRIMARY KEY, UNIQUE, NOT NULL`, and `CHECK` column constraints. # * Fifth, specify the table constraints such as PRIMARY KEY, FOREIGN KEY, UNIQUE, and CHECK constraints. # * Finally, optionally use the `WITHOUT ROWID` option. By default, a row in a table has an implicit column, which is referred to as the rowid, oid or _rowid_ column. The rowid column stores a 64-bit signed integer key that uniquely identifies the row inside the table. If you don’t want SQLite creates the rowid column, you specify the WITHOUT ROWID option. A table that contains the rowid column is known as a rowid table. Note that the WITHOUT ROWID option is only available in SQLite 3.8.2 or later. # https://www.sqlite.org/syntaxdiagrams.html#create-table-stmt # # <img src = "https://www.sqlite.org/images/syntax/create-table-stmt.gif" width="800"/> # Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes: # https://www.sqlite.org/datatype3.html # * `NULL`. The value is a NULL value. # * `INTEGER`. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value. # * `REAL`. The value is a floating point value, stored as an 8-byte IEEE floating point number. # * `TEXT`. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE). # * `BLOB`. 
The value is a blob of data, stored exactly as it was input. # The `sqlite_master` has the following create statement: # ```sql # CREATE TABLE sqlite_master ( type TEXT, name TEXT, tbl_name TEXT, rootpage INTEGER, sql TEXT ); # ``` # #### Create the table `gene` with the columns: `gene_id`, `gene_symbol`. `gene_name` # ##### The `connection` object methods can be used to save or revert/reset the changes after a command that makes changes to the database # ##### `COMMIT` - save the changes # ##### `ROLLBACK` - revert the changes # # + sql=''' CREATE TABLE IF NOT EXISTS go_bp_ALT ( gene_go_id INTEGER PRIMARY KEY AUTOINCREMENT, gene_id INTEGER NOT NULL, -- REFERENCES genes _id go_id CHAR(10) NOT NULL, -- GO ID evidence CHAR(30) NOT NULL, -- GO evidence information FOREIGN KEY (gene_id) REFERENCES genes (_id) ); ''' try: cursor.execute(sql) except connection.DatabaseError: print("Creating the go_bp_ALT table resulted in a database error!") connection.rollback() raise else: connection.commit() finally: print("done!") # - # ##### Similar error handling, as seen above, can be when executing any statement that changes the database. # ##### Check if the new table appears in the `sqlite_master` table sql = ''' SELECT name FROM sqlite_master WHERE name LIKE "go_bp%" LIMIT 4; ''' cursor.execute(sql) print(cursor.fetchall()) # # <br><br> # The `sqlite_sequence` table is created and initialized automatically whenever a regular table is created if it has a column with the `AUTOINCREMENT` option set.<br> # https://www.sqlite.org/autoinc.html # # ##### Check if the new table appears in the `sqlite_master` table sql = ''' SELECT name FROM sqlite_master WHERE name LIKE "sqlite_%" LIMIT 10; ''' cursor.execute(sql) print(cursor.fetchall()) # ### INDEXING # # Indexes are lookup table, like the index of a book. 
# They are usually created for columns that have unique/ or less redundant values and provide a way to quicky search # the values.<br> # Indexing creates a copy of the indexed columns together with a link to the location of the additional information.<br> # The index data is stored in a data structure that allows for fast sorting. <br> # E.g.: balanced-tree - every leaf is at most n nodes away from the root) that allows for fast sorting. <br> # All queries (statements) regarding an indexed table are applied to the index # # # * One important function in Relational Databases is to be able to create indexes on columns in tables # * These indexes are pre-calculated and stored in the database # * Indexes should be created on columns that are used in queries and joins # * columns that appear in conditions (WHERE, JOIN ... ON) # * They will rapidly speed up query return rate and improve query performance # # To create an index use the following command: # # ```sql # CREATE INDEX indexName ON tableName (columnName) # ``` sql = ''' CREATE INDEX gene_go_idx ON go_bp_ALT (gene_go_id) ''' cursor.execute(sql) connection.commit() # ##### Check if the new index appears in the `sqlite_master` table sql = ''' SELECT name FROM sqlite_master WHERE type= "index"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### Remove the index sql = ''' DROP INDEX gene_go_idx ''' cursor.execute(sql) connection.commit() # ##### Check if the index was removed from the `sqlite_master` table sql = ''' SELECT name, sql FROM sqlite_master WHERE type= "index" AND name = "gene_go_idx"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # ### INSERT - statement # # Makes changes to the database table<br> # Adds new data to a table (if the constraints are met) # Constraint examples: # * For one designated column or a group of columns that are designated as Primary Key the values are unique # * The value inserted in a column that has a Foreign Key 
constraint should exist in the column that it refers to # # ```sql # INSERT INTO <tablename> (<column1>, <column2>, <column3>) VALUES (value1, value2, value3); # ``` # # ##### One simple INSERT command adds 1 row of data at a time into an existing table # # ##### Connection object allows us to: # * ##### COMMIT - save the changes # * ##### ROLLBACK - reverts/discards the changes # <br> # # ##### Let's see what is in the table (it should be nothing): sql = ''' SELECT * FROM go_bp_ALT; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # <br> # # ##### Let's try an insert: # ```sql # INSERT INTO <tablename> (<column1>, <column2>, <column3>) VALUES (value1, value2, value3); # ``` # + values_list = [1234,"GO:1234","CM_EV"] sql = ''' INSERT INTO go_bp_ALT (gene_id, go_id, evidence) VALUES (?,?,?); ''' cursor.execute(sql,values_list) connection.commit() # + # This command retrieves the identifier of the last row from the most current query # The gene_go_id id_value = cursor.lastrowid id_value # - # <br> # # # ##### We have a row in the table!!! And the gene_go_id was automatically generated. sql = ''' SELECT * FROM go_bp_ALT ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### You can have a Python "table" structure (list of lists) of insert values and get them all inserted in one command, each sublist having the correct number of values. 
# # + values_tbl = [[1235,"GO:1235","CM_EV"], [1236,"GO:1236","CM_EV"], [1236,"GO:1237","CM_EV"]] sql = ''' INSERT INTO go_bp_ALT (gene_id, go_id, evidence) VALUES (?,?,?); ''' cursor.executemany(sql,values_tbl) connection.commit() # - sql = ''' SELECT * FROM go_bp_ALT ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) go_tbl = [["GO:1238","ND"], ["GO:1239","ND"], ["GO:1240","IDE"]] gene_id = 4 for go_elem in go_tbl: go_elem.insert(0,gene_id) print(go_tbl) sql = ''' INSERT INTO go_bp_ALT (gene_id, go_id, evidence) VALUES (?,?,?); ''' cursor.executemany(sql,go_tbl) connection.commit() sql = ''' SELECT * FROM go_bp_ALT ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### UPDATE - statement - changes the table rows # # # Modifies data (already in a table) in all rows matching the WHERE clause # # ```sql # UPDATE table_name # SET column1 = value1, column2 = value2...., columnN = valueN # WHERE [condition]; # ``` # # Update is generally a single row command, but use of the where clause can cause data to be updated in multiple rows <br> # (whether you intended to or not !!!!) # # The following statement updates the evidence for all entries for all genes associated with the 2 biological processses sql = ''' UPDATE go_bp_ALT SET gene_id = 5, go_id = "GO:1234" WHERE gene_id = 4; ''' cursor.execute(sql) connection.commit() sql = ''' SELECT * FROM go_bp_ALT ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### DELETE - statement - deletes table rows # # * MAKES CHANGES TO THE DATA # * Row level deletion – can’t delete less than this. 
# # ```sql # DELETE FROM <tablename> WHERE <column> = <value> # ``` # # * The WHERE predicate is the same as for the SELECT statement, that is, it determines which rows will be deleted # # sql = ''' DELETE FROM go_bp_ALT WHERE go_id IN ("GO:1234","GO:1236"); ''' cursor.execute(sql) connection.commit() sql = ''' SELECT * FROM go_bp_ALT ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # ```sql # DELETE FROM <tablename>; # ``` # # * This would delete all rows of data from a table. # * Preserves table structure (table still exists) # * Optimized for speed in SQLite, no row-by-row execution. # * EXISTS <table_name> still evaluates to True # sql = ''' DELETE FROM go_bp_ALT; ''' cursor.execute(sql) connection.commit() sql = ''' SELECT * FROM go_bp_ALT ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # <br> # # #### `DROP TABLE` - statement - removes a table (permanently) sql = ''' DROP TABLE IF EXISTS go_bp_ALT; ''' cursor.execute(sql) connection.commit() sql = ''' SELECT name AS "TABLE NAME" FROM sqlite_master WHERE name LIKE "go_bp%"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### VIEW in a database # # * A view is a virtual table which can be created from a query on existing tables # * Views are created to give a more human readable version of the normalized data / tables # * http://www.sqlitetutorial.net/sqlite-create-view/ # * An SQLite view is read only # ```sql # CREATE [TEMP] VIEW [IF NOT EXISTS] view_name(column-name-list) AS # select-statement; # ``` # gene go information for easy access sql = ''' SELECT symbol, go_id, evidence FROM gene_info AS gi INNER JOIN go_bp AS go ON gi._id == go._id WHERE evidence IN ("EXP","IDA") ; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # gene go information for easy access sql = ''' CREATE VIEW IF NOT EXISTS gene_go_info (symbol, go_id, evidence) AS SELECT symbol, go_id, evidence FROM gene_info AS gi 
INNER JOIN go_bp AS go ON gi._id == go._id WHERE evidence IN ("EXP","IDA") ; ''' cursor.execute(sql) connection.commit() # gene go information sql = ''' SELECT * FROM gene_go_info LIMIT 10; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) sql = ''' SELECT type, name AS "TABLE NAME" FROM sqlite_master WHERE name = "gene_go_info"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # ```sql # DROP VIEW [IF EXISTS] view_name; # ``` # gene go information for easy access sql = ''' DROP VIEW IF EXISTS gene_go_info; ''' cursor.execute(sql) connection.commit() sql = ''' SELECT type, name AS "TABLE NAME" FROM sqlite_master WHERE name = "gene_go_info"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # #### JOIN tables # # * Multiple tables contain different data that we want to retrieve from a single query # * In order to assemble data as part of a query, a JOIN between tables is needed # * This is a very common practice, since it’s rare for all the data you want to be in a single table # # # * INNER JOIN - return only those rows where there is matching content in BOTH tables (is the default when JOIN is used) # * OUTER JOIN - returns all rows from both tables even if one of the tables is blank # * SELF JOIN - can be used to join a table to itself (through aliasing), to compare data internal to the table # # ```sql # SELECT ... 
FROM table1 [INNER] JOIN table2 ON conditional_expression # ``` # sql = ''' SELECT gi._id, symbol, evidence FROM gene_info AS gi INNER JOIN go_bp AS go ON gi._id = go._id LIMIT 5; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) sql = ''' SELECT * FROM genes LIMIT 5; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) sql = ''' SELECT sql FROM sqlite_master WHERE name = "go_bp"; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) sql = ''' SELECT symbol, gene_id, count(go_id) FROM gene_info AS gi INNER JOIN go_bp AS go ON gi._id = go._id INNER JOIN genes g ON g._id = gi._id GROUP BY symbol, gene_id LIMIT 5; ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) sql = ''' SELECT min(go_no) FROM ( SELECT symbol, gene_id, count(go_id) go_no FROM gene_info AS gi INNER JOIN go_bp AS go ON gi._id = go._id INNER JOIN genes g ON g._id = gi._id GROUP BY symbol, gene_id ); ''' cursor.execute(sql) print(get_header(cursor)) print(get_results(cursor)) # + # And close() cursor.close() connection.close() # - # #### To remove the database, delete the .sqlite file.
completed_classes/SQL_part3_ran_section2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo using holoviews for vector fields import holoviews as hv hv.notebook_extension() import numpy as np # + def create_vectorfield(freq=1, phase=0): x,y = np.mgrid[-10:10,-10:10] * 0.25 sine_rings = np.sin(freq * (x**2+y**2 + phase))*np.pi+np.pi exp_falloff = 1/np.exp((x**2+y**2)/8) vector_data = np.array([x.flatten()/5., # X positions y.flatten()/5., # Y positions sine_rings.flatten(), # Arrow angles exp_falloff.flatten()]) # Arrow sizes scalar_data = sine_rings return vector_data, scalar_data # - matrices = {(phase, freq): hv.VectorField(create_vectorfield(freq, phase)[0].T, label='my_label', group='my_group') for freq in [0.05, 0.1, 0.25, 0.5, 1.0, 1.5, 2.0] # Frequencies for phase in [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]} # Phases # ## show vector field # + # %%opts VectorField (color='blue') vector_data, scalar_data = create_vectorfield(freq=0.1) hv.VectorField(vector_data.T, label='label', group='group') # - # # show scalar field # + # polar angle hv.Image(scalar_data) # - # # explore vector field as function of two parameters (freq and phase): hv.HoloMap(matrices, kdims=['phase', 'frequency']) matrices2 = {(phase, freq): hv.Image(create_vectorfield(freq, phase)[1]) * hv.VectorField(create_vectorfield(freq, phase)[0].T, label='my_label', group='my_group') for freq in [0.1, 0.25, 0.5, 1.0, 1.25, 1.5] # Frequencies for phase in [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]} # Phases # + # %%opts VectorField (color='r') Image (cmap='gray') hv.HoloMap(matrices2, kdims=['phase', 'frequency']) # - # Thanks to <NAME> for pointing me towards Holoviews.
dev/demo-holoviews-vectorfields.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .cpp // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: C++17 // language: C++17 // name: xcpp17 // --- // # Function // ## Basic template<class T, class U> auto add(T t, U u) { return t + u; } // + #include <iostream> auto x1 = add(1, 1.2); std::cout << "x1 = " << x1 << std::endl; // - // ## Cross-product // // Taken from [Rosetta Code](https://rosettacode.org/wiki/Cartesian_product_of_two_or_more_lists#C.2B.2B). void print(const std::vector<std::vector<int>>& v) { for (const auto &p : v) { int counter = 0; int n = p.size(); std::cout << "("; for (const auto &e : p) { std::cout << e; if (counter < n - 1) { std::cout << ", "; } counter++; } std::cout << ")" << std::endl; } } auto product(const std::vector<std::vector<int>>& lists) { std::vector<std::vector<int>> result; auto it = std::find_if(std::begin(lists), std::end(lists), [](auto e) { return e.size() == 0; }); if (it != std::end(lists)) { return result; } for (auto &e : lists[0]) { result.push_back({e}); } for (size_t i = 1; i < lists.size(); ++i) { std::vector<std::vector<int>> temp; for (auto &e : result) { for (auto f : lists[i]) { auto e_tmp = e; e_tmp.push_back(f); temp.push_back(e_tmp); } } result = temp; } return result; } std::vector<std::vector<int>> x2 = { { 0, 1 }, { 0, 1 }, { 0, 1, 2 } }; auto x3 = product(x2); print(x3); // ## Cross-product, generic template<class T> void gprint(const std::vector<std::vector<T>>& v) { for (const auto &p : v) { int counter = 0; int n = p.size(); std::cout << "("; for (const auto &e : p) { std::cout << e; if (counter < n - 1) { std::cout << ", "; } counter++; } std::cout << ")" << std::endl; } } template<class T> auto gproduct(const std::vector<std::vector<T>>& lists) { std::vector<std::vector<T>> result; auto it = std::find_if(std::begin(lists), std::end(lists), [](auto e) { return e.size() == 0; }); if (it != std::end(lists)) { return 
result; } for (auto &e : lists[0]) { result.push_back({e}); } for (size_t i = 1; i < lists.size(); ++i) { std::vector<std::vector<T>> temp; for (auto &e : result) { for (auto f : lists[i]) { auto e_tmp = e; e_tmp.push_back(f); temp.push_back(e_tmp); } } result = temp; } return result; } auto x4 = gproduct(x2); gprint(x4); // + #include <string> std::vector<std::vector<std::string>> x5 = { { "false", "true" }, { "false", "true" }, { "low", "medium", "high" } }; auto x6 = gproduct(x5); gprint(x6); // + std::vector<std::vector<std::string>> x7 = { { "false", "true" }, { "false", "true" } }; auto x8 = gproduct(x7); gprint(x8); // + std::vector<std::vector<std::string>> x9 = { { "false", "true" } }; auto x10 = gproduct(x9); gprint(x10); // - // ## Group elements in a list into sub-lists template<class T> auto groupList(const int n, const std::vector<T> &list) { int nLists = list.size() % n == 0 ? list.size() / n : list.size() / n + 1; std::vector<std::vector<T>> subLists; subLists.reserve(nLists); std::vector<T> subList; subList.reserve(n); const int listSize = list.size(); for (int i = 0; i < listSize; i++) { if (i != 0 && i % n == 0) { std::vector<T> temp(subList); subLists.push_back(temp); subList.clear(); } subList.push_back(list.at(i)); } subLists.push_back(subList); return subLists; } // Group vector of 6 elements into groups of 2. std::vector<int> numList1 = {1, 2, 3, 4, 5, 6}; gprint(groupList(2, numList1)); // Group vector of 6 elements into groups of 3. gprint(groupList(3, numList1)); // Group vector of 7 elements into groups of 2. std::vector<int> numList2 = {1, 2, 3, 4, 5, 6, 7}; gprint(groupList(2, numList2)); // Group vector of 7 elements into groups of 3. gprint(groupList(3, numList2)); std::vector<std::string> sList1 = {"John", "Jack", "Joe", "Jerry", "James", "Jeff", "Jacobi"}; gprint(groupList(2, sList1)); gprint(groupList(3, sList1));
sphinx/cpp/source/function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Future Work and Open Questions

# In our papers ([SVCCA](https://arxiv.org/abs/1706.05806), [Insights on Representational Similarity](https://arxiv.org/abs/1806.05759)), we explored some questions using these techniques, but many other open questions remain:
#
# 1. Applying some of these methods to understand properties of generative models.
# 2. Exploring low rank based compression, particularly with PLS
# 3. Understanding learning dynamics and representational similarity of generalizing/memorizing networks through training.
# 4. Identifying the important neurons in a layer, and determining how many there are. Relatedly, identifying which neurons become sensitive to which classes.
# 5. Pinpointing the effect of different kinds of layers on the representation.
# 6. Comparing representations in biological and artificial neural networks.
#
# We hope the code and tutorials are a helpful tool in better understanding representational properties of neural networks!
#
tutorials/004_Future_Work_and_Open_Questions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # How to Make a Text Classifier: Fake News Edition # <br><br> # # <b>Agenda:</b> # # We are going to build a naive bayes classifier for the purpose of classifying news as "FAKE" or "REAL" # # - Prepare the corpus (documents) for modeling using count and tfidf vectorizers # - Train a naive bayes model on the vectorized documents # - Use grid search to optimize our model. # [Article about this project](https://opendatascience.com/blog/how-to-build-a-fake-news-classification-model/) # ## “A lie gets halfway around the world before the truth has a chance to get its pants on.” – <NAME> # # <b>“What is fake news?”</b> # <br><br> # <b>Can you build a model that can differentiate between “Real” news vs “Fake” news.</b> # Requirements: pandas, numpy, matplotlib, sklearn, nltk # # This is in python 2 #Imports import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns from nltk.stem.snowball import SnowballStemmer from sklearn.cross_validation import cross_val_score from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import make_pipeline from sklearn.grid_search import GridSearchCV pd.set_option("max.columns", 100) # Load in the data set df = pd.read_csv("fake_real_news.csv", usecols=["title","text", "label"]) #view data df.head() #View tail df.head() # + #Print 10 random titles for i in df.title.sample(n= 10): print i, "\n" # + #Print article print df.text[300][:1000] # - # ### The Articles # # - 4594 articles published between October 2015 and December 2016. 
# - The "FAKE" articles came from this [Kaggle page.](https://www.kaggle.com/mrisdal/fake-news)
# - The "REAL" articles came from www.allsides.com and are from publications like New York Times, WSJ, Bloomberg, NPR, and the Guardian.

# ### Tokenizing text with count and TFIDF Vectorizers
#
# Before we can build a model, we have to turn words to numbers.

# - **What:** Separate text into units such as sentences or words
# - **Why:** Gives structure to previously unstructured text
# - **Notes:** Relatively easy with English language text, not easy with some languages

# From the [scikit-learn documentation](http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction):
#
# > Text Analysis is a major application field for machine learning algorithms. However the raw data, a sequence of symbols cannot be fed directly to the algorithms themselves as most of them expect **numerical feature vectors with a fixed size** rather than the **raw text documents with variable length**.
#
# We will use [CountVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) to "convert text into a matrix of token counts":

# +
#Assign text to X variable (article bodies) and labels to y ("FAKE"/"REAL")
X = df.text
y = df.label
# -

#Initialize Count Vectorizer (default settings: lowercase, single words, no stop-word filtering)
count_vec = CountVectorizer()

#Fit Count Vectorizer and build the document-term matrix (sparse)
dtm_cv = count_vec.fit_transform(X)

#Convert it to a pandas data frame (NOTE: toarray() densifies the whole matrix — memory-heavy for large corpora)
df_cv = pd.DataFrame(dtm_cv.toarray(), columns=count_vec.get_feature_names())

#Look at df_cv
df_cv.head()

#How big is data
df_cv.shape

#Print first 100 feature names
print count_vec.get_feature_names()[:100]

#Print random slice of feature names
print count_vec.get_feature_names()[19000:19200]

# Let's configure our count vectorizer

# - **lowercase:** boolean, True by default
#   - Convert all characters to lowercase before tokenizing.
# - **ngram_range:** tuple (min_n, max_n)
#   - The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used.
# - <b>stop_words</b> : string {‘english’}, list, or None (default)
#   - If ‘english’, a built-in stop word list for English is used.
#   - If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if analyzer == 'word'.
#   - If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms.
# - **max_features:** int or None, default=None
#   - If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus.
# - **min_df:** float in range [0.0, 1.0] or int, default=1
#   - When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts.

# Train a Count Vectorizer with lowercase words, includes words and two-word phrases, filters out stop words, and only uses words that show up at least 3 times.
#Initialize Count Vectorizer: unigrams + bigrams, English stop words removed, terms must appear in >= 3 documents
count_vec = CountVectorizer(lowercase=True, stop_words="english", ngram_range=(1,2), min_df = 3)

#Fit Count Vectorizer
dtm_cv = count_vec.fit_transform(X)

#Convert it to a pandas data frame
df_cv = pd.DataFrame(dtm_cv.toarray(), columns=count_vec.get_feature_names())

#Look at data
df_cv.head()

#Look at random slice of features
print count_vec.get_feature_names()[80000:80100]

# Time for TFIDF Vectorizer

# - **What:** Computes "relative frequency" that a word appears in a document compared to its frequency across all documents
# - **Why:** More useful than "term frequency" for identifying "important" words in each document (high frequency in that document, low frequency in other documents)
# - **Notes:** Used for search engine scoring, text summarization, document clustering

#Initialize TFIDF Vectorizer (default settings)
tf_vec = TfidfVectorizer()

#Fit TFIDF Vectorizer
dtm_tf = tf_vec.fit_transform(X)

#Convert it to a pandas data frame
df_tf = pd.DataFrame(dtm_tf.toarray(), columns=tf_vec.get_feature_names())

#View data
df_tf.head()

# Now that we have our two vectorized datasets. Let's get into the modeling.
#
# ### Naive Bayes

# Bayes Theorem covers the probabilistic relationship between multiple variables, and specifically allows us to define one conditional in terms of the underlying probabilities and the inverse condition. Specifically, it can be defined as:
#
# $$P(y|x) = P(y)P(x|y)/P(x)$$
#
# This means the probability of y given x condition equals the probability of y times the probability of x given y condition divided by the probability of x.
#
# This theorem can be extended to when x is a vector (containing the multiple x variables used as inputs for the model) to:
#
# $$P(y|x_1,...,x_n) = P(y)P(x_1,...,x_n|y)/P(x_1,...,x_n)$$

# Let's pretend we have an email with three words: "Send money now." We'll use Naive Bayes to classify it as **ham or spam.**
#
# $$P(spam \ | \ \text{send money now}) = \frac {P(\text{send money now} \ | \ spam) \times P(spam)} {P(\text{send money now})}$$
#
# By assuming that the features (the words) are **conditionally independent**, we can simplify the likelihood function:
#
# $$P(spam \ | \ \text{send money now}) \approx \frac {P(\text{send} \ | \ spam) \times P(\text{money} \ | \ spam) \times P(\text{now} \ | \ spam) \times P(spam)} {P(\text{send money now})}$$
#
# We can calculate all of the values in the numerator by examining a corpus of **spam email**:
#
# $$P(spam \ | \ \text{send money now}) \approx \frac {0.2 \times 0.1 \times 0.1 \times 0.9} {P(\text{send money now})} = \frac {0.0018} {P(\text{send money now})}$$
#
# We would repeat this process with a corpus of **ham email**:
#
# $$P(ham \ | \ \text{send money now}) \approx \frac {0.05 \times 0.01 \times 0.1 \times 0.1} {P(\text{send money now})} = \frac {0.000005} {P(\text{send money now})}$$
#
# All we care about is whether spam or ham has the **higher probability**, and so we predict that the email is **spam**.

# #### Key takeaways
#
# - The **"naive" assumption** of Naive Bayes (that the features are conditionally independent) is critical to making these calculations simple.
# - The **normalization constant** (the denominator) can be ignored since it's the same for all classes.
# - The **prior probability** is much less relevant once you have a lot of features.

# ### <b>Pros</b>:
# #### - Very fast. Adept at handling tens of thousands of features which is why it's used for text classification
# #### - Works well with a small number of observations
# #### - Isn't negatively affected by "noise"
#
# ### <b>Cons</b>:
# #### - Useless for probabilities. Most of the time assigns probabilites that are close to zero or one
# #### - It is literally "naive". Meaning it assumes features are independent.

# Test NB model on the count vectorized data. Re-run the Count Vectorizer with stop_words="english"

#Initialize Count Vectorizer
count_vec = CountVectorizer(stop_words="english")

#Fit Count Vectorizer
dtm_cv = count_vec.fit_transform(X)

#Convert it to a pandas data frame
df_cv = pd.DataFrame(dtm_cv.toarray(), columns=count_vec.get_feature_names())

# Fit model

#Initialize model
model = MultinomialNB()

#Fit model with df_cv and y
model.fit(df_cv, y)

#score the model (NOTE: scored on the training data — optimistic; cross-validation below gives honest estimates)
model.score(df_cv, y)

# Re-run the TFIDF Vectorizer with stop_words = "english"
# NOTE(review): the code below does NOT actually pass stop_words="english" to
# TfidfVectorizer, despite the markdown above — confirm whether that was intended.

#Initialize TFIDF Vectorizer
tf_vec = TfidfVectorizer()

#Fit TFIDF Vectorizer
dtm_tf = tf_vec.fit_transform(X)

#Convert it to a pandas data frame
df_tf = pd.DataFrame(dtm_tf.toarray(), columns=tf_vec.get_feature_names())

#Initialize model
model = MultinomialNB()

#Fit model with df_tf and y
model.fit(df_tf, y)

#score the model
model.score(df_tf, y)

# Some good scores! Or are they?

# Time for some cross validation.

#Call cross_val_score on the count vectorized dataset. Call .values on df_cv
cross_val_score(MultinomialNB(), df_cv.values, y, cv = 5, scoring="accuracy").mean()

#Call cross_val_score on the tfidf vectorized dataset. Call .values on df_tf
cross_val_score(MultinomialNB(), df_tf.values, y, cv = 5, scoring="accuracy").mean()

# Which one wins? Count or TFIDF vectorizer?

# Now we're going to optimize our model by testing out every possible configuration

# ### Grid Searching
#
# https://machinelearningmastery.com/how-to-tune-algorithm-parameters-with-scikit-learn/

# Create dictionaries for our "grids" aka every possible combination of configuration.
#Grid dictionary for count vectorized data
#Keys follow make_pipeline's naming: "<lowercased step class>__<parameter>"
param_grid_cv = {}
param_grid_cv["countvectorizer__ngram_range"] = [(1,1), (1,2), (2,2)]
param_grid_cv["countvectorizer__max_features"] = [1000,5000, 10000]

#Grid dictionary for tfidf vectorized data
param_grid_tf = {}
param_grid_tf["tfidfvectorizer__ngram_range"] = [(1,1), (1,2), (2,2)]
param_grid_tf["tfidfvectorizer__max_features"] = [1000, 5000, 10000]

# Make a pipeline

#Create pipeline for count vectorized data
pipe_cv = make_pipeline(CountVectorizer(stop_words="english"), MultinomialNB())

#Create pipeline for tfidf vectorized data
pipe_tf = make_pipeline(TfidfVectorizer(stop_words="english"), MultinomialNB())

# Establish the grids

#Create grid for count vectorized data (3-fold CV over every parameter combination)
grid_cv = GridSearchCV(pipe_cv, param_grid_cv, cv=3, scoring='accuracy')

#Create grid for tfidf vectorized data
grid_tf = GridSearchCV(pipe_tf, param_grid_tf, cv=3, scoring='accuracy')

# This is gonna take a while so let's measure how long it takes

#Import time library
from time import time

#Fit and time the grid_cv
t = time()
#Fit grid_cv on X and y
grid_cv.fit(X,y)
print time()-t

#Fit and time the grid_tf
t = time()
#Fit grid_tf on X and y
grid_tf.fit(X,y)
print time()-t

#Look at the best parameters and the best scores for count vectorized data
print(grid_cv.best_params_)
print(grid_cv.best_score_)

#Look at the best parameters and the best scores for tfidf vectorized data
print(grid_tf.best_params_)
print(grid_tf.best_score_)

# ## Bonus Section: How to find the "fakest" and "realest" words

# +
#Fit count vectorizer and NB model
count_vec = CountVectorizer()
#Fit Count Vectorizer
dtm_cv = count_vec.fit_transform(X)
model = MultinomialNB()
model.fit(dtm_cv, y)
model.score(dtm_cv, y)
# -

#Assign feature list to tokens
tokens = count_vec.get_feature_names()

#Counts words in fake articles
#NOTE(review): feature_count_ rows follow model.classes_ (sorted labels), so row 0
#is presumably "FAKE" and row 1 "REAL" — verify with model.classes_ before trusting.
fake_token_count = model.feature_count_[0,:]
fake_token_count

#Counts words in real articles
real_token_count = model.feature_count_[1,:]
real_token_count

#Input tokens, fake_token_count, and real_token_count into a pandas data frame
tok_df = pd.DataFrame({"token":tokens, "fake":fake_token_count, "real":real_token_count}).set_index("token")

#Add 1 to fake and real columns (Laplace smoothing so zero counts don't zero out the ratio)
tok_df["fake"] += 1
tok_df.real += 1

#Divide each value in the fake and real columns by their corresponding class_count value
tok_df.fake = tok_df.fake/model.class_count_[0]
tok_df.real = tok_df.real/model.class_count_[1]

#Derive the ratio between fake and real
tok_df["ratio"] = tok_df.fake/tok_df.real

# Time to see the "fakest" words

tok_df.sort_values(by="ratio", ascending=False).head(40)

# "Realest" words

tok_df.sort_values(by="ratio", ascending=True).head(40)

# Let's plot them

top_20_fake = tok_df.sort_values(by="ratio", ascending=False)["ratio"].iloc[:20]
top_20_fake

tok_df["real_ratio"] = tok_df.real/tok_df.fake
top_20_real = tok_df.sort_values(by="real_ratio", ascending=False)["real_ratio"].iloc[:20]
top_20_real

# Plot fakes

top_20_fake.plot(kind="bar", figsize=(14, 10))

# Plot reals

top_20_real.plot(kind="bar", figsize=(14, 10))
Fake News Classifier Notebook Completed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="hGClrhQA9SAk"
# # Деревья решений

# + [markdown] id="veekMy8WRjBi"
# ## Построение дерева

# + [markdown] id="SYkVwAFiUHXj"
# Опишем жадный алгоритм построения бинарного дерева решений:
# 1. Начинаем со всей обучающей выборки $X$, которую помещаем в корень $R_1$.
# 2. Задаём функционал качества $Q(X, j, t)$ и критерий остановки.
# 3. Запускаем построение из корня: $SplitNode(1, R_1)$
#
# Функция $SplitNode(m, R_m)$
# 1. Если выполнен критерий остановки, то выход.
# 2. Находим наилучший с точки зрения $Q$ предикат: $j, t$: $[x_j<t]$
# 3. Помещаем предикат в вершину и получаем с его помощью разбиение $X$ на две части: $R_{left} = \lbrace x|x_j<t \rbrace$ и $R_{right} = \lbrace x|x_j \geqslant t \rbrace$
# 4. Поместим $R_{left}$ и $R_{right}$ соответственно в левое и правое поддерево.
# 5. Рекурсивно повторяем $SplitNode(left, R_{left})$ и $SplitNode(right, R_{right})$.
#
# В конце поставим в соответствие каждому листу ответ. Для задачи классификации - это самый частый среди объектов класс или вектор с долями классов (можно интерпретировать как вероятности):
# $$ c_v = \arg \max_{k\in Y} \sum_{(x_i,y_i) \in R_v} [y_i=k] $$

# + [markdown] id="9P6FsdBog4Ai"
# ## Функционал качества для деревьев решений
#

# + [markdown] id="9VAKO0aykGBD"
# Энтропия Шеннона для системы с N возможными состояниями определяется по формуле:
# $$H = - \sum_{i=1}^{N} p_i\log_2p_i $$

# + [markdown] id="5582B-1Fn2bw"
# где $p_i$ – вероятности нахождения системы в $i$-ом состоянии.
#
# Это очень важное понятие теории информации, которое позволяет оценить количество информации (степень хаоса в системе). Чем выше энтропия, тем менее упорядочена система и наоборот. С помощью энтропии мы формализуем функционал качества для разделения выборки (для задачи классификации).
# + id="PbcMUd7bvk05"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from pprint import pprint

# + [markdown] id="4AdLxP9CowTm"
# Код для расчёта энтропии:

# + id="2mT8Jq8Av2sM"
def entropy(y):
    """Shannon entropy (base 2) of the class labels in array-like *y*.

    Returns 0.0 for a pure (single-class) sample; higher values mean a
    more mixed class distribution.
    """
    _, class_counts = np.unique(y, return_counts=True)
    class_probs = class_counts / class_counts.sum()
    # H = -sum(p * log2(p)) over the observed classes.
    return sum(class_probs * -np.log2(class_probs))


# + [markdown] id="Xk9etb2vo7fK"
# Здесь $y$ - это массив значений целевой переменной

# + [markdown] id="07TCw0USzLus"
# Энтропия – по сути степень хаоса (или неопределенности) в системе. Уменьшение энтропии называют приростом информации (information gain, IG).
#
# Обочначим $R_v$ - объекты, которые нужно разделить в помощью предиката в вершине $v$. Запишем формулу для расчёта информационного прироста:
# $$ Q = IG = H(R_v) - (H(R_{left})+H(R_{right}))$$
#
# На каждом шаге нам нужно максимизировать этот функционал качества. Как это делать? Например, так можно перебрать $t$ для выбранного $j$.

# + [markdown] id="trEWHDoXg_p9"
# Предыдущая версия формулы прироста информации слишком упрощена. В работе необходимо использовать более устойчивую формулу, которая учитывает не только энтропию подмножеств, но и их размер.
#
# $$ Q = IG = H(R_v) - \Big (\frac{|R_{left}|} {|R_{v}|} H(R_{left})+ \frac{|R_{right}|} {|R_{v}|} H(R_{right})\Big)$$
#
# где, $|R_{v}|$, $|R_{left}|$ и $|R_{right}|$ - количество элементов в соответствующих множествах.

# + [markdown] id="9xmN6V_N1xBr"
#
# ### Задание 4.1

# + [markdown] id="nWFHZScF2CBF"
# Реализуйте алгоритм построения дерева. Должны быть отдельные функции (методы) для расчёта энтропии (уже есть), для разделения дерева (используйте `pandas`), для подсчёта функционала качества $IG$, для выбора наилучшего разделения (с учетом признаков и порогов), для проверки критерия остановки.
#
# Для набора данных `iris` реализуйте алгоритм и минимум три разных критерия остановки из перечисленных ниже:
# * максимальной глубины дерева = 5
# * минимального числа объектов в листе = 5
# * максимальное количество листьев в дереве = 5
# * purity (остановка, если все объекты в листе относятся к одному классу)
#
# Реализуйте функцию `predict` (на вход функции подаётся датафрейм с объектами)
#
# Оцените точность каждой модели с помощью метрики точность (`from sklearn.metrics import accuracy_score` или реализовать свою).

# + colab={"base_uri": "https://localhost:8080/"} id="6VBOTyC6XcNL" outputId="ea03eb99-e18d-4aea-cdc2-7f7311577c19"
from sklearn import datasets
iris = datasets.load_iris()
print(iris.data[:5])
print(iris.feature_names)

# + id="sNnKRppykKDH"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.33, random_state=42)

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="Z6UOqdxzkbO8" outputId="c84e8c0e-183a-4d66-bb3b-eb881b0dfd50"
plt.scatter(X_train[:,2],X_train[:,3],c=y_train, cmap=plt.cm.Dark2)
plt.title('Petal plot')
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="9n5ilgJBnGbL" outputId="3149f6c4-55ba-4e91-b4b5-15c29ae9dbfa"
df = pd.DataFrame(data= np.c_[X_train, y_train], columns= iris['feature_names'] + ['target'])
df


# + id="EgeqxMLCgYeG"
def ig(parent, y_left, y_right):
    """Size-weighted information gain of splitting *parent* into two label arrays."""
    return parent.get_entropy() - (len(y_left) / parent.get_samples() * entropy(y_left)
                                   + len(y_right) / parent.get_samples() * entropy(y_right))


# + id="2mT82PP_bfmM"
class Node:
    """A decision-tree node holding its data slice and (optionally) a split rule."""

    def __init__(self, X, y, depth, parent_node = None):
        self.parent_node = parent_node
        self.left_child = None
        self.right_child = None
        self.X = X
        self.y = y
        self.depth = depth
        self.column = None  # feature index used for the split (None until split)
        self.limit = None   # threshold: left child gets rows with X[:, column] <= limit

    def get_entropy(self):
        """Shannon entropy of this node's labels."""
        return entropy(self.y)

    def get_samples(self):
        """Number of training objects in this node."""
        return len(self.y)

    def get_depth(self):
        return self.depth

    def is_leaf(self):
        return (self.left_child == None) and (self.right_child == None)

    def prediction(self):
        """Return the majority class among this node's labels.

        Bug fix: the loop previously never updated ``max_counts``, so every
        class with a positive count passed the comparison and the LAST class
        present was returned instead of the most frequent one.
        """
        a, counts = np.unique(self.y, return_counts=True)
        max_counts = 0
        prediction_value = None
        for i in range(len(a)):
            if counts[i] > max_counts:
                max_counts = counts[i]
                prediction_value = a[i]
        return prediction_value


# + id="98MaWlpeW8-j"
class ImplementationOfDecisionTree:
    """Greedy binary decision tree (entropy / information-gain criterion).

    Stopping criteria: max tree depth, min objects per leaf, max number of
    leaves, and purity (entropy == 0).
    """

    def __init__(self, max_depth, min_objects, max_leafs):
        self.max_depth = max_depth
        self.min_objects = min_objects
        self.max_leafs = max_leafs
        self.root = None
        self.start_depth = 0
        self.count_leaf = 1  # the root starts as the single leaf

    def fit(self, X_train, y_train):
        """Build the tree from a feature matrix and label array."""
        self.root = Node(X_train, y_train, self.start_depth)
        self.split(self.root)

    def split(self, current_node):
        """Recursively split *current_node* on the best (feature, threshold) pair."""
        if self.should_stop(current_node):
            return
        best_ig = 0
        best_split_left_y, best_split_right_y = [], []
        best_split_left_X, best_split_right_X = [], []
        split_left, split_right = [], []
        # Exhaustive search: every feature, every observed value as a threshold.
        for column in range(len(current_node.X[0])):
            X_column = current_node.X[:, column]
            for value in X_column:
                split_left = current_node.y[X_column <= value]
                split_right = current_node.y[X_column > value]
                # Enforce the min-objects-per-leaf criterion for both children.
                if (len(split_left) < self.min_objects) or (len(split_right) < self.min_objects):
                    continue
                current_ig = ig(current_node, split_left, split_right)
                if current_ig > best_ig:
                    best_ig = current_ig
                    best_split_left_y = split_left
                    best_split_right_y = split_right
                    best_split_left_X = current_node.X[X_column <= value]
                    best_split_right_X = current_node.X[X_column > value]
                    current_node.column = column
                    current_node.limit = value
        # No admissible split improves entropy -> this node stays a leaf.
        if (best_ig == 0):
            return
        current_node.left_child = Node(best_split_left_X, best_split_left_y, current_node.get_depth() + 1, current_node)
        current_node.right_child = Node(best_split_right_X, best_split_right_y, current_node.get_depth() + 1, current_node)
        self.count_leaf += 1  # one leaf became two: net gain of one leaf
        self.split(current_node.left_child)
        self.split(current_node.right_child)

    def should_stop(self, current_node):
        """True if any stopping criterion fires: depth, leaf budget, or purity."""
        return (current_node.get_depth() == self.max_depth) or (self.count_leaf == self.max_leafs) or (current_node.get_entropy() == 0)

    def predict(self, X_test):
        """Predict the class for a single feature vector by walking to a leaf."""
        current_node = self.root
        while not(current_node.is_leaf()):
            if X_test[current_node.column] <= current_node.limit:
                current_node = current_node.left_child
            else:
                current_node = current_node.right_child
        return current_node.prediction()


# + id="5DKUVPO8nL39"
tree = ImplementationOfDecisionTree(5, 5, 5)
tree.fit(X_train, y_train)

# + colab={"base_uri": "https://localhost:8080/"} id="ZembV99TVykK" outputId="bf444b52-65f7-4b41-a94c-d3322386be98"
test_prediction = np.array([tree.predict(X_test[i]) for i in range(len(X_test))])
test_prediction

# + colab={"base_uri": "https://localhost:8080/"} id="ZCdmCib4aMo9" outputId="821ad004-8a98-4750-cc72-8373703d87d8"
from sklearn.metrics import accuracy_score
accuracy_score(y_test, test_prediction)

# + [markdown] id="3mLVy6yCbSDj"
# сравним с деревом из sklearn

# + colab={"base_uri": "https://localhost:8080/"} id="tm6LT8EGbYd2" outputId="f4475370-7027-46f6-dd40-1b659f90836e"
from sklearn.tree import DecisionTreeClassifier
sklearn_tree = DecisionTreeClassifier(criterion = "entropy", max_depth = 5, min_samples_leaf = 5, max_leaf_nodes = 5)
sklearn_tree.fit(X_train, y_train)
sklearn_prediction = sklearn_tree.predict(X_test)
accuracy_score(y_test, sklearn_prediction)

# + [markdown] id="BkyCjLcy_CTM"
# ## Случайный лес

# + [markdown] id="7fKZe1FyRgCa"
# Опишем алгоритм случайный лес (*random forest*) и попутно разберём основные идеи:
#
# 1. Зададим $N$ - число деревьев в лесу.
# 2. Для каждого $n$ из $N$ сгенерируем свою выборку $X_n$. Пусть $m$ - это количество объектов в $X$. При генерации каждой $X_n$ мы будем брать объекты $m$ раз с возвращением. То есть один и тот же объект может попасть в выборку несколько раз, а какие-то объекты не попадут. (Этот способ называется бутстрап).
# 3. По каждой $X_n$ построим решающее дерево $b_n$. Обычно стараются делать глубокие деревья.
В качестве критериев остановки можно использовать `max_depth` или `min_samples_leaf` (например, пока в каждом листе не окажется по одному объекту). При каждом разбиении сначала выбирается $k$ (эвристика $k = \sqrt d$, где $d$ - это число признаков объектов из выборки $X$) случайных признаков из исходных, и оптимальное разделение выборки ищется только среди них. Обратите внимание, что мы не выбрасываем оставшиеся признаки! # 4. Итоговый алгоритм будет представлять собой результат голосования (для классификации) и среднее арифметическое (для регрессии). Модификация алгоритма предполагает учёт весов каждого отдельного слабого алгоритма в ансамбле, но в этом особо нет смысла. # # + [markdown] id="YJBQ8lc0WyrN" # ### Задание 4.2 # + [markdown] id="y594Jn04ZTCm" # В качестве набора данных используйте: https://www.kaggle.com/mathchi/churn-for-bank-customers # # Там есть описание и примеры работы с этими данными. Если кратко, речь идёт про задачу прогнозирования оттока клиентов. Есть данные о 10 тысячах клиентов банка, часть из которых больше не являются клиентами. # + [markdown] id="be_mLbdVW2oG" # Используя либо свою реализацию, либо `DecisionTreeClassifier` с разными настройками из `sklearn.tree` реализйте алгоритм "случайный лес". # # Найдите наилучшие гиперпараметры этого алгоритма: количество деревьев, критерий остановки, функционал качества, минимальное количество объектов в листьях и другие. # # Нельзя использовать готовую реализацию случайного леса из `sklearn`. # # В подобных задачах очень важна интерпретируемость алгоритма. Попытайтесь оценить информативность признаков, т.е. ответить а вопрос, значения каких признаков являются самыми важными индикаторами того, что банк потеряет клиента. 
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} id="0py8nsne4d7b" outputId="831bc85e-3c15-4b97-c159-4ca1b09b580e"
from google.colab import files
import io

# Upload the churn dataset interactively (Colab-only helper).
uploaded = files.upload()

# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="atTgcu1XFpVj" outputId="89b648e5-bb91-49c6-96a0-ca2bdde5b9c8"
# Load the uploaded CSV into a DataFrame.
bank = pd.read_csv(io.BytesIO(uploaded['churn.csv']))
bank

# + colab={"base_uri": "https://localhost:8080/"} id="YJrVKwWBerSK" outputId="e8e06168-3012-47fc-882b-b43d50ef336a"
# Target: 1 if the customer left the bank, 0 otherwise.
bank_target = np.array(bank['Exited'])
bank_target

# + colab={"base_uri": "https://localhost:8080/", "height": 585} id="n1UwO62nIzdm" outputId="c2bbde6e-d091-4166-e32b-f0228e97cd37"
# Drop identifier columns with no predictive signal, plus the target itself.
bank_data = bank.drop(columns=['RowNumber', 'CustomerId', 'Surname', 'Exited'])

geography_dict = {'France' : 0, 'Spain' : 1, 'Germany' : 2}
gender_dict = {'Male' : 0, 'Female' : 1}

# Encode the two categorical columns as integers. Vectorized .map() replaces
# the original per-row chained assignment (bank_data['Geography'][i] = ...),
# which triggered SettingWithCopyWarning and did O(n) Python-level work.
bank_data['Geography'] = bank_data['Geography'].map(geography_dict)
bank_data['Gender'] = bank_data['Gender'].map(gender_dict)
bank_data

# + id="VT6kV1VpmNei"
X_train, X_test, y_train, y_test = train_test_split(bank_data, bank_target, test_size=0.33, random_state=42)

# + id="G0M4U7YTsPbZ"
from sklearn.tree import DecisionTreeClassifier


# + id="3jWrlVX_ojW9"
class ImplementationOfRandomForest:
    """Random forest built by bagging sklearn decision trees.

    Each tree is trained on a bootstrap sample of the rows and considers
    only k = round(sqrt(d)) randomly chosen features at every split.
    Prediction is by majority vote over the ensemble.

    Parameters
    ----------
    N : int
        Number of trees in the ensemble.
    max_depth : int
        Maximum depth of each tree (stopping criterion).
    min_objects : int
        Minimum number of samples required at a leaf node.
    """

    def __init__(self, N, max_depth, min_objects):
        self.N = N
        self.max_depth = max_depth
        self.min_objects = min_objects
        self.forest = []

    def fit(self, X_train, y_train):
        """Train N trees, each on a bootstrap sample of (X_train, y_train).

        X_train must be a DataFrame with a positional (0..n-1) index so the
        bootstrap row labels line up with positions in the y_train array.
        """
        # Heuristic k = sqrt(d): features considered at each split.
        k = round((X_train.shape[1]) ** 0.5)
        for i in range(self.N):
            # Fresh random seed per tree so bootstrap samples differ.
            rnd = random.randint(1, 31 * self.N)
            # Bootstrap: sample rows with replacement, same size as X_train.
            X_prepared = X_train.sample(frac=1, replace=True, random_state=rnd)
            y_prepared = y_train[X_prepared.index]
            tree_forest = DecisionTreeClassifier(max_features=k,
                                                 max_depth=self.max_depth,
                                                 min_samples_leaf=self.min_objects)
            tree_forest.fit(X_prepared, y_prepared)
            self.forest.append(tree_forest)

    def predict(self, X_test):
        """Predict labels for X_test by majority vote; returns a list."""
        vote = []
        result = []
        for tree_forest in self.forest:
            vote.append(tree_forest.predict(X_test))
        for i in range(len(vote[0])):
            vote_tree = [vote[j][i] for j in range(len(vote))]
            a, counts = np.unique(vote_tree, return_counts=True)
            # BUGFIX: the original scan compared counts[j] > max_counts but
            # never updated max_counts, so the LAST distinct label always won
            # regardless of its vote count. np.unique returns labels sorted,
            # so argmax picks the true majority (ties go to the smaller
            # label, matching a correct strict-greater scan).
            result.append(a[np.argmax(counts)])
        return result

    def important_features(self):
        """Return per-feature impurity importances averaged over all trees."""
        vote = []
        result = []
        for tree_forest in self.forest:
            vote.append(tree_forest.feature_importances_)
        for i in range(len(vote[0])):
            total = 0
            for j in range(len(vote)):
                total += vote[j][i]
            result.append(total / len(vote))
        return result


# + id="oMbPYq76IBil"
def best_hyperparameters(X_train, X_test, y_train, y_test):
    """Grid-search forest size, tree depth and leaf size by test accuracy.

    Returns
    -------
    (best_N, best_max_depth, best_min_objects) : tuple of int
        The combination reaching the highest accuracy on (X_test, y_test).
    """
    best_N, best_max_depth, best_min_objects = 0, 0, 0
    best_accuracy_score = 0
    for N in range(10, 50, 10):
        for max_depth in range(1, 10):
            for min_objects in [1, 3, 5, 9, 15]:
                current_forest = ImplementationOfRandomForest(N, max_depth, min_objects)
                # reset_index so bootstrap row labels align with y_train positions.
                current_forest.fit(X_train.reset_index(drop=True), y_train)
                current_y_prediction = current_forest.predict(X_test)
                current_score = accuracy_score(y_test, current_y_prediction)
                if current_score > best_accuracy_score:
                    best_accuracy_score = current_score
                    best_N, best_max_depth, best_min_objects = N, max_depth, min_objects
    return best_N, best_max_depth, best_min_objects


# + id="4pB6gd4E7zIG"
best_N, best_max_depth, best_min_objects = best_hyperparameters(X_train, X_test, y_train, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="BiZTZWnvLt_n" outputId="f28fd0d9-a2f6-4b68-9b49-9739de73c376"
# Retrain with the best hyperparameters and report held-out accuracy.
forest = ImplementationOfRandomForest(best_N, best_max_depth, best_min_objects)
forest.fit(X_train.reset_index(drop=True), y_train)
y_prediction = forest.predict(X_test)
accuracy_score(y_test, y_prediction)

# + colab={"base_uri": "https://localhost:8080/", "height": 357}
id="DQuv2O5NK4hQ" outputId="2cb858df-2fac-4b3a-90ab-a70ce18ee995"
# Rank features by their impurity-based importance averaged across the forest.
pd_imp = pd.DataFrame(forest.important_features())
# Attach human-readable feature names next to the importance scores.
pd_imp.insert(1, 'Features', bank_data.columns)
# Sort descending so the strongest churn indicators appear first.
pd_imp_sort = pd_imp.sort_values(by=0, ascending=False)
pd_imp_sort
Machine Learning/lab4/lab4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from cpso_particle import COMB_Particle from cpso_swarm import COMB_Swarm npart = 2 c1 = 2.1 c2 = 2.2 c3 = 2.3 ndim = 190 alpha = 0.5 test_size = 0.3 x_bounds = (-6.0, 6.0) v_bounds = (-4.0, 0.25) w_bounds = (0.4, 0.9) t_bounds = (0, 3) data_path = 'working_data/prepped_for_classifier/data.csv' target_path = 'working_data/prepped_for_classifier/target.csv' swarm = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size, x_bounds, v_bounds, w_bounds, t_bounds, data_path, target_path) # That's good, the `__init__` method didn't raise any errors. swarm.npart swarm.c1 swarm.c2 swarm.c3 swarm.ndim swarm.alpha swarm.x_bounds swarm.v_bounds swarm.w_bounds swarm.t_bounds swarm.gbest swarm.gbest_counter swarm.gbinary swarm.g_fitness swarm.abest swarm.abinary swarm.a_fitness swarm.swarm swarm.clf swarm.data swarm.data.shape swarm.target swarm.target.shape swarm.X_train swarm.X_train.shape swarm.y_train swarm.y_train.shape swarm.X_test swarm.X_test.shape swarm.y_test swarm.y_test.shape swarm.final_scores # So far so good. All the basic attribute assignments appear to have worked successfully. However, I still need to initialize my particles. 
swarm.initialize_particles() # + active="" # --------------------------------------------------------------------------- # NameError Traceback (most recent call last) # <ipython-input-37-558cb996ec64> in <module>() # ----> 1 swarm.initialize_particles() # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in initialize_particles(self) # 300 """ # 301 for i in range(self.npart): # --> 302 self.swarm.append(COMB_Particle(self.c1, self.c2, self.c3, # 303 self.ndim, self.x_bounds, # 304 self.v_bounds, self.w_bounds)) # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in eval_fitness(self, b) # 501 Raises # 502 ------ # --> 503 None # 504 """ # 505 # clf_perf is the same as Pb in the above equation # # NameError: name 'test_classify' is not defined # - # Oops. In my `eval_fitness()` method, it calls the class method, `test_classify()` without using self. # + active="" # --------------------------------------------------------------------------- # IndexError Traceback (most recent call last) # <ipython-input-36-558cb996ec64> in <module>() # ----> 1 swarm.initialize_particles() # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in initialize_particles(self) # 303 self.ndim, self.x_bounds, # 304 self.v_bounds, self.w_bounds)) # --> 305 f = self.eval_fitness(self.swarm[i].b) # 306 # NOTE REFERENCE : See docstring above. 
# 307 self.swarm[i].p_fitness = f # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in eval_fitness(self, b) # 504 """ # 505 # clf_perf is the same as Pb in the above equation # --> 506 clf_perf = self.test_classify(b) # 507 f = ((self.alpha*clf_perf) # 508 + ( (1-self.alpha) # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in test_classify(self, b) # 463 None # 464 """ # --> 465 scores = cross_val_score(self.clf, self.X_train[:, b], # 466 self.y_train, cv=10) # 467 return scores.mean() # # IndexError: boolean index did not match indexed array along dimension 1; dimension is 190 but corresponding boolean dimension is 5 # - # I forgot that I set small, workable parameters that didn't match the data I passed it. I need to generate some throwaway data of a suitable size. # I'm running into a host of issues because my trash, test data is so unlike what the code is designed for. I'm going to just use the working data and see what happens. # That's good, my initialize particles didn't raise any errors. swarm.swarm swarm.swarm[0] attr = ['x', 'v', 'b', 'pbest', 'pbinary', 'p_fitness', 'w', 'c1', 'c2', 'c3', 'ndim', 'x_bounds', 'v_bounds', 'w_bounds'] for a in attr: print('{:12} : {}'.format(a, str(getattr(swarm.swarm[0], a)))) print('\n' + '-'*80 + '\n') # At a glance, that all looks good. I tested COMB_Particle already so I won't do much thorough testing, but it looks like everything propagated correctly. w should still be 0, because it is initialized in the first run of the actual algorithm in `execute_search()`. I'll double check that all the position vectors are independent copies and not references to the same location. 
# pbest should start equal to x; confirm element-wise equality.
(swarm.swarm[0].x == swarm.swarm[0].pbest).all()

(swarm.swarm[0].b == swarm.swarm[0].pbinary).all()

swarm.swarm[0].x[0]

# Mutate x and confirm pbest does NOT follow — i.e. it is an independent copy,
# not a reference to the same array.
swarm.swarm[0].x[0] = 0

(swarm.swarm[0].x == swarm.swarm[0].pbest).all()

swarm.swarm[0].b[0]

swarm.swarm[0].b[0] = False

(swarm.swarm[0].b == swarm.swarm[0].pbinary).all()

# Restore the original values so the particle state is unchanged.
swarm.swarm[0].x[0] = 5.123637298457574

swarm.swarm[0].b[0] = True

(swarm.swarm[0].x == swarm.swarm[0].pbest).all()

(swarm.swarm[0].b == swarm.swarm[0].pbinary).all()

# Looks good to me. Let's check the other particle and make sure it is different.

swarm.swarm[1]

for a in attr:
    print('{:12} : {}'.format(a, str(getattr(swarm.swarm[1], a))))
    print('\n' + '-'*80 + '\n')

# Again this all looks reasonable. Sanity checks are all good. Everything that should be different from the first particle is different and everything that shouldn't be different is the same.

# Same independent-copy check for the second particle.
(swarm.swarm[1].x == swarm.swarm[1].pbest).all()

(swarm.swarm[1].b == swarm.swarm[1].pbinary).all()

swarm.swarm[1].x[0]

swarm.swarm[1].x[0] = 0

(swarm.swarm[1].x == swarm.swarm[1].pbest).all()

swarm.swarm[1].b[0]

swarm.swarm[1].b[0] = True

(swarm.swarm[1].b == swarm.swarm[1].pbinary).all()

swarm.swarm[1].x[0] = -3.2698038841035957

swarm.swarm[1].b[0] = False

(swarm.swarm[1].x == swarm.swarm[1].pbest).all()

(swarm.swarm[1].b == swarm.swarm[1].pbinary).all()

# Again, all these sanity checks seem to work. I'm not testing very rigorously because I trust the internal workings of the COMB_Particle class based on a previous testing session.

# Things that I am not sure if they worked or not: `eval_fitness()`

# Global/archived best state after initialization.
swarm.gbest

swarm.g_fitness

swarm.gbest_counter

swarm.gbinary

swarm.abest

swarm.a_fitness

swarm.abinary

# Those look correct. I manually matched them to the 1st particle in the list, which certainly has a higher fitness reported by `eval_fitness()` so this assignment of the global and archived bests appears to be working.
# Force the stagnation counter to the threshold and shuffle gbest by hand.
swarm.gbest_counter = 3
swarm.shuffle_gbest()

swarm.gbest

swarm.g_fitness

swarm.gbest_counter

swarm.gbinary

swarm.abest

swarm.a_fitness

swarm.abinary

# Proofreading the code, it seems to do what I expect. This one is harder to prove, but the behavior appears to work. I will go back up to the cell containing a call to `shuffle_gbest()` and keep manually calling it until I get a fitness that is greater than a_fitness so I can see if abest updates to match the new gbest.

# I manually called it until I got tired of it and it didn't ever randomly come up with a position better than the first abest, so I'm going to manually change a_fitness to force the call.

# Lower a_fitness by hand so the next shuffle is forced to archive gbest.
swarm.a_fitness = 0.5

swarm.gbest_counter = 3
swarm.shuffle_gbest()

swarm.gbest

swarm.g_fitness

swarm.gbest_counter

swarm.gbinary

swarm.abest

swarm.a_fitness

swarm.abinary

# That definitely worked!

# I don't need to check `convert_pos_to_binary()` because it is a copy of the method in the COMB_Particle class.

# `eval_fitness()` is mostly dependent on `test_classify()`. However, the actual fitness function can be checked to make sure all my parentheses are in the right place, etc. I also should check that the `np.count_nonzero()` function is doing what I expect. I think I chose it while the binary arrays were still int arrays where members were in {0, 1}. Now they're booleans. I expect, it should still be good.

testa = np.array([True, False, True, True, False])

np.count_nonzero(testa)

# That works fine.

def test_eval_fitness(b):
    """Stand-alone copy of the swarm fitness formula, for manual checking.

    Reads the module-level globals `alpha` and `ndim` (reassigned just
    below), mirroring how eval_fitness uses the swarm attributes.
    """
    # clf_perf stands in for the classifier score Pb in the real method.
    clf_perf = 0.5
    f = ((alpha*clf_perf)
         + ( (1-alpha)
             * ((ndim-np.count_nonzero(b)) / ndim) ) )
    return f

alpha = 0.2
ndim = 5

# This should give f = 0.2 * 0.5 + (1-0.2) * ((5 - 3)/5) = 0.42
test_eval_fitness(testa)

# Excellent, the 4 at the end is almost certainly a floating point error. The equation does what I want it to do. Now I just have to test `test_classify()` to make sure that it is returning what I want it to.

# `test_classify()` is a very simple method.
It runs a 10-fold cross validation on the feature subset it is passed and returns the mean of the 10 scores. Almost all of the work is hidden in an sklearn method. I'll triple check the documentation of the method I'm using, and manually ensure that the correct feature subset is being used. # + active="" # def test_classify(self, b): # """Return a classification performance for a binary position vector. # # Runs a classification with the clf attribute as a classifier and # X_train and y_train as the feature data/correct classifications. # Can be modified to classify any way, as long as it returns a metric # of performance. # # Currently runs a 10-fold cross validation and returns the mean # classification accuracy. # # Parameters # ---------- # b : 1-Dimensional ndarray, size ndim; Holds a binary position vector # where each value ϵ {0, 1} and represents respectively the # exclusion or inclusion of that feature in the subset used for # training. # # Returns # ------- # scores.mean() : The mean of the classification accuracies returned # by the 10-fold cross validation. # # Raises # ------ # None # """ # scores = cross_val_score(self.clf, self.X_train[:, b], # self.y_train, cv=10) # return scores.mean() # # - # `cross_val_score()` takes 3 positional arguments and a number of named arguments. # The first positional argument, `estimator`, is an estimator object implementing `fit` and is used to predict the data. I double checked with the `sklearn.svm.SVC` class, and it does implement `fit`. The second positional argument, `X`, is an array-like object that holds the data to fit. I'm passing a slice of the training data, `X_train`, created by the boolean array, `b`, that is being given to it. I need to double check that this syntax is producing the behavior I want. Finally, the third positional argument is `y`. Actually, I just checked the documentation, `y` is actually a named argument. I should specifically call it out in my call. I'll fix that. 
`y` is another array-like object that holds the target variable to predict. `cv` is a named argument, that if an integer (like 20), then `cross_val_score()` calls a `cv`-Fold Cross-Validation. `cross_val_score()` returns an ndarray of size (`cv`,) holding the scores returned for the cross-validation. In my method, `test_classify()`, `scores.mean()` is calling an internal method from an ndarray to return the arithmetic mean of the values in the array.

# Build a small 5x5 matrix to demonstrate boolean-mask column selection.
testa = np.linspace(1, 25, 25).reshape(5,5)

testa

testb = np.array([False, True, True, False, True])

testb

# So if I slice `testa` with `testb`, it should return the columns starting with 2, 3, and 5.

testa[:, testb]

# Good! Additionally, according to the numpy documentation, slicing returns a view of the original array, not a copy. Thus, I should be careful not to edit anything. However, `cross_val_score()` does not edit the data, it only analyzes it and returns its own results in a separate array. So I'm satisfied with this.
# NOTE(review): this only holds for *basic* slicing — boolean-mask (advanced)
# indexing like testa[:, testb] actually returns a copy, not a view. Harmless
# here since nothing is mutated, but worth confirming against the NumPy docs.

# `final_scores()` is essentially identical to `test_classify()`. I double checked that the variables were all correct, it should be good.

# The only thing left is `execute_search()`. This is difficult to test. I'll walk through the code line by line to make sure the algorithm is doing what I want, and then I'll step it through a couple time steps, one step at a time to see where it's going.

# + active=""
# def execute_search(self):
#     """Execute a full run of the COMB-PSO Algorithm.
#
#     Completes one full run of the COMB-PSO Algorithm using an internal
#     classifier object and a swarm of COMB_Particle objects, returning
#     a 1-Dimensional ndarray containing the best position found
#     by the algorithm.
#
#     NOTE: initialize_particles and initialize_classifer MUST be called
#     before this method will run correctly.
# # Parameters # ---------- # None # # Returns # ------- # None # # Raises # ------ # None # """ # for i in range(1, t_bounds[1]): # self.t = i # for p in swarm: # p.update_inertia(self.gbinary) # p.update_velocity(self.gbest, self.abest) # p.update_position() # p.update_binary_position() # f = self.eval_fitness(p.b) # if f > p.p_fitness: # p.pbest = p.x.copy() # p.pbinary = p.b.copy() # p.p_fitness = f # if f > self.g_fitness: # # -1 because the counter should be 0 during the # # next comparison to updated positions. The other # # 2 resets (shuffle_gbest and init) happen after # # the "self.gbest_counter += 1" line. This one # # happens before. # self.gbest_counter = -1 # self.gbest = p.x.copy() # self.gbinary = p.b.copy() # self.g_fitness = f # if self.g_fitness > self.a_fitness: # self.abest = self.gbest.copy() # self.abinary = self.gbinary.copy() # self.a_fitness = self.g_fitness # self.gbest_counter += 1 # if self.gbest_counter >= 3: # self.shuffle_gbest() # - # Okay, here we go. # # **Lines 24-25:** sets up the for loop and updates the time variable. This for loop steps the entire swarm through time. Each iteration is one step in time. Each step, the swarm must: # # * move # * check fitness # * update best positions # # It runs from t=1 (t=0 is initial state) until the maximum time specified in the second (max) argument of `t_bounds`. This means that the time variable is stepped forward, *Then* the state is updated. So if I want to report the state of the swarm at t, then the reporting needs to happen at the end of the loop. # # **Line 26:** sets up the second for loop. This for loop steps through every particle in the swarm. Each iteration is one particle moving through one step in time. For each step, the next particle must: # # * update inertia # * update velocity # * update position # * report best variables # # The loop runs through every `p` (particle) in `swarm`. Oops, that should be `self.swarm`. I'll change it. 
# # **Lines 27-30:** These lines call the current particle's update methods. These should be in the correct order. The convert to binary method depends on an updated position. The position equation depends on an updated velocity. The velocity equation depends on an updated inertia. The inertia equation depends on the previous particle binary position and the previous global best binary position. So, the order of update should be: # # 1. Previous time step's particle's `pbinary` and swarm's `gbinary`. # 2. Inertia # 3. Velocity # 4. Position # 5. Binary Position. # # Which is definitely the case! # # **Line 31:** This line calls the `eval-fitness()` method on the current particle's new binary position. It assigns it to the local variable, f. This allows the algorithm to perform multiple comparisons without recalculating every time. # # **Lines 32-35:** This is an if statement. The condition for execution is that the current particle's current position fitness is better than the current particle's stored best fitness. If the new fitness is better, then the current position is better than the particle's stored best position. Thus, it should be updated. The next three lines update `pbest`, `pbinary`, and `p_fitness` with copies of the current values. This is VITAL that the stored state is updated with copies, NOT with recalculations. The binary vectors are calculated using a random value. Thus, identical positions may give different binary vectors. However, I want my algorithm to return a static, predictable value that can be mapped to the returned fitness and used in future research. So I need to know the exact binary vector that produced these results. Additionally, I want to reduce computational time so copying is better than recalculating. # # **Lines 36-45:** This is a carbon copy of Lines 32-35 with one extra line. The comparison is with the swarm attribute, `g_fitness` (and associated state variables). The extra line is to update the counter. 
If `gbest` stagnates for 3 iterations, it is archived in abest and then shuffled (using the `shuffle_gbest()` method). The comment explains why the counter is set to -1 instead of 0. # # **Lines 46-49:** This is similar to the two previous sections. It updates the archived best position/binary/fitness. However, it is Outside the particle for loop because it is only dependent on the global best of the entire swarm. Thus, rather than repeating it for every particle, this code waits until the global best is established, then performs a single check/update to save time. # # **Line 50:** This line merely increments the `gbest_counter`. This should be done every time the swarm moves through a single time step. # # **Lines 51-52:** This if statement tests for stagnation. If the global best has not changed in 3 iterations, it is archived into abest and then randomly reinitialized to add stochasticity and "shake up" the swarm's velocity. It also has a significant effect on the inertia. A conceptual manual step by step is below to check values of 0, -1, and 3 depending on where you are in the code. # ### Double check for the stagnation values # #### Case 1: Stagnation # t = 0 (Initialization): counter is initialized at 0. gbest has existed *through* 0 updates of position (it was created after this first random positioning). # # t = 1: counter is 0 at the beginning of the loop, counter is 1 at the end of the loop. At the check, gbest has not changed through 1 update of position (update from random init to t=1). # # t = 2: counter is 1 at the beginning of the loop, gbest did not change, counter is 2 at the end of the loop. At the check, gbest has not changed through 2 updates of position. # # t = 3: counter is 2 at the beginning of the loop, gbest did not change, counter is 3 at the end of the loop. At the check, gbest has not changed through 3 updates of position. Shuffle triggers. counter is 0 at the end of the loop. 
gbest has existed through 0 updates of position (it was just randomized).
#
# t = 4: counter is 0 at the beginning of the loop, gbest did not change, counter is 1 at the end of the loop. At the check, gbest has not changed through 1 update of position.
#
# ------------------------------------------------------------------------------------
#
# #### Case 2: Reset
#
# t = 0 (Initialization): counter is initialized at 0. gbest has existed *through* 0 updates of position (it was created after this first random positioning).
#
# t = 1: counter is 0 at the beginning of the loop, counter is 1 at the end of the loop. At the check, gbest has not changed through 1 update of position (update from random init to t=1).
#
# t = 2: counter is 1 at the beginning of the loop, gbest did not change, counter is 2 at the end of the loop. At the check, gbest has not changed through 2 updates of position.
#
# t = 3: counter is 2 at the beginning of the loop, gbest changes, counter is set to -1, counter is 0 (increment statement). At the check, gbest has not changed through 0 updates of position.
#
# t = 4: counter is 0 at the beginning of the loop, gbest did not change, counter is 1 at the end of the loop. At the check, gbest has not changed through 1 update of position.

# Good! This looks to be the type of behavior I'm looking for. I double checked that all the method arguments are correct and that all the comparisons are comparing correctly. I think I trust this about as much as I can where I am the only evaluator. I'll step the swarm through a couple of time steps and do some sanity checks.
# Re-create a fresh swarm with the same smoke-test configuration as before.
npart = 2
c1 = 2.1
c2 = 2.2
c3 = 2.3
ndim = 190
alpha = 0.5
test_size = 0.3
x_bounds = (-6.0, 6.0)
v_bounds = (-4.0, 0.25)
w_bounds = (0.4, 0.9)
t_bounds = (0, 3)
data_path = 'working_data/prepped_for_classifier/data.csv'
target_path = 'working_data/prepped_for_classifier/target.csv'

swarm = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size,
                   x_bounds, v_bounds, w_bounds, t_bounds,
                   data_path, target_path)

swarm.initialize_particles()

swarm.t_bounds

# Shorten the run to a single time step for a quick end-to-end test.
swarm.t_bounds = (0, 2)

swarm.execute_search()

# + active=""
# ---------------------------------------------------------------------------
# NameError                                 Traceback (most recent call last)
# <ipython-input-195-1ef6310b14dc> in <module>()
# ----> 1 swarm.execute_search()
#
# ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in execute_search(self)
#     341         None
#     342         """
# --> 343         for i in range(1, t_bounds[1]):
#     344             self.t = i
#     345             for p in self.swarm:
#
# NameError: name 't_bounds' is not defined
# -

# I missed a `self`.

# After fixing the library, inspect the swarm state before and after a search.
swarm.t

swarm.swarm

swarm.g_fitness

swarm.swarm[0].p_fitness

swarm.swarm[1].p_fitness

swarm.execute_search()

swarm.g_fitness

swarm.swarm[0].p_fitness

swarm.swarm[1].p_fitness

# All of that looks good. It looks like it's progressing. I think I'm going to do a run and see what happens.
# Full-scale run: 100 particles, 50 time steps, equal acceleration
# coefficients and a higher alpha (weights classification accuracy more
# heavily than subset size in the fitness function).
npart = 100
c1 = 2.1
c2 = 2.1
c3 = 2.1
ndim = 190
alpha = 0.8
test_size = 0.3
x_bounds = (-6.0, 6.0)
v_bounds = (-4.0, 0.25)
w_bounds = (0.4, 0.9)
t_bounds = (0, 50)
data_path = 'working_data/prepped_for_classifier/data.csv'
target_path = 'working_data/prepped_for_classifier/target.csv'

# + 
# swarm = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size,
#                    x_bounds, v_bounds, w_bounds, t_bounds,
#                    data_path, target_path)

# + 
# swarm.initialize_particles()

# + 
# swarm.execute_search()

# + active=""
# ---------------------------------------------------------------------------
# ValueError                                Traceback (most recent call last)
# <ipython-input-121-1ef6310b14dc> in <module>()
# ----> 1 swarm.execute_search()
#
# ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in execute_search(self)
#     348             p.update_position()
#     349             p.update_binary_position()
# --> 350             f = self.eval_fitness(p.b)
#     351             if f > p.p_fitness:
#     352                 p.pbest = p.x.copy()
#
# ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in eval_fitness(self, b)
#     504         """
#     505         # clf_perf is the same as Pb in the above equation
# --> 506         clf_perf = self.test_classify(b)
#     507         f = ((self.alpha*clf_perf)
#     508              + ( (1-self.alpha)
#
# ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in test_classify(self, b)
#     464         """
#     465         scores = cross_val_score(self.clf, self.X_train[:, b],
# --> 466                                  y=self.y_train, cv=10)
#     467         return scores.mean()
#     468
#
# ~/anaconda3/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch)
#     340                                 n_jobs=n_jobs, verbose=verbose,
#     341                                 fit_params=fit_params,
# --> 342                                 pre_dispatch=pre_dispatch)
#     343     return cv_results['test_score']
#     344
#
# ~/anaconda3/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score)
#     204             fit_params, return_train_score=return_train_score,
#     205             return_times=True)
# --> 206
for train, test in cv.split(X, y, groups)) # 207 # 208 if return_train_score: # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in __call__(self, iterable) # 777 # was dispatched. In particular this covers the edge # 778 # case of Parallel used with an exhausted iterator. # --> 779 while self.dispatch_one_batch(iterator): # 780 self._iterating = True # 781 else: # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in dispatch_one_batch(self, iterator) # 623 return False # 624 else: # --> 625 self._dispatch(tasks) # 626 return True # 627 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in _dispatch(self, batch) # 586 dispatch_timestamp = time.time() # 587 cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self) # --> 588 job = self._backend.apply_async(batch, callback=cb) # 589 self._jobs.append(job) # 590 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py in apply_async(self, func, callback) # 109 def apply_async(self, func, callback=None): # 110 """Schedule a func to be run""" # --> 111 result = ImmediateResult(func) # 112 if callback: # 113 callback(result) # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py in __init__(self, batch) # 330 # Don't delay the application, to avoid keeping the input # 331 # arguments in memory # --> 332 self.results = batch() # 333 # 334 def get(self): # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in __call__(self) # 129 # 130 def __call__(self): # --> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items] # 132 # 133 def __len__(self): # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in <listcomp>(.0) # 129 # 130 def __call__(self): # --> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items] # 132 # 133 def __len__(self): # # 
~/anaconda3/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, error_score) # 456 estimator.fit(X_train, **fit_params) # 457 else: # --> 458 estimator.fit(X_train, y_train, **fit_params) # 459 # 460 except Exception as e: # # ~/anaconda3/lib/python3.6/site-packages/sklearn/svm/base.py in fit(self, X, y, sample_weight) # 147 self._sparse = sparse and not callable(self.kernel) # 148 # --> 149 X, y = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr') # 150 y = self._validate_targets(y) # 151 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_X_y(X, y, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, warn_on_dtype, estimator) # 571 X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite, # 572 ensure_2d, allow_nd, ensure_min_samples, # --> 573 ensure_min_features, warn_on_dtype, estimator) # 574 if multi_output: # 575 y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False, # # ~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator) # 468 " a minimum of %d is required%s." # 469 % (n_features, shape_repr, ensure_min_features, # --> 470 context)) # 471 # 472 if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig: # # ValueError: Found array with 0 feature(s) (shape=(160, 0)) while a minimum of 1 is required. # - # At some point, for some reason, the eval_fitness method got passed a binary array with all False. I'm going to add some code to check for that and stop cleanly so that I can investigate. 
# + active="" # I added # # if np.count_nonzero(p.b) == self.ndim: # print('All False Binary Array at time: {}'.format(self.t)) # print('index of p: {}'.format(self.swarm.index(p))) # return # # before "f = self.eval_fitness(p.b)" # + # swarm = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size, # x_bounds, v_bounds, w_bounds, t_bounds, # data_path, target_path) # + # swarm.initialize_particles() # + # swarm.execute_search() # + active="" # --------------------------------------------------------------------------- # ValueError Traceback (most recent call last) # <ipython-input-119-1ef6310b14dc> in <module>() # ----> 1 swarm.execute_search() # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in execute_search(self) # 352 print('index of p: {}'.format(self.swarm.index(p))) # 353 return # --> 354 f = self.eval_fitness(p.b) # 355 if f > p.p_fitness: # 356 p.pbest = p.x.copy() # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in eval_fitness(self, b) # 508 """ # 509 # clf_perf is the same as Pb in the above equation # --> 510 clf_perf = self.test_classify(b) # 511 f = ((self.alpha*clf_perf) # 512 + ( (1-self.alpha) # # ~/Programs/cfs/cfs-reu/pso/cpso_swarm.py in test_classify(self, b) # 468 """ # 469 scores = cross_val_score(self.clf, self.X_train[:, b], # --> 470 y=self.y_train, cv=10) # 471 return scores.mean() # 472 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch) # 340 n_jobs=n_jobs, verbose=verbose, # 341 fit_params=fit_params, # --> 342 pre_dispatch=pre_dispatch) # 343 return cv_results['test_score'] # 344 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score) # 204 fit_params, return_train_score=return_train_score, # 205 return_times=True) # --> 206 for train, test in cv.split(X, y, groups)) # 
207 # 208 if return_train_score: # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in __call__(self, iterable) # 777 # was dispatched. In particular this covers the edge # 778 # case of Parallel used with an exhausted iterator. # --> 779 while self.dispatch_one_batch(iterator): # 780 self._iterating = True # 781 else: # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in dispatch_one_batch(self, iterator) # 623 return False # 624 else: # --> 625 self._dispatch(tasks) # 626 return True # 627 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in _dispatch(self, batch) # 586 dispatch_timestamp = time.time() # 587 cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self) # --> 588 job = self._backend.apply_async(batch, callback=cb) # 589 self._jobs.append(job) # 590 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py in apply_async(self, func, callback) # 109 def apply_async(self, func, callback=None): # 110 """Schedule a func to be run""" # --> 111 result = ImmediateResult(func) # 112 if callback: # 113 callback(result) # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py in __init__(self, batch) # 330 # Don't delay the application, to avoid keeping the input # 331 # arguments in memory # --> 332 self.results = batch() # 333 # 334 def get(self): # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in __call__(self) # 129 # 130 def __call__(self): # --> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items] # 132 # 133 def __len__(self): # # ~/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in <listcomp>(.0) # 129 # 130 def __call__(self): # --> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items] # 132 # 133 def __len__(self): # # 
~/anaconda3/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, error_score) # 456 estimator.fit(X_train, **fit_params) # 457 else: # --> 458 estimator.fit(X_train, y_train, **fit_params) # 459 # 460 except Exception as e: # # ~/anaconda3/lib/python3.6/site-packages/sklearn/svm/base.py in fit(self, X, y, sample_weight) # 147 self._sparse = sparse and not callable(self.kernel) # 148 # --> 149 X, y = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr') # 150 y = self._validate_targets(y) # 151 # # ~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_X_y(X, y, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, warn_on_dtype, estimator) # 571 X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite, # 572 ensure_2d, allow_nd, ensure_min_samples, # --> 573 ensure_min_features, warn_on_dtype, estimator) # 574 if multi_output: # 575 y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False, # # ~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator) # 468 " a minimum of %d is required%s." # 469 % (n_features, shape_repr, ensure_min_features, # --> 470 context)) # 471 # 472 if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig: # # ValueError: Found array with 0 feature(s) (shape=(160, 0)) while a minimum of 1 is required. # - # The same error happened and my code didn't trigger so I didn't ID the bug correctly. I'll leave the detection code in though. Maybe the array with 0 features is in the data arrays, not the subset? 
swarm.X_train swarm.y_train swarm.y_train.shape swarm.t # Dang it, I messed up my error reporter. `np.count_nonzero()` returns number of `True` values in the array. So it should trigger if it is equal to 0, not `self.ndim`. I'll fix and try again. # + active="" # Fixed it to: # # if np.count_nonzero(p.b) == 0: # print('All False Binary Array at time: {}'.format(self.t)) # print('index of p: {}'.format(self.swarm.index(p))) # return # + # swarm = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size, # x_bounds, v_bounds, w_bounds, t_bounds, # data_path, target_path) # + # swarm.initialize_particles() # + # swarm.execute_search() # + active="" # All False Binary Array at time: 2 # index of p: 92 # + # swarm.swarm[92].b # + active="" # array([False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False, False, 
False, False, False, False, False, False, False, # False, False, False, False, False, False, False, False, False, # False]) # + # swarm.swarm[92].x # + active="" # array([-4.64818611, -3.62157461, -4.1202559 , -4.39905422, -0.97758956, # -5.09274813, -2.67323161, -3.84548314, -4.38768838, -3.01846374, # -3.71510302, -5.4674244 , -4.92595152, -2.62222098, -4.50612815, # -6. , -2.23466941, -5.48997166, -5.68036448, -5.00379626, # -1.94537345, -6. , -3.50160634, -6. , -3.73965313, # -5.75 , -5.33677081, -3.63463852, -6. , -4.61797446, # -4.76638556, -4.50542255, -2.65217644, -3.02306214, -5.71284723, # -5.90422188, -2.30497214, -5.11002454, -3.37564289, -1.62789692, # -2.14823599, -4.36176958, -3.73719674, -5.450959 , -5.55243124, # -4.38262978, -5.73237549, -3.77878997, -5.83654013, -5.06286995, # -2.05695052, -1.42169399, -2.64501254, -6. , -2.85144567, # -5.75 , -5.8831365 , -4.85645521, -0.18304774, -4.53658094, # -3.92417358, -4.48551069, -6. , -4.77548031, -4.45714535, # -1.00827048, -4.87394768, -2.48824524, -4.16512754, -4.90084314, # -4.79802879, -6. , -5.75 , -4.31493617, -2.90577575, # -3.47253942, -6. , -6. , -3.14790402, -5.61402612, # -2.20161263, -3.53843054, -6. , -3.24356209, -3.78443114, # -3.34156586, -3.09270286, -2.90416452, -6. , -5.68111591, # -2.15202541, -6. , -4.06631662, -4.07553927, -5.28380018, # -6. , -2.03364405, -5.48631784, -5.16148745, -2.09344597, # -6. , -3.50887384, -2.28832748, -4.56468131, -6. , # -4.44526201, -4.12967049, -4.89863329, -3.26637154, -3.52670011, # -5.46837907, -3.83171192, -5.74473155, -6. , -2.59526682, # -6. , -3.34841755, -6. , -5.42334894, -5.21262147, # -6. , -4.13265687, -5.45951758, -6. , -5.48398331, # -3.30059638, -2.11999601, -2.17996479, -4.81857601, -2.90491116, # -6. , -0.50411578, -0.71860724, -2.6818376 , -5.20329051, # -2.5745986 , -5.81362119, -3.3208862 , -3.72065773, -5.6946677 , # -6. , -6. , -3.55986992, -5.07623907, -5.9902029 , # -0.4881174 , -4.80586727, -6. , -6. 
, -4.48725864, # -2.67591446, -5.37409819, -3.4973477 , -6. , -5.34818748, # -6. , -4.83769415, -5.1204695 , -2.84761686, -3.68393234, # -6. , -5.02484908, -3.67449262, -5.97278626, -5.07643408, # -5.51940655, -2.17703706, -4.03876517, -6. , -2.70283718, # -6. , -6. , -5.79983096, -6. , -3.22782775, # -4.57126983, -1.64273515, -3.19757863, -3.77165188, -4.39571111, # -5.16751685, -3.85981301, -5.66840311, -5.43180279, -4.29124068, # -4.2978648 , -6. , -4.91013763, -6. , -5.70091647]) # - # I think I know what's happening. b is constructed from a sigmoid function and a randomly chosen comparison number. Thus, it is totally feasible, especially given a large swarm/end time for a binary vector to accidentally become all False. However, this breaks the classifier. So, I should add code to catch this case in the fitness function where if an all False array is given, the fitness is 0. swarm = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size, x_bounds, v_bounds, w_bounds, t_bounds, data_path, target_path) swarm.initialize_particles() swarm.execute_search() swarm.t swarm.a_fitness np.count_nonzero(swarm.abinary) swarm.final_eval() swarm.final_scores swarm.final_scores.mean() from sklearn.model_selection import cross_val_score test = cross_val_score(swarm.clf, swarm.X_train[:, swarm.abinary], swarm.y_train, cv=10) test swarm.clf.fit(swarm.X_train[:, swarm.abinary], swarm.y_train) swarm.clf.score(swarm.X_test[:, swarm.abinary], swarm.y_test) swarm2 = COMB_Swarm(npart, c1, c2, c3, ndim, alpha, test_size, x_bounds, v_bounds, w_bounds, t_bounds, data_path, target_path) swarm2.initialize_particles() swarm2.execute_search() swarm2.a_fitness np.count_nonzero(swarm2.abinary) swarm2.clf.fit(swarm2.X_train[:, swarm2.abinary], swarm2.y_train) swarm2.clf.score(swarm2.X_train[:, swarm2.abinary], swarm2.y_train) swarm2.clf.score(swarm2.X_test[:, swarm2.abinary], swarm2.y_test) # Well, in both test cases, no obvious errors occurred and the model Way overfit to the training data. 
Only giving back 2 features that did better than 80% accuracy on the training data, and as good as or worse than the baseline for the test data. # I think I'm done with this notebook. The code appears to run correctly and actual experiments can be done in other scripts or notebooks.
pso/COMB_Swarm_Testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Model analysis
#
# Look at some misclassified ECG examples to see whether there is a pattern
# in the misclassifications:

from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
from random import choice
from sklearn.metrics import classification_report
import seaborn as sns

# Pre-trained models saved by the training notebooks.
# NOTE(review): ANN_1 is loaded but never used below — confirm whether it is
# still needed, otherwise the load can be dropped.
ANN_1 = keras.models.load_model("../models/ANN_1")
CNN_1 = keras.models.load_model("../models/CNN_1")

# MIT-BIH heartbeat dataset: one beat per row, class label in the last column.
mit_test = pd.read_csv("../data/mitbih_test.csv", header=None)
mit_train = pd.read_csv("../data/mitbih_train.csv", header=None)

X_train = mit_train.iloc[:,:-1]
y_train = mit_train.iloc[:,-1]
X_test = mit_test.iloc[:,:-1]
y_test = mit_test.iloc[:,-1]

# Predicted class = argmax over the network's per-class outputs.
CNN_predict = pd.Series(CNN_1.predict(X_test).argmax(1))

# Confusion matrix: rows = true class ("reel"), columns = predicted class.
pd.crosstab(y_test, CNN_predict, rownames = ["reel"], colnames = ["predict"])

print(classification_report(y_test, CNN_predict))

# True vs predicted labels side by side, used to sample examples below.
CNN_output = pd.concat([y_test.astype(int), CNN_predict], axis=1)
CNN_output.columns = ["reel", "predict_CNN"]

# Recall is worst for classes 1 and 3.
# Look at examples of class-1 ECGs classified as 0 by the CNN, then at
# correctly classified class-1 (and class-0) examples:

# +
fig,ax = plt.subplots(3,3, figsize = (16,9))
sns.set_theme()

# Row 0: class-1 beats misclassified as class 0 (drawn in red).
for i in range(3):
    indice_0 = choice(CNN_output[(CNN_output["reel"] == 1) & (CNN_output["predict_CNN"] == 0)].index)
    ax[0,i].plot(X_test.iloc[indice_0], c="red")
    ax[0,i].get_xaxis().set_visible(False)
    ax[0,i].get_yaxis().set_visible(False)
    ax[0,1].set_title("Exemples d'ECG de classe 1 classés en 0 par le CNN", fontsize = 15)
    # Annotate each subplot with the sampled row index.
    ax[0,i].text(160, 0.9, indice_0)

# Row 1: class-1 beats correctly classified (green).
for i in range(3):
    indice_1 = choice(CNN_output[(CNN_output["reel"] == 1) & (CNN_output["predict_CNN"] == 1)].index)
    ax[1,i].plot(X_test.iloc[indice_1], c="green")
    ax[1,i].get_xaxis().set_visible(False)
    ax[1,i].get_yaxis().set_visible(False)
    ax[1,1].set_title("Exemples d'ECG de classe 1 correctement classés en 1 par le CNN", fontsize = 15)
    ax[1,i].text(160, 0.9, indice_1)

# Row 2: class-0 beats correctly classified (green), as a visual baseline.
for i in range(3):
    indice_2 = choice(CNN_output[(CNN_output["reel"] == 0) & (CNN_output["predict_CNN"] == 0)].index)
    ax[2,i].plot(X_test.iloc[indice_2], c="green")
    ax[2,i].get_xaxis().set_visible(False)
    ax[2,i].get_yaxis().set_visible(False)
    ax[2,1].set_title("Exemples d'ECG de classe 0 correctement classés en 0 par le CNN", fontsize = 15)
    ax[2,i].text(160, 0.9, indice_2)
# -

# The model does detect the large T waves characteristic of some class-1
# beats. However, some beats with large T waves are labelled 0 while the
# algorithm classifies them as 1.
notebooks/3 - Analyse_modeles.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/abidshafee/python_tips/blob/main/String_Operations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

# Demo string used throughout this notebook.
text = "A CS School Blog article"

print(len(text))  # number of characters, spaces included

# ### **lower() and upper() and capitalize() method**

print(text.lower())       # all lowercase copy

print(text.upper())       # all uppercase copy

print(text.capitalize())  # first char upper, remainder lower

# ### **Splitting String**

# split() with no args splits on runs of whitespace and returns a list.
list_str = text.split()
print(list_str)
print(type(list_str))

# maxsplit limits the number of splits, counted from the left...
print(text.split(sep=' ', maxsplit=2))

# ...while rsplit counts its splits from the right.
print(text.rsplit(sep=' ', maxsplit=2))

# ### **Split Line**

text3 = "This is a\nCS School blog\nArticle"
print(text3)

# splitlines() splits on line boundaries, dropping the '\n' characters.
print(text3.splitlines())

# ### **Slicing | String Index**

# Indexing: strings and lists both support positive and negative indices.
print(text[0])       # first character
print(text[-1])      # last character
print(list_str[0])   # first list item
print(list_str[-1])  # last item

# Slicing: end index is exclusive for both lists and strings.
print(list_str[0:2])  # slicing list
print(text[0:3])      # slicing string

# ### **count() Method [case sensitive]**

print(text.count('S'))

print(text.count('CS'))

# ### **Replace() Method**

# replace() returns a NEW string; the result is not assigned here,
# so `text` itself is unchanged (demonstrated by the next print).
text.replace('Blog', 'CompSci')

print(text)

# ### **Strip() Method**

# NOTE: the `+ ''` term is a no-op; only the leading space and trailing
# newline are actually added.
text2 = ' ' + text + '' + '\n'
print(text2)

# rstrip() removes trailing whitespace (the '\n' here).
print(text2.rstrip())

# ### **Separator.join(iterable)**
# join returns a string after performing operation

list_str

# lets join on the list
print(' '.join(list_str))

# Or, with a different separator:
print(' + '.join(list_str))

# ### **Find() Method**

# find() is case sensitive and returns -1 when not found
# ('compSci' never made it into `text` — replace() above was not assigned).
print(text.find('compSci'))

print(text.find('CS'))  # index of first occurrence

# ### **Formatted String**

greet = 'Hello'
blog = 'CS School'
# str.format substitutes positional arguments into '{}' placeholders.
f_str = '{} there, Welcome to {}!'.format(greet, blog)
print(f_str)

# f-strings: same result, expressions embedded directly.
print(f'{greet} there, Welcome to {blog}!')

# Any expression is allowed inside the braces, e.g. a method call.
print(f'{greet.upper()} there, Welcome to {blog}!')

print(dir(text))   # every attribute/method of str

print(help(str))   # full built-in documentation for str

# ### **Startswith**

text.startswith('CS')     # False: text starts with 'A '

text.endswith('article')  # True
String_Operations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: smdenv
#     language: python
#     name: smdenv
# ---

# # Load the training Data
# * Set DATA_SMI same as data.ipynb
# > This is done to filter generated molecules from trained ones
# * Set NB_MOL to the number of expected new molecules

DATA_SMI = '/home/ansary/RESEARCH/nCov/pySMD/data/sample_dataset_c_34_128.smi'  # @param

# Read the training SMILES once.  A set gives O(1) membership tests in the
# generation loop below (a list made every `not in` check O(n)).
with open(DATA_SMI) as f:
    train_smiles = {s.rstrip() for s in f}

NB_MOL = 100  # @param  number of novel molecules to attempt to generate

# # Load Trained Model
# * Download the trained weights from drive and place them in **weights** folder

# +
import os

from coreLib.models import LSTM_Chem, generate
from coreLib.utils import SmilesTokenizer, check_smi
from rdkit import Chem
# Explicit import: `Chem.Draw` is a submodule and is not guaranteed to be
# loaded by `from rdkit import Chem` alone.
from rdkit.Chem import Draw
from tqdm.notebook import tqdm

tokenizer = SmilesTokenizer()

TOKENIZER_TABLE_LEN = len(tokenizer.table)
WEIGHT_PATH = os.path.join(os.getcwd(), 'weights', 'LSTM_CHEM.h5')

# 256 hidden units; output dimension matches the tokenizer vocabulary.
model = LSTM_Chem(256, TOKENIZER_TABLE_LEN)
model.load_weights(WEIGHT_PATH)
print('Loaded Model Weights')
# -

# # Generate New Molecules

new_smiles = []
print('Generating New Molecules')
for _ in tqdm(range(NB_MOL)):
    smile = 'G'  # 'G' is the generation start token
    smi = generate(model, smile, tokenizer)
    smi = check_smi(smi)  # returns None for chemically invalid SMILES
    # Keep only valid molecules that were not in the training set.
    if smi is not None and smi not in train_smiles:
        new_smiles.append(smi)

generated = [Chem.MolFromSmiles(x) for x in new_smiles]
Draw.MolsToGridImage(generated, molsPerRow=3, maxMols=NB_MOL, subImgSize=(400, 400))
gen_lstm_chem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Run a "Control" and a "Treatment" (conductance-scaled) simulation of a
# cardiac-cell model under a voltage-clamp protocol, compute windowed
# per-current contributions, and plot the comparison.

# +
import os, sys, time, copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import pickle

sys.path.append('../')
sys.path.append('../Lib')
sys.path.append('../Models')
sys.path.append('../Protocols')

from cell_models import kernik, protocols, paci_2018

import mod_protocols
import protocol_lib
import mod_kernik as kernik
import mod_trace as trace

from Models.br1977 import BR1977
from ord2011 import ORD2011
import model_response

# +
# Experiment configuration --------------------------------------------------
is_optimized_protocol = False          # True: load a GA-optimized protocol from disk
trial_conditions = "ORD2011_Leem_v1"   # model name is the first '_'-separated token
if is_optimized_protocol:
    trial_conditions = "ORD2011_504_101_4_-121_61_10_5"

prestep = 5000       # ms of holding before the protocol starts (settle to steady state)
window = 10          # ms window used for contribution calculation
step_size = 5        # ms step between successive windows
holding_step = 500
only_end = False
with_artefact = False

model_name = trial_conditions.split('_')[0]
path_to_data = f"ga_results/{trial_conditions}"
if not os.path.exists(path_to_data):
    os.makedirs(path_to_data)

# +
# Build (or load) the voltage-clamp protocol --------------------------------
final_protocol = None
if is_optimized_protocol:
    # Pick the pickled protocol matching the current settings.
    # NOTE(review): if several files match, the last directory entry wins —
    # confirm this is intended.
    files = os.listdir(path_to_data)
    for f in files:
        if ('pkl' in f) and (f'p{prestep}' in f) and (f'h{holding_step}' in f) and (f'oe{only_end}' in f):
            file_name = f
    print(file_name)
    final_protocol = pickle.load(open(f"{path_to_data}/{file_name}", 'rb'))
else:
    # Hand-written protocol: holding steps, activation steps, and a closing ramp.
    final_protocol = protocol_lib.VoltageClampProtocol()
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=-80, duration=100))
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=-90, duration=100))
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=-80, duration=100))
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=-35, duration=40))
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=-80, duration=200))
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=-40, duration=40))
    # TODO(review): purpose of this 0 mV step was questioned in the original
    # notebook ("why??") — confirm it belongs in the protocol.
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=0, duration=40))
    final_protocol.add(protocol_lib.VoltageClampStep(voltage=40, duration=500))
    # Closing ramp from +40 mV down to -120 mV.
    final_protocol.add(protocol_lib.VoltageClampRamp(voltage_start=40, voltage_end=-120, duration=200))

print(f'The protocol is {final_protocol.get_voltage_change_endpoints()[-1]} ms')

# +
# Control simulation --------------------------------------------------------
start_time = time.time()

# Default current set; overridden per model below where the model exposes
# a different set of currents.
currents = ['I_Na', 'I_Kr', 'I_Ks', 'I_To', 'I_CaL', 'I_K1', 'I_NaL']
tr = None
if model_name == 'ORD2011':
    model = ORD2011(final_protocol, is_exp_artefact=with_artefact)
    tr = model_response.get_model_response_JK(model, final_protocol, prestep=prestep)
elif model_name == 'OHara2017':
    model = '../mmt-model-files/ohara-cipa-v1-2017_VC.mmt'
    tr = model_response.get_model_response_with_myokit(model, final_protocol, prestep=prestep)
elif model_name == 'BR1977':
    currents = ['I_Na', 'I_si', 'I_K1', 'I_x1']
    model = BR1977(final_protocol)
    tr = model_response.get_model_response_JK(model, final_protocol, prestep=prestep)
elif model_name == 'Kernik':
    currents = ['I_Na', 'I_Kr', 'I_Ks', 'I_To', 'I_F', 'I_CaL', 'I_K1']
    k = kernik.KernikModel(is_exp_artefact=with_artefact)
    tr = k.generate_response(final_protocol, is_no_ion_selective=False)

# Windowed contribution of each current to the total ionic current.
current_contributions = tr.current_response_info.get_current_contributions(
    time=tr.t, window=window, step_size=step_size)
max_contributions = tr.current_response_info.get_max_current_contributions(
    tr.t, window=window, step_size=step_size)
print("--- %s seconds ---" % (time.time() - start_time))

# +
# Treatment simulation (scaled conductances) --------------------------------
start_time = time.time()

tr2 = None
if model_name == 'ORD2011':
    model = ORD2011(final_protocol, is_exp_artefact=with_artefact)
    model.change_cell(1)
    # Conductance scaling for the "treatment": only IK1 is reduced (to 10%).
    model.ina.G_adj = 1
    model.ikr.G_adj = 1
    model.iks.G_adj = 1
    model.ito.G_adj = 1
    model.ical.G_adj = 1
    model.ik1.G_adj = 0.1
    model.inal.G_adj = 1
    tr2 = model_response.get_model_response_JK(model, final_protocol, prestep=prestep)
elif model_name == 'OHara2017':
    model = '../mmt-model-files/ohara-cipa-v1-2017_VC.mmt'
    tr2 = model_response.get_model_response_with_myokit(model, final_protocol, prestep=prestep)
elif model_name == 'BR1977':
    model = BR1977(final_protocol)
    tr2 = model_response.get_model_response_JK(model, final_protocol, prestep=prestep)
elif model_name == 'Kernik':
    k = kernik.KernikModel(is_exp_artefact=with_artefact)
    tr2 = k.generate_response(final_protocol, is_no_ion_selective=False)

# BUG FIX: the treatment contributions must be computed from tr2's response;
# the original read tr.current_response_info (copy-paste from the cell above),
# making "Treatment" contributions identical to "Control".
current_contributions2 = tr2.current_response_info.get_current_contributions(
    time=tr2.t, window=window, step_size=step_size)
max_contributions2 = tr2.current_response_info.get_max_current_contributions(
    tr2.t, window=window, step_size=step_size)
print("--- %s seconds ---" % (time.time() - start_time))
# -


def plot_1d(ax, data, title=None, colors=None, ylabel=None, xlim=None, ylim=None):
    """Plot one or more two-key dicts on a single matplotlib axis.

    Each dict's first key holds the x values; its second key holds the y
    values, and that key's name is used as the legend label.  Styling
    (labels, ticks, spines, legend) is applied uniformly.  Returns the axis.
    """
    for i, d in enumerate(data):
        keys = list(d.keys())
        c = colors[i] if colors else None
        ax.plot(d[keys[0]], d[keys[1]], label=keys[1], color=c, linewidth=2)
    if title:
        # BUG FIX: the original called ax.set_title(ylabel) here, ignoring
        # the `title` argument entirely.
        ax.set_title(title)
    if xlim:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim:
        ax.set_ylim(ylim[0], ylim[1])
    if ylabel:
        ax.set_ylabel(ylabel, fontsize=14)
    ax.set_xlabel('Time (ms)', fontsize=14)
    ax.tick_params(axis="x", labelsize=14)
    ax.tick_params(axis="y", labelsize=14)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.legend(loc=2, prop={'size': 14})
    return ax


# Inspect the window where the selected current contributes most -----------
current_name = 'I_Kr'  # any entry of `currents`
max_cont = max_contributions[max_contributions['Current'] == current_name]
max_cont

start_time = max_cont['Time Start'].values[0]
current_contributions[current_contributions['Time Start'] == start_time]

current_contributions2[current_contributions2['Time Start'] == start_time]

# NOTE: "Contrl" typo kept — it is an existing printed label.
print("Contrl :", current_contributions[current_contributions['Time Start'] == start_time][current_name].values[0])
print("Treatment :", current_contributions2[current_contributions2['Time Start'] == start_time][current_name].values[0])

# +
# Figure 1: voltage, total current, selected current, contributions --------
fig, axes = plt.subplots(4, 1, figsize=(15, 15))
fig.suptitle(model_name, fontsize=14)

xlim = None  # e.g. (start_time - 10, start_time + 20) to zoom in
ylim = None  # e.g. (-30, 3)

plot_1d(axes[0],
        [{'x': tr.t, 'Voltage': final_protocol.get_voltage_clamp_protocol(tr.t)}],
        ylabel='Voltage (mV)', colors=['k', 'r'], xlim=xlim, ylim=None)
plot_1d(axes[1],
        [{'x': tr.t, 'Control': tr.current_response_info.get_current_summed()},
         {'x': tr2.t, 'Treatment': tr2.current_response_info.get_current_summed()}],
        ylabel='I_Ion (nA/nF)', colors=['k', 'r'], xlim=xlim, ylim=ylim)
plot_1d(axes[2],
        [{'x': tr.t, 'Control': tr.current_response_info.get_current([current_name])},
         {'x': tr2.t, 'Treatment': tr2.current_response_info.get_current([current_name])}],
        ylabel=f'{current_name} (nA/nF)', colors=['k', 'r'], xlim=xlim, ylim=ylim)
# ("Constributions" typo kept as-is: it is a rendered axis label.)
plot_1d(axes[3],
        [{'x': current_contributions['Time Mid'], 'Control': current_contributions[current_name]},
         {'x': current_contributions2['Time Mid'], 'Treatment': current_contributions2[current_name]}],
        ylabel='Constributions (%)', colors=['k', 'r'], xlim=xlim, ylim=(0, 1))

# Shade the maximum-contribution window of the selected current (control run).
contribution = max_contributions[max_contributions["Current"] == current_name]['Contribution'].values[0]
start = max_contributions[max_contributions["Current"] == current_name]['Time Start'].values[0]
end = max_contributions[max_contributions["Current"] == current_name]['Time End'].values[0]
for i in range(4):
    axes[i].axvspan(start, end, color='g', alpha=0.4)

# Overlay the per-current segments of an optimized protocol, when one is used.
if is_optimized_protocol:
    total_duration = holding_step
    for i, name in enumerate(currents):
        short_protocol = pickle.load(open(
            f"{path_to_data}/short_{name}_p{prestep}_oe{only_end}_a{with_artefact}.pkl", 'rb'))
        for j in range(4):
            axes[j].axvspan(total_duration,
                            total_duration + short_protocol.get_voltage_change_endpoints()[-1],
                            color='b' if name == current_name else 'k', alpha=.1)
        total_duration += short_protocol.get_voltage_change_endpoints()[-1] + holding_step

plt.subplots_adjust(left=0.07, bottom=0.05, right=0.9, top=0.96, wspace=0.5, hspace=0.11)
plt.show()
if is_optimized_protocol:
    fig.savefig(f"{path_to_data}/{trial_conditions}_h{holding_step}_p{prestep}_oe{only_end}_a{with_artefact}-{current_name}.jpg", dpi=200)
else:
    fig.savefig(f"{path_to_data}/{trial_conditions}_p{prestep}_a{with_artefact}-{current_name}.jpg", dpi=200)
plt.close(fig)
# -

# +
# Figure 2: every current's trace, colored by its windowed contribution ----
fig, axes = plt.subplots(len(currents) + 3, 1, figsize=(15, 30))
fig.suptitle(model_name, fontsize=14)

xlim = None
ylim = None

plot_1d(axes[0],
        [{'x': tr.t, 'Voltage': final_protocol.get_voltage_clamp_protocol(tr.t)}],
        ylabel='Voltage (mV)', colors=['k', 'r'], xlim=xlim, ylim=None)
plot_1d(axes[1],
        [{'x': tr.t, 'Control': tr.current_response_info.get_current_summed()},
         {'x': tr2.t, 'Treatment': tr2.current_response_info.get_current_summed()}],
        ylabel='I_Ion (nA/nF)', colors=['k', 'r'], xlim=xlim, ylim=ylim)

total_duration = holding_step
for i, name in enumerate(currents):
    ax = axes[i + 2]
    if is_optimized_protocol:
        short_protocol = pickle.load(open(
            f"{path_to_data}/short_{name}_p{prestep}_oe{only_end}_a{with_artefact}.pkl", 'rb'))
        for j in range(len(currents) + 3):
            axes[j].axvspan(total_duration,
                            total_duration + short_protocol.get_voltage_change_endpoints()[-1],
                            color='b' if i + 2 == j else 'k', alpha=.1)
        total_duration += short_protocol.get_voltage_change_endpoints()[-1] + holding_step

    # Color each sample by this current's contribution in the nearest window.
    color_li = []
    for t in tr.t:
        idx = current_contributions['Time Mid'].sub(t).abs().idxmin()
        color_li.append(current_contributions[name].loc[idx])
    ax_im = ax.scatter(tr.t, tr.current_response_info.get_current([name]),
                       c=color_li, cmap=cm.copper, vmin=0, vmax=1, label=[name, name])

    # Shade this current's maximum-contribution window.
    contribution = max_contributions[max_contributions["Current"] == name]['Contribution'].values[0]
    start = max_contributions[max_contributions["Current"] == name]['Time Start'].values[0]
    end = max_contributions[max_contributions["Current"] == name]['Time End'].values[0]
    ax.axvspan(start, end, color='g', alpha=0.4)

    ax.set_xlabel('Time (ms)', fontsize=14)
    ax.set_ylabel(f'{name} (nA/nF)', fontsize=14)
    ax.tick_params(axis="x", labelsize=14)
    ax.tick_params(axis="y", labelsize=14)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.text(0.01, 0.8, name, transform=ax.transAxes, fontsize=14,
            verticalalignment='top', c='k',
            bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.2))
    if xlim:
        # BUG FIX: the original referenced `ax_temp` and `model_scipy`,
        # neither of which exists in this notebook (NameError whenever a
        # zoom window was set).
        ax.set_xlim(xlim[0], xlim[1])
    if ylim:
        ax.set_ylim(ylim[0], ylim[1])

fig = plt.gcf()
# Shared colorbar for the contribution-colored scatter plots.
cbar_ax = fig.add_axes([0.92, 0.08, 0.05, 0.85])
fig.colorbar(ax_im, cax=cbar_ax)
plt.subplots_adjust(left=0.07, bottom=0.05, right=0.9, top=0.96, wspace=0.5, hspace=0.15)
plt.show()
if is_optimized_protocol:
    fig.savefig(f"{path_to_data}/{trial_conditions}_h{holding_step}_p{prestep}_oe{only_end}_a{with_artefact}-2.jpg", dpi=200)
else:
    fig.savefig(f"{path_to_data}/{trial_conditions}_p{prestep}_a{with_artefact}-2.jpg", dpi=200)
plt.close(fig)
# -

print("Complete")
Optimize_protocol/show_results2.ipynb