repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
ToqueWillot/M2DAC
FDMS/TME2/TME2FDMS_Florian_Toque.ipynb
gpl-2.0
%matplotlib inline import sklearn import matplotlib.pyplot as plt import seaborn as sns import numpy as np import random import copy from sklearn.datasets import fetch_mldata from sklearn import cross_validation from sklearn import base from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet import matplotlib.pyplot as plt """ Explanation: FDMS TME2 Florian Toque In this notebook we will train classifier optimizing accuracy and reducing the number of features used at the same time. We will compare L1 and L2 classification and confirm our classifier performance with sklearn Lasso and ElasticNet classifiers. End of explanation """ ds = sklearn.datasets.make_classification(n_samples=2500, n_features=30, # 30 features n_informative=8, # only 8 informatives ones n_redundant=0, n_repeated=2, # and 2 duplicate n_classes=2, n_clusters_per_class=1, weights=None, flip_y=0.03, class_sep=0.8, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None) X= ds[0] y= ds[1] # labels: [0,1] -> [-1,1] for idx,i in enumerate(y): if (i==0): y[idx]=-1 print(X[0]) print(y[0]) """ Explanation: Data generation End of explanation """ class GradientDescent(base.BaseEstimator): def __init__(self,theta,lamb,eps): self.theta=theta self.eps=eps self.lamb=lamb self.used_features=len(theta) def fit(self,X,y,nbIt=1000,printevery=-1): l=len(X) xTrans = X.transpose() for i in xrange(0,nbIt): #index = np.random.randint(l) loss = np.dot(X, self.theta) - y cost = np.sum(loss ** 2) * (1 / l) + (self.lamb*np.linalg.norm(self.theta)) gradient = np.dot(xTrans,(np.dot(self.theta,xTrans)-y)) if i%(nbIt/100)==0: thetaprime = self.theta - self.eps * (np.sign(theta)*self.lamb) else: thetaprime = self.theta - self.eps * gradient for k in xrange(0,len(theta)): self.theta[k] = 0 if thetaprime[k]*theta[k]<0 else thetaprime[k] if printevery!=-1 and i%printevery==0: print("Iteration %s | Cost: %f | Score: %.03f" % (str(i).ljust(6), cost,self.score(X,y))) ttt = 
self.nb_used_features() print("%d features used"%(ttt)) self.used_features=ttt elif i%1000==0: ttt = self.nb_used_features() self.used_features=ttt def predict(self,x): ret=[] for i in x: ret.append(1 if np.dot(i,self.theta)>0 else -1) return ret def score(self,X,y): cpt=0.0 allpred = self.predict(X) for idx,i in enumerate(allpred): cpt += 1 if i==y[idx] else 0 return cpt/len(X) def nb_used_features(self): cpt=0 for ii in self.theta: if ii==0: cpt+=1 return len(self.theta)-cpt theta = copy.deepcopy(X[0]) lamb=500 eps=0.00001 gd = GradientDescent(theta,lamb,eps) nbIterations = 5000 gd.fit(X,y,nbIterations,printevery=nbIterations/10) scores = cross_validation.cross_val_score(gd, X, y, cv=5,scoring="accuracy") print("Cross validation scores: %s, mean: %.02f"%(scores,np.mean(scores))) """ Explanation: L1 Advantage: good features selection L1 gradient pseudocode End of explanation """ eps=0.00001 la = [] cross_sc = [] used_features = [] for lamb in np.arange(0,5000,200): theta = copy.deepcopy(X[0]) gd = GradientDescent(theta,lamb,eps) nbIterations = 4000 gd.fit(X,y,nbIterations) scoresSvm = cross_validation.cross_val_score(gd, X, y, cv=5,scoring="accuracy") print("Lamda: %s | Cross val mean: %.03f | Features: %d"%(str(lamb).ljust(5),np.mean(scoresSvm),gd.used_features)) #print("Lamda: %.02f | Cross val mean: %.02f | Features: %d"%(lamb,gd.score(X,y),gd.used_features)) cross_sc.append(np.mean(scoresSvm)) la.append(lamb) used_features.append(gd.used_features) fig, ax1 = plt.subplots() ax2 = ax1.twinx() ax1.plot(la, cross_sc, '#6DC433') ax2.plot(la, used_features, '#5AC8ED') ax1.set_xlabel('lambda') ax1.set_ylabel('Cross val score', color='#6DC433') ax2.set_ylabel('Nb features used', color='#5AC8ED') ax1.yaxis.grid(False) ax2.grid(False) plt.show() """ Explanation: Selecting lambda We have only 8 informatives features over 30, and 2 repeated. We thrive to reach this number of features, while keeping a good classification score. 
End of explanation """ class GradientDescentL2(base.BaseEstimator): def __init__(self,theta,lamb,eps): self.theta=theta self.eps=eps self.lamb=lamb self.used_features=len(theta) def fit(self,X,y,nbIt=1000,printevery=-1): l=len(X) xTrans = X.transpose() for i in xrange(0,nbIt): index = np.random.randint(l) loss = np.dot(X, self.theta) - y cost = np.sum(loss ** 2) * (1 / l) + (self.lamb*np.linalg.norm(self.theta))**2 gradient = np.dot(xTrans,(np.dot(self.theta,xTrans)-y)) if i%(nbIt/100)==0: thetaprime = self.theta - self.eps * (np.sign(theta)*self.lamb) else: thetaprime = self.theta - self.eps * gradient for k in xrange(0,len(theta)): self.theta[k] = 0 if thetaprime[k]*theta[k]<0 else thetaprime[k] if printevery!=-1 and i%printevery==0: print("Iteration %s | Cost: %f | Score: %.03f" % (str(i).ljust(6), cost,self.score(X,y))) ttt = self.nb_used_features() print("%d features used"%(ttt)) self.used_features=ttt elif i%1000==0: ttt = self.nb_used_features() self.used_features=ttt def predict(self,x): ret=[] for i in x: ret.append(1 if np.dot(i,self.theta)>0 else -1) return ret def score(self,X,y): cpt=0.0 allpred = self.predict(X) for idx,i in enumerate(allpred): cpt += 1 if i==y[idx] else 0 return cpt/len(X) def nb_used_features(self): cpt=0 for ii in self.theta: if ii==0: cpt+=1 return len(self.theta)-cpt """ Explanation: We can see that only 8 features are usefull (score 87%), with more features we can make overfitting and scores are only 2% better. L2 The difference between the L1 and L2 regularization is that L1 work on the sum of the weights, and L2 work on the sum of the square of the weights, and is therefor more sensitive to outliers. 
Advantage: good predictions with significants constraints End of explanation """ ds = sklearn.datasets.make_classification(n_samples=200, n_features=30, # 30 features n_informative=8, # only 8 informatives ones n_redundant=0, n_repeated=2, # and 2 duplicate n_classes=2, n_clusters_per_class=1, weights=None, flip_y=0.01, class_sep=0.8, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None) X= ds[0] y= ds[1] # labels: [0,1] -> [-1,1] for idx,i in enumerate(y): if (i==0): y[idx]=-1 theta = copy.deepcopy(X[0]) lamb=1000 eps=0.00001 gd = GradientDescentL2(theta,lamb,eps) #gd.tmp nbIterations = 5000 gd.fit(X,y,nbIterations,printevery=nbIterations/10) scores = cross_validation.cross_val_score(gd, X, y, cv=5,scoring="accuracy") print("Cross validation scores: %s, mean: %.02f"%(scores,np.mean(scores))) """ Explanation: Test with only 200 samples End of explanation """ eps=0.00001 la = [] cross_sc = [] used_features = [] for lamb in np.arange(0,4000,200): theta = copy.deepcopy(X[0]) gd = GradientDescentL2(theta,lamb,eps) nbIterations = 5000 gd.fit(X,y,nbIterations) scoresSvm = cross_validation.cross_val_score(gd, X, y, cv=5,scoring="accuracy") print("Lamda: %s | Cross val mean: %.03f | Features: %d"%(str(lamb).ljust(5),np.mean(scoresSvm),gd.used_features)) cross_sc.append(np.mean(scoresSvm)) la.append(lamb) used_features.append(gd.used_features) fig, ax1 = plt.subplots() ax2 = ax1.twinx() ax1.plot(la, cross_sc, '#6DC433') ax2.plot(la, used_features, '#5AC8ED') ax1.set_xlabel('lambda') ax1.set_ylabel('Cross val score', color='#6DC433') ax2.set_ylabel('Nb features used', color='#5AC8ED') ax1.yaxis.grid(False) ax2.grid(False) plt.show() """ Explanation: We can see that L2 is more sensitive to outliers because it chooses 7 usefull features against 8 for L1 Selecting lambda Similar to L1 End of explanation """ #used to cross-val on lasso and elastic-net def scorer(estimator, X, y): pred = estimator.predict(X) cpt=0.0 for idx,i in enumerate(pred): if i<0: cpt += 1 
if y[idx]==-1 else 0 else: cpt += 1 if y[idx]==1 else 0 return cpt/len(y) lass = Lasso(alpha = 0.2) lass.fit(X,y) scores = cross_validation.cross_val_score(lass, X, y, cv=5,scoring=scorer) print("Cross validation scores: %s, mean: %.02f"%(scores,np.mean(scores))) print(lass.coef_) print("Feature used: %d"%np.count_nonzero(lass.coef_)) eps=0.00001 la = [] cross_sc = [] used_features = [] for lamb in np.arange(0.05,1.05,0.05): theta = copy.deepcopy(X[0]) gd = Lasso(alpha = lamb) nbIterations = 4000 gd.fit(X,y) scoresSvm = cross_validation.cross_val_score(gd, X, y, cv=5,scoring=scorer) print("Lamda: %s | Cross val mean: %.03f | Features: %d"%(str(lamb).ljust(5),np.mean(scoresSvm),np.count_nonzero(gd.coef_))) #print("Lamda: %.02f | Cross val mean: %.02f | Features: %d"%(lamb,gd.score(X,y),gd.used_features)) cross_sc.append(np.mean(scoresSvm)) la.append(lamb) used_features.append(np.count_nonzero(gd.coef_)) fig, ax1 = plt.subplots() ax2 = ax1.twinx() ax1.plot(la, cross_sc, '#6DC433') ax2.plot(la, used_features, '#5AC8ED') ax1.set_xlabel('lambda') ax1.set_ylabel('Cross val score', color='#6DC433') ax2.set_ylabel('Nb features used', color='#5AC8ED') ax1.yaxis.grid(False) ax2.grid(False) plt.show() """ Explanation: L2 chooses less features than L1, and keeps a good score. 
Evaluation using sklearn Lasso Sklearn's Lasso works the same, although way faster, and a lambda 0 < λ < 1 is more practical End of explanation """ lass = ElasticNet(alpha = 0.2, l1_ratio=0) lass.fit(X,y) scores = cross_validation.cross_val_score(lass, X, y, cv=5,scoring=scorer) print("Cross validation scores: %s, mean: %.02f"%(scores,np.mean(scores))) print("Feature used: %d"%np.count_nonzero(lass.coef_)) lass = ElasticNet(alpha = 0.2, l1_ratio=0.5) lass.fit(X,y) scores = cross_validation.cross_val_score(lass, X, y, cv=5,scoring=scorer) print("Cross validation scores: %s, mean: %.02f"%(scores,np.mean(scores))) print("Feature used: %d"%np.count_nonzero(lass.coef_)) lass = ElasticNet(alpha = 0.2, l1_ratio=1) lass.fit(X,y) scores = cross_validation.cross_val_score(lass, X, y, cv=5,scoring=scorer) print("Cross validation scores: %s, mean: %.02f"%(scores,np.mean(scores))) print("Feature used: %d"%np.count_nonzero(lass.coef_)) """ Explanation: Comparaison of L1 and L2 using sklearn ElasticNet End of explanation """ eps=0.00001 la = [] cross_sc = [] used_features = [] for lamb in np.arange(0.05,1.05,0.05): theta = copy.deepcopy(X[0]) gd = ElasticNet(alpha = 0.2, l1_ratio=lamb) nbIterations = 4000 gd.fit(X,y) scoresSvm = cross_validation.cross_val_score(gd, X, y, cv=5,scoring=scorer) print("Lamda: %s | Cross val mean: %.03f | Features: %d"%(str(lamb).ljust(5),np.mean(scoresSvm),np.count_nonzero(gd.coef_))) #print("Lamda: %.02f | Cross val mean: %.02f | Features: %d"%(lamb,gd.score(X,y),gd.used_features)) cross_sc.append(np.mean(scoresSvm)) la.append(lamb) used_features.append(np.count_nonzero(gd.coef_)) fig, ax1 = plt.subplots() ax2 = ax1.twinx() ax1.plot(la, cross_sc, '#FF9900') ax2.plot(la, used_features, '#9933FF') ax1.set_xlabel('L1 L2 ratio') ax1.set_ylabel('Cross val score', color='#FF9900') ax2.set_ylabel('Nb features used', color='#9933FF') ax1.yaxis.grid(False) ax2.grid(False) plt.show() """ Explanation: We observe that, as expected, the more we take L1 into 
account the less features are used. End of explanation """
jsafyan/style-transfer-theano
src/vgg19/neural-style-transfer.ipynb
mit
from NeuralStyle import NeuralStyleTransfer """ Explanation: Neural Style Transfer End of explanation """ content_path = 'images/tubingen.jpg' style_path = 'images/starry_night.jpg' nst = NeuralStyleTransfer(content_path, style_path, image_w=300, image_h=300) """ Explanation: Tutorial Specify the paths for the content and style Class defaults: image_w = 500 width (in pixels) of the generated image image_h = 500 height (in pixels) of the generated image style_weight=2e5 weight on style content_weight=0.001 weight on content. This ratio deviates from Gatys et al. (who used 1e-3 and 1-4 rather than 1e-8) End of explanation """ nst.fit(iterations=75, save_every_n=10, optimizer='adam') adam_losses = nst.losses import matplotlib.pyplot as plt %matplotlib inline plt.imshow(nst.final_image()) """ Explanation: Optimization There are two optimizers available, L-BFGS and Adam. L-BFGS converges more quickly on lower costs, but 1) requires more memory 2) goes out to the CPU. Adam has the advantage that is is lower memory and is in Theano so it stays on the GPU, but it can require more/different tuning to achieve comparable results (and often fails to achieve as good results as L-BFGS) Adam: End of explanation """ nst_lbfgs = NeuralStyleTransfer(content_path, style_path, image_w=300, image_h=300) nst_lbfgs.fit(iterations=75, save_every_n=10, optimizer='l-bfgs') processed = [nst.deprocess(x) for x in nst.xs] processed_lbfgs = [nst_lbfgs.deprocess(x) for x in nst_lbfgs.xs] # len(nst.xs) fig,(ax1,ax2) = plt.subplots(1,2,figsize = (10,5)) ax1.imshow(nst.final_image()) ax1.axis('off') ax1.set_title('Adam') ax2.imshow(nst_lbfgs.final_image()) ax2.axis('off') ax2.set_title('L-BFGS') plt.tight_layout() plt.savefig('Adam_vs_LBFGS.png') %matplotlib inline plt.plot(nst.losses,label = 'Adam') plt.plot(nst_lbfgs.losses,label = 'LBFGS') plt.legend() plt.xlim(0,75) plt.ylim(0,200000) plt.xlabel('Number of Optimization Iterations') plt.ylabel('Loss') plt.title('Comparing Optimization Methods 
Adam vs LBFGS ') plt.savefig('Optimization Comparisons.png') import matplotlib.pyplot as plt %matplotlib inline plt.imshow(nst.final_image()) """ Explanation: L-BFGS: End of explanation """
ML4DS/ML4all
TM3.Topic_Models_with_MLlib/TM3_TMwithMLlib.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import pylab # Required imports from wikitools import wiki from wikitools import category # import nltk import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from test_helper import Test import collections from pyspark.mllib.clustering import LDA, LDAModel from pyspark.mllib.linalg import Vectors # import gensim # import numpy as np # import lda # import lda.datasets """ Explanation: Topic Modelling with MLlib Author: Jesús Cid Sueiro Date: 2016/04/10 In this notebook we will explore the utilitis for Topic Modelling available on MLlib. End of explanation """ site = wiki.Wiki("https://en.wikipedia.org/w/api.php") # Select a category with a reasonable number of articles (>100) cat = "Economics" # cat = "Pseudoscience" print cat """ Explanation: 1. Corpus acquisition. In this notebook we will explore some tools for text processing and analysis and two topic modeling algorithms available from Python toolboxes. To do so, we will explore and analyze collections of Wikipedia articles from a given category, using wikitools, that makes easy the capture of content from wikimedia sites. (As a side note, there are many other available text collections to test topic modelling algorithm. In particular, the NLTK library has many examples, that can explore them using the nltk.download() tool. import nltk nltk.download() for instance, you can take the gutemberg dataset Mycorpus = nltk.corpus.gutenberg text_name = Mycorpus.fileids()[0] raw = Mycorpus.raw(text_name) Words = Mycorpus.words(text_name) Also, tools like Gensim or Sci-kit learn include text databases to work with). In order to use Wikipedia data, we will select a single category of articles: End of explanation """ # Loading category data. This may take a while print "Loading category data. This may take a while..." 
cat_data = category.Category(site, cat) corpus_titles = [] corpus_text = [] for n, page in enumerate(cat_data.getAllMembersGen()): print "\r Loading article {0}".format(n + 1), corpus_titles.append(page.title) corpus_text.append(page.getWikiText()) n_art = len(corpus_titles) print "\nLoaded " + str(n_art) + " articles from category " + cat """ Explanation: You can try with any other categories. Take into account that the behavior of topic modelling algorithms may depend on the amount of documents available for the analysis. Select a category with at least 100 articles. You can browse the wikipedia category tree here, https://en.wikipedia.org/wiki/Category:Contents, for instance. We start downloading the text collection. End of explanation """ # n = 5 # print corpus_titles[n] # print corpus_text[n] """ Explanation: Now, we have stored the whole text collection in two lists: corpus_titles, which contains the titles of the selected articles corpus_text, with the text content of the selected wikipedia articles You can browse the content of the wikipedia articles to get some intuition about the kind of documents that will be processed. End of explanation """ corpusRDD = sc.parallelize(corpus_text, 4) print "\nRDD created with {0} elements".format(corpusRDD.count()) Test.assertTrue(corpusRDD.count() >= 100, "Your corpus_tokens has less than 100 articles. Consider using a larger dataset") """ Explanation: Now, we will load the text collection into an RDD End of explanation """ # You can comment this if the package is already available. # Select option "d) Download", and identifier "punkt" # nltk.download() """ Explanation: 2. Corpus Processing Topic modelling algorithms process vectorized data. In order to apply them, we need to transform the raw text input data into a vector representation. To do so, we will remove irrelevant information from the text data and preserve as much relevant information as possible to capture the semantic content in the document collection. 
Thus, we will proceed with the following steps: Tokenization Homogeneization Cleaning Vectorization The first three steps are independent for each document, so they can be parallelized. 2.1. Tokenization, Homogeneization and Cleaning. For the first steps, we will use some of the powerfull methods available from the Natural Language Toolkit. In order to use the word_tokenize method from nltk, you might need to get the appropriate libraries using nltk.download(). You must select option "d) Download", and identifier "punkt" End of explanation """ # You can comment this if the package is already available. # Select option "d) Download", and identifier "stopwords" # nltk.download() """ Explanation: Also, we need to load a list of english stopwords. Select now identifier "stopwords" End of explanation """ stopwords_en = stopwords.words('english') print "The stopword list contains {0} elements: ".format(len(stopwords_en)) print stopwords_en """ Explanation: You can check the stopword list. This is a standard python list of strings. We could modify it by removing words or adding new ones if required. 
End of explanation """ def getTokenList(doc, stopwords_en): # scode: tokens = <FILL IN> # Tokenize docs tokens = word_tokenize(doc.decode('utf-8')) # scode: tokens = <FILL IN> # Remove non-alphanumeric tokens and normalize to lowercase tokens = [t.lower() for t in tokens if t.isalnum()] # scode: tokens = <FILL IN> # Remove stopwords tokens = [t for t in tokens if t not in stopwords_en] return tokens Test.assertEquals(getTokenList('The rain in spain stays mainly in the plane', stopwords_en), [u'rain', u'spain', u'stays', u'mainly', u'plane'], 'getTokenList does not return the expected results') """ Explanation: Task: Create a method getTokenList with two inputs: a document (string) and a stopword list, and completes the first three steps of the corpus processing, as follows: Tokenization: convert string to utf-8 and transform the string into a list of tokens, using word_tokenize from nltk.tokenize. Homogeneization: transform capital letters to lowercase and remove non alphanumeric tokens. Cleaning: remove stopwords Return the result of cleaning (a list of tokens). End of explanation """ # scode: corpus_tokensRDD = <FILL IN> corpus_tokensRDD = (corpusRDD .map(lambda x: getTokenList(x, stopwords_en)) .cache()) # print "\n Let's check tokens after cleaning:" print corpus_tokensRDD.take(1)[0][0:30] Test.assertEquals(corpus_tokensRDD.count(), n_art, "The number of documents in the original set does not correspond to the size of corpus_tokensRDD") Test.assertTrue(all([c==c.lower() for c in corpus_tokensRDD.take(1)[0]]), 'Capital letters have not been removed') Test.assertTrue(all([c.isalnum() for c in corpus_tokensRDD.take(1)[0]]), 'Non alphanumeric characters have not been removed') Test.assertTrue(len([c for c in corpus_tokensRDD.take(1)[0] if c in stopwords_en])==0, 'Stopwords have not been removed') """ Explanation: Task: Apply getTokenList to all documents in the corpus and save the result in a corpus_tokensRDD End of explanation """ # Select stemmer. 
stemmer = nltk.stem.SnowballStemmer('english') # scode: corpus_stemRDD = <FILL IN> corpus_stemRDD = corpus_tokensRDD.map(lambda x: [stemmer.stem(token) for token in x]) print "\nLet's check the first tokens from document 0 after stemming:" print corpus_stemRDD.take(1)[0][0:30] Test.assertTrue((len([c for c in corpus_stemRDD.take(1)[0] if c!=stemmer.stem(c)]) < 0.1*len(corpus_stemRDD.take(1)[0])), 'It seems that stemming has not been applied properly') """ Explanation: 2.2. Stemming / Lemmatization Now we will apply stemming and lemmatization to corpus_tokensRDD. We will test our topic models over the resulting RDDs, to test their differences. Task: Apply stemming to all documents corpus_tokensRDD and save the result in a new RDD, corpus_stemmedRDD. End of explanation """ # You can comment this if the package is already available. # Select option "d) Download", and identifier "wordnet" # nltk.download() """ Explanation: Alternatively, we can apply lemmatization. For english texts, we can use the lemmatizer from NLTK, which is based on WordNet. If you have not used wordnet before, you will likely need to download it from nltk End of explanation """ wnl = WordNetLemmatizer() # scode: corpus_lemmatRDD = <FILL IN> corpus_lemmatRDD = (corpus_tokensRDD .map(lambda x: [wnl.lemmatize(token) for token in x])) print "\nLet's check the first tokens from document 0 after stemming:" print corpus_lemmatRDD.take(1)[0][0:30] """ Explanation: Task: Lemmatize all documents corpus_tokensRDD using the .lemmatize() method, from the WordNetLemmatizer object created in the first line and save the result in a new RDD, corpus_lemRDD. 
End of explanation """ # corpus_wcRDD = <FILL IN> corpus_wcRDD = (corpus_stemRDD .map(collections.Counter) .map(lambda x: [(t, x[t]) for t in x])) print corpus_wcRDD.take(1)[0][0:30] Test.assertTrue(corpus_wcRDD.count() == n_art, 'List corpus_clean does not contain the expected number of articles') Test.assertTrue(corpus_wcRDD.flatMap(lambda x: x).map(lambda x: x[1]).sum()== corpus_stemRDD.map(len).sum(), 'The total token count in the output RDD is not consistent with the total number of input tokens') """ Explanation: One of the advantages of the lemmatizer method is that the result of lemmatization is still a true word, which is more advisable for the presentation of text processing results and lemmatization. However, without using contextual information, lemmatize() does not remove grammatical differences. This is the reason why "is" or "are" are preserved and not replaced by infinitive "be". As an alternative, we can apply .lemmatize(word, pos), where 'pos' is a string code specifying the part-of-speech (pos), i.e. the grammatical role of the words in its sentence. For instance, you can check the difference between wnl.lemmatize('is') and wnl.lemmatize('is, pos='v'). 2.4. Vectorization Up to this point, we have transformed the raw text collection of articles in a list of articles, where each article is a collection of the word roots that are most relevant for semantic analysis. Now, we need to convert these data (a list of token lists) into a numerical representation (a list of vectors, or a matrix). 2.4.1. Word Count As a first step, we compute the word count for every document in the corpus. Task: Compute a new RDD from corpus_stemRDD where each element is a list of tuples related to a document. The key of each tuple is a token, and its value the number of occurrences of this token in the document. To do so, you can use method Counter from collections. 
End of explanation """ # scode: wcRDD = < FILL IN > wcRDD = (corpus_wcRDD .flatMap(lambda x: x) .reduceByKey(lambda x, y: x + y)) print wcRDD.take(30) """ Explanation: At this point, we have got a representation of documents as list of tuples (token, word_count) in corpus_wcRDD. From this RDD, we can compute a dictionary containing all tokens in the corpus as keys, and their respective number of occurrences as values. Task: Using corpus_wcRDD compute a new RDD of (key, value) pairs, where keys are the tokens in the whole corpus and their respective values are the total number of occurences in the corpus. End of explanation """ # Token Dictionary: n_tokens = wcRDD.count() # scode: TD = wcRDD.<FILL IN> TD = wcRDD.takeOrdered(n_tokens, lambda x: -x[1]) # scode: D = <FIll IN> # Extract tokens from TD D = map(lambda x: x[0], TD) # scode: token_count = <FILL IN> # Extract token counts from TD token_count = map(lambda x: x[1], TD) # ALTERNATIVELY: TD_RDD = wcRDD.sortBy(lambda x: -x[1]) D_RDD = TD_RDD.map(lambda x: x[0]) token_countRDD = TD_RDD.map(lambda x: x[1]) print TD """ Explanation: Task: Take all tuples in wcRDD in decreasing order of the number of token counts in variable TD and compute two lists: token_count: a list of token counts, in decreasing order. D: A list of tokens, in the same order. 
End of explanation """ # SORTED TOKEN FREQUENCIES (II): # plt.rcdefaults() # Example data n_bins = 25 y_pos = range(n_bins-1, -1, -1) hot_tokens = D[0:n_bins] z = [float(t)/n_art for t in token_count[0:n_bins]] plt.barh(y_pos, z, align='center', alpha=0.4) plt.yticks(y_pos, hot_tokens) plt.xlabel('Average number of occurrences per article') plt.title('Token distribution') plt.show() """ Explanation: We can visualize the token distribution using D and token_count, for the most frequent terms End of explanation """ # INDICE INVERTIDO: EJEMPLO: # D = ['token1', 'token2', 'token3', 'token4'] # D[1] = 'token2' # invD = {'token1': 0, 'token2': 1, 'token3': 2, 'token4': 3} # invD['token2'] = 1 # Compute inverse dictionary # scode: invD = <FILL IN> invD = dict(zip(D, xrange(n_tokens))) ### ALTERNATIVELY: # invD_RDD = D_RDD.zipWithIndex() ### Tuples (token, index) # Compute RDD replacing tokens by token_ids # scode: corpus_sparseRDD = <FILL IN> corpus_sparseRDD = corpus_wcRDD.map(lambda x: [(invD[t[0]], t[1]) for t in x]) # Convert list of tuplas into Vectors.sparse object. corpus_sparseRDD = corpus_sparseRDD.map(lambda x: Vectors.sparse(n_tokens, x)) """ Explanation: 3. Latent Dirichlet Allocation In order to apply the LDA algorithm, we need to represent the input documents in the format required by MLlib. More specifically. The input data should be an RDD where each element is a tuple (doc_id, vector) where doc_id is an integer document identifier, and vector can be a sparse or dense vector from class Vectors. We will use sparse vectors, which are more adequate for large vocabularies. To compute the sparse vectors, we must first transform the lists of tuples (token, value) in wcRDD into a lists of (token_id, value), pairs, thus replacing each token by a numerical identifier. We will proceed in two steps: Compute an inverse dictionary, invD, transforming tokens into numbers. Apply the inverse dictionary to compute a new RDD from wcRDD replacing each token by its token_id. 
[ Task: complete the two steps outlined above. End of explanation """ corpus4lda = corpus_sparseRDD.zipWithIndex().map(lambda x: [x[1], x[0]]).cache() """ Explanation: The only remaining step consists on adding an identifier to each document of the corpus. Task: Apply method zipWithIndex to corpus_sparseRDD in order to add consecutive integer identifiers to all documents in the corpus. End of explanation """ print "Training LDA: this might take a while..." # scode: ldaModel = LDA.<FILL IN> ldaModel = LDA.train(corpus4lda, k=3) """ Explanation: That's all. We can already call to the lda algorithm.' Task: Train an LDA model with 3 topics and the corpus obtained in corpus4lda. Check the LDA documentation to find the appropriate command. End of explanation """ # Output topics. Each is a distribution over words (matching word count vectors) print("Learned topics (as distributions over vocab of " + str(ldaModel.vocabSize()) + " words):") topics = ldaModel.topicsMatrix() """ Explanation: The whole topics matrix can be computed using the .topicsMatrix() method. End of explanation """ n_bins = 25 # Example data y_pos = range(n_bins-1, -1, -1) pylab.rcParams['figure.figsize'] = 16, 8 # Set figure size for i in range(3): topic = ldaModel.describeTopics(maxTermsPerTopic=n_bins)[i] tokens = [D[n] for n in topic[0]] weights = topic[1] plt.subplot(1, 3, i+1) plt.barh(y_pos, weights, align='center', alpha=0.4) plt.yticks(y_pos, tokens) plt.xlabel('Average number of occurrences per article') plt.title('Token distribution') """ Explanation: Alternatively, we can use the .describeTopics method that returns the most relevan terms for each topic, and it is more useful for a graphical plot. Task: Represent the 25 most relevant terms for each topic using bar plots. 
End of explanation """ from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper from pyspark.mllib.linalg.distributed import RowMatrix class SVD(JavaModelWrapper): """Wrapper around the SVD scala case class""" @property def U(self): """ Returns a RowMatrix whose columns are the left singular vectors of the SVD if computeU was set to be True.""" u = self.call("U") if u is not None: return RowMatrix(u) @property def s(self): """Returns a DenseVector with singular values in descending order.""" return self.call("s") @property def V(self): """ Returns a DenseMatrix whose columns are the right singular vectors of the SVD.""" return self.call("V") def computeSVD(row_matrix, k, computeU=False, rCond=1e-9): """ Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) :param k: number of singular values to keep. We might return less than k if there are numerically zero singular values. :param computeU: Whether of not to compute U. If set to be True, then U is computed by A * V * sigma^-1 :param rCond: the reciprocal condition number. All singular values smaller than rCond * sigma(0) are treated as zero, where sigma(0) is the largest singular value. 
:returns: SVD object """ java_model = row_matrix._java_matrix_wrapper.call("computeSVD", int(k), computeU, float(rCond)) return SVD(java_model) from pyspark.ml.feature import * from pyspark.mllib.linalg import Vectors data = [(Vectors.dense([0.0, 1.0, 0.0, 7.0, 0.0]),), (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),), (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)] df = sqlContext.createDataFrame(data,["features"]) pca_extracted = PCA(k=2, inputCol="features", outputCol="pca_features") model = pca_extracted.fit(df) features = model.transform(df) # this create a DataFrame with the regular features and pca_features # We can now extract the pca_features to prepare our RowMatrix. pca_features = features.select("pca_features").rdd.map(lambda row : row[0]) mat = RowMatrix(pca_features) # Once the RowMatrix is ready we can compute our Singular Value Decomposition svd = computeSVD(mat,2,True) print svd.s # DenseVector([9.491, 4.6253]) print svd.U.rows.collect() # [DenseVector([0.1129, -0.909]), DenseVector([0.463, 0.4055]), DenseVector([0.8792, -0.0968])] print svd.V # DenseMatrix(2, 2, [-0.8025, -0.5967, -0.5967, 0.8025], 0) """ Explanation: Exercise: Explore the influence of the topicConcentration parameter. Show in barplots the most relevant tokens for each topic for large values of this parameter. Unfortunately, we cannot capture the document distributions over topics, in the current version of pySpark mllib (1.6). 4. Latent Semantic Indexing LSI is not specifically available in MLlib, There are methods to compute the SVD decomposition of a matrix, which is the core transformation for LSI, but, unfortunately, SVD decomposition is available in Java and Scala, but not in python. The following code, taken from Stackoverflow, can be used to compute the SVD. End of explanation """
sraejones/phys202-2015-work
assignments/assignment05/InteractEx03.ipynb
mit
%matplotlib inline from matplotlib import pyplot as plt import numpy as np from IPython.html.widgets import interact, interactive, fixed from IPython.display import display """ Explanation: Interact Exercise 3 Imports End of explanation """ def soliton(x, t, c, a): """Return phi(x, t) for a soliton wave with constants c and a.""" phi = (0.5 * c)*((1/ np.cosh((c * 0.5)/ 2*(x-c*t-a))**2)) return phi assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5])) """ Explanation: Using interact for animation with data A soliton is a constant velocity wave that maintains its shape as it propagates. They arise from non-linear wave equations, such has the Korteweg–de Vries equation, which has the following analytical solution: $$ \phi(x,t) = \frac{1}{2} c \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right] $$ The constant c is the velocity and the constant a is the initial location of the soliton. Define soliton(x, t, c, a) function that computes the value of the soliton wave for the given arguments. Your function should work when the postion x or t are NumPy arrays, in which case it should return a NumPy array itself. End of explanation """ tmin = 0.0 tmax = 10.0 tpoints = 100 t = np.linspace(tmin, tmax, tpoints) xmin = 0.0 xmax = 10.0 xpoints = 200 x = np.linspace(xmin, xmax, xpoints) c = 1.0 a = 0.0 """ Explanation: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays: End of explanation """ # YOUR CODE HERE phi = np.ones([xpoints,tpoints]) for i in x: for j in t: phi[i,j] = soliton(x[i],t[j],c,a) assert phi.shape==(xpoints, tpoints) assert phi.ndim==2 assert phi.dtype==np.dtype(float) assert phi[0,0]==soliton(x[0],t[0],c,a) """ Explanation: Compute a 2d NumPy array called phi: It should have a dtype of float. It should have a shape of (xpoints, tpoints). 
phi[i,j] should contain the value $\phi(x[i],t[j])$. End of explanation """ def plot_soliton_data(i=0): """Plot the soliton data at t[i] versus x.""" # YOUR CODE HERE plt.plot(soliton(x,t[i],c,a)) plt.xlabel('t[i]') plt.ylabel('x[j]') plt.xlim(0,200) plt.ylim(0,0.55) plt.box(False) plt.title('phi') plot_soliton_data(0) assert True # leave this for grading the plot_soliton_data function """ Explanation: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful. End of explanation """ # YOUR CODE HERE interactive(plot_soliton_data,i=(0,99,1)) assert True # leave this for grading the interact with plot_soliton_data cell """ Explanation: Use interact to animate the plot_soliton_data function versus time. End of explanation """
mbeyeler/opencv-machine-learning
notebooks/08.04-Implementing-Agglomerative-Hierarchical-Clustering.ipynb
mit
from sklearn.datasets import make_blobs

# Generate 10 random 2-D data points grouped into Gaussian blobs;
# random_state=100 makes the sample reproducible across runs.
X, y = make_blobs(n_samples=10, random_state=100)
"""
Explanation: <!--BOOK_INFORMATION--> <a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a> This notebook contains an excerpt from the book Machine Learning for OpenCV by Michael Beyeler. The code is released under the MIT license, and is available on GitHub. Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations. If you find this content useful, please consider supporting the work by buying the book! <!--NAVIGATION--> < Classifying handwritten digits using k-means | Contents | 9. Using Deep Learning to Classify Handwritten Digits > Implementing Agglomerative Hierarchical Clustering Although OpenCV does not provide an implementation of agglomerative hierarchical clustering, it is a popular algorithm that should, by all means, belong to our machine learning repertoire. We start out by generating 10 random data points, just like in the previous figure:
End of explanation
"""
from sklearn import cluster

# Bottom-up (agglomerative) clustering: every point starts as its own
# cluster and the two closest clusters are merged until 3 remain.
agg = cluster.AgglomerativeClustering(n_clusters=3)
"""
Explanation: Using the familiar statistical modeling API, we import the AgglomerativeClustering algorithm and specify the desired number of clusters:
End of explanation
"""
# fit_predict both fits the model to X and returns one cluster label
# (0, 1, or 2) per data point.
labels = agg.fit_predict(X)
"""
Explanation: Fitting the model to the data works, as usual, via the fit_predict method:
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
plt.figure(figsize=(10, 6))
# Color each point by its predicted cluster label.
plt.scatter(X[:, 0], X[:, 1], c=labels, s=100)
"""
Explanation: We can generate a scatter plot where every data point is colored according to the predicted label:
End of explanation
"""
liganega/Gongsu-DataSci
notebooks/GongSu03_Python_DataTypes_Part_1.ipynb
gpl-3.0
print("Hello World") """ Explanation: 파이썬 기본 자료형 1부 파이썬 언어에서 사용되는 값들의 기본 자료형을 살펴본다. 변수에 할당될 수 있는 가장 단순한 자료형에는 네 종류가 있다: 정수 자료형(int): ..., -3, -2, -1, 0, 1, 2, 3, 등등 1 + 2, -2 * 3, 등등 부동소수점 자료형(float): 1.2, 0.333333, -1.2, -3.7680, 등등 2.0/3.5, 3.555 + 3.4 * 7.9, 등등 불리언 자료형(bool): True, False를 포함하여 두 값으로 계산될 수 있는 값 예: 1 == 1, 2 &lt; 3, 1 + 1 &gt; 3 and 2 &lt; 3, 등등 문자열 자료형(str): 'a', 'abc', 'engineering', ... 등등 'abc' * 2, 'engineering' + 'math', 등등 이번 장 주요 내용: 정수, 부동소수점, 불리언 자료형을 소개. 문자열 자료형은 다음 장에서 다룸. 변수에 할당된 값과 그 값의 자료형을 알아내는 데에 사용하는 두 개의 함수의 기본적인 활용법 print() 함수: 변수에 할당된 값을 확인할 때 사용 type() 함수: 값의 자료형을 확인할 때 사용 특정 자료형과 관련하여 많이 사용되는 함수와 메소드 살펴보기 파이썬 명령어 기초 사용법 Spyder, IDLE 등을 사용하여 파이썬 명령어를 실행할 수 있다. 명령 프롬프트(prompt)는 보통 아래의 모양을 갖는다. &gt;&gt;&gt; 또는 In [1]: 파이썬은 "스크립트 언어"에 속한다. 즉, 코드를 작성한 후에 바로 실행시킬 수 있다. C와 Java 등의 언어는 코드를 작성한 후에 코드가 들어 있는 파일을 컴파일하여 생성된 목적코드(object code)를 실행하기 때문에 컴파일 언어라고 불린다. 예를 들어, print() 함수를 이용하여 터미널 화면에 문자열 값을 보여주고 싶다면 단순히 아래와 같이 코드를 작성하고 실행하면 된다. 주의: print는 "출력하다", "화면에 보여주다", "인쇄하다" 등으로 번역한다. 반면에 함수를 정의할 때 사용하는 return은 "값을 돌려준다" "리턴한다" 등으로 번역하여 사용한다. print와 return은 사용 용도다 서로 완전히 다르다. 나중에 차이점을 배우게 된다. End of explanation """ a = 1 + 1 a """ Explanation: 변수를 선언하고 값을 바로 확인할 수 있다. End of explanation """ 2 + 3 a = 2 + 3 a + 1 42 - 15.3 100 * 11 7 / 5 -7/5 7.0 / 5 """ Explanation: 파이썬을 계산기처럼 활용할 수도 있다. End of explanation """ 7//5 7.0//5 -7//5 -7.0//5 """ Explanation: 주의: 파이썬3에서는 나눗셈 연산자(/)는 무조건 부동소수점을 리턴한다. 파이썬2에서는 나눗셈 연산자(/)는 정수 자료형인 경우 몫을 계산한다. 반면에 부동소수점이 사용되면 부동소수점을 리턴한다. ```python In [22]: 7 / 2 Out[22]: 3 In [23]: 7.0 / 2 Out[23]: 3.5 ``` 몫을 계산하는 연산자는 // 이다. End of explanation """ 7%5 -7%5 -7.0%5 """ Explanation: 나머지를 계산하는 연산자는 % 이다. End of explanation """ 2 ** 3 9 ** 0.5 """ Explanation: 지수 계산: 예를 들어, 2의 3승을 계산하고자 할 때 사용한다. End of explanation """ # int a_number = 2 a_number = 2 a_word = 'dog' """ Explanation: 변수 선언 및 활용 컴퓨터 프로그램을 데이터를 이용하여 다음과 같은 일들을 처리하기 위한 명령문들의 나열로 생각할 수 있다. 
* 데이터 읽기 * 데이터 생성하기 * 데이터 계산하기 * 데이터 변환하기 * 데이터 정리하기 * 데이터 저장하기 특정 데이터를 조작하기 위해서는 해당 데이터를 저장하거나 불러올 수 있어야 한다. 그러기 위해서 변수를 활용한다. 변수를 일종의 그릇에 비유할 수 있으며, 변수에 할당된 데이터는 그릇에 담겨진 내용물에 해당한다. 파이썬에서 변수의 이름을 지으려면 기본적으로 세 가지 규칙을 따라야 한다. 반드시 영어 알파벳 문자(a-z,A-Z) 또는 밑줄기호(_)로 시작해야 하며, 이후에는 알파벳, 숫자(0-9), 밑줄기호가 임의로 사용될 수 있다. 파이썬 예약어(def, from, import 등)를 변수 이름으로 사용하면 안된다. 대소문자를 구분해야 한다: 'YOU', 'you', 'You', 'yOu'는 모두 다른 이름으로 처리된다. '-', '+', '*','/' 등의 연산자 기호는 이름에 사용될 수 없다. '@', '$', '?' 등의 기호도 사용되지 않는다. 변수 선언 변수에 특정 값을 할당하는 것을 변수 선언이라 부른다. 변수 선언은 아래 모양을 갖춘다. 변수이름 = 할당할 값 예를 들어 아래에서 a_number라는 변수이름에 정수 2가 할당되었고, a_word 변수에는 dog라는 문자열이 할당되었다. 주의: 변수를 생성하고자 할 때 값을 초기화하면 된다. 즉, 변수를 미리 선언할 필요가 없다. C와 Java와의 주요 차이점 중의 하나이다. 자료형을 선언할 필요가 없다. 변수의 자료형을 파이썬이 알아서 판단한다. 이를 동적 타이핑(dynamic typing)이라 한다. End of explanation """ print(a_number) print(a_word) """ Explanation: 예를 들어, C 언어의 경우 아래와 같이 선언해야 한다. int a_number = 2 char a_word[] = 'dog' 변수에 할당된 값을 확인하기 위해 print() 함수를 이용한다. End of explanation """ type(a_number) type(a_word) """ Explanation: 변수에 할당된 값의 자료형을 확인하려면 type() 함수를 호출한다. End of explanation """ a_number + 7 (a_number * 6.0) / 5 """ Explanation: 선언된 변수를 이용하여 연산을 할 수도 있다. End of explanation """ first_result = 8 / 3.5 first_result """ Explanation: 연산의 결과를 변수에 할당할 수 있다. 해당 변수에는 연산의 결과만을 기억한다. End of explanation """ type(first_result) """ Explanation: 계산된 결과의 자료형도 type() 함수를 이용하여 확인할 수 있다. End of explanation """ "Bull " + a_word a_word * 2 """ Explanation: 문자열의 경우 덧셈과 곱셈 연산자를 사용할 수 있다. End of explanation """ a_number + a_word """ Explanation: 하지만 변수에 할당된 값의 자료형에 따라 연산의 가능여부가 결정된다. 예를 들어, 숫자의 문자열의 합은 정의되어 있지 않으며, 실행할 경우 오류가 발생한다. End of explanation """ print(a_number) a_number = 5 print(a_number) """ Explanation: 주의: 오류 내용을 초보자가 이해하기는 어렵다. 여기서는 자료형이 맞지 않아 오류가 발생할 경우에 TypeError가 발생한다는 사실만을 기억해 두면 좋다. 변수에 할당된 값은 변경이 가능하다. 원래 할당된 값을 변경할 수 있다는 의미에서 변수라 부른다. 변수가 아닌 숫자를 상수라 부른다. End of explanation """ new_float = 4.0 print(new_float) """ Explanation: 기본 자료형 파이썬에는 8개의 자료형이 미리 선언되어 있다. 
그중 네 개는 단순자료형이며, 나머지 네 개는 컬렉션 자료형(모음 자료형)이다. 단순 자료형 하나의 값만을 대상으로 한다는 의미에서 단순 자료형이다. 즉, 정수 하나, 부동소수점 하나, 불리언 값 하나, 문자열 하나 등등. 정수(int) 부동소수점(float) 불리언 값(bool) 문자열(str) 컬렉션 자료형 여러 개의 값들을 하나로 묶어서 다룬다는 의미에서 컬렉션 자료형이다. 리스트(list) 튜플(tuple) 집합(set) 사전(dictionary) 여기서는 단순 자료형을 소개하고, 컬렉션 자료형은 이후에 다룬다. 정수(int) 일반적으로 알고 있는 정수(자연수, 0, 음의 정수)들의 자료형을 나타내면 덧셈, 뺄셈, 곱셈, 나눗셈 등의 일반 연산이 가능하다. 부동소수점(float) 부동소수점은 원래 실수를 컴퓨터에서 다루기 위해 개발되었으나 실제로는 유리수 일부만을 다룬다. 무리수인 원주율 $\pi$의 경우에도 컴퓨터의 한계로 인해 소수점 이하 적당한 자리에서 끊어서 사용한다. End of explanation """ int(4.8) """ Explanation: 정수와 실수 사이에 강제로 형변환 가능하다. 실수를 정수로 변환하고자 할 경우 int() 함수를 사용한다. 그러면 소수점 이하는 버려진다. End of explanation """ float(2) """ Explanation: 정수를 실수로 형변환하려면 float() 함수를 사용한다. End of explanation """ basic_int = 2 print(float(basic_int)) print(type(basic_int)) float_basic_int = float(basic_int) print(type(float_basic_int)) """ Explanation: 주의: 변수를 형변환한다고 해서 변수에 할당된 값이 변하는 것은 아니다. 다만, 형변환한 값을 다른 변수에 저장해서 활용할 수는 있다. End of explanation """ int = 4 print("What have we done to int?", int) int(5.0) """ Explanation: 키워드 관련 주의사항 지금까지 살펴보았듯이 float, int, print, type와 같은 단어는 녹색으로 표시되는데 이는 그 단어들이 파이썬에서 특별한 역할을 수행하는 키워드이기 때문이다. 그런 키워드를 재정의할 수는 있지만 하지 않는 것이 좋다. 혹여 실수로 아래와 같은 일을 할 수도 있는데 매우 조심해야 한다. End of explanation """ del int int(5.0) """ Explanation: 즉, int() 함수의 본래의 정의가 사라졌다. 이럴 때는 아래와 같이 원래의 함수로 되돌릴 수 있다. End of explanation """ eqn1 = 2 * 3 - 2 print(eqn1) eqn2 = -2 + 2 * 3 print( eqn2 ) eqn3 = -2 + (2 % 3) print( eqn3 ) eqn4 = (.3 + 5) // 2 print(eqn4) eqn5 = 2 ** 4 // 2 print(eqn5) """ Explanation: 연산자 우선순위 일반적으로 알려진 연산자들 사이의 우선순위를 알아야 한다. 줄여서 PEMDAS(펨다스)로 기억하면 좋다. PEMDAS: * 괄호(Parentheses) * 지수승(Exponents) * 곱셈(Multiplication) * 나눗셈(Division) * 덧셈(Addition) * 뺄셈(Subtraction). 왼쪽에 오는 연산자의 우선순위가 높다. 지수승을 나타내는 기호는 **이다. End of explanation """ puppy = True print(puppy) type(puppy) puppies = False """ Explanation: 불리언 값(bool) if 또는 while 문에서 사용되는 불리언 자료형에는 두 개의 값만 사용된다. * True * False 이 두 개의 값만을 이용하여 복잡한 프로그램을 구현할 수 있다. 예제: 강아지를 한 마리만 갖고 있다고 가정하자. 
이것을 표현하기 위해 puppy(강아지 한마리)라는 변수에 True를 할당하고, 여러 마리의 강아지를 뜻하는 puppies 변수에는 False를 할당한다. End of explanation """ puppy, puppies = True, False print("Do I have a puppy?", puppy) print("Do I have puppies?", puppies) """ Explanation: 두 개의 변수 선언을 아래와 같이 동시에 할 수 있다. 등호기호 왼편과 오른편에 사용되는 변수와 값의 개수가 동일해야 함에 주의한다. End of explanation """ True and True True and False """ Explanation: 주의: 위에서 사용된 print함수의 사용법을 기억해둔다. print 함수는 인자를 여러 개 받을 수 있으며 그 값들을 차례대로 동시에 한 줄에 출력한다. 각각의 값들은 스페이스(space)로 구분되어진다. 불리언 연산자 and, not, or 세 개의 연산자를 이용하여 불리언 연산을 할 수 있다. 각 연산자의 의미는 일반적으로 알려진 것과 동일하다. End of explanation """ puppy and puppies not puppies not puppy """ Explanation: 불리언 자료형의 변수를 이용하여 연산을 수행할 수도 있다. End of explanation """ puppy and not puppies puppy or puppies False or False """ Explanation: 불리언 연산자 우선순위 not 연산자의 우선순위가 가장 높다. End of explanation """ 4 == 4 4 == 5 4 != 2 4 != 4 4 > 2 4 > 4 4 >= 4 False or False """ Explanation: 숫자 비교 일반적으로 사용하는 숫자들의 비교를 나타내는 연산자들은 다음과 같다. 리턴값은 모두 불리언 자료형이다. !=: 다른지 여부를 판단 ==: 같은지 여부를 판단 &lt;=: 작거나 같은지 여부를 판단 &gt;=: 크거나 같은지 여부를 판단 &lt;: 작은지 여부를 판단 &gt;: 큰지 여부를 판단 End of explanation """ def average(a, b): """ 두 개의 숫자 a와 b가 주어졌을 때, 두 숫자의 평균을 리턴하는 함수""" return (a + b) * 0.5 """ Explanation: 연습문제 연습 두 숫자의 평균값을 구하는 함수를 아래와 같이 작성할 수 있다. 주의: 함수에 대해서는 이후에 좀 더 자세히 다룬다. 여기서는 함수를 작성하는 방식에 주의한다. 함수 작성요령: def 함수이름(인자1, 인자2, ..., 인자k): 함수본체 return 리턴값 End of explanation """ average(10, 20) average(10, 4) """ Explanation: 주의: 큰 따옴표 세 개("""...""")로 둘러싸인 부분은 문서화를 위해 사용되며 주석으로 처리된다. 즉, 정의되는 함수의 의미와 역할에 대한 설명을 담는다. 영어로 독스트링(docstring)이라 부른다. 주석 등에 한글을 사용하고자 할 경우 아래 문장이 문서 맨 첫줄에 있어야 한다. # coding: utf-8 End of explanation """ help(average) """ Explanation: 함수에 대한 정보를 얻고자 할 경우 help() 함수를 활용할 수 있다. 그러면 앞서 average 함수를 정의할 때 함께 적어 넣은 독스트링이 보여진다. End of explanation """ def even_test(n): if n%2 == 0: return True else: return False even_test(17) even_test(4) """ Explanation: 연습 주어진 자연수 n이 짝수면 True를, 홀수면 False를 리턴하는 함수 even_test(n)을 정의하라. 
활용 예: ```python In [11]: even_test(27) Out[11]: False In [12]: even_test(4) Out[12]: True ``` 견본답안: End of explanation """ def even_test1(n): if not n%2: return True else: return False even_test1(17) even_test1(4) """ Explanation: 아래 방식도 가능하다. (이유를 스스로 설명할 수 있어야 한다.) End of explanation """ def distance(a, b): return abs(a-b) """ Explanation: 연습 두 숫자 a와 b의 사이의 거리를 리턴하는 함수 distance(a, b)를 정의하라. 활용 예: ``` In [11]: distance(3, 4) Out[11]: 1 In [12]: distance(3, 1) Out[12]: 2 ``` 아래 코드에서 pass 부분을 수정해서 채워야 한다. def distance(a, b): """if-else문을 사용하지 않고도 가능하다.""" pass 견본답안: End of explanation """ distance(3, 4) distance(3, 1) """ Explanation: abs 함수는 인자로 입력된 숫자의 절대값을 리턴하는 함수이다. End of explanation """ import math def circle_area(r): return math.pi * r**2 circle_area(3) circle_area(math.pi) """ Explanation: 연습 반지름이 r인 원의 넓이를 리턴하는 함수 circle_area(r)를 정의하라. 활용 예: ```python In [11]: circle_area(3) Out[11]: 28.274333882308138 In [12]: circle_area(pi) Out[12]: 31.006276680299816 ``` 주의: 원주율 pi를 사용하려면 math 모듈을 임포트해야 한다. 견본답안: End of explanation """ import math def geometic_mean(a, b): c = math.sqrt(a * b) return c """ Explanation: 연습 두 숫자의 기하평균(geometric mean)을 리턴하는 함수 geometric_mean(a, b) 함수를 정의하라. 두 숫자 a와 b의 기하평균을 c라 하면, 두 변의 길이가 각각 a와 b인 직사각형의 넓이와 변의 길이가 c인 정사각형의 넓이가 동일함을 의미한다. 활용 예: In [ ]: geometric_mean(2, 2) Out[ ]: 2.0 In [ ]: geometric_mean(2, 8) Out[ ]: 4.0 In [ ]: geometric_mean(2, 1) Out[ ]: 1.4142135623730951 힌트: 제곱근을 계산해주는 sqrt()를 이용한다. 단, sqrt() 함수를 이용하려면 먼저 math 라는 모듈을 아래와 같이 임포트 해야 한다. import math 이후에 math.sqrt(3)와 같은 형식으로 제곱근 함수를 호출할 수 있다. 견본답안: End of explanation """ geometic_mean(2, 2) geometic_mean(2, 8) geometic_mean(2, 1) """ Explanation: sqrt에 대해 알고 싶으면 help 함수를 활용한다. help(math.sqrt) End of explanation """ def pyramid_volume(A, h): """4각뿔의 부피는 밑면적 * 높이 * 1/3 리턴값이 항상 float 자료형이 되도록 한다.""" V = A * h / 3.0 return V """ Explanation: 연습 바닥면적이 A이고 높이가 h인 피라미드의 부피를 리턴하는 함수 pyramid_volume(A, h)를 정의하라. 
활용 예: In [ ]: pyramid_volume(1, 2) Out[ ]: 0.6666666666666666 견본답안: End of explanation """ pyramid_volume(1, 2) """ Explanation: 주의: 3이 아니라 3.0으로 나누는 것에 주의하라. 파이썬3에서는 상관이 없다. End of explanation """ # 하루는 아래 숫자만큼의 초로 이루어진다. # 하루 = 24시간 * 60분 * 60초. daysec = 60 * 60 * 24 # 이제 초를 일 단위로 변경할 수 있다. def seconds2days(sec): """ sec을 일 단위로 변경하는 함수. 강제형변환에 주의할 것""" return (sec/daysec) seconds2days(43200) """ Explanation: 연습 초(second) 단위의 숫자를 받아서 일(day) 단위의 값으로 되돌려주는 seconds2days(sec) 함수를 정의하라. 입력값은 int 또는 float 일 수 있으며 리턴값은 float 자료형이어야 한다. 활용 예: In [ ]: seconds2days(43200) Out[ ]: 0.5 견본답안: End of explanation """ def box_surface(a, b, c): """ 각 변의 길이가 각각 a, b, c인 박스의 표면적을 리턴하는 함수. 힌트: 6개의 면의 합을 구하면 된다""" s1, s2, s3 = a * b, b * c, c * a return 2 * (s1 + s2 + s3) box_surface(1, 1, 1) box_surface(2, 2, 3) """ Explanation: 파이썬2의 경우에는 아래와 같이 정의해도 된다. python def seconds2days(sec): return (float(sec)/daysec) 연습 변의 길이가 각각 a, b, c인 직각육면체의 표면적을 계산해주는 함수 box_surface(a, b, c)를 정의하라. 예를 들어, 박스를 페인트칠하고자 할 때 필요한 페인트의 양을 계산하는 문제이다. 활용 예: In [ ]: box_surface(1, 1, 1) Out[ ]: 6 In [ ]: box_surface(2, 2, 3) Out[ ]: 32 견본답안: End of explanation """ def triangle_area(a, b, c): s = (a + b + c) / 2.0 A = (s * (s - a) * (s - b) * (s - c)) return math.sqrt(A) triangle_area(2, 2, 3) """ Explanation: 연습 변의 길이가 각각 a, b, c인 삼각형의 면적 A를 계산하는 함수 triangle_area(a, b, c)를 정의하라. 다음 등식을 이용할 수 있다. A = (s * (s - a) * (s - b) * (s - c)) ** 0.5 s = (a + b + c) / 2 아래 사이트 참조: https://ko.wikipedia.org/wiki/%EC%82%BC%EA%B0%81%ED%98%95 견본답안: End of explanation """
VVard0g/ThreatHunter-Playbook
docs/notebooks/windows/07_discovery/WIN-190826010110.ipynb
mit
from openhunt.mordorutils import *

# Create (or attach to) the Spark session that backs all SQL analytics below.
spark = get_spark()
"""
Explanation: Remote Service Control Manager Handle Metadata | Metadata | Value | |:------------------|:---| | collaborators | ['@Cyb3rWard0g', '@Cyb3rPandaH'] | | creation date | 2019/08/26 | | modification date | 2020/09/20 | | playbook related | [] | Hypothesis Adversaries might be attempting to open up a handle to the service control manager (SCM) database on remote endpoints to check for local admin access in my environment. Technical Context Often times, when an adversary lands on an endpoint, the current user does not have local administrator privileges over the compromised system. While some adversaries consider this situation a dead end, others find it very interesting to identify which machines on the network the current user has administrative access to. One common way to accomplish this is by attempting to open up a handle to the service control manager (SCM) database on remote endpoints in the network with SC_MANAGER_ALL_ACCESS (0xF003F) access rights. The Service Control Manager (SCM) is a remote procedure call (RPC) server, so that service configuration and service control programs can manipulate services on remote machines. Only processes with Administrator privileges are able to open a handle to the SCM database. This database is also known as the ServicesActive database. Therefore, it is very effective to check if the current user has administrative or local admin access to other endpoints in the network. Offensive Tradecraft An adversary can simply use the Win32 API function OpenSCManagerA to attempt to establish a connection to the service control manager (SCM) on the specified computer and open the service control manager database. If this succeeds (A non-zero handle is returned), the current user context has local administrator acess to the remote host.
Additional reading * https://github.com/OTRF/ThreatHunter-Playbook/tree/master/docs/library/windows/service_control_manager.md Security Datasets | Metadata | Value | |:----------|:----------| | docs | https://securitydatasets.com/notebooks/atomic/windows/07_discovery/SDWIN-190518224039.html | | link | https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/discovery/host/empire_find_localadmin_smb_svcctl_OpenSCManager.zip | Analytics Initialize Analytics Engine
End of explanation
"""
# Download the pre-recorded security dataset and register it with Spark SQL
# as the temporary table "sdTable" that every analytic below queries.
sd_file = "https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/discovery/host/empire_find_localadmin_smb_svcctl_OpenSCManager.zip"
registerMordorSQLTable(spark, sd_file, "sdTable")
"""
Explanation: Download & Process Security Dataset
End of explanation
"""
# Analytic I -- Security 4656: handle requests on the SC_MANAGER object
# "ServicesActive" with full access (0xf003f), excluding the SYSTEM logon
# session (SubjectLogonId 0x3e4).
df = spark.sql(
    '''
SELECT `@timestamp`, Hostname, SubjectUserName, ProcessName, ObjectName
FROM sdTable
WHERE LOWER(Channel) = "security"
    AND EventID = 4656
    AND ObjectType = "SC_MANAGER OBJECT"
    AND ObjectName = "ServicesActive"
    AND AccessMask = "0xf003f"
    AND NOT SubjectLogonId = "0x3e4"
    '''
)
df.show(10,False)
"""
Explanation: Analytic I Detects non-system users failing to get a handle of the SCM database.
| Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User requested access File | 4656 |
End of explanation
"""
# Analytic II -- Security 4674: privileged operations (SeTakeOwnershipPrivilege)
# against the ServicesActive SC_MANAGER object by non-SYSTEM sessions.
df = spark.sql(
    '''
SELECT `@timestamp`, Hostname, SubjectUserName, ProcessName, ObjectName, PrivilegeList, ObjectServer
FROM sdTable
WHERE LOWER(Channel) = "security"
    AND EventID = 4674
    AND ObjectType = "SC_MANAGER OBJECT"
    AND ObjectName = "ServicesActive"
    AND PrivilegeList = "SeTakeOwnershipPrivilege"
    AND NOT SubjectLogonId = "0x3e4"
    '''
)
df.show(10,False)
"""
Explanation: Analytic II Look for non-system accounts performing privileged operations on protected subsystem objects such as the SCM database | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User requested access File | 4674 |
End of explanation
"""
# Analytic III -- Security 5156 (Windows Filtering Platform, inbound layer
# LayerRTID 44): network connections accepted by services.exe.
df = spark.sql(
    '''
SELECT `@timestamp`, Hostname, Application, SourcePort, SourceAddress, DestPort, DestAddress
FROM sdTable
WHERE LOWER(Channel) = "security"
    AND EventID = 5156
    AND Application LIKE "%\\\services.exe"
    AND LayerRTID = 44
    '''
)
df.show(10,False)
"""
Explanation: Analytic III Look for inbound network connections to services.exe from other endpoints in the network.
Same SourceAddress, but different Hostname | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | Process | Microsoft-Windows-Security-Auditing | Process connected to Port | 5156 | | Process | Microsoft-Windows-Security-Auditing | Process connected to Ip | 5156 |
End of explanation
"""
# Analytic IV -- Sysmon 3: the same inbound services.exe connections seen
# from the Sysmon network-connection telemetry.
df = spark.sql(
    '''
SELECT `@timestamp`, Hostname, User, SourcePort, SourceIp, DestinationPort, DestinationIp
FROM sdTable
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
    AND EventID = 3
    AND Image LIKE "%\\\services.exe"
    '''
)
df.show(10,False)
"""
Explanation: Analytic IV Look for several network connection maded by services.exe from different endpoints to the same destination | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | Process | Microsoft-Windows-Security-Auditing | Process connected to Port | 3 | | Process | Microsoft-Windows-Security-Auditing | Process connected to Ip | 3 |
End of explanation
"""
# Analytic V -- correlate 4656 handle requests with 4624 network logons
# (LogonType 3) on the same logon session to recover the remote source IP.
df = spark.sql(
    '''
SELECT o.`@timestamp`, o.Hostname, o.SubjectUserName, o.ObjectType,o.ObjectName, o.PrivilegeList, a.IpAddress
FROM sdTable o
INNER JOIN (
    SELECT Hostname,TargetUserName,TargetLogonId,IpAddress
    FROM sdTable
    WHERE LOWER(Channel) = "security"
        AND EventID = 4624
        AND LogonType = 3
        AND NOT TargetUserName LIKE "%$"
    ) a
ON o.SubjectLogonId = a.TargetLogonId
WHERE LOWER(o.Channel) = "security"
    AND o.EventID = 4656
    AND NOT o.SubjectLogonId = "0x3e4"
    '''
)
df.show(10,False)
"""
Explanation: Analytic V Look for non-system accounts performing privileged operations on protected subsystem objects such as the SCM database from other endpoints in the network | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | Authentication log | Microsoft-Windows-Security-Auditing | User authenticated Host | 4624 | | File | Microsoft-Windows-Security-Auditing | User requested access File | 4656 |
End of explanation
"""
barjacks/pythonrecherche
Kursteilnehmer/Sven Millischer/06 /03 Python Functions, 10 Übungen.ipynb
mit
def test(element): element = element * 2 return element """ Explanation: 03 Python Functions, 10 Übungen Hier nochmals zur Erinnerung, wie Funktionen geschrieben werden. End of explanation """ test(5) """ Explanation: Multipliziert Integers oder Floats mit 2 End of explanation """ lst = [12, 45, 373, 1028] def highstnbr(mylist): mylist.sort() return mylist[-1] highstnbr(lst) """ Explanation: 1.Schreibe eine Funktion, die aus einer Liste, die grösste Zahl herauszieht. Es ist verboten mit "max" zu arbeiten. :-) End of explanation """ lst = [12, 45, 373, 1028] def addtntor(mylist): total=0 for elem in mylist: total+=elem return total addtntor(lst) """ Explanation: 2.Schreibe eine Funktion, die alle Elemente einer Liste, addiert. Es ist verboten mit "sum" zu arbeiten. End of explanation """ lst = [12, 45, 373, 1028] def multplr(mylist): total=1 for elem in mylist: total*=elem return total multplr(lst) """ Explanation: 3.Schreibe eine Funktion, die alle Elemente einer Liste multipliziert. End of explanation """ spruch = "hallo" def mirror(mylist): for elem in mylist: return mylist[::-1] mirror(spruch) 5.Schreibe eine Funktion, die prüft, ob eine Zahl in einer bestimmten Zahlenfolge zu finden ist. liste = [45, 34, 64, 45] def searchnbr(mylist): if 56 in mylist: return "Treffer" else: return "Kein Treffer" searchnbr(liste) """ Explanation: 4.Schreibe eine Funktion, die einen String nimmt, und spiegelt. Also "hallo" zu "ollah". End of explanation """ liste = [5,5,5,5,3,2,11,5] list(set(liste)) evenlst.append(x) evenlst = [] """ Explanation: 6.Lösche die mehrfach genannten Elemente aus der folgenden Liste. End of explanation """ lst = [34,23,22,443,45,78,23,89,23] for x in lst: if x % 2 == 0: print(x) """ Explanation: 7.Drucke die geraden Zahlen aus der folgenden Liste aus: End of explanation """ satz = "In Oesterreich zeichnet sich ein Rechtsrutsch ab. OeVP und FPOe haben stark zugelegt. 
Gemaess der neusten Hochrechnung ist die Partei von Sebastian Kurz mit 31,6 Prozent der Stimmen Wahlsiegerin, auf Platz zwei folgt die SPÖ (26,9 Prozent) vor der FPOe (26,0 Prozent)." def counting_caps(phrase): caps = 0 for x in phrase: if x.isupper(): caps += 1 return caps counting_caps(satz) """ Explanation: 8.Prüfe mit einer Funktionen, wieviele Grossbuchstaben in folgendem Satz zu finden sind. End of explanation """ satz = "In Oesterreich zeichnet sich ein Rechtsrutsch ab. OeVP und FPOe haben stark zugelegt. Gemaess der neusten Hochrechnung ist die Partei von Sebastian Kurz mit 31,6 Prozent der Stimmen Wahlsiegerin, auf Platz zwei folgt die SPÖ (26,9 Prozent) vor der FPOe (26,0 Prozent)." def counting_character(phrase): character = 0 for x in phrase: if x.count('e'): character += 1 return character counting_character(satz) """ Explanation: 9.Prüfe mit einer Funktionen, wieviele 'e's in folgendem Satz zu finden sind. End of explanation """
turbomanage/training-data-analyst
courses/machine_learning/deepdive/02_generalization/create_datasets.ipynb
apache-2.0
from google.cloud import bigquery import seaborn as sns import pandas as pd import numpy as np import shutil """ Explanation: <h1> Explore and create ML datasets </h1> In this notebook, we will explore data corresponding to taxi rides in New York City to build a Machine Learning model in support of a fare-estimation tool. The idea is to suggest a likely fare to taxi riders so that they are not surprised, and so that they can protest if the charge is much higher than expected. <div id="toc"></div> Let's start off with the Python imports that we need. End of explanation """ rawdata = """ SELECT pickup_datetime, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, passenger_count, trip_distance, tolls_amount, fare_amount, total_amount FROM `nyc-tlc.yellow.trips` WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N)) = 1 """ query = rawdata.replace("EVERY_N", "100000") print(query) trips = bigquery.Client().query(query).to_dataframe() print("Total dataset is {} taxi rides".format(len(trips))) trips[:10] """ Explanation: <h3> Extract sample data from BigQuery </h3> The dataset that we will use is <a href="https://bigquery.cloud.google.com/table/nyc-tlc:yellow.trips">a BigQuery public dataset</a>. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is one billion, and then switch to the Preview tab to look at a few rows. Write a SQL query to pick up the following fields <pre> pickup_datetime, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, passenger_count, trip_distance, tolls_amount, fare_amount, total_amount </pre> from the dataset and explore a small part of the data. Make sure to pick a repeatable subset of the data so that if someone reruns this notebook, they will get the same results. 
End of explanation """ ax = sns.regplot(x = "trip_distance", y = "fare_amount", ci = None, truncate = True, data = trips) """ Explanation: <h3> Exploring data </h3> Let's explore this dataset and clean it up as necessary. We'll use the Python Seaborn package to visualize graphs and Pandas to do the slicing and filtering. End of explanation """ tollrides = trips[trips['tolls_amount'] > 0] tollrides[tollrides['pickup_datetime'] == '2014-05-20 23:09:00'] """ Explanation: Hmm ... do you see something wrong with the data that needs addressing? It appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50). What's up with the streaks at \$45 and \$50? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable. Let's examine whether the toll amount is captured in the total amount. End of explanation """ trips.describe() """ Explanation: Looking a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool. Let's also look at the distribution of values within the columns. 
End of explanation """ def showrides(df, numlines): import matplotlib.pyplot as plt lats = [] lons = [] goodrows = df[df['pickup_longitude'] < -70] for iter, row in goodrows[:numlines].iterrows(): lons.append(row['pickup_longitude']) lons.append(row['dropoff_longitude']) lons.append(None) lats.append(row['pickup_latitude']) lats.append(row['dropoff_latitude']) lats.append(None) sns.set_style("darkgrid") plt.plot(lons, lats) showrides(trips, 10) showrides(tollrides, 10) """ Explanation: Hmm ... The min, max of longitude look strange. Finally, let's actually look at the start and end of a few of the trips. End of explanation """ def sample_between(a, b): basequery = """ SELECT (tolls_amount + fare_amount) AS fare_amount, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers FROM `nyc-tlc.yellow.trips` WHERE trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 """ sampler = "AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N)) = 1" sampler2 = "AND {0} >= {1}\n AND {0} < {2}".format( "ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100))", "(EVERY_N * {})".format(a), "(EVERY_N * {})".format(b) ) return "{}\n{}\n{}".format(basequery, sampler, sampler2) def create_query(phase, EVERY_N): """Phase: train (70%) valid (15%) or test (15%)""" query = "" if phase == 'train': # Training query = sample_between(0, 70) elif phase == 'valid': # Validation query = sample_between(70, 85) else: # Test query = sample_between(85, 100) return query.replace("EVERY_N", str(EVERY_N)) print(create_query('train', 100000)) def to_csv(df, filename): outdf = df.copy(deep = False) outdf.loc[:, 'key'] = np.arange(0, len(outdf)) # 
def distance_between(lat1, lon1, lat2, lon2):
    """Estimate the great-circle ("as the crow flies") distance in km.

    Uses the spherical law of cosines (the original comment called this
    "haversine", but it is the law-of-cosines form). Inputs may be scalars
    or NumPy arrays / pandas Series, in degrees.

    The cosine term is clipped into [-1, 1] before arccos: floating-point
    drift can push it slightly above 1 for identical or near-identical
    points, which would otherwise yield NaN instead of ~0.
    """
    lat1_r = np.radians(lat1)
    lat2_r = np.radians(lat2)
    cos_angle = (np.sin(lat1_r) * np.sin(lat2_r)
                 + np.cos(lat1_r) * np.cos(lat2_r) * np.cos(np.radians(lon2 - lon1)))
    # Scale factor 60 * 1.515 * 1.609344 is kept byte-for-byte from the
    # original so the benchmark's $/km rate is unchanged.
    dist = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0))) * 60 * 1.515 * 1.609344
    return dist
hunterherrin/phys202-2015-work
assignments/assignment05/MatplotlibEx03.ipynb
mit
def well2d(x, y, nx, ny, L=1.0):
    """Compute the 2d infinite-square-well wave function.

    psi(x, y) = (2/L) * sin(nx*pi*x/L) * sin(ny*pi*y/L)

    *x* and *y* may be scalars or NumPy arrays; *nx*, *ny* are the quantum
    numbers for the x and y directions and *L* is the well width.
    """
    x_part = np.sin(nx * np.pi * x / L)
    y_part = np.sin(ny * np.pi * y / L)
    return (2 / L) * x_part * y_part
First make a plot using one of the contour functions: End of explanation """ plt.pcolormesh(X, Y, P) plt.colorbar() plt.set_cmap('seismic') plt.tight_layout() plt.title('Wave Function for Different x and y combinations') plt.xlabel('x') plt.ylabel('y') assert True # use this cell for grading the pcolor plot """ Explanation: Next make a visualization using one of the pcolor functions: End of explanation """
tensorflow/docs-l10n
site/zh-cn/agents/tutorials/4_drivers_tutorial.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TF-Agents Authors. End of explanation """ !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.policies import random_py_policy from tf_agents.policies import random_tf_policy from tf_agents.metrics import py_metrics from tf_agents.metrics import tf_metrics from tf_agents.drivers import py_driver from tf_agents.drivers import dynamic_episode_driver """ Explanation: 驱动程序 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://tensorflow.google.cn/agents/tutorials/4_drivers_tutorial"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/agents/tutorials/4_drivers_tutorial.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 运行</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/agents/tutorials/4_drivers_tutorial.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 Github 上查看源代码</a> </td> <td> <a 
href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/agents/tutorials/4_drivers_tutorial.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a> </td> </table> 简介 强化学习的常见模式是在环境中执行策略,持续指定的步数或片段数。在诸如数据收集、评估和生成代理视频期间会采用这种模式。 使用 Python 编程非常简单,但在 TensorFlow 中编程和调试则要复杂得多,因为它涉及 tf.while 循环、tf.cond 和 tf.control_dependencies。因此,我们将运行循环这一概念抽象成一个名为 driver 的类,并在 Python 和 TensorFlow 中提供经过充分测试的实现。 此外,驱动程序在每步遇到的数据都会保存在名为 Trajectory 的命名元组内,并广播给一组观察者(例如回放缓冲区和指标)。这些数据包括环境观测值、策略建议的操作、获得的奖励、当前和下一个步骤的类型等。 设置 如果尚未安装 TF-Agents 或 Gym,请运行以下命令: End of explanation """ env = suite_gym.load('CartPole-v0') policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(), action_spec=env.action_spec()) replay_buffer = [] metric = py_metrics.AverageReturnMetric() observers = [replay_buffer.append, metric] driver = py_driver.PyDriver( env, policy, observers, max_steps=20, max_episodes=1) initial_time_step = env.reset() final_time_step, _ = driver.run(initial_time_step) print('Replay Buffer:') for traj in replay_buffer: print(traj) print('Average Return: ', metric.result()) """ Explanation: Python 驱动程序 PyDriver 类采用 Python 环境、Python 策略和观察者列表在每个时间步骤更新。主要方法为 run(),它会使用策略中的操作逐步执行环境,直到至少满足以下终止条件之一:步数达到 max_steps 或片段数达到 max_episodes。 实现方式大致如下: ```python class PyDriver(object): def init(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps &lt; self._max_steps and num_episodes &lt; self._max_episodes: # Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) # Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) # Package information into a trajectory traj = trajectory.Trajectory( 
time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) # Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state ``` 以下示例展示了在 CartPole 环境中运行随机策略,将结果保存到回放缓冲区并计算一些指标。 End of explanation """ env = suite_gym.load('CartPole-v0') tf_env = tf_py_environment.TFPyEnvironment(env) tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(), time_step_spec=tf_env.time_step_spec()) num_episodes = tf_metrics.NumberOfEpisodes() env_steps = tf_metrics.EnvironmentSteps() observers = [num_episodes, env_steps] driver = dynamic_episode_driver.DynamicEpisodeDriver( tf_env, tf_policy, observers, num_episodes=2) # Initial driver.run will reset the environment and initialize the policy. final_time_step, policy_state = driver.run() print('final_time_step', final_time_step) print('Number of Steps: ', env_steps.result().numpy()) print('Number of Episodes: ', num_episodes.result().numpy()) # Continue running from previous state final_time_step, _ = driver.run(final_time_step, policy_state) print('final_time_step', final_time_step) print('Number of Steps: ', env_steps.result().numpy()) print('Number of Episodes: ', num_episodes.result().numpy()) """ Explanation: TensorFlow 驱动程序 TensorFlow 中也有驱动程序,其功能与 Python 驱动程序类似,区别是使用 TF 环境、TF 策略、TF 观察者等。我们目前有 2 种 TensorFlow 驱动程序:DynamicStepDriver(在给定的有效环境步数后终止),以及 DynamicEpisodeDriver(在给定的片段数后终止)。让我们看一下 DynamicEpisode 的实际应用示例。 End of explanation """
google/applied-machine-learning-intensive
content/04_classification/04_classification_project/colab.ipynb
apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: <a href="https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/04_classification/04_classification_project/colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Copyright 2020 Google LLC. End of explanation """ ! chmod 600 kaggle.json && (ls ~/.kaggle 2>/dev/null || mkdir ~/.kaggle) && cp kaggle.json ~/.kaggle/ && echo 'Done' ! kaggle competitions download -c titanic ! ls """ Explanation: Classification Project In this project you will apply what you have learned about classification and TensorFlow to complete a project from Kaggle. The challenge is to achieve a high accuracy score while trying to predict which passengers survived the Titanic ship crash. After building your model, you will upload your predictions to Kaggle and submit the score that you get. The Titanic Dataset Kaggle has a dataset containing the passenger list on the Titanic. The data contains passenger features such as age, gender, ticket class, as well as whether or not they survived. Your job is to create a binary classifier using TensorFlow to determine if a passenger survived or not. The Survived column lets you know if the person survived. Then, upload your predictions to Kaggle and submit your accuracy score at the end of this Colab, along with a brief conclusion. 
To get the dataset, you'll need to accept the competition's rules by clicking the "I understand and accept" button on the competition rules page. Then upload your kaggle.json file and run the code below. End of explanation """ # Your code goes here """ Explanation: Note: If you see a "403 - Forbidden" error above, you still need to click "I understand and accept" on the competition rules page. Three files are downloaded: train.csv: training data (contains features and targets) test.csv: feature data used to make predictions to send to Kaggle gender_submission.csv: an example competition submission file Step 1: Exploratory Data Analysis Perform exploratory data analysis and data preprocessing. Use as many text and code blocks as you need to explore the data. Note any findings. Repair any data issues you find. Student Solution End of explanation """ # Your code goes here """ Explanation: Step 2: The Model Build, fit, and evaluate a classification model. Perform any model-specific data processing that you need to perform. If the toolkit you use supports it, create visualizations for loss and accuracy improvements. Use as many text and code blocks as you need to explore the data. Note any findings. Student Solution End of explanation """ # Your code goes here """ Explanation: Step 3: Make Predictions and Upload To Kaggle In this step you will make predictions on the features found in the test.csv file and upload them to Kaggle using the Kaggle API. Use as many text and code blocks as you need to explore the data. Note any findings. Student Solution End of explanation """ # Your code goes here """ Explanation: What was your Kaggle score? Record your score here Step 4: Iterate on Your Model In this step you're encouraged to play around with your model settings and to even try different models. See if you can get a better score. Use as many text and code blocks as you need to explore the data. Note any findings. Student Solution End of explanation """
tensorflow/docs-l10n
site/ko/tutorials/generative/dcgan.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. End of explanation """ !pip install tensorflow-gpu==2.0.0-rc1 import tensorflow as tf tf.__version__ # GIF를 만들기위해 설치합니다. !pip install imageio import glob import imageio import matplotlib.pyplot as plt import numpy as np import os import PIL from tensorflow.keras import layers import time from IPython import display """ Explanation: 심층 합성곱 생성적 적대 신경망 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/generative/dcgan"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> TensorFlow.org에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/generative/dcgan.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> 구글 코랩(Colab)에서 실행하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/generative/dcgan.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> 깃허브(GitHub)소스 보기</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/generative/dcgan.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다. 
이 번역에 개선할 부분이 있다면 tensorflow/docs-l10n 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 docs-ko@tensorflow.org로 메일을 보내주시기 바랍니다. 이 튜토리얼은 심층 합성곱 생성적 적대 신경망 (Deep Convolutional Generative Adversarial Networks, DCGAN)을 이용하여, 손으로 쓴 숫자들을 어떻게 생성할 수 있는지 보여줍니다. 이 코드는 케라스 Sequential API와 tf.GradientTape 훈련 루프를 사용하여 작성됐습니다. 생성적 적대 신경망(GANs)은 무엇인가요? 생성적 적대 신경망 (Generative Adversarial Networks, GANs)은 요즘 컴퓨터 과학에서 가장 흥미로운 아이디어 중 하나입니다. 두개의 모델이 적대적인 과정을 통해 동시에 훈련됩니다. 생성자 ("예술가")는 진짜처럼 보이는 이미지를 생성하도록 배우는 와중에, 감별자 ("예술비평가")는 가짜의 이미지로부터 진짜를 구별하게 되는 것을 배우게 됩니다. 훈련과정 동안 생성자는 점차 실제같은 이미지를 더 잘 생성하게 되고, 감별자는 점차 진짜와 가짜를 더 잘 구별하게됩니다. 이 과정은 감별자가 가짜 이미지에서 진짜 이미지를 더이상 구별하지 못하게 될때, 평형상태에 도달하게 됩니다. 이 노트북은 이 과정을 MNIST 데이터를 이용하여 보여줍니다. 아래의 애니메이션은 50 에포크(epoch)동안 훈련한 생성자가 생성해낸 연속된 이미지들을 보여줍니다. 이미지들은 랜덤한 잡음으로 부터 시작되었고, 점차 시간이 지남에 따라 손으로 쓴 숫자들을 닮아가게 됩니다. 생성적 적대 신경망 (GANs)에 대해 더 배우고 싶으시다면, MIT의 Intro to Deep Learning 수업을 추천합니다. 텐서플로와 다른 라이브러리 불러오기 End of explanation """ (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data() train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32') train_images = (train_images - 127.5) / 127.5 # 이미지를 [-1, 1]로 정규화합니다. BUFFER_SIZE = 60000 BATCH_SIZE = 256 # 데이터 배치를 만들고 섞습니다. train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) """ Explanation: 데이터셋 로딩 및 준비 생성자와 감별자를 훈련하기위해 MNIST 데이터셋을 사용할것입니다. 생성자는 손글씨 숫자 데이터를 닮은 숫자들을 생성할 것입니다. End of explanation """ def make_generator_model(): model = tf.keras.Sequential() model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,))) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) model.add(layers.Reshape((7, 7, 256))) assert model.output_shape == (None, 7, 7, 256) # 주목: 배치사이즈로 None이 주어집니다. 
model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)) assert model.output_shape == (None, 7, 7, 128) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)) assert model.output_shape == (None, 14, 14, 64) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')) assert model.output_shape == (None, 28, 28, 1) return model """ Explanation: 모델 만들기 생성자와 감별자는 케라스 Sequential API를 이용해 정의됩니다. 생성자 생성자는 시드값 (seed; 랜덤한 잡음)으로부터 이미지를 생성하기 위해, tf.keras.layers.Conv2DTranspose (업샘플링) 층을 이용합니다. 처음 Dense층은 이 시드값을 인풋으로 받습니다. 그 다음 원하는 사이즈 28x28x1의 이미지가 나오도록 업샘플링을 여러번 합니다. tanh를 사용하는 마지막 층을 제외한 나머지 각 층마다 활성함수로 tf.keras.layers.LeakyReLU을 사용하고 있음을 주목합시다. End of explanation """ generator = make_generator_model() noise = tf.random.normal([1, 100]) generated_image = generator(noise, training=False) plt.imshow(generated_image[0, :, :, 0], cmap='gray') """ Explanation: (아직 훈련이 되지않은) 생성자를 이용해 이미지를 생성해봅시다. End of explanation """ def make_discriminator_model(): model = tf.keras.Sequential() model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1])) model.add(layers.LeakyReLU()) model.add(layers.Dropout(0.3)) model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')) model.add(layers.LeakyReLU()) model.add(layers.Dropout(0.3)) model.add(layers.Flatten()) model.add(layers.Dense(1)) return model """ Explanation: 감별자 감별자는 합성곱 신경망(Convolutional Neural Network, CNN) 기반의 이미지 분류기입니다. End of explanation """ discriminator = make_discriminator_model() decision = discriminator(generated_image) print (decision) """ Explanation: (아직까지 훈련이 되지 않은) 감별자를 사용하여, 생성된 이미지가 진짜인지 가짜인지 판별합니다. 모델은 진짜 이미지에는 양수의 값 (positive values)을, 가짜 이미지에는 음수의 값 (negative values)을 출력하도록 훈련되어집니다. 
def discriminator_loss(real_output, fake_output):
    """Total discriminator loss.

    Real-image logits are scored against a target of all ones and fake
    (generated) logits against all zeros; the two cross-entropy terms are
    summed.
    """
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
# `tf.function` compiles this Python function into a single TensorFlow graph,
# so the whole generator/discriminator update runs as one traced call.
@tf.function
def train_step(images):
    """Run one GAN training step on a batch of real images.

    Samples a batch of noise vectors, generates fake images, scores both the
    real and the generated batch with the discriminator, then applies one
    gradient update to each network using its own optimizer.

    Relies on notebook-level globals defined in earlier cells: noise_dim,
    generator, discriminator, generator_loss, discriminator_loss,
    generator_optimizer, discriminator_optimizer, and BATCH_SIZE
    (set in the data-loading cell, not visible in this chunk — TODO confirm).
    """
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    # Two independent tapes: each network's gradients are computed separately
    # from its own loss.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

def train(dataset, epochs):
    """Train the GAN for `epochs` passes over `dataset`.

    After every epoch an image grid is rendered from the fixed global `seed`
    (so successive GIF frames are comparable), and a checkpoint is written
    every 15 epochs.
    """
    for epoch in range(epochs):
        start = time.time()

        for image_batch in dataset:
            train_step(image_batch)

        # Produce an image for the GIF as we go.
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)

        # Save the model every 15 epochs.
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix = checkpoint_prefix)

        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))

    # Generate one last image grid after the final epoch.
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)

""" Explanation: 훈련 루프는 생성자가 입력으로 랜덤시드를 받는 것으로부터 시작됩니다. 그 시드값을 사용하여 이미지를 생성합니다. 감별자를 사용하여 (훈련 세트에서 갖고온) 진짜 이미지와 (생성자가 생성해낸) 가짜이미지를 분류합니다. 각 모델의 손실을 계산하고, 그래디언트 (gradients)를 사용해 생성자와 감별자를 업데이트합니다. End of explanation """

def generate_and_save_images(model, epoch, test_input):
    """Render a 4x4 grid of generated images and save it as a PNG.

    The PNG is named 'image_at_epoch_NNNN.png' so the GIF step below can
    glob and sort the frames by epoch.
    """
    # Note `training` is set to False: this runs every layer (including
    # batch normalization) in inference mode.
    predictions = model(test_input, training=False)

    fig = plt.figure(figsize=(4,4))

    # Assumes test_input holds 16 seeds so the grid is exactly 4x4
    # (num_examples_to_generate = 16 in the setup cell).
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        # De-normalize from [-1, 1] (tanh output) back to [0, 255] grayscale.
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')

    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()

""" Explanation: 이미지 생성 및 저장 End of explanation """

# IPython cell magic: reports the wall time of the full training run.
# `train_dataset` is built in the data-loading cell earlier in the notebook
# (not visible in this chunk — TODO confirm).
%%time
train(train_dataset, EPOCHS)

""" Explanation: 모델 훈련 위에 정의된 train() 메서드를 생성자와 감별자를 동시에 훈련하기 위해 호출합니다. 생성적 적대 신경망을 학습하는 것은 매우 까다로울 수 있습니다. 생성자와 감별자가 서로를 제압하지 않는 것이 중요합니다. (예를 들어 학습률이 비슷하면 한쪽이 우세해집니다.) 훈련 초반부에는 생성된 이미지는 랜덤한 노이즈처럼 보입니다. 훈련이 진행될수록, 생성된 숫자는 점차 진짜처럼 보일 것입니다. 약 50 에포크가 지난 후, MNIST 숫자와 닮은 이미지가 생성됩니다. 코랩에서 기본 설정으로 실행하면, 에포크마다 1분정도 소요될 것입니다. End of explanation """

# Restore the latest checkpoint written during training.
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

""" Explanation: 마지막 체크포인트를 복구합니다. End of explanation """

# Display a single saved image, selected by epoch number.
def display_image(epoch_no):
    """Open the PNG that was saved for epoch `epoch_no`."""
    return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))

display_image(EPOCHS)

""" Explanation: GIF 생성 End of explanation """

anim_file = 'dcgan.gif'

with imageio.get_writer(anim_file, mode='I') as writer:
    filenames = glob.glob('image*.png')
    filenames = sorted(filenames)
    last = -1
    for i,filename in enumerate(filenames):
        # Sub-sample frames on a square-root schedule: early epochs contribute
        # more frames than later ones.
        frame = 2*(i**0.5)
        if round(frame) > round(last):
            last = frame
        else:
            continue
        image = imageio.imread(filename)
        writer.append_data(image)
    # Append the last file once more so the final epoch's image is always
    # included even if the sampling schedule skipped it.
    image = imageio.imread(filename)
    writer.append_data(image)

import IPython
if IPython.version_info > (6,2,0,''):
    # Inline GIF display requires IPython newer than 6.2.0.
    display.Image(filename=anim_file)

""" Explanation: imageio로 훈련 중에 저장된 이미지를 사용해 GIF 애니메이션을 만듭니다. End of explanation """

try:
    from google.colab import files
except ImportError:
    # Not running on Colab; there is no file-download helper.
    pass
else:
    files.download(anim_file)

""" Explanation: 코랩에서 작업하고 있다면, 아래의 코드에서 애니메이션을 다운로드 받을 수 있습니다: End of explanation """
mcamack/Jupyter-Notebooks
NLP/NLP101 - Tokenization, Sentiment.ipynb
apache-2.0
from nltk.tokenize import TreebankWordTokenizer

# Penn-Treebank-style word tokenization: splits off punctuation and
# contractions as separate tokens.
sentence = "How does nltk tokenize this sentence?"
tokenizer = TreebankWordTokenizer()
tokenizer.tokenize(sentence)  # last expression of the cell -> echoed as output

""" Explanation: Natural Language Processing (NLP) Overview corpus - collection of texts lexicon - collection of words (or sequences) we put into our index bag-of-words - take each word and count how many times it appears n-gram - count how often each set of n words appears Tokenization break text into tokens based on characters, words, sentences, etc. Tokenizing Sentences End of explanation """

from nltk.tokenize.casual import casual_tokenize
# The "casual" tokenizer is aimed at social-media text: it keeps emoticons
# and @handles as single tokens.
tweet = "OMG @twitterguy that was sooooooooo cool :D :D :D!!!!"
print(casual_tokenize(tweet))
# reduce_len shortens exaggerated character runs (e.g. "sooooooooo"),
# strip_handles removes @mentions — see the NLTK casual_tokenize docs.
casual_tokenize(tweet, reduce_len=True, strip_handles=True)

""" Explanation: Tokenizing Social Media End of explanation """

from nltk.util import ngrams
# Bigrams (n=2) over a naive whitespace split of the sentence above.
list(ngrams(sentence.split(), 2))

""" Explanation: N-grams End of explanation """

import nltk
# One-time download of the stop-word corpus into the local nltk_data cache.
nltk.download("stopwords")
stop_words = nltk.corpus.stopwords.words("english")
stop_words[:10]  # peek at the first few English stop words

""" Explanation: Stop-words End of explanation """

# VADER: lexicon-based sentiment scoring
# (Valence Aware Dictionary for sEntiment Reasoning).
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
negative_sentence = "This is the worst!!! I hate it so much :( :("
# NOTE: only the last expression of a notebook cell is echoed; this first
# score is computed but not displayed.
sia.polarity_scores(negative_sentence)
sia.polarity_scores(tweet)

""" Explanation: Sentiment VADER (Valence Aware Dictionary for sEntiment Reasoning) End of explanation """
DJCordhose/ai
notebooks/tf2/tf-low-level.ipynb
mit
!pip install -q tf-nightly-gpu-2.0-preview import tensorflow as tf print(tf.__version__) # a small sanity check, does tf seem to work ok? hello = tf.constant('Hello TF!') print("This works: {}".format(hello)) # this should return True even on Colab tf.test.is_gpu_available() tf.test.is_built_with_cuda() !nvidia-smi tf.executing_eagerly() """ Explanation: <a href="https://colab.research.google.com/github/DJCordhose/ai/blob/master/notebooks/tf2/tf-low-level.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Introduction to Neural Networks with Low Level TensorFlow 2 Based on * This thread is a crash course on everything you need to know to use TensorFlow 2.0 + Keras for deep learning research: https://twitter.com/fchollet/status/1105139360226140160 * Colab Notebook tf.keras for Researchers: https://colab.research.google.com/drive/17u-pRZJnKN0gO5XZmq8n5A2bKGrfKEUg#scrollTo=UHOOlixcQ9Gl * Effective TensorFlow 2: https://www.tensorflow.org/alpha/guide/effective_tf2 End of explanation """ input = [[-1], [0], [1], [2], [3], [4]] output = [[2], [1], [0], [-1], [-2], [-3]] import matplotlib.pyplot as plt plt.xlabel('input') plt.ylabel('output') plt.plot(input, output, 'ro') """ Explanation: Transforming an input to a known output End of explanation """ plt.plot(input, output) plt.plot(input, output, 'ro') """ Explanation: relation between input and output is linear End of explanation """ w = tf.constant([[1.5], [-2], [1]], dtype='float32') x = tf.constant([[10, 6, 8]], dtype='float32') b = tf.constant([6], dtype='float32') y = tf.matmul(x, w) + b print(y) """ Explanation: Defining the model to train untrained single unit (neuron) also outputs a line from same input, although another one The Artificial Neuron: Foundation of Deep Neural Networks (simplified, more later) a neuron takes a number of numerical inputs multiplies each with a weight, sums up all weighted input and adds bias (constant) to that sum 
from this it creates a single numerical output
for one input (one dimension) this would be a description of a line
for more dimensions this describes a hyperplane that can serve as a decision boundary
this is typically expressed as a matrix multiplication plus an addition
<img src='https://djcordhose.github.io/ai/img/insurance/neuron211.jpg'>
This can be expressed using a matrix multiplication
Mean Squared Error $MSE = {\frac {1}{n}}\sum {i=1}^{n}(Y{i}-{\hat {Y_{i}}})^{2}$ https://en.wikipedia.org/wiki/Mean_squared_error End of explanation """ # a simple example # f(x) = x^2 # f'(x) = 2x # x = 4 # f(4) = 16 # f'(4) = 8 (that's what we expect) def tape_sample(): x = tf.constant(4.0) # open a GradientTape with tf.GradientTape() as tape: tape.watch(x) y = x * x dy_dx = tape.gradient(y, x) print(dy_dx) # just a function in order not to interfere with x on the global scope tape_sample() """ Explanation: Minimize Loss by changing parameters of neuron Move in parameter space in the direction of a descent <img src='https://djcordhose.github.io/ai/img/gradients.jpg'> https://twitter.com/colindcarroll/status/1090266016259534848 Job of the optimizer <img src='https://djcordhose.github.io/ai/img/manning/optimizer.png' height=500> For this we need partial derivations TensorFlow offers automatic differentiation: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/GradientTape tape will record operations for automatic differentiation either by making it record explicily (watch) or by declaring a varible to be trainable (which we did in the layer above) End of explanation """ linear_layer = LinearLayer() linear_layer.w, linear_layer.b linear_layer.trainable_weights EPOCHS = 200 learning_rate = 1e-2 losses = [] weights = [] biases = [] weights_gradient = [] biases_gradient = [] for step in range(EPOCHS): with tf.GradientTape() as tape: # forward pass y_pred = linear_layer(x) # loss value for this batch loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred)) # just for logging losses.append(loss.numpy()) weights.append(linear_layer.w.numpy()[0][0]) biases.append(linear_layer.b.numpy()[0]) # get gradients of weights wrt the loss gradients = tape.gradient(loss, linear_layer.trainable_weights) weights_gradient.append(gradients[0].numpy()[0][0]) biases_gradient.append(gradients[1].numpy()[0]) # backward pass, changing trainable weights 
linear_layer.w.assign_sub(learning_rate * gradients[0]) linear_layer.b.assign_sub(learning_rate * gradients[1]) print(loss) plt.xlabel('epochs') plt.ylabel('loss') # plt.yscale('log') plt.plot(losses) plt.figure(figsize=(20, 10)) plt.plot(weights) plt.plot(biases) plt.plot(weights_gradient) plt.plot(biases_gradient) plt.legend(['slope', 'offset', 'gradient slope', 'gradient offset']) """ Explanation: Training End of explanation """ y_pred = linear_layer(x) y_pred plt.plot(x, y_pred) plt.plot(input, output, 'ro') # single neuron and single input: one weight and one bias # slope m ~ -1 # y-axis offset y0 ~ 1 # https://en.wikipedia.org/wiki/Linear_equation#Slope%E2%80%93intercept_form linear_layer.trainable_weights """ Explanation: Line drawn by neuron after training result after training is not perfect, but almost looks like the same line https://en.wikipedia.org/wiki/Linear_equation#Slope%E2%80%93intercept_form End of explanation """ optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2) EPOCHS = 500 losses = [] linear_layer = LinearLayer() for step in range(EPOCHS): with tf.GradientTape() as tape: # Forward pass. y_pred = linear_layer(x) # Loss value for this batch. loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred)) losses.append(loss) # Get gradients of weights wrt the loss. gradients = tape.gradient(loss, linear_layer.trainable_weights) # Update the weights of our linear layer. 
optimizer.apply_gradients(zip(gradients, linear_layer.trainable_weights)) # plt.yscale('log') plt.ylabel("loss") plt.xlabel("epochs") plt.plot(losses) y_pred = linear_layer(x) plt.plot(x, y_pred) plt.plot(input, output, 'ro') linear_layer.trainable_weights """ Explanation: Prebuilt Optimizers do this job (but a bit more efficient and sohpisticated) End of explanation """ import numpy as np a = -1 b = 1 n = 50 x = tf.constant(np.random.uniform(0, 1, n), dtype='float32') y = tf.constant(a*x+b + 0.1 * np.random.normal(0, 1, n), dtype='float32') plt.scatter(x, y) x = tf.reshape(x, (n, 1)) y_true = tf.reshape(y, (n, 1)) linear_layer = LinearLayer() a = linear_layer.w.numpy()[0][0] b = linear_layer.b.numpy()[0] def plot_line(a, b, x, y_true): fig, ax = plt.subplots() y_pred = a * x + b line = ax.plot(x, y_pred) ax.plot(x, y_true, 'ro') return fig, line plot_line(a, b, x, y_true) # the problem is a little bit harder, train for a little longer EPOCHS = 2000 losses = [] lines = [] linear_layer = LinearLayer() for step in range(EPOCHS): # Open a GradientTape. with tf.GradientTape() as tape: # Forward pass. y_pred = linear_layer(x) # Loss value for this batch. loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred)) losses.append(loss) a = linear_layer.w.numpy()[0][0] b = linear_layer.b.numpy()[0] lines.append((a, b)) # Get gradients of weights wrt the loss. gradients = tape.gradient(loss, linear_layer.trainable_weights) # Update the weights of our linear layer. 
optimizer.apply_gradients(zip(gradients, linear_layer.trainable_weights)) print(loss) # plt.yscale('log') plt.ylabel("loss") plt.xlabel("epochs") plt.plot(losses) """ Explanation: More data points, more noisy End of explanation """ a, b = lines[0] plot_line(a, b, x, y_true) """ Explanation: Lines model draws over time Initial Step End of explanation """ a, b = lines[500] plot_line(a, b, x, y_true) """ Explanation: After 500 Steps End of explanation """ a, b = lines[1999] plot_line(a, b, x, y_true) """ Explanation: Final Step End of explanation """ import numpy as np x = tf.reshape(tf.constant(np.arange(-1, 4, 0.1), dtype='float32'), (50, 1)) y_pred = linear_layer(x) plt.figure(figsize=(20, 10)) plt.plot(x, y_pred) y_pred_relu = tf.nn.relu(y_pred) plt.plot(x, y_pred_relu) y_pred_sigmoid = tf.nn.sigmoid(y_pred) plt.plot(x, y_pred_sigmoid) y_pred_tanh = tf.nn.tanh(y_pred) plt.plot(x, y_pred_tanh) plt.plot(input, output, 'ro') plt.legend(['no activation', 'relu', 'sigmoid', 'tanh']) """ Explanation: Understandinging the effect of activation functions Typically, the output of a neuron is transformed using an activation function which compresses the output to a value between 0 and 1 (sigmoid), or between -1 and 1 (tanh) or sets all negative values to zero (relu). 
<img src='https://raw.githubusercontent.com/DJCordhose/deep-learning-crash-course-notebooks/master/img/neuron.jpg'> Typical Activation Functions <img src='https://djcordhose.github.io/ai/img/activation-functions.jpg'> End of explanation """ from matplotlib.colors import ListedColormap a = -1 b = 1 n = 100 # all points X = np.random.uniform(0, 1, (n, 2)) # our line line_x = np.random.uniform(0, 1, n) line_y = a*line_x+b plt.plot(line_x, line_y, 'r') # below and above line y = X[:, 1] > a*X[:, 0]+b y = y.astype(int) plt.xlabel("x1") plt.ylabel("x2") plt.scatter(X[:,0], X[:,1], c=y, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k') y """ Explanation: Logictic Regression So far we were inferring a continous value for another, now we want to classify. Imagine we have a line that separates two categories in two dimensions. End of explanation """ class SigmoidLayer(LinearLayer): """y = sigmoid(w.x + b)""" def __init__(self, **kwargs): super(SigmoidLayer, self).__init__(**kwargs) def call(self, inputs): return tf.sigmoid(super().call(inputs)) """ Explanation: We compress output between 0 and 1 using sigmoid to match y everything below 0.5 counts as 0, everthing above as 1 End of explanation """ x = tf.constant(X, dtype='float32') y_true = tf.constant(y, dtype='float32') x.shape model = SigmoidLayer(input_dim=2) """ Explanation: We have 2d input now End of explanation """ loss_fn = tf.losses.binary_crossentropy # standard optimizer using advanced properties optimizer = tf.keras.optimizers.Adam(learning_rate=1e-1) # https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/metrics/Accuracy m = tf.keras.metrics.Accuracy() EPOCHS = 1000 losses = [] accuracies = [] for step in range(EPOCHS): # Open a GradientTape. with tf.GradientTape() as tape: # Forward pass. y_pred = model(x) # Loss value for this batch. 
loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred)) y_pred_binary = (tf.squeeze(y_pred) > 0.5).numpy().astype(float) m.update_state(tf.squeeze(y_true), y_pred_binary) accuracy = m.result().numpy() losses.append(loss) accuracies.append(accuracy) # Get gradients of weights wrt the loss. gradients = tape.gradient(loss, model.trainable_weights) # Update the weights of our linear layer. optimizer.apply_gradients(zip(gradients, model.trainable_weights)) print(loss) print(accuracy) plt.yscale('log') plt.ylabel("loss") plt.xlabel("epochs") plt.plot(losses) plt.ylabel("accuracy") plt.xlabel("epochs") plt.plot(accuracies) y_pred = model(x) y_pred_binary = (tf.squeeze(y_pred) > 0.5).numpy().astype(float) y_pred_binary y_true - y_pred_binary # below and above line plt.xlabel("x1") plt.ylabel("x2") plt.scatter(X[:,0], X[:,1], c=y_pred_binary, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k') """ Explanation: Reconsidering the loss function cross entropy is an alternative to squared error cross entropy can be used as an error measure when a network's outputs can be thought of as representing independent hypotheses activations can be understood as representing the probability that each hypothesis might be true the loss indicates the distance between what the network believes this distribution should be, and what the teacher says it should be http://www.cse.unsw.edu.au/~billw/cs9444/crossentropy.html End of explanation """ from tensorflow.keras.layers import Dense model = tf.keras.Sequential() model.add(Dense(units=1, activation='sigmoid', input_dim=2)) model.summary() %%time model.compile(loss=loss_fn, # binary cross entropy, unchanged from low level example optimizer=optimizer, # adam, unchanged from low level example metrics=['accuracy']) # does a similar thing internally as our loop from above history = model.fit(x, y_true, epochs=EPOCHS, verbose=0) loss, accuracy = model.evaluate(x, y_true) loss, accuracy plt.yscale('log') 
plt.ylabel("accuracy") plt.xlabel("epochs") plt.plot(history.history['accuracy']) plt.yscale('log') plt.ylabel("loss") plt.xlabel("epochs") plt.plot(history.history['loss']) y_pred = model.predict(x) y_pred_binary = (tf.squeeze(y_pred) > 0.5).numpy().astype(float) # below and above line plt.xlabel("x1") plt.ylabel("x2") plt.scatter(X[:,0], X[:,1], c=y_pred_binary, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k') """ Explanation: The same solution using high level Keas API End of explanation """
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/production_ml/solutions/mlmd_tutorial.ipynb
apache-2.0
!pip install --upgrade pip """ Explanation: Better ML Engineering with ML Metadata Learning Objectives Download the dataset Create an InteractiveContext Construct the TFX Pipeline Query the MLMD Database Introduction Assume a scenario where you set up a production ML pipeline to classify penguins. The pipeline ingests your training data, trains and evaluates a model, and pushes it to production. However, when you later try using this model with a larger dataset that contains different kinds of penguins, you observe that your model does not behave as expected and starts classifying the species incorrectly. At this point, you are interested in knowing: What is the most efficient way to debug the model when the only available artifact is the model in production? Which training dataset was used to train the model? Which training run led to this erroneous model? Where are the model evaluation results? Where to begin debugging? ML Metadata (MLMD) is a library that leverages the metadata associated with ML models to help you answer these questions and more. A helpful analogy is to think of this metadata as the equivalent of logging in software development. MLMD enables you to reliably track the artifacts and lineage associated with the various components of your ML pipeline. In this notebook, you set up a TFX Pipeline to create a model that classifies penguins into three species based on the body mass and the length and depth of their culmens, and the length of their flippers. You then use MLMD to track the lineage of pipeline components. Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook. Setup First, we install and import the necessary packages, set up paths, and download data. 
Upgrade Pip End of explanation """ !pip install -q -U tfx """ Explanation: Install and import TFX End of explanation """ import os import tempfile import urllib import pandas as pd import tensorflow_model_analysis as tfma from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext """ Explanation: Please ignore the incompatibility error and warnings. Make sure to re-run the cell. You must restart the kernel after installing TFX. Select Kernel > Restart kernel > Restart from the menu. Do not proceed with the rest of this notebook without restarting the kernel. Import other libraries End of explanation """ from tfx import v1 as tfx print('TFX version: {}'.format(tfx.__version__)) import ml_metadata as mlmd print('MLMD version: {}'.format(mlmd.__version__)) from ml_metadata.proto import metadata_store_pb2 """ Explanation: Import the MLMD library. End of explanation """ DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/penguin/data/labelled/penguins_processed.csv' _data_root = tempfile.mkdtemp(prefix='tfx-data') # TODO # Join various path components _data_filepath = os.path.join(_data_root, "penguins_processed.csv") urllib.request.urlretrieve(DATA_PATH, _data_filepath) """ Explanation: Download the dataset We use the Palmer Penguins dataset which can be found on Github. We processed the dataset by leaving out any incomplete records, and drops island and sex columns, and converted labels to int32. The dataset contains 334 records of the body mass and the length and depth of penguins' culmens, and the length of their flippers. You use this data to classify penguins into one of three species. End of explanation """ # TODO interactive_context = InteractiveContext() """ Explanation: Create an InteractiveContext To run TFX components interactively in this notebook, create an InteractiveContext. The InteractiveContext uses a temporary directory with an ephemeral MLMD database instance. 
In general, it is a good practice to group similar pipeline runs under a Context. End of explanation """ # TODO example_gen = tfx.components.CsvExampleGen(input_base=_data_root) interactive_context.run(example_gen) """ Explanation: Construct the TFX Pipeline A TFX pipeline consists of several components that perform different aspects of the ML workflow. In this notebook, you create and run the ExampleGen, StatisticsGen, SchemaGen, and Trainer components and use the Evaluator and Pusher component to evaluate and push the trained model. Refer to the components tutorial for more information on TFX pipeline components. Note: Constructing a TFX Pipeline by setting up the individual components involves a lot of boilerplate code. For the purpose of this notebook, it is alright if you do not fully understand every line of code in the pipeline setup. Instantiate and run the ExampleGen Component End of explanation """ # TODO statistics_gen = tfx.components.StatisticsGen( examples=example_gen.outputs['examples']) interactive_context.run(statistics_gen) """ Explanation: Instantiate and run the StatisticsGen Component End of explanation """ # TODO infer_schema = tfx.components.SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) interactive_context.run(infer_schema) """ Explanation: Instantiate and run the SchemaGen Component End of explanation """ # Define the module file for the Trainer component trainer_module_file = 'penguin_trainer.py' %%writefile {trainer_module_file} # Define the training algorithm for the Trainer module file import os from typing import List, Text import tensorflow as tf from tensorflow import keras from tfx import v1 as tfx from tfx_bsl.public import tfxio from tensorflow_metadata.proto.v0 import schema_pb2 # Features used for classification - culmen length and depth, flipper length, # body mass, and species. 
_LABEL_KEY = 'species' _FEATURE_KEYS = [ 'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g' ] def _input_fn(file_pattern: List[Text], data_accessor: tfx.components.DataAccessor, schema: schema_pb2.Schema, batch_size: int) -> tf.data.Dataset: return data_accessor.tf_dataset_factory( file_pattern, tfxio.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), schema).repeat() def _build_keras_model(): inputs = [keras.layers.Input(shape=(1,), name=f) for f in _FEATURE_KEYS] d = keras.layers.concatenate(inputs) d = keras.layers.Dense(8, activation='relu')(d) d = keras.layers.Dense(8, activation='relu')(d) outputs = keras.layers.Dense(3)(d) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=keras.optimizers.Adam(1e-2), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy()]) return model def run_fn(fn_args: tfx.components.FnArgs): schema = schema_pb2.Schema() tfx.utils.parse_pbtxt_file(fn_args.schema_path, schema) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, schema, batch_size=10) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, schema, batch_size=10) model = _build_keras_model() model.fit( train_dataset, epochs=int(fn_args.train_steps / 20), steps_per_epoch=20, validation_data=eval_dataset, validation_steps=fn_args.eval_steps) model.save(fn_args.serving_model_dir, save_format='tf') """ Explanation: Instantiate and run the Trainer Component End of explanation """ trainer = tfx.components.Trainer( module_file=os.path.abspath(trainer_module_file), examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], train_args=tfx.proto.TrainArgs(num_steps=100), eval_args=tfx.proto.EvalArgs(num_steps=50)) interactive_context.run(trainer) """ Explanation: Run the Trainer component. 
End of explanation """ _serving_model_dir = os.path.join(tempfile.mkdtemp(), 'serving_model/penguins_classification') eval_config = tfma.EvalConfig( model_specs=[ tfma.ModelSpec(label_key='species', signature_name='serving_default') ], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='SparseCategoricalAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.6}))) ]) ], slicing_specs=[tfma.SlicingSpec()]) evaluator = tfx.components.Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], schema=infer_schema.outputs['schema'], eval_config=eval_config) interactive_context.run(evaluator) pusher = tfx.components.Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=tfx.proto.PushDestination( filesystem=tfx.proto.PushDestination.Filesystem( base_directory=_serving_model_dir))) interactive_context.run(pusher) """ Explanation: Evaluate and push the model Use the Evaluator component to evaluate and 'bless' the model before using the Pusher component to push the model to a serving directory. End of explanation """ connection_config = interactive_context.metadata_connection_config store = mlmd.MetadataStore(connection_config) # All TFX artifacts are stored in the base directory base_dir = connection_config.sqlite.filename_uri.split('metadata.sqlite')[0] """ Explanation: Running the TFX pipeline populates the MLMD Database. In the next section, you use the MLMD API to query this database for metadata information. Query the MLMD Database The MLMD database stores three types of metadata: Metadata about the pipeline and lineage information associated with the pipeline components Metadata about artifacts that were generated during the pipeline run Metadata about the executions of the pipeline A typical production environment pipeline serves multiple models as new data arrives. 
When you encounter erroneous results in served models, you can query the MLMD database to isolate the erroneous models. You can then trace the lineage of the pipeline components that correspond to these models to debug your models Set up the metadata (MD) store with the InteractiveContext defined previously to query the MLMD database. End of explanation """ def display_types(types): # Helper function to render dataframes for the artifact and execution types table = {'id': [], 'name': []} for a_type in types: table['id'].append(a_type.id) table['name'].append(a_type.name) return pd.DataFrame(data=table) def display_artifacts(store, artifacts): # Helper function to render dataframes for the input artifacts table = {'artifact id': [], 'type': [], 'uri': []} for a in artifacts: table['artifact id'].append(a.id) artifact_type = store.get_artifact_types_by_id([a.type_id])[0] table['type'].append(artifact_type.name) table['uri'].append(a.uri.replace(base_dir, './')) return pd.DataFrame(data=table) def display_properties(store, node): # Helper function to render dataframes for artifact and execution properties table = {'property': [], 'value': []} for k, v in node.properties.items(): table['property'].append(k) table['value'].append( v.string_value if v.HasField('string_value') else v.int_value) for k, v in node.custom_properties.items(): table['property'].append(k) table['value'].append( v.string_value if v.HasField('string_value') else v.int_value) return pd.DataFrame(data=table) """ Explanation: Create some helper functions to view the data from the MD store. End of explanation """ display_types(store.get_artifact_types()) """ Explanation: First, query the MD store for a list of all its stored ArtifactTypes. End of explanation """ pushed_models = store.get_artifacts_by_type("PushedModel") display_artifacts(store, pushed_models) """ Explanation: Next, query all PushedModel artifacts. 
End of explanation """ pushed_model = pushed_models[-1] display_properties(store, pushed_model) """ Explanation: Query the MD store for the latest pushed model. This notebook has only one pushed model. End of explanation """ def get_one_hop_parent_artifacts(store, artifacts): # Get a list of artifacts within a 1-hop of the artifacts of interest artifact_ids = [artifact.id for artifact in artifacts] executions_ids = set( event.execution_id for event in store.get_events_by_artifact_ids(artifact_ids) if event.type == mlmd.proto.Event.OUTPUT) artifacts_ids = set( event.artifact_id for event in store.get_events_by_execution_ids(executions_ids) if event.type == mlmd.proto.Event.INPUT) return [artifact for artifact in store.get_artifacts_by_id(artifacts_ids)] """ Explanation: One of the first steps in debugging a pushed model is to look at which trained model is pushed and to see which training data is used to train that model. MLMD provides traversal APIs to walk through the provenance graph, which you can use to analyze the model provenance. End of explanation """ # TODO parent_artifacts = get_one_hop_parent_artifacts(store, [pushed_model]) display_artifacts(store, parent_artifacts) """ Explanation: Query the parent artifacts for the pushed model. End of explanation """ exported_model = parent_artifacts[0] display_properties(store, exported_model) """ Explanation: Query the properties for the model. End of explanation """ model_parents = get_one_hop_parent_artifacts(store, [exported_model]) display_artifacts(store, model_parents) """ Explanation: Query the upstream artifacts for the model. End of explanation """ used_data = model_parents[0] display_properties(store, used_data) """ Explanation: Get the training data the model trained with. End of explanation """ display_types(store.get_execution_types()) """ Explanation: Now that you have the training data that the model trained with, query the database again to find the training step (execution). 
Query the MD store for a list of the registered execution types. End of explanation """ def find_producer_execution(store, artifact): executions_ids = set( event.execution_id for event in store.get_events_by_artifact_ids([artifact.id]) if event.type == mlmd.proto.Event.OUTPUT) return store.get_executions_by_id(executions_ids)[0] # TODO trainer = find_producer_execution(store, exported_model) display_properties(store, trainer) """ Explanation: The training step is the ExecutionType named tfx.components.trainer.component.Trainer. Traverse the MD store to get the trainer run that corresponds to the pushed model. End of explanation """
rsterbentz/phys202-2015-work
assignments/assignment09/IntegrationEx02.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import seaborn as sns from scipy import integrate import math # From https://docs.python.org/3.3/library/math.html """ Explanation: Integration Exercise 2 Imports End of explanation """ def integrand(x, a): return 1.0/(x**2 + a**2) def integral_approx(a): # Use the args keyword argument to feed extra arguments to your integrand I, e = integrate.quad(integrand, 0, np.inf, args=(a,)) return I def integral_exact(a): return 0.5*np.pi/a print("Numerical: ", integral_approx(1.0)) print("Exact : ", integral_exact(1.0)) assert True # leave this cell to grade the above integral """ Explanation: Indefinite integrals Here is a table of definite integrals. Many of these integrals has a number of parameters $a$, $b$, etc. Find five of these integrals and perform the following steps: Typeset the integral using LateX in a Markdown cell. Define an integrand function that computes the value of the integrand. Define an integral_approx funciton that uses scipy.integrate.quad to peform the integral. Define an integral_exact function that computes the exact value of the integral. Call and print the return value of integral_approx and integral_exact for one set of parameters. 
Here is an example to show what your solutions should look like: Example Here is the integral I am performing: $$ I_1 = \int_0^\infty \frac{dx}{x^2 + a^2} = \frac{\pi}{2a} $$ End of explanation """ def integrand(x,a): return np.exp((-a)*(x**2)) def integral_approx(a): I, e = integrate.quad(integrand, 0, np.inf, args=(a,)) return I def integral_exact(a): return 0.5*(np.sqrt(np.pi/a)) print("Numerical: ", integral_approx(1.0)) print("Exact : ", integral_exact(1.0)) assert True # leave this cell to grade the above integral """ Explanation: Integral 1 $$ \large I_A = \int_0^\infty e^{-ax^2} dx = \frac{1}{2}\sqrt{\frac{\pi}{a}} $$ End of explanation """ def integrand(x,a,b): return np.exp(-a*x)*np.cos(b*x) def integral_approx(a,b): I, e = integrate.quad(integrand, 0, np.inf, args=(a,b,)) return I def integral_exact(a,b): return a/(a**2+b**2) print("Numerical: ", integral_approx(1.0,2.0)) print("Exact : ", integral_exact(1.0,2.0)) assert True # leave this cell to grade the above integral """ Explanation: Integral 2 $$ \large I_B = \int_0^\infty e^{-ax}\cos{bx} dx = \frac{a}{a^2+b^2} $$ End of explanation """ math.factorial(5) def integrand(x,m,n): return (x**m)*((np.log(x))**n) def integral_approx(m,n): I, e = integrate.quad(integrand, 0, 1, args=(m,n,)) return I def integral_exact(m,n): return ((-1)**n*math.factorial(n))/((m+1)**(n+1)) print("Numerical: ", integral_approx(1.0,2.0)) print("Exact : ", integral_exact(1.0,2.0)) assert True # leave this cell to grade the above integral """ Explanation: Integral 3 $$ \large I_C = \int_0^1 x^m(\ln{x})^n dx = \frac{(-1)^nn!}{(m+1)^{n+1}} \quad m>-1,n=0,1,2,...$$ End of explanation """ def integrand(x,a,b): return 1/(a+(b*np.sin(x))) def integral_approx(a,b): I, e = integrate.quad(integrand, 0, 2*np.pi, args=(a,b,)) return I def integral_exact(a,b): return 2*np.pi/(np.sqrt((a**2)-(b**2))) print("Numerical: ", integral_approx(2.0,1.0)) print("Exact : ", integral_exact(2.0,1.0)) assert True # leave this cell to grade the above 
integral """ Explanation: Integral 4 $$ \large I_D = \int_0^{2\pi} \frac{dx}{a+b\sin{x}} = \frac{2\pi}{\sqrt{a^2-b^2}} $$ End of explanation """ def integrand(x): return np.log(1+x)/x def integral_approx(): I, e = integrate.quad(integrand, 0, 1) return I def integral_exact(): return (np.pi**2)/12 print("Numerical: ", integral_approx()) print("Exact : ", integral_exact()) assert True # leave this cell to grade the above integral """ Explanation: Integral 5 $$ \large I_E = \int_0^1 \frac{\ln{(1+x)}}{x} dx = \frac{\pi^2}{12} $$ End of explanation """
liyigerry/msm_test
examples/bayesian-msm.ipynb
apache-2.0
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from mdtraj.utils import timing
from msmbuilder.example_datasets import load_doublewell
from msmbuilder.cluster import NDGrid
from msmbuilder.msm import BayesianMarkovStateModel, MarkovStateModel
"""
Explanation: BayesianMarkovStateModel
This example demonstrates the class BayesianMarkovStateModel, which uses Metropolis Markov chain Monte Carlo (MCMC) to sample over the posterior distribution of transition matrices, given the observed transitions in your dataset. This can be useful for evaluating the uncertainty due to sampling in your dataset.
End of explanation
"""
# Load the example double-well trajectories and show the distribution of
# sampled positions (log-scaled histogram so both wells are visible).
trjs = load_doublewell(random_state=0)['trajectories']
plt.hist(np.concatenate(trjs), bins=50, log=True)
plt.ylabel('Frequency')
plt.show()
"""
Explanation: Load some double-well data
End of explanation
"""
# Discretize the coordinate into 10 grid states, then fit two MSMs on the
# same state sequences: a maximum-likelihood estimator and a Bayesian (MCMC)
# estimator that draws 10000 posterior samples.
clusterer = NDGrid(n_bins_per_feature=10)
mle_msm = MarkovStateModel(lag_time=100)
b_msm = BayesianMarkovStateModel(lag_time=100, n_samples=10000, n_steps=1000)

states = clusterer.fit_transform(trjs)
with timing('running mcmc'):
    b_msm.fit(states)

mle_msm.fit(states)

# Trace two transition-matrix entries over the MCMC samples; the horizontal
# black line marks the single MLE point estimate for comparison.
plt.subplot(2, 1, 1)
plt.plot(b_msm.all_transmats_[:, 0, 0])
plt.axhline(mle_msm.transmat_[0, 0], c='k')
plt.ylabel('t_00')
plt.subplot(2, 1, 2)
plt.ylabel('t_23')
plt.xlabel('MCMC Iteration')
plt.plot(b_msm.all_transmats_[:, 2, 3])
plt.axhline(mle_msm.transmat_[2, 3], c='k')
plt.show()

# Same MCMC-vs-MLE comparison for the slowest implied timescale.
plt.plot(b_msm.all_timescales_[:, 0], label='MCMC')
plt.axhline(mle_msm.timescales_[0], c='k', label='MLE')
plt.legend(loc='best')
plt.ylabel('Longest timescale')
plt.xlabel('MCMC iteration')
plt.show()
"""
Explanation: We'll discretize the space using 10 states
And the build one MSM using the MLE transition matrix estimator, and one with the Bayesian estimator
End of explanation
"""
# Repeat with a finer 50-state discretization: fewer posterior samples but
# far more MCMC steps per sample (per the note below, convergence is harder
# in the larger state space).
clusterer = NDGrid(n_bins_per_feature=50)
mle_msm = MarkovStateModel(lag_time=100)
b_msm = BayesianMarkovStateModel(lag_time=100, n_samples=1000, n_steps=100000)

states = clusterer.fit_transform(trjs)
with timing('running mcmc (50 states)'):
    b_msm.fit(states)

mle_msm.fit(states)

# Slowest timescale trace for the 50-state models.
plt.plot(b_msm.all_timescales_[:, 0], label='MCMC')
plt.axhline(mle_msm.timescales_[0], c='k', label='MLE')
plt.legend(loc='best')
plt.ylabel('Longest timescale')
plt.xlabel('MCMC iteration')

# Transition-matrix entry trace for the 50-state models.
plt.plot(b_msm.all_transmats_[:, 0, 0], label='MCMC')
plt.axhline(mle_msm.transmat_[0, 0], c='k', label='MLE')
plt.legend(loc='best')
plt.ylabel('t_00')
plt.xlabel('MCMC iteration')
"""
Explanation: Now lets try using 50 states
The MCMC sampling is a lot harder to converge
End of explanation
"""
rocketproplab/Guides
Guides/python/excelToPandas.ipynb
mit
import pandas as pd
import os
"""
Explanation: Import Excel or CSV To Pandas
This file covers the process of importing excel and csv files into a pandas dataframe.
Note: the methods for importing excel and csv files is almost identical. The major difference is in the method used. This notebook serves as a tutorial for both.
Importing Excel (xlsx): <br>
The function used is read_excel. <br>
Importing comma separated values (csv): <br>
The function used is read_csv. <br>
Step 1
Lets start by importing pandas and os. We will be using pandas to create a dataframe from our data, and os to get file paths.
End of explanation
"""
# Build an absolute path to the example workbook, resolved against the
# current working directory.
dirPath = os.path.realpath('.')
fileName = 'assets/coolingExample.xlsx'
filePath = os.path.join(dirPath, fileName)
"""
Explanation: Step 2
Now lets create a variable, <code>filePath</code>, that is a string containing the full path to the file we want to import. The code below looks in the current working directory for the file given a file name input by the user. This isn't necessary, and is just included for convienence. Alternatively, user can input a full path into the <code>filePath</code> variable.
End of explanation
"""
# header=0 takes the first spreadsheet row as the column labels.
df = pd.read_excel(filePath, header=0)
df.head()
"""
Explanation: Step 3
Great! Now lets read the data into a dataframe called <code>df</code>. This will allow our data to be accessible by the string in the header.
End of explanation
"""
# Columns are addressed by their header strings (keys).
df[df.columns[0]]
"""
Explanation: Our data is now accessible by a key value. The keys are the column headers in the dataframe. In this example case, those are 'Time (s) - Dev1/ai0' and 'Temperature - Dev1/ai0'. For example, lets access the data in the first column.
End of explanation
"""
# An invalid key raises KeyError; catch it so the notebook does not show a
# long traceback.
try:
    df[1]
except KeyError:
    print("KeyError: 1 - not a valid key")
"""
Explanation: What would happen if we tried to access the data with an invalid key, say <code>1</code> for example? Lets try it to find out.
Note: I enclose this code in a <code>try: except:</code> statement in order to prevent a huge error from being generated.
End of explanation
"""
# Enumerate columns without knowing their names in advance.
cols = df.columns
for col in cols:
    print(df[col])
"""
Explanation: So lets say you have a large dataframe with unknown columns. There is a simple way to index them without having prior knowledge of what the dataframe columns are. Namely, the <code>columns</code> method in pandas.
End of explanation
"""
import matplotlib.pyplot as plt
"""
Explanation: Data Manipulation (Plots)
Now that we have the data easily accessible in python, lets look at how to plot it. <code>Pandas</code> allows you to use matplotlib to plot, however it is done using methods built into pandas. Although the methods to create an manipulate plots are built into <code>Pandas</code>, we will still have to import matplotlib to save and show the plots.
End of explanation
"""
# With only two columns, df.plot() infers x (time) and y (temperature).
plt.figure(1)
ax = df.plot()
plt.show()
"""
Explanation: In order to demonstrate the plotting capabilities of pandas arrays, lets use the example data that we imported earlier. The data frame contains only the two columns that were in the file; temperature and time. Because of this simplicity, we can trust pandas to properly interpret the first column as time and the second column as th measurement (temperature). Thus we can plot with the simple command.
<code>df.plot()</code>
End of explanation
"""
# Better: name the x and y columns explicitly.
plt.figure(2)
ax = df.plot(cols[0], cols[1])
plt.show()
"""
Explanation: While this simplification is nice, it is generally better to specify what data you want to plot. Particularly if you are automating the plotting of a large set of dataframes. To do this, specify the <code>x</code> and <code>y</code> arrays in your dataframe as you would in a standard <code>matplotlib</code> plot call, however since this plotting function is a method of the dataframe, you need only specify the column. I.e.
End of explanation
"""
# Decorate the axes returned by df.plot().
plt.figure(3)
ax = df.plot(cols[0], cols[1])
ax.set_title('This is a Title')
ax.set_ylabel('Temperature (deg F)')
ax.grid()
plt.show()
"""
Explanation: Now that we have the basics down, lets spice up the plot a little bit.
End of explanation
"""
df[cols[0]][0]
"""
Explanation: Data Manipulation (Timestamps)
One thing you probably noticed in these plots is that the time axis isn't all that useful. It would be better to change the timestamps to a more useful form like seconds since start. Lets go through the process of making that conversion. First, lets see what the timestamp currently looks like.
End of explanation
"""
from datetime import datetime, date

# Convert the datetime.time stamps to seconds elapsed since the first sample.
# time objects only support subtraction after being combined with a date, so
# every stamp is anchored to the same date. Hoisting a single `today` (rather
# than calling date.today() twice per iteration, as the original loop did)
# also keeps the result consistent if the cell runs across midnight.
today = date.today()
startTime = df[cols[0]][0]
start = datetime.combine(today, startTime)
timeArray = [(datetime.combine(today, t) - start).total_seconds()
             for t in df[cols[0]]]
"""
Explanation: Good news! Since python interpreted the date as a datetime object, we can use datetime object methods to determine the time in seconds. The one caveat is that we can only determine a time difference, not an absolute time. For more on this, read this stackoverflow question. The first thing we have to do is convert these <code>datetime.time</code> objects into <code>datetime.datetime</code> objects using <code>datetime.combine</code>
Note: importing datetime is a little weird.. <code>datetime</code> is both a module and a class.
End of explanation
"""
plt.figure(4)
plt.plot(timeArray, df[cols[1]], 'b')
plt.title('This is a graph with a better time axis')
plt.ylabel('Temperature (deg F)')
plt.xlabel('Time (s)')
plt.grid()
plt.show()
"""
Explanation: Note: There is probably a better way of doing this (i.e. without a loop, but I'm tired and can't think of anything right now)
End of explanation
"""
rasilab/ferrin_elife_2017
scripts/run_simulations_whole_cell_parameter_sweep.ipynb
gpl-3.0
# sequence input and output
import Bio.SeqIO
# provides dictionary of codon names
from Bio.SeqUtils.CodonUsage import SynonymousCodons
# for converting 3 letter amino acid code to 1 letter code
from Bio.SeqUtils import seq1
# for fast access of fasta files
import pyfaidx
# for parsing GFF3 files
import HTSeq
# for tab data processing
import pandas as pd
# numeric and matrix library
import numpy as np
# shell utilities
import shutil
# for submitting shell commands
import subprocess as sp
# for string matching
import re

# create a dictionary of codon names and number (arranged alphabetically by aa)
codonnum = 0
codonDict = dict()
for aa in sorted(SynonymousCodons, key=lambda aa3: seq1(aa3)):
    if aa == 'STOP':
        continue
    for codon in sorted(SynonymousCodons[aa]):
        # these two codons are numbered out of order for consistent notation
        # with Subramaniam et al. Cell 2014
        if codon in ['AGC']:
            codonDict['AGC'] = 59
        elif codon in ['AGT']:
            codonDict['AGT'] = 60
        else:
            codonDict[codon] = codonnum
            codonnum += 1


def get_numerical_codon_sequence(seq):
    """Convert a nucleotide string to space-separated codon indices (0-63).

    The final codon of `seq` is deliberately excluded by the range bound
    (len(seq) - 3). An unknown codon appends the sentinel '-1' and then
    re-raises KeyError so the caller sees the malformed sequence.
    """
    numseq = list()
    for pos in range(0, len(seq) - 3, 3):
        try:
            numseq.append(str(codonDict[seq[pos:pos + 3]]))
        except KeyError:
            # record a sentinel for the bad codon, then re-raise; an
            # unreachable `return None` that followed the bare raise in the
            # original was removed as dead code.
            numseq.append('-1')
            raise
    return ' '.join(numseq)


# starting yfp sequence for leucine starvation expts
yfp0 = Bio.SeqIO.read('../annotations/simulations/yfp0.fa', 'fasta')
yfp0 = str(yfp0.seq)

# starting sequence for serine starvation expts
# all ser codons in yfp0 were AGC
yfp_agc = list(yfp0)
for pos in range(0, len(yfp0), 3):
    current_codon = yfp0[pos:pos + 3]
    if current_codon in SynonymousCodons['SER']:
        yfp_agc[pos:pos + 3] = 'AGC'
yfp_agc = ''.join(yfp_agc)
"""
Explanation: Run whole-cell simulation and parameter sweep reporter simulation
<div id="toc-wrapper"><h3> Table of Contents </h3><div id="toc" style="max-height: 787px;"><ol class="toc-item"><li><a href="#Globals">Globals</a></li><li><a href="#Get-codon-sequence-of-all-E.-coli-genes">Get codon sequence of all genes</a></li><li><a href="#Calculate-mRNA-copy-numbers-and-translation-initiation-rate-for-all-E.-coli-mRNAs">Calculate mRNA copy numbers and translation initiation rate for all mRNAs</a></li><li><a href="#Prepare-tRNA-input-files">Prepare tRNA input files</a></li><li><a href="#Create-mRNA-sequence-file-for-leucine-starvation-whole-cell-simulation-(Run-1)">Create mRNA sequence file for leucine starvation whole-cell simulation (Run 1)</a></li><li><a href="#Simulation-run-1:-Single-whole-cell-simulation-to-calculate-steady-state-charged-tRNA-fraction-during-Leu-starvation">Simulation run 1: Single whole-cell simulation to calculate steady state charged tRNA fraction during Leu starvation</a></li><li><a href="#Create-mRNA-sequence-for-serine-starvation-whole-cell-simulation-(Run-12)">Create mRNA sequence for serine starvation whole cell simulation (Run 12)</a></li><li><a href="#Simulation-run-12:-Single-whole-cell-simulation-to-calculate-steady-state-charged-tRNA-fraction-during-Ser-starvation">Simulation run 12: Single whole-cell simulation to calculate steady state charged tRNA fraction during Ser starvation</a></li><li><a href="#Create-average-tRNA-concentration-file-during-leucine-starvation-based-on-whole-cell-simulation-Run-1">Create average tRNA concentration file during leucine starvation based on whole-cell simulation Run 1</a></li><li><a href="#Create-average-tRNA-concentration-file-during-serine-starvation-based-on-whole-cell-simulation-Run-12">Create average tRNA concentration file during serine starvation based on whole-cell simulation Run 12</a></li><li><a href="#Create-mRNA-sequences-for-simulation-run-2-with-systematically-varying-tRNA-accommodation-rate-(stall-duration)-at-Leu-codons">Create mRNA sequences for simulation run 2 with systematically varying tRNA accommodation rate (stall duration) at Leu codons</a></li><li><a href="#Create-varying-tRNA-accommodation-rate-(stall-duration)-at-Leu-codons-for-simulation-run-2">Create varying tRNA accommodation rate (stall duration) at Leu codons for simulation run 2</a></li><li><a href="#Simulation-run-2:-Reporter-simulation-with-systematically-varying-duration-of-ribosome-stalling-during-leucine-starvation">Simulation run 2: Reporter simulation with systematically varying duration of ribosome stalling during leucine starvation</a></li><li><a href="#Create-mRNA-sequences-for-simulation-run-13-with-systematically-varying-tRNA-accommodation-rate-(stall-duration)-at-Ser-codons">Create mRNA sequences for simulation run 13 with systematically varying tRNA accommodation rate (stall duration) at Ser codons</a></li><li><a href="#Create-varying-tRNA-accommodation-rate-(stall-duration)-at-Ser-codons-for-simulation-run-13">Create varying tRNA accommodation rate (stall duration) at Ser codons for simulation run 13</a></li><li><a href="#Simulation-run-13:-Reporter-simulation-with-systematically-varying-duration-of-ribosome-stalling-during-serine-starvation">Simulation run 13: Reporter simulation with systematically varying duration of ribosome stalling during serine starvation</a></li></ol></div></div>
Globals
End of explanation
"""
genome = '../annotations/simulations/NC_000913.fna'
annotations = '../annotations/simulations/NC_000913.gff'

# read genome as a python dictionary
genome = pyfaidx.Fasta(genome)
annotationrecords = HTSeq.GFF_Reader(annotations)

# Collect every protein-coding CDS, skipping pseudogenes and the three
# selenoprotein genes whose internal TGA would break codon translation.
annotationdf = dict()
for record in annotationrecords:
    if record.type != 'CDS':
        continue
    if 'pseudo' in record.attr.keys() and record.attr['pseudo'] == 'true':
        continue
    # fdnG, fdoG, fdhF have TGA encoded selenocysteine codons
    if record.attr['gene'] in ['fdnG', 'fdoG', 'fdhF']:
        continue
    sequence = str(genome[record.iv.chrom][record.iv.start:record.iv.end])
    if record.iv.strand == '-':
        sequence = str(HTSeq.Sequence(sequence).get_reverse_complement())
    annotationdf[record.attr['ID']] = {
        'iv': record.iv,
        'gene': record.attr['gene'],
        'product': record.attr['product'],
        'start': record.iv.start,
        'end': record.iv.end,
        'strand': record.iv.strand,
        'sequence': sequence,
    }
annotationdf = pd.DataFrame.from_dict(annotationdf, orient='index')
annotationdf['length'] = annotationdf['iv'].apply(lambda x: x.length)
annotationdf['numsequence'] = annotationdf['sequence'].apply(
    get_numerical_codon_sequence)
"""
Explanation: Get codon sequence of all E. coli genes
End of explanation
"""
# Number of ribosomes (molecules per cell), 37C, 20 min doubling
# Bremer 2008
nRibo = 73000
# Number of mrna (nt per cell), 37C, 20 min doubling
# Bremer 2008
nMrna = 4.3e6
# Bound fraction of ribosomes, 37C, 20 min doubling
# Bremer 2008
boundRiboFract = 0.85
# Mean elongation rate (s^-1), 37C, 20 min doubling
# Bremer 2008
meanElongationRate = 22

rawdata = pd.read_table(
    '../annotations/simulations/ecoli.mrna.concn.and.te.li.2014.csv',
    sep=',',
)
rawdata = rawdata.dropna().set_index('Gene')
rawdata['mRNA level (RPKM)'] = rawdata['mRNA level (RPKM)'].apply(int)
combined = annotationdf.join(rawdata, on='gene', how='inner')

# the total number of mrnaribonucleotides in the cell should be equal to
# the number of each mrna species multiplied by its length
combined['rpkm_nt'] = combined['mRNA level (RPKM)'] * combined['length']
mrnaNormalization = combined['rpkm_nt'].sum() / nMrna
combined['mrnaCopyNumber'] = combined['mRNA level (RPKM)'].apply(
    lambda x: int(np.round(x / mrnaNormalization)))
combined = combined[combined.mrnaCopyNumber > 0]

# basic idea for TE normalization below is the equality:
# total number of mrna bound ribosomes in the cell =
# sum over all mrnas( initation rate * mrna copy number
# * length / elongation rate)
combined['boundribosomes'] = (combined['length'] *
                              combined['Translation efficiency (AU)'] *
                              combined['mrnaCopyNumber'])
initiationRateNormalization = (combined['boundribosomes'].sum() /
                               (nRibo * boundRiboFract * meanElongationRate))
combined['initiationRate'] = (
    combined['Translation efficiency (AU)'] /
    initiationRateNormalization).apply(lambda x: np.round(x, 4))

# Write initiation rate, mRNA copy number and the codon sequence to a file
# for using in the simulation
temp = combined[['initiationRate', 'mrnaCopyNumber', 'numsequence'
                 ]].sort_values(
                     by=['mrnaCopyNumber'], ascending=False)
temp.to_csv(
    '../annotations/simulations/run1/run1_ecoli_mrnas.csv',
    sep='\t',
    index=False,
    header=None)
"""
Explanation: Calculate mRNA copy numbers and translation initiation rate for all E. coli mRNAs
End of explanation
"""
max_aminoacylation_rate = 2.0e10

# Read tRNA concentrations, anticodon, cognate codons
# from Table 2 in Dong et al. J. Mol. Biol. 1996.
# Gly1 and Ile2 entries were manually deleted from the table
# since these were not measured in this study.
# Sec tRNA was deleted since we are not considering
# this non-canonical translation.
# A typo for Val1 anticodon was corrected (TAG → TAC).
# (Context manager closes the handle; the original leaked an open file.)
with open('../annotations/simulations/ecoli.trna.abundance.dong1996.txt'
          ) as trnafile:
    lines = trnafile.read().splitlines()

trnas = dict()
# read every 6th line until last but one.
for trnaindex, trna in enumerate(lines[0:-1:6]):
    trnas[trna] = dict()
    trnas[trna]['anticodon'] = lines[trnaindex * 6 + 1].strip().replace(
        'U', 'T')
    trnas[trna]['codons'] = [
        codon.strip().replace('U', 'T')
        for codon in lines[trnaindex * 6 + 2].split(',')
    ]
    trnas[trna]['mean_concn'] = int(lines[trnaindex * 6 + 4].split('(')[0])
    trnas[trna]['std_concn'] = int(
        lines[trnaindex * 6 + 4].split('(')[1].split(')')[0].strip())
    trnas[trna]['fraction'] = float(lines[trnaindex * 6 + 5].strip())
    trnas[trna]['aminoacid'] = trna[:3].lower()

# Ile2 was not measured in Dong 1996. So assigning it to Ile1.
trnas['Ile1']['codons'].append('ATA')

# Calculate the factors for codon-tRNA interaction
# that multiply the kcat/KM for codon-tRNA reading:
# w = 1 for Watson-Crick base pairs,
# w = 0.64 for purine-pyrimidine mismatch,
# w = 0.61 for purine-purine mismatch. (Weights based on Shah 2013).
# If two trnas have the same anticodon
# they are considered as a single trna species in the simulation.
concentrations = dict() cognate_pairings = dict() weights = dict() for trna in trnas: cognatecodons = trnas[trna]['codons'] if trnas[trna]['anticodon'] not in concentrations: concentrations[trnas[trna]['anticodon']] = trnas[trna]['mean_concn'] else: concentrations[trnas[trna]['anticodon']] += trnas[trna]['mean_concn'] for codon in cognatecodons: # pairing at the wobble position. pairing = codon[-1:] + trnas[trna]['anticodon'][:1] watson_crick_pairs = ['GC', 'AT', 'TA', 'CG'] pur_pyr_pairs = ['GT', 'TG', 'CA'] pur_pur_pairs = ['TT', 'AA'] if pairing in watson_crick_pairs: weight = 1 elif pairing in pur_pyr_pairs: weight = 0.64 else: weight = 0.61 if codon not in cognate_pairings: cognate_pairings[codon] = [trnas[trna]['anticodon']] weights[codon] = [weight] elif trnas[trna]['anticodon'] not in cognate_pairings[codon]: cognate_pairings[codon].append(trnas[trna]['anticodon']) weights[codon].append(weight) # trnaindices are sorted alphabetically. # concentrations file. outputFile = open( "../annotations/simulations/wholecell.trna.concentrations.tsv", "write") outputFile.write('trnaindex\tanticodon\tmolpercell\taminoacid\n') # trnaindex 0 is a dummy trna without any anticodon. outputFile.write('0\tNNN\t0\txxx\n') trnaindices = dict() for trnaindex, anticodon in enumerate(sorted(concentrations)): aminoacid = [ trnas[trna]['aminoacid'] for trna in trnas if trnas[trna]['anticodon'] == anticodon ][0] trnaindices[anticodon] = trnaindex + 1 outputFile.write( str(trnaindex + 1) + '\t' + anticodon + '\t' + '%d' % concentrations[anticodon] + '\t' + aminoacid + '\n') outputFile.close() # aminoacylation rates file. 
outputFile = open( "../annotations/simulations/wholecell.trna.aminoacylation.rate.tsv", "write") outputFile.write('trnaindex\tanticodon\tkcatBykm\n') outputFile.write('0\tNNN\t0\n') trnaindices = dict() for trnaindex, anticodon in enumerate(sorted(concentrations)): trnaindices[anticodon] = trnaindex + 1 outputFile.write( str(trnaindex + 1) + '\t' + anticodon + '\t' + '%d' % max_aminoacylation_rate + '\n') outputFile.close() # cognate pairings file. outputFile = open("../annotations/simulations/wholecell.cognate.pairs.tsv", "write") outputFile.write('codonindex\tcodon\ttrnaindex1\ttrnaindex2\n') for codon in sorted(codonDict, key=lambda x: codonDict[x]): if len(cognate_pairings[codon]) == 1: outputFile.write( str(codonDict[codon]) + '\t' + codon + '\t' + '%d' % trnaindices[cognate_pairings[codon][0]] + '\t0\n') elif len(cognate_pairings[codon]) == 2: outputFile.write( str(codonDict[codon]) + '\t' + codon + '\t' + '%d' % trnaindices[cognate_pairings[codon][0]] + '\t' + '%d' % trnaindices[cognate_pairings[codon][1]] + '\n') outputFile.close() # cognate weights file. 
outputFile = open("../annotations/simulations/wholecell.cognate.weights.tsv", "write") outputFile.write('codonindex\tcodon\ttrnaweight1\ttrnaweight2\n') for codon in sorted(codonDict, key=lambda x: codonDict[x]): if len(weights[codon]) == 1: outputFile.write( str(codonDict[codon]) + '\t' + codon + '\t' + '%0.2f' % weights[codon][0] + '\t0.00\n') elif len(weights[codon]) == 2: outputFile.write( str(codonDict[codon]) + '\t' + codon + '\t' + '%0.2f' % weights[codon][0] + '\t' + '%0.2f' % weights[codon][1] + '\n') outputFile.close() """ Explanation: Prepare tRNA input files tRNA concentration tRNA-codon cognate pairs tRNA-codon cognate weights tRNA aminoacylation rate constants End of explanation """ mutation_locations = [ { 15: 'cta' }, { 10: 'cta', 14: 'cta', 15: 'cta' }, ] yfpmutants = dict() for mutant in mutation_locations: key = '_'.join(['yfp'] + [ codon + str(location) for location, codon in mutant.items() ]) yfpmutants[key] = list(yfp0) leucodon_number = 0 for position in range(0, len(yfp0), 3): currentcodon = yfp0[position:position + 3] # proceed only if the codon is a Leu codon (which are all CTG in yfp0) if currentcodon not in ['CTG']: continue leucodon_number += 1 for location in mutant.keys(): if leucodon_number == location: yfpmutants[key][position:position + 3] = mutant[ location].upper() yfpmutants[key] = ''.join(yfpmutants[key]) # mrna copy number is arbitrary, but kept low so that # ribosomes are not overloaded defaultMrnaCopyNumber = 10 # per cell # median initiation rate of all E. 
coli mRNAs in the simulation, s-1 defaultInitationRate = 0.3 # s-1, listOfInitiationRates = [defaultInitationRate] outputFile = '../annotations/simulations/run1/run1_ecoli_and_reporter_mrnas.csv' shutil.copyfile('../annotations/simulations/run1/run1_ecoli_mrnas.csv', outputFile) for initiationRate in listOfInitiationRates: File = open(outputFile, 'a') File.write("%0.3f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, get_numerical_codon_sequence(yfp0[:-3]))) for mutant in sorted(yfpmutants, reverse=True): num_seq = ''.join( get_numerical_codon_sequence(yfpmutants[mutant][:-3])) File.write("%0.3f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, num_seq)) File.close() """ Explanation: Create mRNA sequence file for leucine starvation whole-cell simulation (Run 1) End of explanation """ %%writefile simulation_run_1.py #!/usr/bin/env python # SBATCH --mem=8000 import subprocess as sp import sys jobindex = int(sys.argv[1]) currentindex = -1 for starvationfactor in [0.01]: currentindex += 1 if currentindex != jobindex: continue sp.check_output(' '.join([ './wholecell_simulation', '--threshold-time', '100', '--total-time', '150', '--aminoacid', 'leu', 'starvationfactor', '%0.2f' % starvationfactor, '--aminoacid', 'leu', 'relativetrnaaminoacylationrates', '1', '2.2', '1.2', '1', '0.1', '--output-prefix', '../rawdata/simulations/run1/', '--input-genes', '../annotations/simulations/run1/run1_ecoli_and_reporter_mrnas.csv' ]), shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(1): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '15', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_1.py', str(index) ]) """ Explanation: Simulation run 1: Single whole-cell simulation to calculate steady state charged tRNA fraction during Leu 
starvation End of explanation """ aa = 'SER' mutation_locations = [{ 2: 'tcg', 3: 'tcg', 4: 'tcg', 5: 'tcg', 6: 'tcg', 7: 'tcg', 8: 'tcg' }, ] yfpmutants = dict() for mutant in mutation_locations: key = '_'.join(['yfp'] + [ codon + str(location) for location, codon in mutant.items() ]) yfpmutants[key] = list(yfp_agc) codon_number = 0 for position in range(0, len(yfp_agc), 3): currentcodon = yfp_agc[position:position + 3] # proceed only if the codon is a Leu codon (which are all CTG in yfp0) if currentcodon not in SynonymousCodons[aa]: continue codon_number += 1 for location in mutant.keys(): if codon_number == location: yfpmutants[key][position:position + 3] = mutant[ location].upper() yfpmutants[key] = ''.join(yfpmutants[key]) # mrna copy number is arbitrary, but kept low so that # ribosomes are not overloaded defaultMrnaCopyNumber = 10 # per cell # median initiation rate of all E. coli mRNAs in the simulation, s-1 defaultInitationRate = 0.3 # s-1, listOfInitiationRates = [defaultInitationRate] outputFile = '../annotations/simulations/run12/run12_ecoli_and_reporter_mrnas.csv' shutil.copyfile('../annotations/simulations/run1/run1_ecoli_mrnas.csv', outputFile) for initiationRate in listOfInitiationRates: File = open(outputFile, 'a') File.write("%0.3f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, get_numerical_codon_sequence(yfp_agc[:-3]))) for mutant in sorted(yfpmutants, reverse=True): num_seq = ''.join( get_numerical_codon_sequence(yfpmutants[mutant][:-3])) File.write("%0.3f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, num_seq)) File.close() """ Explanation: Create mRNA sequence for serine starvation whole cell simulation (Run 12) End of explanation """ %%writefile simulation_run_12.py #!/usr/bin/env python # SBATCH --mem=8000 import subprocess as sp import sys jobindex = int(sys.argv[1]) currentindex = -1 for starvationfactor in [0.01]: currentindex += 1 if currentindex != jobindex: continue sp.check_output(' '.join([ './wholecell_simulation', 
'--threshold-time', '100', '--total-time', '150', '--aminoacid', 'ser', 'starvationfactor', '%0.2f' % starvationfactor, '--aminoacid', 'ser', 'relativetrnaaminoacylationrates', '0.1', '1.5', '1.5', '1', '--output-prefix', '../rawdata/simulations/run12/', '--input-genes', '../annotations/simulations/run12/run12_ecoli_and_reporter_mrnas.csv' ]), shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(1): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '15', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_12.py', str(index) ]) """ Explanation: Simulation run 12: Single whole-cell simulation to calculate steady state charged tRNA fraction during Ser starvation End of explanation """ inputfile = '../rawdata/simulations/run1/mrnafile_run1_ecoli_and_reporter_mrnas_thresholdtime_100_totaltime_150_leu_starvationfactor_0.01_leu_relativetrnaaminoacylationrates_1_2.2_1.2_1_0.1/avg_ribo_tRNA.out' trnaconcentrationfile = pd.read_table( '../annotations/simulations/wholecell.trna.concentrations.tsv') trnaconcnlist = open(inputfile).readlines() trnaConcn = list() for trnatype in ['acylated']: temp = map( lambda x: re.findall('^' + trnatype + '_trna_(\d+)_([ACTG]{3})\t([\w\.]+)', x), trnaconcnlist) temp = filter(lambda x: len(x), temp) temp = pd.DataFrame( [x[0] for x in temp], columns=['trnaindex', 'anticodon', 'molpercell']) trnaConcn.append(temp) trnaConcn = pd.concat(trnaConcn, join='inner', axis=1) trnaConcn = trnaConcn.merge( trnaconcentrationfile[['anticodon', 'aminoacid']], how='right').fillna(0) trnaConcn['trnaindex'] = trnaConcn['trnaindex'].apply(int) trnaConcn['molpercell'] = trnaConcn['molpercell'].apply( lambda x: int(float(x))) trnaConcn.sort_values(by='trnaindex').to_csv( 
'../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', index=False, sep='\t') """ Explanation: Create average tRNA concentration file during leucine starvation based on whole-cell simulation Run 1 End of explanation """ inputfile = ( '../rawdata/simulations/run12/mrnafile_run12_ecoli_and_reporter_mrnas_' 'thresholdtime_100_totaltime_150_ser_starvationfactor_0.01_' 'ser_relativetrnaaminoacylationrates_0.1_1.5_1.5_1/avg_ribo_tRNA.out') trnaconcentrationfile = pd.read_table( '../annotations/simulations/wholecell.trna.concentrations.tsv') trnaconcnlist = open(inputfile).readlines() trnaConcn = list() for trnatype in ['acylated']: temp = map( lambda x: re.findall('^' + trnatype + '_trna_(\d+)_([ACTG]{3})\t([\w\.]+)', x), trnaconcnlist) temp = filter(lambda x: len(x), temp) temp = pd.DataFrame( [x[0] for x in temp], columns=['trnaindex', 'anticodon', 'molpercell']) trnaConcn.append(temp) trnaConcn = pd.concat(trnaConcn, join='inner', axis=1) trnaConcn = trnaConcn.merge( trnaconcentrationfile[['anticodon', 'aminoacid']], how='right').fillna(0) trnaConcn['trnaindex'] = trnaConcn['trnaindex'].apply(int) trnaConcn['molpercell'] = trnaConcn['molpercell'].apply( lambda x: int(float(x))) trnaConcn.sort_values(by='trnaindex').to_csv( '../annotations/simulations/serine.starvation.average.trna.concentrations.tsv', index=False, sep='\t') """ Explanation: Create average tRNA concentration file during serine starvation based on whole-cell simulation Run 12 End of explanation """ mutation_locations = [ { 'cta': 2 }, { 'cta': 6 }, { 'cta': 8 }, { 'cta': 9 }, { 'cta': 10 }, { 'cta': 11 }, { 'cta': 12 }, { 'cta': 13 }, { 'cta': 14 }, { 'cta': 18 }, { 'ctc': 2 }, { 'ctc': 6 }, { 'ctc': 8 }, { 'ctc': 9 }, { 'ctc': 10 }, { 'ctc': 11 }, { 'ctc': 12 }, { 'ctc': 13 }, { 'ctc': 14 }, { 'ctc': 18 }, { 'ctt': 2 }, { 'ctt': 6 }, { 'ctt': 10 }, { 'ctt': 14 }, { 'ctt': 18 }, ] yfpmutants = dict() for mutant in mutation_locations: key = '_'.join(['yfp'] + [ codon + 
str(location) for codon, location in mutant.items() ]) yfpmutants[key] = list(yfp0) leucodon_number = 0 for position in range(0, len(yfp0), 3): currentcodon = yfp0[position:position + 3] # proceed only if the codon is a Leu codon (which are all CTG in yfp0) if currentcodon not in ['CTG']: continue leucodon_number += 1 for codon in mutant.keys(): if leucodon_number == mutant[codon]: yfpmutants[key][position:position + 3] = codon.upper() yfpmutants[key] = ''.join(yfpmutants[key]) defaultMrnaCopyNumber = 1 # per cell defaultInitationRate = 0.3 # s-1, This is the median initiation rate listOfInitiationRates = [defaultInitationRate] for initiationRate in listOfInitiationRates: for mutant in yfpmutants: outputFile = '../annotations/simulations/run2/' + \ '%s_initiationrate_%0.4f.csv' % (mutant, initiationRate) num_seq = ''.join( get_numerical_codon_sequence(yfpmutants[mutant][:-3])) File = open(outputFile, 'w') File.write("%0.4f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, get_numerical_codon_sequence(yfp0[:-3]))) File.write("%0.4f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, num_seq)) File.close() """ Explanation: Create mRNA sequences for simulation run 2 with systematically varying tRNA accommodation rate (stall duration) at Leu codons End of explanation """ stallstrengthranges = { 'trafficjam': { 'cta': np.linspace( 0.01, 0.2, num=20), 'ctc': np.linspace( 0.02, 0.4, num=20), 'ctt': np.linspace( 0.02, 0.4, num=20), }, '5primepreterm': { 'cta': np.linspace( 0.02, 0.4, num=20), 'ctc': np.linspace( 0.05, 1, num=20), 'ctt': np.linspace( 0.05, 1, num=20), }, 'selpreterm': { 'cta': np.linspace( 0.05, 1, num=20), 'ctc': np.linspace( 0.5, 10, num=20), 'ctt': np.linspace( 0.25, 5, num=20), } } # find the location of all leucine codons to convert leu codon serial number # to absolute position along yfp in codon units for simulation leupositions = dict() leucodon_number = 1 for position in range(0, len(yfp0), 3): currentcodon = yfp0[position:position + 3] if 
currentcodon == 'CTG': leupositions[leucodon_number] = position / 3 leucodon_number += 1 for model in stallstrengthranges: for index in range(20): fitdata = list() for mutant in mutation_locations: if len(mutant) > 1: raise "Only single mutants allowed" codon = mutant.keys()[0] pos = mutant.values()[0] fitdata.append({ 'codon': codonDict[codon.upper()], 'pos': leupositions[pos], 'stallstrength': stallstrengthranges[model][codon][index] }) fitdata = pd.DataFrame(fitdata) fitdata.to_csv( '../annotations/simulations/run2/{0}_stallstrengthindex_{1}.tsv'. format(model, index), sep='\t', index=False) """ Explanation: Create varying tRNA accommodation rate (stall duration) at Leu codons for simulation run 2 End of explanation """ %%writefile simulation_run_2.py #!/usr/bin/env python # SBATCH --mem=8000 import subprocess as sp import os import sys import itertools import numpy as np jobindex = int(sys.argv[1]) currentindex = -1 allfiles = os.listdir('../annotations/simulations/run2/') mrnafiles = list(filter(lambda x: x.startswith('yfp'), allfiles)) mrnafiles = ['../annotations/simulations/run2/' + File for File in mrnafiles] terminationRates = { 'trafficjam': {'--5prime-preterm-rate': 0}, '5primepreterm': {'--5prime-preterm-rate': 1}, 'selpreterm': {'--selective-preterm-rate': 1}, } for (mrnafile, typeOfTermination ) in list(itertools.product(mrnafiles, terminationRates)): currentindex += 1 if currentindex != jobindex: continue termoption = terminationRates[typeOfTermination].keys()[0] termvalue = terminationRates[typeOfTermination].values()[0] stallstrengthfiles = list( filter(lambda x: x.startswith(typeOfTermination), allfiles)) stallstrengthfiles = [ '../annotations/simulations/run2/' + File for File in stallstrengthfiles] for stallstrengthfile in stallstrengthfiles: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', termoption, '%0.2f' % termvalue, '--threshold-accommodation-rate', 
'22', '--output-prefix', '../rawdata/simulations/run2/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(75): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '300', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_2.py', str(index) ]) """ Explanation: Simulation run 2: Reporter simulation with systematically varying duration of ribosome stalling during leucine starvation End of explanation """ mutation_locations = [ { 'tcg': 2 }, { 'tcg': 3 }, { 'tcg': 4 }, { 'tcg': 5 }, { 'tcg': 6 }, { 'tcg': 7 }, ] yfpmutants = dict() for mutant in mutation_locations: key = '_'.join(['yfp'] + [ codon + str(location) for codon, location in mutant.items() ]) yfpmutants[key] = list(yfp_agc) codon_number = 0 for position in range(0, len(yfp_agc), 3): currentcodon = yfp_agc[position:position + 3] # proceed only if the codon is a Leu codon (which are all CTG in yfp0) if currentcodon not in ['AGC']: continue codon_number += 1 for codon in mutant.keys(): if codon_number == mutant[codon]: yfpmutants[key][position:position + 3] = codon.upper() yfpmutants[key] = ''.join(yfpmutants[key]) defaultMrnaCopyNumber = 1 # per cell defaultInitationRate = 0.3 # s-1, This is the median initiation rate listOfInitiationRates = [defaultInitationRate] for initiationRate in listOfInitiationRates: for mutant in yfpmutants: outputFile = '../annotations/simulations/run13/' + \ '%s_initiationrate_%0.4f.csv' % (mutant, initiationRate) num_seq = ''.join( get_numerical_codon_sequence(yfpmutants[mutant][:-3])) File = open(outputFile, 'w') File.write("%0.4f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, get_numerical_codon_sequence(yfp_agc[:-3]))) 
File.write("%0.4f\t%d\t%s\n" % (initiationRate, defaultMrnaCopyNumber, num_seq)) File.close() """ Explanation: Create mRNA sequences for simulation run 13 with systematically varying tRNA accommodation rate (stall duration) at Ser codons End of explanation """ stallstrengthranges = { 'trafficjam': { 'tcg': np.linspace( 0.02, 0.4, num=20), }, '5primepreterm': { 'tcg': np.linspace( 0.05, 1, num=20), }, 'selpreterm': { 'tcg': np.linspace( 0.5, 10, num=20), } } # find the location of all leucine codons to convert leu codon serial number # to absolute position along yfp in codon units for simulation serpositions = dict() sercodon_number = 1 for position in range(0, len(yfp_agc), 3): currentcodon = yfp_agc[position:position + 3] if currentcodon == 'AGC': serpositions[sercodon_number] = position / 3 sercodon_number += 1 for model in stallstrengthranges: for index in range(20): fitdata = list() for mutant in mutation_locations: if len(mutant) > 1: raise "Only single mutants allowed" codon = mutant.keys()[0] pos = mutant.values()[0] fitdata.append({ 'codon': codonDict[codon.upper()], 'pos': serpositions[pos], 'stallstrength': stallstrengthranges[model][codon][index] }) fitdata = pd.DataFrame(fitdata) fitdata.to_csv( '../annotations/simulations/run13/{0}_stallstrengthindex_{1}.tsv'. 
format(model, index), sep='\t', index=False) """ Explanation: Create varying tRNA accommodation rate (stall duration) at Ser codons for simulation run 13 End of explanation """ %%writefile simulation_run_13.py #!/usr/bin/env python # SBATCH --mem=8000 import subprocess as sp import os import sys import itertools import numpy as np jobindex = int(sys.argv[1]) currentindex = -1 allfiles = os.listdir('../annotations/simulations/run13/') mrnafiles = list(filter(lambda x: x.startswith('yfp'), allfiles)) mrnafiles = ['../annotations/simulations/run13/' + File for File in mrnafiles] terminationRates = { 'trafficjam': {'--5prime-preterm-rate': 0}, '5primepreterm': {'--5prime-preterm-rate': 1}, 'selpreterm': {'--selective-preterm-rate': 1}, } for (mrnafile, typeOfTermination ) in list(itertools.product(mrnafiles, terminationRates)): currentindex += 1 if currentindex != jobindex: continue termoption = terminationRates[typeOfTermination].keys()[0] termvalue = terminationRates[typeOfTermination].values()[0] stallstrengthfiles = list( filter(lambda x: x.startswith(typeOfTermination), allfiles)) stallstrengthfiles = [ '../annotations/simulations/run13/' + File for File in stallstrengthfiles] for stallstrengthfile in stallstrengthfiles: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/serine.starvation.average.trna.concentrations.tsv', termoption, '%0.2f' % termvalue, '--threshold-accommodation-rate', '22', '--output-prefix', '../rawdata/simulations/run13/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(21): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '300', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 
'simulation_run_13.py', str(index) ]) """ Explanation: Simulation run 13: Reporter simulation with systematically varying duration of ribosome stalling during serine starvation End of explanation """
karlstroetmann/Artificial-Intelligence
Python/2 Constraint Solver/Local-Search.ipynb
gpl-2.0
import extractVariables as ev """ Explanation: Local Search Utility Functions The module extractVariables implements the function $\texttt{extractVars}(e)$ that takes a Python expression $e$ as its argument and returns the set of all variables and function names occurring in $e$. End of explanation """ def collect_variables(expr): return frozenset(var for var in ev.extractVars(expr) if var not in dir(__builtins__) if var not in ['and', 'or', 'not'] ) """ Explanation: The function collect_variables(expr) takes a string expr that can be interpreted as a Python expression as input and collects all variables occurring in expr. It takes care to eliminate the function symbols from the names returned by extract_variables. End of explanation """ def arb(S): for x in S: return x """ Explanation: The function arb(S) takes a set S as input and returns an arbitrary element from this set. End of explanation """ import random random.seed(42) """ Explanation: We need the function choice from the module random. Given a list L, random.choice(L) returns a random element from L. In order to have reproducible results, we have to set the seed for the random number generator. End of explanation """ def extend(A, key, value): B = A.copy() B[key] = value return B """ Explanation: Given a dictionary A, the function extend(A) returns a dictionary B such that B[key] = value and B[x] = A[x] for all x that are different from key. End of explanation """ import sys sys.path.append('..') import Set """ Explanation: The module Set implements <em style="color:blue;">sets</em> as <a href="https://en.wikipedia.org/wiki/AVL_tree">AVL trees</a>. The API provided by Set offers the following functions and methods: - Set() creates an empty set. - S.isEmpty() checks whether the set S is empty. - S.member(x) checks whether x is an element of the set S. - S.insert(x) inserts x into the set S. This does not return a new set but rather modifies the set S. - S.delete(x) deletes x from the set S. 
This does not return a new set but rather modifies the set S. - S.pop() returns the smallest element of the set S. Furthermore, this element is removed from S. - S.pop_last() returns the biggest element of the set S. Furthermore, this element is removed from S. - S.first() returns the smallest element of the set S. - S.last() returns the biggest element of the set S. Since sets are implemented as <em style="color:blue;">ordered binary trees</em>, the elements of a set need to be <em style="color:blue;">comparable</em>, i.e. if x and y are inserted into a set, then the expression x &lt; y must return a Boolean value and &lt; has to define a <em style="color:blue;">linear order</em>. The module Set can be used to implement a priority queue that supports the removal of elements. End of explanation """ def cast_to_Set(L): Result = Set.Set() for x in L: Result.insert(x) return Result """ Explanation: The function cast_to_set(L) returns a Set object containing all elements from the iterable L. End of explanation """ def union(L): return { x for S in L for x in S } """ Explanation: Given a list of sets L, the function union(L) returns the set of all elements occurring in some set $S$ that is itself a member of the list L, i.e. we have $$ \texttt{union}(L) = { x \mid \exists S \in L : x \in L }. $$ End of explanation """ class Failure(Exception): pass """ Explanation: We define the class Failure of exceptions so that we can distinguish Failure exceptions from other exceptions. This is done by creating a new, empty class that is derived from the class Exception. 
End of explanation """ def solve(P, consistency=True): Variables, Values, Constraints = P VarsInConstrs = union([ collect_variables(f) for f in Constraints ]) MisspelledVars = (VarsInConstrs - Variables) | (Variables - VarsInConstrs) if MisspelledVars: print("Did you misspell any of the following Variables?") for v in MisspelledVars: print(v) ValuesPerVar = { x: Values for x in Variables } Annotated = { f: collect_variables(f) for f in Constraints } if consistency: Connected = {} Var2Formulas = variables_2_formulas(Annotated) for x in Variables: Connected[x] = union([ V for f, V in Annotated.items() if x in V ]) - { x } try: enforce_consistency(ValuesPerVar, Var2Formulas, Annotated, Connected) for x, Values in ValuesPerVar.items(): print(f'{x}: {Values}') except Failure: return None return local_search(Variables, ValuesPerVar, Annotated) """ Explanation: A Constraint Problem Solver Using Local Search The procedure solve(P, consistency) takes a constraint satisfaction problem P and a flag consistency as input. Here P is a triple of the form $$ \mathcal{P} = \langle \mathtt{Variables}, \mathtt{Values}, \mathtt{Constraints} \rangle $$ where - $\mathtt{Variables}$ is a set of strings which serve as variables, - $\mathtt{Values}$ is a set of values that can be assigned to the variables in the set $\mathtt{Variables}$. - $\mathtt{Constraints}$ is a set of formulas from first order logic. Each of these formulas is called a constraint of $\mathcal{P}$. The CSP P is solved using local search. If consistency is True, consistency checking is used as a preprocessing step. 
End of explanation """ def local_search(Variables, ValuesPerVar, Annotated): Variables = list(Variables) # convert to list for random.choice(Variables) to work Assignment = { x: random.choice(list(ValuesPerVar[x])) for x in Variables } iteration = 0 lastVar = arb(Variables) while True: Conflicts = [ (numConflicts(x, Assignment, Annotated), x) for x in Variables if x != lastVar ] maxNum, _ = Set.last(cast_to_Set(Conflicts)) if maxNum == 0 and numConflicts(lastVar, Assignment, Annotated) == 0: print(f'Number of iterations: {iteration}') return Assignment if iteration % 11 == 0: # avoid infinite loop x = random.choice(Variables) else: # choose var with max number of conflicts FaultyVars = [ var for (num, var) in Conflicts if num == maxNum ] x = random.choice(FaultyVars) if iteration % 13 == 0: # avoid infinite loop newVal = random.choice(list(ValuesPerVar[x])) else: Conflicts = [ (numConflicts(x, extend(Assignment, x, val), Annotated), val) for val in ValuesPerVar[x] ] minNum, _ = Set.first(cast_to_Set(Conflicts)) ValuesForX = [ val for (n, val) in Conflicts if n == minNum ] newVal = random.choice(ValuesForX) Assignment[x] = newVal lastVar = x iteration += 1 """ Explanation: The function local_search takes three parameters. Variables is the set of all variables occurring in the given CSP*. * ValuesPerVar is a dictionary. For every variable x, ValuesPerVar[x] is the set of values that can be used to instantiate x. * Annotated is a dictionary. For every constraint $f$, $\texttt{Annotated}[f]$ is the set of variables occurring in $f$. If the computation is successful, local_search returns a dictionary that encodes a solution of the given CSP by mapping variables to values. The algorithm applied works as follows: * Initialize the values of the variables in $\texttt{Variables}$ randomly. If all $\texttt{Constraints}$ are satisfied, return the current variable binding as a solution. 
* For every $x \in \texttt{Variables}$, count the number of unsatisfied* constraints that involve the variable $x$. * Set $\texttt{maxNum}$ to be the maximum of these numbers, i.e. $\texttt{maxNum}$ is the maximal number of unsatisfied constraints for any variable. * Compute the list $\texttt{FaultyVars}$ of those variables that have $\texttt{maxNum}$ unsatisfied constraints. * Randomly choose a variable $x$ from the set $\texttt{FaultyVars}$. * Find a value $d \in \texttt{ValuesPerVar[x]}$ such that by assigning $d$ to the variable $x$, the number of unsatisfied constraints for the variable $x$ is minimized. If there is more than one value $d$ with this property, choose the value $d$ randomly from those values that minimize the number of unsatisfied constraints. * Rinse and repeat until a solution is found. End of explanation """ def numConflicts(x, Assign, Annotated): NewAssign = Assign.copy() return len([ (f, V) for (f, V) in Annotated.items() if x in V and not eval(f, NewAssign) ]) """ Explanation: The function numConflicts takes three arguments: - x is a variable, - Assign is a dictionary mapping variables to values, - Annotated is a set of pairs of the form (f, V) where f is a constraint and V is the set of variables occurring in f. The function returns the number of constraints f such that x occurs in f but f is not satisfied. End of explanation """ def variables_2_formulas(Annotated): Dictionary = {}; for f, Vars in Annotated.items(): for x in Vars: if x in Dictionary: Dictionary[x] |= { f } else: Dictionary[x] = { f } return Dictionary """ Explanation: Consistency Checking The function variables_2_formulas takes the set of annotated constraints as input. It returns a dictionary that attaches to every variable x the set of those constraints f such that x occurs in f. 
End of explanation """ def enforce_consistency(ValuesPerVar, Var2Formulas, Annotated, Connected): UncheckedVars = set(Var2Formulas.keys()) while UncheckedVars: variable = UncheckedVars.pop() Constraints = Var2Formulas[variable] Values = ValuesPerVar[variable] RemovedVals = set() for f in Constraints: OtherVars = Annotated[f] - { variable } for value in Values: if not exists_values(variable, value, f, OtherVars, ValuesPerVar): RemovedVals |= { value } UncheckedVars |= Connected[variable] Remaining = Values - RemovedVals if not Remaining: raise Failure() ValuesPerVar[variable] = Remaining """ Explanation: The function enforce_consistency takes 4 arguments: - ValuesPerVar is a dictionary. For every variable x we have that ValuesPerVar[x] is the set of values that can be substituted for x. - Var2Formulas is a dictionary. For every variable x we have that Var2Formulas[x] is the set of those formulas that mention the variable x. - Annotated is a dictionary. For every constraint f, Annotated[f] is the set of variables occurring in f. - Connected is a dictionary. For every variable x we have that Connected[x] is the set of those variables y that are directly connected with the variable x. Two variables x and y are directly connected if there is a constraint F such that both x and y occur in F. In this case, F is connecting x and y. The function enforce_consistencyshrinks the sets ValuesPerVar[x] such that the values in ValuesPerVar[x] are consistent for x for all constraints. End of explanation """ def exists_values(var, val, f, Vars, ValuesPerVar): Assignments = all_assignments(Vars, ValuesPerVar) return any(eval(f, extend(A, var, val)) for A in Assignments) """ Explanation: The procedure exists_values takes five arguments: - var is a variable, - val is a value val, - f is a constraint, - Vars is the set Vars of those variables in f that are different from var, and - ValuesPerVar is a dictionary. 
For every variable x we have that ValuesPerVar[x] is the set of those values that still may be tried for x. The function checks whether there is a value for var such that the other variables occurring in the constraint f can be set to values such that the constraint f is satisfied. End of explanation """ def all_assignments(Variables, ValuesPerVar): Variables = set(Variables) # turn frozenset into a set if not Variables: return [ {} ] # list containing empty assignment var = Variables.pop() Values = ValuesPerVar[var] Assignments = all_assignments(Variables, ValuesPerVar) return [ extend(A, var, val) for A in Assignments for val in ValuesPerVar[var] ] """ Explanation: The function all_assignments returns the list of all possible assignments for the variables in the set Vars. For every variable x, the values for x are taken from ValuesPerVar[x]. End of explanation """ %%capture %run N-Queens-Problem-CSP.ipynb P = create_csp(8) """ Explanation: Solving the Eight-Queens-Puzzle End of explanation """ %%time Solution = solve(P, False) print(f'Solution = {Solution}') show_solution(Solution) """ Explanation: As the N queens problem is not very difficult, we will not use consistency checking. Local search takes 62 milliseconds on my desktop to solve the eight queens puzzle. End of explanation """ P = create_csp(100) %%time Solution = solve(P, False) """ Explanation: The 100 queens problem can be solved in 32 seconds if we do not use consistency checking. End of explanation """ %run Zebra.ipynb zebra = zebra_csp() %%time Solution = solve(zebra, True) """ Explanation: Solving the Zebra Puzzle End of explanation """ show_solution(Solution) """ Explanation: Solving the Zebra Puzzle takes about 4 seconds, provided we use consistency checking. 
End of explanation """
# Solve a Sudoku puzzle; sudoku_csp and Sudoku come from the included notebook.
%run Sudoku.ipynb
csp = sudoku_csp(Sudoku)
csp
""" Explanation: Solving a Sudoku Puzzle End of explanation """
%%time
# uses the default consistency=True preprocessing
Solution = solve(csp)
show_solution(Solution)
""" Explanation: Solving the given Sudoku puzzle takes about 2 minutes, provided we use consistency checking. For hard problems, local search is not a good idea. End of explanation """
# Solve the SEND + MORE = MONEY style crypto-arithmetic puzzle.
%run Crypto-Arithmetic.ipynb
csp = crypto_csp()
""" Explanation: Solving a Crypto-Arithmetic Puzzle End of explanation """
%%time
Solution = solve(csp, True)
show_solution(Solution)
""" Explanation: Solving the crypto-arithmetic puzzle took 160 milliseconds with consistency checking. End of explanation """
remenska/iSDM
notebooks/old/DemoFramework-IUCN.ipynb
apache-2.0
import logging root = logging.getLogger() root.addHandler(logging.StreamHandler()) %matplotlib inline """ Explanation: Working with IUCN data in shapefiles just some logging/plotting magic to output in this notebook, nothing to care about. End of explanation """ # download http://bit.ly/1R8pt20 (zipped Turtles shapefiles), and unzip them from iSDM.species import IUCNSpecies turtles = IUCNSpecies(name_species='Acanthochelys pallidipectoris') turtles.load_shapefile('../data/FW_TURTLES/FW_TURTLES.shp') """ Explanation: 1. Load a shapefile with all turtles data. At this point no data cleaning is done yet. End of explanation """ turtles.get_data().head() turtles.get_data().columns # all the columns available per species geometry """ Explanation: Show only first 5 species (meta)data, to get an idea of the data structure. End of explanation """ turtles.find_species_occurrences() turtles.get_data() # datatype: geopandas.geodataframe.GeoDataFrame turtles.save_data() # serialize all the current data to a pickle file, so it can be loaded later on turtles.load_data() turtles.ID # derived from "id_no" column. It's a sort of unique ID per species """ Explanation: 2. Filter species by the name given above End of explanation """ turtles.get_data().plot() turtles.data_full.geometry.convex_hull.plot() """ Explanation: 3. Plot geometry Plot the shapefile data, and a convex hull. GeoPandas objects also know how to plot themselves directly. End of explanation """ with_buffer = turtles.get_data().geometry.buffer(0.5) with_buffer.plot() """ Explanation: Let's put a buffer around the data, and plot that End of explanation """ turtles.save_shapefile(overwrite=True) """ Explanation: The currently filtered shape data can be saved. If overwrite=True, the shapefile it was loaded from, will be overwritten. Otherwise you can provide a new shape_file as an argument. End of explanation """ turtles.rasterize_data(raster_file='./turtles.tif', pixel_size=0.5) """ Explanation: 4. 
Rasterize
Rasterize the data: we need a target raster_file to save it to, and a resolution.
End of explanation """
# Load the raster written by rasterize_data above back into memory.
turtles_raster_data = turtles.load_raster_data()
turtles_raster_data.shape
type(turtles_raster_data)
""" Explanation: Or at some point later, if you want to load the raster file End of explanation """
import matplotlib.pyplot as plt
# NOTE(review): figsize is specified in inches, so passing the raster's shape
# directly yields a very large figure -- confirm this is intended.
plt.figure(figsize=turtles_raster_data.shape) # careful with big images!
plt.imshow(turtles_raster_data, cmap="hot", interpolation="none")
type(turtles_raster_data)
# Inspect the same GeoTIFF directly with GDAL.
from osgeo import gdal, ogr
geo = gdal.Open("./turtles.tif")
geo.GetGCPs()
drv = geo.GetDriver()
geo.RasterXSize
geo.GetGeoTransform()
""" Explanation: A simple plot of the raster data End of explanation """
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/ml_ops/stage2/mlops_experimentation.ipynb
apache-2.0
import os # The Vertex AI Workbench Notebook product has specific requirements IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME") IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists( "/opt/deeplearning/metadata/env_version" ) # Vertex AI Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_WORKBENCH_NOTEBOOK: USER_FLAG = "--user" ONCE_ONLY = False if ONCE_ONLY: ! pip3 install -U tensorflow==2.5 $USER_FLAG ! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG ! pip3 install -U tensorflow-transform==1.2 $USER_FLAG ! pip3 install -U tensorflow-io==0.18 $USER_FLAG ! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG ! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG ! pip3 install --upgrade google-cloud-bigquery $USER_FLAG ! pip3 install --upgrade google-cloud-logging $USER_FLAG ! pip3 install --upgrade apache-beam[gcp] $USER_FLAG ! pip3 install --upgrade pyarrow $USER_FLAG ! pip3 install --upgrade cloudml-hypertune $USER_FLAG ! pip3 install --upgrade kfp $USER_FLAG ! pip3 install --upgrade torchvision $USER_FLAG ! 
pip3 install --upgrade rpy2 $USER_FLAG """ Explanation: E2E ML on GCP: MLOps stage 2 : experimentation <table align="left"> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/mlops_experimentation.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/mlops_experimentation.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/main/notebooks/community/ml_ops/stage2/mlops_experimentation.ipynb"> <img src="https://lh3.googleusercontent.com/UiNooY4LUgW_oTvpsNhPpQzsstV5W8F7rYgxgGBD85cWJoLmrOzhVs_ksK_vgx40SHs7jCqkTkCk=e14-rj-sc0xffffff-h130-w32" alt="Vertex AI logo"> Open in Vertex AI Workbench </a> </td> </table> <br/><br/><br/> Overview This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation. Dataset The dataset used for this tutorial is the Chicago Taxi. The version of the dataset you will use in this tutorial is stored in a public BigQuery table. The trained model predicts whether someone would leave a tip for a taxi fare. Objective In this tutorial, you create a MLOps stage 2: experimentation process. This tutorial uses the following Vertex AI: Vertex AI Datasets Vertex AI Models Vertex AI AutoML Vertex AI Training Vertex AI TensorBoard Vertex AI Vizier Vertex AI Batch Prediction The steps performed include: Review the Dataset resource created during stage 1. Train an AutoML tabular binary classifier model in the background. Build the experimental model architecture. 
Construct a custom training package for the Dataset resource. Test the custom training package locally. Test the custom training package in the cloud with Vertex AI Training. Hyperparameter tune the model training with Vertex AI Vizier. Train the custom model with Vertex AI Training. Add a serving function for online/batch prediction to the custom model. Test the custom model with the serving function. Evaluate the custom model using Vertex AI Batch Prediction Wait for the AutoML training job to complete. Evaluate the AutoML model using Vertex AI Batch Prediction with the same evaluation slices as the custom model. Set the evaluation results of the AutoML model as the baseline. If the evaluation of the custom model is below baseline, continue to experiment with the custom model. If the evaluation of the custom model is above baseline, save the model as the first best model. Recommendations When doing E2E MLOps on Google Cloud for experimentation, the following best practices with structured (tabular) data are recommended: Determine a baseline evaluation using AutoML. Design and build a model architecture. Upload the untrained model architecture as a Vertex AI Model resource. Construct a training package that can be ran locally and as a Vertex AI Training job. Decompose the training package into: data, model, train and task Python modules. Obtain the location of the transformed training data from the user metadata of the Vertex AI Dataset resource. Obtain the location of the model artifacts from the Vertex AI Model resource. Include in the training package initializing a Vertex AI Experiment and corresponding run. Log hyperparameters and training parameters for the experiment. Add callbacks for early stop, TensorBoard, and hyperparameter tuning, where hyperparameter tuning is a command-line option. Test the training package locally with a small number of epochs. Test the training package with Vertex AI Training. 
Do hyperparameter tuning with Vertex AI Hyperparameter Tuning. Do full training of the custom model with Vertex AI Training. Log the hyperparameter values for the experiment/run. Evaluate the custom model. Single evaluation slice, same metrics as AutoML Add evaluation to the training package and return the results in a file in the Cloud Storage bucket used for training Custom evaluation slices, custom metrics Evaluate custom evaluation slices as a Vertex AI Batch Prediction for both AutoML and custom model Perform custom metrics on the results from the batch job Compare custom model metrics against the AutoML baseline If less than baseline, then continue to experiment If greater than baseline, then upload model as the new baseline and save evaluation results with the model. Installations Install one time the packages for executing the MLOps notebooks. End of explanation """ import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the kernel Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages. End of explanation """ PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID """ Explanation: Before you begin Set up your Google Cloud project The following steps are required, regardless of your notebook environment. Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs. Make sure that billing is enabled for your project. Enable the Vertex AI API.
If you are running this notebook locally, you will need to install the Cloud SDK. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands. Set your project ID If you don't know your project ID, you may be able to get your project ID using gcloud. End of explanation """ REGION = "[your-region]" # @param {type:"string"} if REGION == "[your-region]": REGION = "us-central1" """ Explanation: Region You can also change the REGION variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. Americas: us-central1 Europe: europe-west4 Asia Pacific: asia-east1 You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about Vertex AI regions. End of explanation """ from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial. End of explanation """ # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. 
import os import sys # If on Vertex AI Workbench, then don't execute this code IS_COLAB = False if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv( "DL_ANACONDA_HOME" ): if "google.colab" in sys.modules: IS_COLAB = True from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' """ Explanation: Authenticate your Google Cloud account If you are using Vertex AI Workbench Notebooks, your environment is already authenticated. Skip this step. If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. Otherwise, follow these steps: In the Cloud Console, go to the Create service account key page. Click Create service account. In the Service account name field, enter a name, and click Create. In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select Vertex AI Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. End of explanation """ BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. 
The staging bucket is where all the data associated with your dataset and model resources are retained across sessions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. End of explanation """ ! gsutil mb -l $REGION $BUCKET_NAME """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ ! gsutil ls -al $BUCKET_NAME """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"} if ( SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]" ): # Get your service account from gcloud if not IS_COLAB: shell_output = !gcloud auth list 2>/dev/null SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip() if IS_COLAB: shell_output = ! gcloud projects describe $PROJECT_ID project_number = shell_output[-1].split(":")[1].strip().replace("'", "") SERVICE_ACCOUNT = f"{project_number}-compute@developer.gserviceaccount.com" print("Service Account:", SERVICE_ACCOUNT) """ Explanation: Service Account If you don't know your service account, try to get your service account using gcloud command by executing the second cell below. End of explanation """ import google.cloud.aiplatform as aip """ Explanation: Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants End of explanation """ import tensorflow as tf """ Explanation: Import TensorFlow Import the TensorFlow package into your Python environment. End of explanation """ import tensorflow_transform as tft """ Explanation: Import TensorFlow Transform Import the TensorFlow Transform (TFT) package into your Python environment. 
End of explanation """ import tensorflow_data_validation as tfdv """ Explanation: Import TensorFlow Data Validation Import the TensorFlow Data Validation (TFDV) package into your Python environment. End of explanation """ aip.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME) """ Explanation: Initialize Vertex AI SDK for Python Initialize the Vertex AI SDK for Python for your project and corresponding bucket. End of explanation """ import os if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU")), ) else: TRAIN_GPU, TRAIN_NGPU = (aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, 4) if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) """ Explanation: Set hardware accelerators You can set hardware accelerators for training and prediction. Set the variables TRAIN_GPU/TRAIN_NGPU and DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) Otherwise specify (None, None) to use a container image to run on a CPU. Learn more about hardware accelerator support for your region. Note: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3. This is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. 
End of explanation """ if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2.5".replace(".", "-") if TF[0] == "2": if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format( REGION.split("-")[0], TRAIN_VERSION ) DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format( REGION.split("-")[0], DEPLOY_VERSION ) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) """ Explanation: Set pre-built containers Set the pre-built Docker container image for training and prediction. For the latest list, see Pre-built containers for training. For the latest list, see Pre-built containers for prediction. End of explanation """ if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) """ Explanation: Set machine type Next, set the machine type to use for training and prediction. Set the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for for training and prediction. machine type n1-standard: 3.75GB of memory per vCPU. 
n1-highmem: 6.5GB of memory per vCPU n1-highcpu: 0.9 GB of memory per vCPU vCPUs: number of [2, 4, 8, 16, 32, 64, 96 ] Note: The following is not supported for training: standard: 2 vCPUs highcpu: 2, 4 and 8 vCPUs Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs. End of explanation """ def find_dataset(display_name_prefix, import_format): matches = [] datasets = aip.TabularDataset.list() for dataset in datasets: if dataset.display_name.startswith(display_name_prefix): try: if ( "bq" == import_format and dataset.to_dict()["metadata"]["inputConfig"]["bigquerySource"] ): matches.append(dataset) if ( "csv" == import_format and dataset.to_dict()["metadata"]["inputConfig"]["gcsSource"] ): matches.append(dataset) except: pass create_time = None for match in matches: if create_time is None or match.create_time > create_time: create_time = match.create_time dataset = match return dataset dataset = find_dataset("Chicago Taxi", "bq") print(dataset) """ Explanation: Retrieve the dataset from stage 1 Next, retrieve the dataset you created during stage 1 with the helper function find_dataset(). This helper function finds all the datasets whose display name matches the specified prefix and import format (e.g., bq). Finally it sorts the matches by create time and returns the latest version. End of explanation """ import json try: with tf.io.gfile.GFile( "gs://" + dataset.labels["user_metadata"] + "/metadata.jsonl", "r" ) as f: metadata = json.load(f) print(metadata) except: print("no metadata") """ Explanation: Load dataset's user metadata Load the user metadata for the dataset. 
End of explanation """ dag = aip.AutoMLTabularTrainingJob( display_name="chicago_" + TIMESTAMP, optimization_prediction_type="classification", optimization_objective="minimize-log-loss", ) print(dag) """ Explanation: Create and run training pipeline To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline. Create training pipeline An AutoML training pipeline is created with the AutoMLTabularTrainingJob class, with the following parameters: display_name: The human readable name for the TrainingJob resource. optimization_prediction_type: The type task to train the model for. classification: A tabuar classification model. regression: A tabular regression model. column_transformations: (Optional): Transformations to apply to the input columns optimization_objective: The optimization objective to minimize or maximize. binary classification: minimize-log-loss maximize-au-roc maximize-au-prc maximize-precision-at-recall maximize-recall-at-precision multi-class classification: minimize-log-loss regression: minimize-rmse minimize-mae minimize-rmsle The instantiated object is the DAG (directed acyclic graph) for the training pipeline. End of explanation """ async_model = dag.run( dataset=dataset, model_display_name="chicago_" + TIMESTAMP, training_fraction_split=0.8, validation_fraction_split=0.1, test_fraction_split=0.1, budget_milli_node_hours=8000, disable_early_stopping=False, target_column="tip_bin", sync=False, ) """ Explanation: Run the training pipeline Next, you run the DAG to start the training job by invoking the method run, with the following parameters: dataset: The Dataset resource to train the model. model_display_name: The human readable name for the trained model. training_fraction_split: The percentage of the dataset to use for training. test_fraction_split: The percentage of the dataset to use for test (holdout data). validation_fraction_split: The percentage of the dataset to use for validation. 
target_column: The name of the column to train as the label. budget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour). disable_early_stopping: If True, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements. The run method when completed returns the Model resource. The execution of the training pipeline will take up to 180 minutes. End of explanation """ EXPERIMENT_NAME = "chicago-" + TIMESTAMP aip.init(experiment=EXPERIMENT_NAME) aip.start_run("run-1") """ Explanation: Create experiment for tracking training related metadata Setup tracking the parameters (configuration) and metrics (results) for each experiment: aip.init() - Create an experiment instance aip.start_run() - Track a specific run within the experiment. Learn more about Introduction to Vertex AI ML Metadata. End of explanation """ TENSORBOARD_DISPLAY_NAME = "chicago_" + TIMESTAMP tensorboard = aip.Tensorboard.create(display_name=TENSORBOARD_DISPLAY_NAME) tensorboard_resource_name = tensorboard.gca_resource.name print("TensorBoard resource name:", tensorboard_resource_name) """ Explanation: Create a Vertex AI TensorBoard instance Create a Vertex AI TensorBoard instance to use TensorBoard in conjunction with Vertex AI Training for custom model training. Learn more about Get started with Vertex AI TensorBoard.
End of explanation """ from tensorflow.keras.layers import Input def create_model_inputs( numeric_features=None, categorical_features=None, embedding_features=None ): inputs = {} for feature_name in numeric_features: inputs[feature_name] = Input(name=feature_name, shape=[], dtype=tf.float32) for feature_name in categorical_features: inputs[feature_name] = Input(name=feature_name, shape=[], dtype=tf.int64) for feature_name in embedding_features: inputs[feature_name] = Input(name=feature_name, shape=[], dtype=tf.int64) return inputs input_layers = create_model_inputs( numeric_features=metadata["numeric_features"], categorical_features=metadata["categorical_features"], embedding_features=metadata["embedding_features"], ) print(input_layers) """ Explanation: Create the input layer for your custom model Next, you create the input layer for your custom tabular model, based on the data types of each feature. End of explanation """ from math import sqrt from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import (Activation, Concatenate, Dense, Embedding, experimental) def create_binary_classifier( input_layers, tft_output, metaparams, numeric_features, categorical_features, embedding_features, ): layers = [] for feature_name in input_layers: if feature_name in embedding_features: vocab_size = tft_output.vocabulary_size_by_name(feature_name) embedding_size = int(sqrt(vocab_size)) embedding_output = Embedding( input_dim=vocab_size + 1, output_dim=embedding_size, name=f"{feature_name}_embedding", )(input_layers[feature_name]) layers.append(embedding_output) elif feature_name in categorical_features: vocab_size = tft_output.vocabulary_size_by_name(feature_name) onehot_layer = experimental.preprocessing.CategoryEncoding( num_tokens=vocab_size, output_mode="binary", name=f"{feature_name}_onehot", )(input_layers[feature_name]) layers.append(onehot_layer) elif feature_name in numeric_features: numeric_layer = tf.expand_dims(input_layers[feature_name], -1) 
layers.append(numeric_layer) else: pass joined = Concatenate(name="combines_inputs")(layers) feedforward_output = Sequential( [Dense(units, activation="relu") for units in metaparams["hidden_units"]], name="feedforward_network", )(joined) logits = Dense(units=1, name="logits")(feedforward_output) pred = Activation("sigmoid")(logits) model = Model(inputs=input_layers, outputs=[pred]) return model TRANSFORM_ARTIFACTS_DIR = metadata["transform_artifacts_dir"] tft_output = tft.TFTransformOutput(TRANSFORM_ARTIFACTS_DIR) metaparams = {"hidden_units": [128, 64]} aip.log_params(metaparams) model = create_binary_classifier( input_layers, tft_output, metaparams, numeric_features=metadata["numeric_features"], categorical_features=metadata["categorical_features"], embedding_features=metadata["embedding_features"], ) model.summary() """ Explanation: Create the binary classifier custom model Next, you create your binary classifier custom tabular model. End of explanation """ tf.keras.utils.plot_model(model, show_shapes=True, show_dtype=True) """ Explanation: Visualize the model architecture Next, visualize the architecture of the custom model. End of explanation """ MODEL_DIR = f"{BUCKET_NAME}/base_model" model.save(MODEL_DIR) """ Explanation: Save model artifacts Next, save the model artifacts to your Cloud Storage bucket End of explanation """ vertex_custom_model = aip.Model.upload( display_name="chicago_" + TIMESTAMP, artifact_uri=MODEL_DIR, serving_container_image_uri=DEPLOY_IMAGE, labels={"base_model": "1"}, sync=True, ) """ Explanation: Upload the local model to a Vertex AI Model resource Next, you upload your local custom model artifacts to Vertex AI to convert into a managed Vertex AI Model resource. End of explanation """ # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! 
echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'google-cloud-aiplatform',\n\n 'cloudml-hypertune',\n\n 'tensorflow_datasets==1.3.0',\n\n 'tensorflow==2.5',\n\n 'tensorflow_data_validation==1.2',\n\n ],\n\n packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: Chicago Taxi tabular binary classifier\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: cdpe@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex AI" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py """ Explanation: Construct the training package Package layout Before you start training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. PKG-INFO README.md setup.cfg setup.py trainer __init__.py task.py other Python scripts The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image. The file trainer/task.py is the Python script for executing the custom training job. End of explanation """ transform_feature_spec = tft_output.transformed_feature_spec() print(transform_feature_spec) """ Explanation: Get feature specification for the preprocessed data Next, create the feature specification for the preprocessed data. End of explanation """ %%writefile custom/trainer/data.py import tensorflow as tf def _gzip_reader_fn(filenames): """Small utility returning a record reader that can read gzip'ed files.""" return tf.data.TFRecordDataset(filenames, compression_type="GZIP") def get_dataset(file_pattern, feature_spec, label_column, batch_size=200): """Generates features and label for tuning/training. Args: file_pattern: input tfrecord file pattern. 
feature_spec: a dictionary of feature specifications. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ dataset = tf.data.experimental.make_batched_features_dataset( file_pattern=file_pattern, batch_size=batch_size, features=feature_spec, label_key=label_column, reader=_gzip_reader_fn, num_epochs=1, drop_final_batch=True, ) return dataset from custom.trainer import data TRANSFORMED_DATA_PREFIX = metadata["transformed_data_prefix"] LABEL_COLUMN = metadata["label_column"] train_data_file_pattern = TRANSFORMED_DATA_PREFIX + "/train/data-*.gz" val_data_file_pattern = TRANSFORMED_DATA_PREFIX + "/val/data-*.gz" test_data_file_pattern = TRANSFORMED_DATA_PREFIX + "/test/data-*.gz" for input_features, target in data.get_dataset( train_data_file_pattern, transform_feature_spec, LABEL_COLUMN, batch_size=3 ).take(1): for key in input_features: print( f"{key} {input_features[key].dtype}: {input_features[key].numpy().tolist()}" ) print(f"target: {target.numpy().tolist()}") """ Explanation: Load the transformed data into a tf.data.Dataset Next, you load the gzip TFRecords on Cloud Storage storage into a tf.data.Dataset generator. These functions are re-used when training the custom model using Vertex Training, so you save them to the python training package. End of explanation """ model(input_features) """ Explanation: Test the model architecture with transformed input Next, test the model architecture with a sample of the transformed training input. Note: Since the model is untrained, the predictions should be random. Since this is a binary classifier, expect the predicted results ~0.5. 
End of explanation """ %%writefile custom/trainer/train.py from trainer import data import tensorflow as tf import logging from hypertune import HyperTune def compile(model, hyperparams): ''' Compile the model ''' optimizer = tf.keras.optimizers.Adam(learning_rate=hyperparams["learning_rate"]) loss = tf.keras.losses.BinaryCrossentropy(from_logits=False) metrics = [tf.keras.metrics.BinaryAccuracy(name="accuracy")] model.compile(optimizer=optimizer,loss=loss, metrics=metrics) return model def warmup( model, hyperparams, train_data_dir, label_column, transformed_feature_spec ): ''' Warmup the initialized model weights ''' train_dataset = data.get_dataset( train_data_dir, transformed_feature_spec, label_column, batch_size=hyperparams["batch_size"], ) lr_inc = (hyperparams['end_learning_rate'] - hyperparams['start_learning_rate']) / hyperparams['num_epochs'] def scheduler(epoch, lr): if epoch == 0: return hyperparams['start_learning_rate'] return lr + lr_inc callbacks = [tf.keras.callbacks.LearningRateScheduler(scheduler)] logging.info("Model warmup started...") history = model.fit( train_dataset, epochs=hyperparams["num_epochs"], steps_per_epoch=hyperparams["steps"], callbacks=callbacks ) logging.info("Model warmup completed.") return history def train( model, hyperparams, train_data_dir, val_data_dir, label_column, transformed_feature_spec, log_dir, tuning=False ): ''' Train the model ''' train_dataset = data.get_dataset( train_data_dir, transformed_feature_spec, label_column, batch_size=hyperparams["batch_size"], ) val_dataset = data.get_dataset( val_data_dir, transformed_feature_spec, label_column, batch_size=hyperparams["batch_size"], ) early_stop = tf.keras.callbacks.EarlyStopping( monitor=hyperparams["early_stop"]["monitor"], patience=hyperparams["early_stop"]["patience"], restore_best_weights=True ) callbacks = [early_stop] if log_dir: tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir) callbacks = callbacks.append(tensorboard) if tuning: # Instantiate 
the HyperTune reporting object hpt = HyperTune() # Reporting callback class HPTCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): hpt.report_hyperparameter_tuning_metric( hyperparameter_metric_tag='val_loss', metric_value=logs['val_loss'], global_step=epoch ) if not callbacks: callbacks = [] callbacks.append(HPTCallback()) logging.info("Model training started...") history = model.fit( train_dataset, epochs=hyperparams["num_epochs"], validation_data=val_dataset, callbacks=callbacks ) logging.info("Model training completed.") return history def evaluate( model, hyperparams, test_data_dir, label_column, transformed_feature_spec ): logging.info("Model evaluation started...") test_dataset = data.get_dataset( test_data_dir, transformed_feature_spec, label_column, hyperparams["batch_size"], ) evaluation_metrics = model.evaluate(test_dataset) logging.info("Model evaluation completed.") return evaluation_metrics """ Explanation: Develop and test the training scripts When experimenting, one typically develops and tests the training package locally, before moving to training in the cloud. Create training script Next, you write the Python script for compiling and training the model. 
End of explanation """ os.chdir("custom") import logging from trainer import train TENSORBOARD_LOG_DIR = "./logs" logging.getLogger().setLevel(logging.INFO) hyperparams = {} hyperparams["learning_rate"] = 0.01 aip.log_params(hyperparams) train.compile(model, hyperparams) warmupparams = {} warmupparams["start_learning_rate"] = 0.0001 warmupparams["end_learning_rate"] = 0.01 warmupparams["num_epochs"] = 4 warmupparams["batch_size"] = 64 warmupparams["steps"] = 50 aip.log_params(warmupparams) train.warmup( model, warmupparams, train_data_file_pattern, LABEL_COLUMN, transform_feature_spec ) trainparams = {} trainparams["num_epochs"] = 5 trainparams["batch_size"] = 64 trainparams["early_stop"] = {"monitor": "val_loss", "patience": 5} aip.log_params(trainparams) train.train( model, trainparams, train_data_file_pattern, val_data_file_pattern, LABEL_COLUMN, transform_feature_spec, TENSORBOARD_LOG_DIR, ) os.chdir("..") """ Explanation: Train the model locally Next, test the training package locally, by training with just a few epochs: num_epochs: The number of epochs to pass to the training package. compile(): Compile the model for training. warmup(): Warmup the initialized model weights. train(): Train the model. End of explanation """ os.chdir("custom") from trainer import train evalparams = {} evalparams["batch_size"] = 64 metrics = {} metrics["loss"], metrics["acc"] = train.evaluate( model, evalparams, test_data_file_pattern, LABEL_COLUMN, transform_feature_spec ) print("ACC", metrics["acc"], "LOSS", metrics["loss"]) aip.log_metrics(metrics) os.chdir("..") """ Explanation: Evaluate the model locally Next, test the evaluation portion of the training package: evaluate(): Evaluate the model. End of explanation """ %%writefile custom/trainer/model.py import google.cloud.aiplatform as aip def get(model_id): model = aip.Model(model_id) return model """ Explanation: Retrieve model from Vertex AI Next, create the Python script to retrieve your experimental model from Vertex AI. 
End of explanation """

%%writefile custom/trainer/task.py
# Entrypoint for the custom Vertex AI training package.  Parses CLI flags,
# selects a tf.distribute strategy, then loads the data and model and runs
# the requested phase (warmup / train / evaluate / serving export).
import os
import argparse
import logging
import json
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow.python.client import device_lib
import google.cloud.aiplatform as aip
from trainer import data
from trainer import model as model_
from trainer import train
# The serving module is only needed when exporting a serving model; tolerate
# its absence so the same package works for plain training jobs.
try:
    from trainer import serving
except:
    pass

parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--model-id', dest='model_id',
                    default=None, type=str, help='Vertex Model ID.')
parser.add_argument('--dataset-id', dest='dataset_id',
                    default=None, type=str, help='Vertex Dataset ID.')
parser.add_argument('--lr', dest='lr',
                    default=0.001, type=float, help='Learning rate.')
parser.add_argument('--start_lr', dest='start_lr',
                    default=0.0001, type=float, help='Starting learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=20, type=int, help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
                    default=200, type=int, help='Number of steps per epoch.')
parser.add_argument('--batch_size', dest='batch_size',
                    default=16, type=int, help='Batch size.')
parser.add_argument('--distribute', dest='distribute',
                    type=str, default='single', help='distributed training strategy')
parser.add_argument('--tensorboard-log-dir', dest='tensorboard_log_dir',
                    default=os.getenv('AIP_TENSORBOARD_LOG_DIR'), type=str,
                    help='Output file for tensorboard logs')
parser.add_argument('--experiment', dest='experiment',
                    default=None, type=str, help='Name of experiment')
parser.add_argument('--project', dest='project',
                    default=None, type=str, help='Name of project')
parser.add_argument('--run', dest='run',
                    default=None, type=str, help='Name of run in experiment')
# NOTE(review): argparse type=bool converts any non-empty string to True, so
# e.g. --evaluate=False would still enable evaluation.  The notebook only
# ever passes True or omits these flags — confirm before reusing this CLI.
parser.add_argument('--evaluate', dest='evaluate',
                    default=False, type=bool, help='Whether to perform evaluation')
parser.add_argument('--serving', dest='serving',
                    default=False, type=bool, help='Whether to attach the serving function')
parser.add_argument('--tuning', dest='tuning',
                    default=False, type=bool, help='Whether to perform hyperparameter tuning')
parser.add_argument('--warmup', dest='warmup',
                    default=False, type=bool, help='Whether to perform warmup weight initialization')
args = parser.parse_args()

logging.getLogger().setLevel(logging.INFO)
logging.info('DEVICES' + str(device_lib.list_local_devices()))

# Single Machine, single compute device
if args.distribute == 'single':
    if tf.test.is_gpu_available():
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
    logging.info("Single device training")
# Single Machine, multiple compute device
elif args.distribute == 'mirrored':
    strategy = tf.distribute.MirroredStrategy()
    logging.info("Mirrored Strategy distributed training")
# Multi Machine, multiple compute device
elif args.distribute == 'multiworker':
    strategy = tf.distribute.MultiWorkerMirroredStrategy()
    logging.info("Multi-worker Strategy distributed training")
    # TF_CONFIG is how Vertex tells each worker about the cluster topology.
    logging.info('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))

logging.info('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))

# Initialize the run for this experiment
if args.experiment:
    logging.info("Initialize experiment: {}".format(args.experiment))
    aip.init(experiment=args.experiment, project=args.project)
    aip.start_run(args.run)

# Shared user-metadata dict: populated by get_data() and enriched with
# hyperparameters/metrics as the run progresses.
metadata = {}

def get_data():
    ''' Get the preprocessed training data '''
    global train_data_file_pattern, val_data_file_pattern, test_data_file_pattern
    global label_column, transform_feature_spec, metadata

    dataset = aip.TabularDataset(args.dataset_id)
    # The Dataset's labels point at the bucket holding the user metadata.
    METADATA = 'gs://' + dataset.labels['user_metadata'] + "/metadata.jsonl"
    with tf.io.gfile.GFile(METADATA, "r") as f:
        metadata = json.load(f)

    TRANSFORMED_DATA_PREFIX = metadata['transformed_data_prefix']
    label_column = metadata['label_column']

    train_data_file_pattern = TRANSFORMED_DATA_PREFIX + '/train/data-*.gz'
    val_data_file_pattern = TRANSFORMED_DATA_PREFIX + '/val/data-*.gz'
    test_data_file_pattern = TRANSFORMED_DATA_PREFIX + '/test/data-*.gz'

    # Rehydrate the TFT transform so the input pipeline can parse records.
    TRANSFORM_ARTIFACTS_DIR = metadata['transform_artifacts_dir']
    tft_output = tft.TFTransformOutput(TRANSFORM_ARTIFACTS_DIR)
    transform_feature_spec = tft_output.transformed_feature_spec()

def get_model():
    ''' Get the untrained model architecture '''
    global model_artifacts

    vertex_model = model_.get(args.model_id)
    model_artifacts = vertex_model.gca_resource.artifact_uri
    model = tf.keras.models.load_model(model_artifacts)

    # Compile the model
    hyperparams = {}
    hyperparams["learning_rate"] = args.lr
    if args.experiment:
        aip.log_params(hyperparams)
    metadata.update(hyperparams)
    # Persist the run's metadata next to the model artifacts.
    with tf.io.gfile.GFile(os.path.join(args.model_dir, "metrics.txt"), "w") as f:
        f.write(json.dumps(metadata))
    train.compile(model, hyperparams)
    return model

def warmup_model(model):
    ''' Warmup the initialized model weights '''
    warmupparams = {}
    warmupparams["num_epochs"] = args.epochs
    warmupparams["batch_size"] = args.batch_size
    warmupparams["steps"] = args.steps
    warmupparams["start_learning_rate"] = args.start_lr
    warmupparams["end_learning_rate"] = args.lr

    train.warmup(model, warmupparams, train_data_file_pattern, label_column, transform_feature_spec)
    return model

def train_model(model):
    ''' Train the model '''
    trainparams = {}
    trainparams["num_epochs"] = args.epochs
    trainparams["batch_size"] = args.batch_size
    trainparams["early_stop"] = {"monitor": "val_loss", "patience": 5}
    if args.experiment:
        aip.log_params(trainparams)
    metadata.update(trainparams)
    with tf.io.gfile.GFile(os.path.join(args.model_dir, "metrics.txt"), "w") as f:
        f.write(json.dumps(metadata))

    train.train(model, trainparams, train_data_file_pattern, val_data_file_pattern, label_column, transform_feature_spec, args.tensorboard_log_dir, args.tuning)
    return model

def evaluate_model(model):
    ''' Evaluate the model '''
    evalparams = {}
    evalparams["batch_size"] = args.batch_size

    metrics = train.evaluate(model, evalparams, test_data_file_pattern, label_column, transform_feature_spec)

    # Append the evaluation metrics to the persisted run metadata.
    metadata.update({'metrics': metrics})
    with tf.io.gfile.GFile(os.path.join(args.model_dir, "metrics.txt"), "w") as f:
        f.write(json.dumps(metadata))

get_data()

# Build/compile the model under the distribution strategy so variables are
# mirrored across the selected devices.
with strategy.scope():
    model = get_model()

if args.warmup:
    model = warmup_model(model)
else:
    model = train_model(model)

if args.evaluate:
    evaluate_model(model)

if args.serving:
    logging.info('Save serving model to: ' + args.model_dir)
    serving.construct_serving_model(
        model=model,
        serving_model_dir=args.model_dir,
        metadata=metadata
    )
elif args.warmup:
    # Warmed-up weights are written back over the base model artifacts so
    # later runs start from the stabilized initialization.
    logging.info('Save warmed up model to: ' + model_artifacts)
    model.save(model_artifacts)
else:
    logging.info('Save trained model to: ' + args.model_dir)
    model.save(args.model_dir)
"""
Explanation: Create the task script for the Python training package Next, you create the task.py script for driving the training package. Some noteable steps include: Command-line arguments: model-id: The resource ID of the Model resource you built during experimenting. This is the untrained model architecture. dataset-id: The resource ID of the Dataset resource to use for training. experiment: The name of the experiment. run: The name of the run within this experiment. tensorboard-logdir: The logging directory for Vertex AI Tensorboard. get_data(): Loads the Dataset resource into memory. Obtains the user metadata from the Dataset resource. From the metadata, obtain location of transformed data, transformation function and name of label column get_model(): Loads the Model resource into memory. Obtains location of model artifacts of the model architecture. Loads the model architecture. Compiles the model. warmup_model(): Warms up the initialized model weights train_model(): Train the model. evaluate_model(): Evaluates the model. Saves evaluation metrics to Cloud Storage bucket.
End of explanation """ DATASET_ID = dataset.resource_name MODEL_ID = vertex_custom_model.resource_name !cd custom; python3 -m trainer.task --model-id={MODEL_ID} --dataset-id={DATASET_ID} --experiment='chicago' --run='test' --project={PROJECT_ID} --epochs=5 --model-dir=/tmp --evaluate=True """ Explanation: Test training package locally Next, test your completed training package locally with just a few epochs. End of explanation """ MODEL_DIR = f"{BUCKET_NAME}/base_model" !cd custom; python3 -m trainer.task --model-id={MODEL_ID} --dataset-id={DATASET_ID} --project={PROJECT_ID} --epochs=5 --steps=300 --batch_size=16 --lr=0.01 --start_lr=0.0001 --model-dir={MODEL_DIR} --warmup=True """ Explanation: Warmup training Now that you have tested the training scripts, you perform warmup training on the base model. Warmup training is used to stabilize the weight initialization. By doing so, each subsequent training and tuning of the model architecture will start with the same stabilized weight initialization. End of explanation """ DISPLAY_NAME = "chicago_" + TIMESTAMP job = aip.CustomPythonPackageTrainingJob( display_name=DISPLAY_NAME, python_package_gcs_uri=f"{BUCKET_NAME}/trainer_chicago.tar.gz", python_module_name="trainer.task", container_uri=TRAIN_IMAGE, model_serving_container_image_uri=DEPLOY_IMAGE, project=PROJECT_ID, ) ! rm -rf custom/logs ! rm -rf custom/trainer/__pycache__ """ Explanation: Mirrored Strategy When training on a single VM, one can either train was a single compute device or with multiple compute devices on the same VM. With Vertex AI Distributed Training you can specify both the number of compute devices for the VM instance and type of compute devices: CPU, GPU. Vertex AI Distributed Training supports `tf.distribute.MirroredStrategy' for TensorFlow models. 
To enable training across multiple compute devices on the same VM, you do the following additional steps in your Python training script: Set the tf.distribute.MirrorStrategy Compile the model within the scope of tf.distribute.MirrorStrategy. Note: Tells MirroredStrategy which variables to mirror across your compute devices. Increase the batch size for each compute device to num_devices * batch size. During transitions, the distribution of batches will be synchronized as well as the updates to the model parameters. Create and run custom training job To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job. Create custom training job A custom training job is created with the CustomTrainingJob class, with the following parameters: display_name: The human readable name for the custom training job. container_uri: The training container image. python_package_gcs_uri: The location of the Python training package as a tarball. python_module_name: The relative path to the training script in the Python package. model_serving_container_uri: The container image for deploying the model. Note: There is no requirements parameter. You specify any requirements in the setup.py script in your Python package. End of explanation """ ! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_chicago.tar.gz """ Explanation: Store training script on your Cloud Storage bucket Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket. 
End of explanation """ MODEL_DIR = BUCKET_NAME + "/testing" CMDARGS = [ "--epochs=5", "--batch_size=16", "--distribute=mirrored", "--experiment=chicago", "--run=test", "--project=" + PROJECT_ID, "--model-id=" + MODEL_ID, "--dataset-id=" + DATASET_ID, ] model = job.run( model_display_name="chicago_" + TIMESTAMP, args=CMDARGS, replica_count=1, machine_type=TRAIN_COMPUTE, accelerator_type=TRAIN_GPU.name, accelerator_count=TRAIN_NGPU, base_output_dir=MODEL_DIR, service_account=SERVICE_ACCOUNT, tensorboard=tensorboard_resource_name, sync=True, ) """ Explanation: Run the custom Python package training job Next, you run the custom job to start the training job by invoking the method run(). The parameters are the same as when running a CustomTrainingJob. Note: The parameter service_account is set so that the initializing experiment step aip.init(experiment="...") has necessarily permission to access the Vertex AI Metadata Store. End of explanation """ job.delete() """ Explanation: Delete a custom training job After a training job is completed, you can delete the training job with the method delete(). Prior to completion, a training job can be canceled with the method cancel(). End of explanation """ model.delete() """ Explanation: Delete the model The method 'delete()' will delete the model. End of explanation """ if TRAIN_GPU: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": TRAIN_NGPU, } else: machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} """ Explanation: Hyperparameter tuning Next, you perform hyperparameter tuning with the training package. The training package has some additions that make the same package usable for both hyperparameter tuning, as well as local testing and full cloud training: Command-Line: tuning: indicates to use the HyperTune service as a callback during training. train(): If tuning is set, creates and adds a callback to HyperTune service. 
Prepare your machine specification Now define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training. - machine_type: The type of GCP instance to provision -- e.g., n1-standard-8. - accelerator_type: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable TRAIN_GPU != None, you are using a GPU; otherwise you will use a CPU. - accelerator_count: The number of accelerators. End of explanation """ DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard] DISK_SIZE = 200 # GB disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE} """ Explanation: Prepare your disk specification (optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training. boot_disk_type: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. boot_disk_size_gb: Size of disk in GB. End of explanation """ CMDARGS = [ "--epochs=5", "--distribute=mirrored", # "--experiment=chicago", # "--run=tune", # "--project=" + PROJECT_ID, "--model-id=" + MODEL_ID, "--dataset-id=" + DATASET_ID, "--tuning=True", ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "python_package_spec": { "executor_image_uri": TRAIN_IMAGE, "package_uris": [BUCKET_NAME + "/trainer_chicago.tar.gz"], "python_module": "trainer.task", "args": CMDARGS, }, } ] """ Explanation: Define worker pool specification for hyperparameter tuning job Next, define the worker pool specification. Note that we plan to tune the learning rate and batch size, so you do not pass them as command-line arguments (omitted). The Vertex AI Hyperparameter Tuning service will pick values for both learning rate and batch size during trials, which it will pass along as command-line arguments. 
End of explanation """

job = aip.CustomJob(
    display_name="chicago_" + TIMESTAMP,
    worker_pool_specs=worker_pool_spec
)
""" Explanation: Create a custom job Use the class CustomJob to create a custom job, such as for hyperparameter tuning, with the following parameters: display_name: A human readable name for the custom job. worker_pool_specs: The specification for the corresponding VM instances. End of explanation """

from google.cloud.aiplatform import hyperparameter_tuning as hpt

# Tune the learning rate (log scale) and batch size (discrete grid); the
# objective is the val_loss metric reported by the training script, with
# goal "minimize".
hpt_job = aip.HyperparameterTuningJob(
    display_name="chicago_" + TIMESTAMP,
    custom_job=job,
    metric_spec={
        "val_loss": "minimize",
    },
    parameter_spec={
        "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
        "batch_size": hpt.DiscreteParameterSpec([16, 32, 64, 128, 256], scale="linear"),
    },
    # None selects the default Vizier (Bayesian) search algorithm.
    search_algorithm=None,
    max_trial_count=8,
    # Sequential trials (no parallelism) let Vizier learn from each result.
    parallel_trial_count=1,
)
""" Explanation: Create a hyperparameter tuning job Use the class HyperparameterTuningJob to create a hyperparameter tuning job, with the following parameters: display_name: A human readable name for the custom job. custom_job: The worker pool spec from this custom job applies to the CustomJobs created in all the trials. metrics_spec: The metrics to optimize. The dictionary key is the metric_id, which is reported by your training job, and the dictionary value is the optimization goal of the metric('minimize' or 'maximize'). parameter_spec: The parameters to optimize. The dictionary key is the metric_id, which is passed into your training job as a command line key word argument, and the dictionary value is the parameter specification of the metric. search_algorithm: The search algorithm to use: grid, random and None. If None is specified, the Vizier service (Bayesian) is used. max_trial_count: The maximum number of trials to perform. End of explanation """

hpt_job.run()
""" Explanation: Run the hyperparameter tuning job Use the run() method to execute the hyperparameter tuning job.
End of explanation """ best = (None, None, None, 0.0) for trial in hpt_job.trials: # Keep track of the best outcome if float(trial.final_measurement.metrics[0].value) > best[3]: try: best = ( trial.id, float(trial.parameters[0].value), float(trial.parameters[1].value), float(trial.final_measurement.metrics[0].value), ) except: best = ( trial.id, float(trial.parameters[0].value), None, float(trial.final_measurement.metrics[0].value), ) print(best) """ Explanation: Best trial Now look at which trial was the best: End of explanation """ hpt_job.delete() """ Explanation: Delete the hyperparameter tuning job The method 'delete()' will delete the hyperparameter tuning job. End of explanation """ LR = best[2] BATCH_SIZE = int(best[1]) """ Explanation: Save the best hyperparameter values End of explanation """ DISPLAY_NAME = "chicago_" + TIMESTAMP job = aip.CustomPythonPackageTrainingJob( display_name=DISPLAY_NAME, python_package_gcs_uri=f"{BUCKET_NAME}/trainer_chicago.tar.gz", python_module_name="trainer.task", container_uri=TRAIN_IMAGE, model_serving_container_image_uri=DEPLOY_IMAGE, project=PROJECT_ID, ) """ Explanation: Create and run custom training job To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job. Create custom training job A custom training job is created with the CustomTrainingJob class, with the following parameters: display_name: The human readable name for the custom training job. container_uri: The training container image. python_package_gcs_uri: The location of the Python training package as a tarball. python_module_name: The relative path to the training script in the Python package. model_serving_container_uri: The container image for deploying the model. Note: There is no requirements parameter. You specify any requirements in the setup.py script in your Python package. 
End of explanation """ MODEL_DIR = BUCKET_NAME + "/trained" FULL_EPOCHS = 100 CMDARGS = [ f"--epochs={FULL_EPOCHS}", f"--lr={LR}", f"--batch_size={BATCH_SIZE}", "--distribute=mirrored", "--experiment=chicago", "--run=full", "--project=" + PROJECT_ID, "--model-id=" + MODEL_ID, "--dataset-id=" + DATASET_ID, "--evaluate=True", ] model = job.run( model_display_name="chicago_" + TIMESTAMP, args=CMDARGS, replica_count=1, machine_type=TRAIN_COMPUTE, accelerator_type=TRAIN_GPU.name, accelerator_count=TRAIN_NGPU, base_output_dir=MODEL_DIR, service_account=SERVICE_ACCOUNT, tensorboard=tensorboard_resource_name, sync=True, ) """ Explanation: Run the custom Python package training job Next, you run the custom job to start the training job by invoking the method run(). The parameters are the same as when running a CustomTrainingJob. Note: The parameter service_account is set so that the initializing experiment step aip.init(experiment="...") has necessarily permission to access the Vertex AI Metadata Store. End of explanation """ job.delete() """ Explanation: Delete a custom training job After a training job is completed, you can delete the training job with the method delete(). Prior to completion, a training job can be canceled with the method cancel(). End of explanation """ EXPERIMENT_NAME = "chicago" experiment_df = aip.get_experiment_df() experiment_df = experiment_df[experiment_df.experiment_name == EXPERIMENT_NAME] experiment_df.T """ Explanation: Get the experiment results Next, you use the experiment name as a parameter to the method get_experiment_df() to get the results of the experiment as a pandas dataframe. End of explanation """ METRICS = MODEL_DIR + "/model/metrics.txt" ! gsutil cat $METRICS """ Explanation: Review the custom model evaluation results Next, you review the evaluation metrics builtin into the training package. 
End of explanation """ tensorboard.delete() vertex_custom_model = model model = tf.keras.models.load_model(MODEL_DIR + "/model") """ Explanation: Delete the TensorBoard instance Next, delete the TensorBoard instance. End of explanation """ %%writefile custom/trainer/serving.py import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_transform as tft import logging def _get_serve_features_fn(model, tft_output): """Returns a function that accept a dictionary of features and applies TFT.""" model.tft_layer = tft_output.transform_features_layer() @tf.function def serve_features_fn(raw_features): """Returns the output to be used in the serving signature.""" transformed_features = model.tft_layer(raw_features) probabilities = model(transformed_features) return {"scores": probabilities} return serve_features_fn def _get_serve_tf_examples_fn(model, tft_output, feature_spec): """Returns a function that parses a serialized tf.Example and applies TFT.""" model.tft_layer = tft_output.transform_features_layer() @tf.function def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" for key in list(feature_spec.keys()): if key not in features: feature_spec.pop(key) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) probabilities = model(transformed_features) return {"scores": probabilities} return serve_tf_examples_fn def construct_serving_model( model, serving_model_dir, metadata ): global features schema_location = metadata['schema'] features = metadata['numeric_features'] + metadata['categorical_features'] + metadata['embedding_features'] print("FEATURES", features) tft_output_dir = metadata["transform_artifacts_dir"] schema = tfdv.load_schema_text(schema_location) feature_spec = tft.tf_metadata.schema_utils.schema_as_feature_spec(schema).feature_spec tft_output = tft.TFTransformOutput(tft_output_dir) # Drop features 
that were not used in training features_input_signature = { feature_name: tf.TensorSpec( shape=(None, 1), dtype=spec.dtype, name=feature_name ) for feature_name, spec in feature_spec.items() if feature_name in features } signatures = { "serving_default": _get_serve_features_fn( model, tft_output ).get_concrete_function(features_input_signature), "serving_tf_example": _get_serve_tf_examples_fn( model, tft_output, feature_spec ).get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples") ), } logging.info("Model saving started...") model.save(serving_model_dir, signatures=signatures) logging.info("Model saving completed.") """ Explanation: Add a serving function Next, you add a serving function to your model for online and batch prediction. This allows prediction requests to be sent in raw format (unpreprocessed), either as a serialized TF.Example or JSONL object. The serving function will then preprocess the prediction request into the transformed format expected by the model. End of explanation """ os.chdir("custom") from trainer import serving SERVING_MODEL_DIR = BUCKET_NAME + "/serving_model" serving.construct_serving_model( model=model, serving_model_dir=SERVING_MODEL_DIR, metadata=metadata ) serving_model = tf.keras.models.load_model(SERVING_MODEL_DIR) os.chdir("..") """ Explanation: Construct the serving model Now construct the serving model and store the serving model to your Cloud Storage bucket. End of explanation """ EXPORTED_TFREC_PREFIX = metadata["exported_tfrec_prefix"] file_names = tf.data.TFRecordDataset.list_files( EXPORTED_TFREC_PREFIX + "/data-*.tfrecord" ) for batch in tf.data.TFRecordDataset(file_names).batch(3).take(1): predictions = serving_model.signatures["serving_tf_example"](batch) for key in predictions: print(f"{key}: {predictions[key]}") """ Explanation: Test the serving model locally with tf.Example data Next, test the layer interface in the serving model for tf.Example data. 
End of explanation """ schema = tfdv.load_schema_text(metadata["schema"]) feature_spec = tft.tf_metadata.schema_utils.schema_as_feature_spec(schema).feature_spec instance = { "dropoff_grid": "POINT(-87.6 41.9)", "euclidean": 2064.2696, "loc_cross": "", "payment_type": "Credit Card", "pickup_grid": "POINT(-87.6 41.9)", "trip_miles": 1.37, "trip_day": 12, "trip_hour": 6, "trip_month": 2, "trip_day_of_week": 4, "trip_seconds": 555, } for feature_name in instance: dtype = feature_spec[feature_name].dtype instance[feature_name] = tf.constant([[instance[feature_name]]], dtype) predictions = serving_model.signatures["serving_default"](**instance) for key in predictions: print(f"{key}: {predictions[key].numpy()}") """ Explanation: Test the serving model locally with JSONL data Next, test the layer interface in the serving model for JSONL data. End of explanation """ vertex_serving_model = aip.Model.upload( display_name="chicago_" + TIMESTAMP, artifact_uri=SERVING_MODEL_DIR, serving_container_image_uri=DEPLOY_IMAGE, labels={"user_metadata": BUCKET_NAME[5:]}, sync=True, ) """ Explanation: Upload the serving model to a Vertex AI Model resource Next, you upload your serving custom model artifacts to Vertex AI to convert into a managed Vertex AI Model resource. End of explanation """ SERVING_OUTPUT_DATA_DIR = BUCKET_NAME + "/batch_eval" EXPORTED_JSONL_PREFIX = metadata["exported_jsonl_prefix"] MIN_NODES = 1 MAX_NODES = 1 job = vertex_serving_model.batch_predict( instances_format="jsonl", predictions_format="jsonl", job_display_name="chicago_" + TIMESTAMP, gcs_source=EXPORTED_JSONL_PREFIX + "*.jsonl", gcs_destination_prefix=SERVING_OUTPUT_DATA_DIR, model_parameters=None, machine_type=DEPLOY_COMPUTE, accelerator_type=DEPLOY_GPU, accelerator_count=DEPLOY_NGPU, starting_replica_count=MIN_NODES, max_replica_count=MAX_NODES, sync=True, ) """ Explanation: Evaluate the serving model Next, evaluate the serving model with the evaluation (test) slices. 
For apples-to-apples comparison, you use the same evaluation slices for both the custom model and the AutoML model. Since your evaluation slices and metrics maybe custom, we recommend: Send each evaluation slice as a Vertex AI Batch Prediction Job. Use a custom evaluation script to evaluate the results from the batch prediction job. End of explanation """ batch_dir = ! gsutil ls $SERVING_OUTPUT_DATA_DIR batch_dir = batch_dir[0] outputs = ! gsutil ls $batch_dir errors = outputs[0] results = outputs[1] print("errors") ! gsutil cat $errors print("results") ! gsutil cat $results | head -n10 model = async_model """ Explanation: Perform custom evaluation metrics After the batch job has completed, you input the results and target labels to your custom evaluation script. For demonstration purposes, we just display the results of the batch prediction. End of explanation """ model.wait() """ Explanation: Wait for completion of AutoML training job Next, wait for the AutoML training job to complete. Alternatively, one can set the parameter sync to True in the run() method to block until the AutoML training job is completed. End of explanation """ model_evaluations = model.list_model_evaluations() for model_evaluation in model_evaluations: print(model_evaluation.to_dict()) """ Explanation: Review model evaluation scores After your model training has finished, you can review the evaluation scores for it using the list_model_evaluations() method. This method will return an iterator for each evaluation slice. 
End of explanation """

import json

# Record where the evaluation artifacts live so later stages can compare
# this custom model against the AutoML baseline.
metadata = {}
metadata["train_eval_metrics"] = METRICS
metadata["custom_eval_metrics"] = "[you-fill-this-in]"

# BUCKET_NAME carries the "gs://" scheme; [5:] strips it before the scheme
# is re-added, yielding a well-formed gs:// URI for the metadata file.
with tf.io.gfile.GFile("gs://" + BUCKET_NAME[5:] + "/metadata.jsonl", "w") as f:
    json.dump(metadata, f)

!gsutil cat $BUCKET_NAME/metadata.jsonl
""" Explanation: Compare metric results with AutoML baseline Finally, you make a decision if the current experiment produces a custom model that is better than the AutoML baseline, as follows: - Compare the evaluation results for each evaluation slice between the custom model and the AutoML model. - Weight the results according to your business purposes. - Add up the result and make a determination if the custom model is better. Store evaluation results for custom model Next, you use the labels field to store user metadata containing the custom metrics information. End of explanation """

# Best-effort teardown of the Google Cloud resources created by the
# notebook; each delete is guarded so one failure does not stop the rest.
delete_all = False

if delete_all:
    # Delete the dataset using the Vertex dataset object
    try:
        if "dataset" in globals():
            dataset.delete()
    except Exception as e:
        print(e)

    # Delete the model using the Vertex model object
    try:
        if "model" in globals():
            model.delete()
    except Exception as e:
        print(e)

    if "BUCKET_NAME" in globals():
        ! gsutil rm -r $BUCKET_NAME
""" Explanation: Cleaning up To clean up all Google Cloud resources used in this project, you can delete the Google Cloud project you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: Dataset Pipeline Model Endpoint AutoML Training Job Batch Job Custom Job Hyperparameter Tuning Job Cloud Storage Bucket End of explanation """
radu941208/DeepLearning
Convolutional_Neural_Network/Autonomous+driving+application+-+Car+detection+-+v1.ipynb
mit
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body

%matplotlib inline
"""
Explanation: Autonomous driving - Car detection
Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: Redmon et al., 2016 (https://arxiv.org/abs/1506.02640) and Redmon and Farhadi, 2016 (https://arxiv.org/abs/1612.08242).
You will learn to:
- Use object detection on a car detection dataset
- Deal with bounding boxes
Run the following cell to load the packages and dependencies that are going to be useful for your journey!
End of explanation
"""

# GRADED FUNCTION: yolo_filter_boxes

def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Filters YOLO boxes by thresholding on object and class confidence.

    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1)
    boxes -- tensor of shape (19, 19, 5, 4)
    box_class_probs -- tensor of shape (19, 19, 5, 80)
    threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box

    Returns:
    scores -- tensor of shape (None,), containing the class probability score for selected boxes
    boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
    classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes

    Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
    For example, the actual output size of scores would be (10,) if there are 10 boxes.
    """

    # Step 1: Compute box scores
    ### START CODE HERE ### (≈ 1 line)
    # Broadcasting (19, 19, 5, 1) * (19, 19, 5, 80) -> (19, 19, 5, 80):
    # per box and per class, score = P(object present) * P(class | object).
    box_scores = box_confidence * box_class_probs
    ### END CODE HERE ###

    # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score
    ### START CODE HERE ### (≈ 2 lines)
    # axis=-1 reduces over the 80 classes: best class index and its score for each of the 19x19x5 boxes.
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    ### END CODE HERE ###

    # Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
    # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
    ### START CODE HERE ### (≈ 1 line)
    filtering_mask = (box_class_scores>=threshold)
    ### END CODE HERE ###

    # Step 4: Apply the mask to scores, boxes and classes
    ### START CODE HERE ### (≈ 3 lines)
    # tf.boolean_mask flattens the 19x19x5 grid, keeping only the entries where filtering_mask is True.
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
    ### END CODE HERE ###

    return scores, boxes, classes

with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.shape))
    print("boxes.shape = " + str(boxes.shape))
    print("classes.shape = " + str(classes.shape))
"""
Explanation: Important Note: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: K.function(...).
1 - Problem Statement
You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around.
<center>
<video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We would like to especially thank drive.ai for providing this dataset!
Drive.ai is a company building the brains of self-driving vehicles. </center></caption> <img src="nb_images/driveai.png" style="width:100px;height:100;"> You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. <img src="nb_images/box_label.png" style="width:500px;height:250;"> <caption><center> <u> Figure 1 </u>: Definition of a box<br> </center></caption> If you have 80 classes that you want YOLO to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how YOLO works, then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. 2 - YOLO YOLO ("you only look once") is a popular algoritm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. 2.1 - Model details First things to know: - The input is a batch of images of shape (m, 608, 608, 3) - The output is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. We will use 5 anchor boxes. So you can think of the YOLO architecture as the following: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). 
Lets look in greater detail at what this encoding represents. <img src="nb_images/architecture.png" style="width:700px;height:400;"> <caption><center> <u> Figure 2 </u>: Encoding architecture for YOLO<br> </center></caption> If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height. For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). <img src="nb_images/flatten.png" style="width:700px;height:400;"> <caption><center> <u> Figure 3 </u>: Flattening the last two last dimensions<br> </center></caption> Now, for each box (of each cell) we will compute the following elementwise product and extract a probability that the box contains a certain class. <img src="nb_images/probability_extraction.png" style="width:700px;height:400;"> <caption><center> <u> Figure 4 </u>: Find the class detected by each box<br> </center></caption> Here's one way to visualize what YOLO is predicting on an image: - For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across both the 5 anchor boxes and across different classes). - Color that grid cell according to what object that grid cell considers the most likely. Doing this results in this picture: <img src="nb_images/proba_map.png" style="width:300px;height:300;"> <caption><center> <u> Figure 5 </u>: Each of the 19x19 grid cells colored according to which class has the largest predicted probability in that cell.<br> </center></caption> Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. 
Doing that results in a visualization like this: <img src="nb_images/anchor_map.png" style="width:200px;height:200;"> <caption><center> <u> Figure 6 </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption> In the figure above, we plotted only boxes that the model had assigned a high probability to, but this is still too many boxes. You'd like to filter the algorithm's output down to a much smaller number of detected objects. To do so, you'll use non-max suppression. Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class) - Select only one box when several boxes overlap with each other and detect the same object. 2.2 - Filtering with a threshold on class scores You are going to apply a first filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It'll be convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - box_confidence: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells. - boxes: tensor of shape $(19 \times 19, 5, 4)$ containing $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes per cell. - box_class_probs: tensor of shape $(19 \times 19, 5, 80)$ containing the detection probabilities $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell. Exercise: Implement yolo_filter_boxes(). 1. Compute box scores by doing the elementwise product as described in Figure 4. 
The following code may help you choose the right operator: python a = np.random.randn(19*19, 5, 1) b = np.random.randn(19*19, 5, 80) c = a * b # shape of c will be (19*19, 5, 80) 2. For each box, find: - the index of the class with the maximum box score (Hint) (Be careful with what axis you choose; consider using axis=-1) - the corresponding box score (Hint) (Be careful with what axis you choose; consider using axis=-1) 3. Create a mask by using a threshold. As a reminder: ([0.9, 0.3, 0.4, 0.5, 0.1] &lt; 0.4) returns: [False, True, False, False, True]. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to box_class_scores, boxes and box_classes to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. (Hint) Reminder: to call a Keras function, you should use K.function(...). End of explanation """ # GRADED FUNCTION: iou def iou(box1, box2): """Implement the intersection over union (IoU) between box1 and box2 Arguments: box1 -- first box, list object with coordinates (x1, y1, x2, y2) box2 -- second box, list object with coordinates (x1, y1, x2, y2) """ # Calculate the (y1, x1, y2, x2) coordinates of the intersection of box1 and box2. Calculate its Area. 
### START CODE HERE ### (≈ 5 lines) xi1 = max(box1[0],box2[0]) yi1 = max(box1[1],box2[1]) xi2 = min(box1[2],box2[2]) yi2 = min(box1[3],box2[3]) inter_area = (yi2-yi1)*(xi2-xi1) ### END CODE HERE ### # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B) ### START CODE HERE ### (≈ 3 lines) box1_area = (box1[2]-box1[0])*(box1[3]-box1[1]) box2_area = (box2[2]-box2[0])*(box2[3]-box2[1]) union_area = box1_area + box2_area - inter_area ### END CODE HERE ### # compute the IoU ### START CODE HERE ### (≈ 1 line) iou = (inter_area/union_area) ### END CODE HERE ### return iou box1 = (2, 1, 4, 3) box2 = (1, 2, 3, 4) print("iou = " + str(iou(box1, box2))) """ Explanation: Expected Output: <table> <tr> <td> **scores[2]** </td> <td> 10.7506 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 8.42653275 3.27136683 -0.5313437 -4.94137383] </td> </tr> <tr> <td> **classes[2]** </td> <td> 7 </td> </tr> <tr> <td> **scores.shape** </td> <td> (?,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (?, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (?,) </td> </tr> </table> 2.3 - Non-max suppression Even after filtering by thresholding over the classes scores, you still end up a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). <img src="nb_images/non-max-suppression.png" style="width:500px;height:400;"> <caption><center> <u> Figure 7 </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probabiliy) one of the 3 boxes. <br> </center></caption> Non-max suppression uses the very important function called "Intersection over Union", or IoU. <img src="nb_images/iou.png" style="width:500px;height:400;"> <caption><center> <u> Figure 8 </u>: Definition of "Intersection over Union". <br> </center></caption> Exercise: Implement iou(). 
Some hints:
- In this exercise only, we define a box using its two corners (upper left and lower right): (x1, y1, x2, y2) rather than the midpoint and height/width.
- To calculate the area of a rectangle you need to multiply its height (y2 - y1) by its width (x2 - x1)
- You'll also need to find the coordinates (xi1, yi1, xi2, yi2) of the intersection of two boxes. Remember that:
- xi1 = maximum of the x1 coordinates of the two boxes
- yi1 = maximum of the y1 coordinates of the two boxes
- xi2 = minimum of the x2 coordinates of the two boxes
- yi2 = minimum of the y2 coordinates of the two boxes
In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) the lower-right corner.
End of explanation
"""

# GRADED FUNCTION: yolo_non_max_suppression

def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to set of boxes

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (, None), predicted score for each box
    boxes -- tensor of shape (4, None), predicted box coordinates
    classes -- tensor of shape (, None), predicted class for each box

    Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this
    function will transpose the shapes of scores, boxes, classes. This is made for convenience.
    """

    max_boxes_tensor = K.variable(max_boxes, dtype='int32')     # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor

    # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
    ### START CODE HERE ### (≈ 1 line)
    # Greedily keeps the highest-scoring boxes, discarding any box whose IoU with an
    # already-selected box exceeds iou_threshold; at most max_boxes indices come back.
    nms_indices = tf.image.non_max_suppression(boxes, scores,max_boxes, iou_threshold)
    ### END CODE HERE ###

    # Use K.gather() to select only nms_indices from scores, boxes and classes
    ### START CODE HERE ### (≈ 3 lines)
    # K.gather keeps only the entries selected by NMS, in nms_indices order.
    scores = K.gather(scores,nms_indices)
    boxes = K.gather(boxes,nms_indices)
    classes = K.gather(classes,nms_indices)
    ### END CODE HERE ###

    return scores, boxes, classes

with tf.Session() as test_b:
    scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
    classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
"""
Explanation: Expected Output:
<table>
<tr>
<td> **iou = ** </td>
<td> 0.14285714285714285 </td>
</tr>
</table>
You are now ready to implement non-max suppression. The key steps are:
1. Select the box that has the highest score.
2. Compute its overlap with all other boxes, and remove boxes that overlap it more than iou_threshold.
3. Go back to step 1 and iterate until there's no more boxes with a lower score than the current selected box. This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.
Exercise: Implement yolo_non_max_suppression() using TensorFlow.
TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your iou() implementation): - tf.image.non_max_suppression() - K.gather() End of explanation """ # GRADED FUNCTION: yolo_eval def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5): """ Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes. Arguments: yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors: box_confidence: tensor of shape (None, 19, 19, 5, 1) box_xy: tensor of shape (None, 19, 19, 5, 2) box_wh: tensor of shape (None, 19, 19, 5, 2) box_class_probs: tensor of shape (None, 19, 19, 5, 80) image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype) max_boxes -- integer, maximum number of predicted boxes you'd like score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (None, ), predicted score for each box boxes -- tensor of shape (None, 4), predicted box coordinates classes -- tensor of shape (None,), predicted class for each box """ ### START CODE HERE ### # Retrieve outputs of the YOLO model (≈1 line) box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs # Convert boxes to be ready for filtering functions boxes = yolo_boxes_to_corners(box_xy, box_wh) # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = iou_threshold) # Scale boxes back to original image shape. 
boxes = scale_boxes(boxes, image_shape) # Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold=iou_threshold) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)) scores, boxes, classes = yolo_eval(yolo_outputs) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) """ Explanation: Expected Output: <table> <tr> <td> **scores[2]** </td> <td> 6.9384 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [-5.299932 3.13798141 4.45036697 0.95942086] </td> </tr> <tr> <td> **classes[2]** </td> <td> -2.24527 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> 2.4 Wrapping up the filtering It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. Exercise: Implement yolo_eval() which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. 
YOLO converts between a few such formats at different times, using the following functions (which we have provided): python boxes = yolo_boxes_to_corners(box_xy, box_wh) which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of yolo_filter_boxes python boxes = scale_boxes(boxes, image_shape) YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called. End of explanation """ sess = K.get_session() """ Explanation: Expected Output: <table> <tr> <td> **scores[2]** </td> <td> 138.791 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] </td> </tr> <tr> <td> **classes[2]** </td> <td> 54 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> <font color='blue'> Summary for YOLO: - Input image (608, 608, 3) - The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. 
- 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and and 80 is the number of classes we'd like to detect - You then select only few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes - This gives you YOLO's final output. 3 - Test YOLO pretrained model on images In this part, you are going to use a pretrained model and test it on the car detection dataset. As usual, you start by creating a session to start your graph. Run the following cell. End of explanation """ class_names = read_classes("model_data/coco_classes.txt") anchors = read_anchors("model_data/yolo_anchors.txt") image_shape = (720., 1280.) """ Explanation: 3.1 - Defining classes, anchors and image shape. Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell. The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. End of explanation """ yolo_model = load_model("model_data/yolo.h5") """ Explanation: 3.2 - Loading a pretrained model Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file. 
End of explanation
"""

yolo_model.summary()
"""
Explanation: This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
End of explanation
"""

yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
"""
Explanation: Note: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.
Reminder: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).
3.3 - Convert output of the model to usable bounding box tensors
The output of yolo_model is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.
End of explanation
"""

scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
"""
Explanation: You added yolo_outputs to your graph. This set of 4 tensors is ready to be used as input by your yolo_eval function.
3.4 - Filtering boxes
yolo_outputs gave you all the predicted boxes of yolo_model in the correct format. You're now ready to perform filtering and select only the best boxes. Lets now call yolo_eval, which you had previously implemented, to do this.
End of explanation
"""

def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """

    # Preprocess your image
    # image is a PIL image used for drawing boxes; image_data is the numpy array that is fed to the CNN.
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    ### START CODE HERE ### (≈ 1 line)
    # K.learning_phase(): 0 runs the graph in inference mode, required because the model uses BatchNorm.
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],feed_dict={yolo_model.input: image_data,K.learning_phase(): 0 })
    ### END CODE HERE ###

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    # NOTE(review): scipy.misc.imread is deprecated in newer SciPy releases; imageio.imread is the usual replacement.
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
"""
Explanation: 3.5 - Run the graph on an image
Let the fun begin. You have created a (sess) graph that can be summarized as follows:

<font color='purple'> yolo_model.input </font> is given to yolo_model. The model is used to compute the output <font color='purple'> yolo_model.output </font>
<font color='purple'> yolo_model.output </font> is processed by yolo_head. It gives you <font color='purple'> yolo_outputs </font>
<font color='purple'> yolo_outputs </font> goes through a filtering function, yolo_eval. It outputs your predictions: <font color='purple'> scores, boxes, classes </font>

Exercise: Implement predict() which runs the graph to test YOLO on an image. You will need to run a TensorFlow session, to have it compute scores, boxes, classes.
The code below also uses the following function:
python
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
which outputs:
- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.
- image_data: a numpy-array representing the image. This will be the input to the CNN.
Important note: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.
End of explanation
"""

out_scores, out_boxes, out_classes = predict(sess, "cars.jpg")
"""
Explanation: Run the following cell on the "cars.jpg" image to verify that your function is correct.
End of explanation
"""
emalgorithm/Algorithm_Notebooks
ShortestPath/Shortest Path Problem.ipynb
gpl-3.0
import math import numpy as np from graphviz import Digraph import queue # so our plots get drawn in the notebook %matplotlib inline from matplotlib import pyplot as plt from random import randint from time import clock """ Explanation: Shortest Path Problem Imports End of explanation """ # A timer - runs the provided function and reports the # run time in ms def time_f(f): before = clock() f() after = clock() return after - before """ Explanation: Utilities to time functions End of explanation """ def get_graph_img_from_adjacency_matrix(graph_matrix): n_nodes = len(graph_matrix) dot = Digraph() for i in range(n_nodes): dot.node(str(i)) for i in range(n_nodes): for j in range(n_nodes): if np.isfinite(graph_matrix[i][j]): dot.edge(str(i), str(j), str(graph_matrix[i][j])) return dot def get_graph_img_from_adjacency_list(graph_list): n_nodes = len(graph_list) dot = Digraph() for i in range(n_nodes): dot.node(str(i)) for i in range(n_nodes): for adj_node, edge_cost in graph_list[i]: dot.edge(str(i), str(adj_node), str(edge_cost)) return dot def get_graph_img_from_edges_list(n_nodes, edges_list): dot = Digraph() for i in range(n_nodes): dot.node(str(i)) for edge in edges_list: start, destination, cost = edge dot.edge(str(start), str(destination), str(cost)) return dot """ Explanation: Utilities to draw graphs End of explanation """ n_nodes = 10 # List of tuples (start, destination, cost) which represent directed edges non_negative_edges_list = [ (0, 1, 2), (0, 2, 10), (0, 3, 7), (1, 5, 4), (2, 3, 2), (2, 4, 3), (4, 0, 5), (5, 3, 1), (7, 8, 3), (8, 9, 4), ] # Adjacency list representation of a graph of n_nodes nodes # non_negative_graph_list[i] contains a list of tuple (adj_node, cost) for all adjacent nodes of i non_negative_graph_list = [[] for _ in range(n_nodes)] for edge in non_negative_edges_list: start, destination, cost = edge non_negative_graph_list[start].append((destination, cost)) """ Explanation: Graphs Graph with no negative edge End of explanation """ # 
List of tuples (start, destination, shortest_path_cost) which represent directed edges non_negative_solution_list = [ (0, 1, 2), (0, 5, 6), (0, 3, 7), (0, 8, np.inf), (4, 1, 7), ] """ Explanation: Solution End of explanation """ img = get_graph_img_from_adjacency_list(non_negative_graph_list) img """ Explanation: Draw Graph End of explanation """ n_nodes = 10 # List of tuples (start, destination, cost) which represent directed edges edges_list = [ (0, 1, 2), (0, 2, 10), (0, 3, 7), (1, 5, 4), (2, 3, -4), (2, 4, 3), (3, 6, 1), (4, 0, -5), (5, 3, 1), (7, 8, 3), (8, 9, 4), ] # Adjacency list representation of a graph of n_nodes nodes # non_negative_graph_list[i] contains a list of tuple (adj_node, cost) for all adjacent nodes of i graph_list = [[] for _ in range(n_nodes)] # Adjacency matrix representation of a graph of n_nodes nodes # Initialize all values to infinite graph_matrix = np.full((n_nodes, n_nodes), np.inf) # Values in the diagonal are 0: going from any node to itself has no cost # np.fill_diagonal(graph_matrix, 0) for edge in edges_list: start, destination, cost = edge graph_matrix[start, destination] = cost graph_list[start].append((destination, cost)) """ Explanation: Graph with negative edges End of explanation """ # List of tuples (start, destination, shortest_path_cost) which represent directed edges solution_list = [ (0, 1, 2), (0, 5, 6), (0, 3, 6), (0, 8, np.inf), (0, 6, 7), (4, 1, -3), ] """ Explanation: Solution End of explanation """ img = get_graph_img_from_adjacency_matrix(graph_matrix) img """ Explanation: Draw Graph End of explanation """ def test_single_source(algorithm, graph_list, solution_list): n_nodes = len(graph_list) for start_node in range(n_nodes): sp = algorithm(graph_list, start_node) for sol in solution_list: start, destination, cost = sol if start == start_node: if sp[destination] != cost: return False return True def test_single_source_non_negative(algorithm): return test_single_source(algorithm, non_negative_graph_list, 
non_negative_solution_list) def test_single_source_negative(algorithm): return test_single_source(algorithm, graph_list, solution_list) """ Explanation: Testing Utilities End of explanation """ """ Takes an adjacency list representing a graph and a starting node 'start', and returns a list 'sp' where sp[i] indicates the shortest path cost from start to i. Greedy algorithm which always chooses the closest node from the fringe. Once a node has been extracted from the fringe, then it's sp from the origin has already been computed. Each node gets visited (in this case, extracted from the priority queue) only once """ def dijkstra(graph, start): n_nodes = len(graph) # Array containing the shortest path costs initialized to infinity, apart to start itself which is 0 sp = np.full(n_nodes, np.inf) sp[start] = 0 # Bitmaps to indicate if a given node has already been visited visited = np.full(n_nodes, False) # Min-priority queue representing the fringe of nodes. Contains tuples (distance_from_start, node) fringe = queue.PriorityQueue() fringe.put((sp[start], start)) while not fringe.empty(): _, node = fringe.get() visited[node] = True for adj_node, edge_cost in graph[node]: if not visited[adj_node]: if sp[adj_node] > sp[node] + edge_cost: sp[adj_node] = sp[node] + edge_cost fringe.put((sp[adj_node], adj_node)) return sp """ Explanation: Single-Source Shortest Paths Dijkstra Algorithm End of explanation """ assert test_single_source_non_negative(dijkstra) == True """ Explanation: Testing Dijkstra algorithm should produce the correct answers for our graph with non negative edges. End of explanation """ assert test_single_source_negative(dijkstra) == False """ Explanation: However, it doesn't in general produce correct answers for graphs with negative edges. Our graph with negative edges should serve as example. 
End of explanation """ """ Takes an adjacency list representing a graph and a starting node 'start', and returns a list 'sp' where sp[i] indicates the shortest path cost from start to i. Greedy algorithm which always chooses the closest node from the fringe. Once a node has been extracted from the fringe, then it's sp from the origin has already been computed. """ def dijkstra_variant(graph, start): n_nodes = len(graph) # Array containing the shortest path costs initialized to infinity, apart to start itself which is 0 sp = np.full(n_nodes, np.inf) sp[start] = 0 # Min-priority queue representing the fringe of nodes. Contains tuples (distance_from_start, node) fringe = queue.PriorityQueue() fringe.put((sp[start], start)) while not fringe.empty(): _, node = fringe.get() for adj_node, edge_cost in graph[node]: if sp[adj_node] > sp[node] + edge_cost: sp[adj_node] = sp[node] + edge_cost fringe.put((sp[adj_node], adj_node)) return sp """ Explanation: Complexity Analysis Let $n$ be the number of nodes in the graph. Dijkstra while loop will execute at most $n$ times, since each node can be extracted at most one time from the priority queue. Since extracting from a priority queueu takes logarithmic time, this gives us $\mathcal{O}(n\log{}n)$ complexity for the while loop, without taking the inner for loop into account. In the inner for loop, each node loops through all its neighbours. Using an amortyzed analysis we know that the $n$ nodes overall will iterate over all edges (call this number $E$), once per edge. This means that the overall complexity for Dijkstra is $\mathcal{O}(n\log{}n + E)$ Dijkstra Variant For Negative Edges End of explanation """ assert test_single_source_non_negative(dijkstra_variant) == True """ Explanation: Testing Dijkstra algorithm should produce the correct answers for our graph with non negative edges. 
End of explanation """ assert test_single_source_negative(dijkstra_variant) == True """ Explanation: This variant should produce the correct answers also for graphs with negative edges. Our graph with negative edges should serve as example. End of explanation """ """ Takes an adjacency list representing a graph and a starting node 'start', and returns a list 'sp' where sp[i] indicates the shortest path cost from start to i. Greedy algorithm which always chooses the closest node from the fringe. Once a node has been extracted from the fringe, then it's sp from the origin has already been computed. Each node gets visited (in this case, extracted from the priority queue) only once """ def dijkstra(graph, start): n_nodes = len(graph) # Array containing the shortest path costs initialized to infinity, apart to start itself which is 0 sp = np.full(n_nodes, np.inf) sp[start] = 0 # Bitmaps to indicate if a given node has already been visited visited = np.full(n_nodes, False) # Min-priority queue representing the fringe of nodes. Contains tuples (distance_from_start, node) fringe = queue.PriorityQueue() fringe.put((sp[start], start)) while not fringe.empty(): _, node = fringe.get() visited[node] = True for adj_node, edge_cost in graph[node]: if not visited[adj_node]: if sp[adj_node] > sp[node] + edge_cost: sp[adj_node] = sp[node] + edge_cost fringe.put((sp[adj_node], adj_node)) return sp """ Explanation: Bellman-Ford Algorithm The Bellman-Ford algorithm solves the single source shortest path problem in the general case where negative weight edges may appear. End of explanation """ assert test_single_source_non_negative(dijkstra) == True """ Explanation: Testing Dijkstra algorithm should produce the correct answers for our graph with non negative edges. End of explanation """ assert test_single_source_negative(dijkstra) == False """ Explanation: However, it doesn't in general produce correct answers for graphs with negative edges. 
Our graph with negative edges should serve as example. End of explanation """ """ Takes an adjacency matrix representing a graph and returns a new matrix with matrix[i, j] containg the shortest path between node i and node j. """ def floyd_warshall(graph_matrix): g = graph_matrix[:][:] n_nodes = len(g) floyd_warshall_helper(g, n_nodes - 1) return g """ Takes a matrix representing a graph and modifies that matrix so that at the end of the function graph[i, j] contains the shortest path between node i and node j using only nodes in the set {0,1, .., k} as intermediary nodes, which we will call sp(i, j, k). We have sp(i, j, k) = min(sp(i, j, k - 1), sp(i, k, k - 1) + sp(k, j, k - 1)). This is saying that the shortest path from i to j using nodes {0, 1, .., k} is the minimum between going from i to j using only nodes {0,1, .., k - 1}, and the sum of first going from i to k and then from j to k (both steps still with nodes {0,1, .., k - 1}). """ def floyd_warshall_helper(graph, k): if k == -1: # No intermediary node, leave the original adjacency matrix unchanged return # Run recursively floyd_warshall_helper(graph, k - 1) # Now graph[i][j] contains the shortest path using node {0, 1, .., k - 1} as intermediary n_nodes = len(graph) # Update distance to get from i to j, for every i and j for i in range(n_nodes): for j in range(n_nodes): graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j]) """ Explanation: Complexity Analysis Let $n$ be the number of nodes in the graph. Dijkstra while loop will execute at most $n$ times, since each node can be extracted at most one time from the priority queue. Since extracting from a priority queueu takes logarithmic time, this gives us $\mathcal{O}(n\log{}n)$ complexity for the while loop, without taking the inner for loop into account. In the inner for loop, each node loops through all its neighbours. Using an amortyzed analysis we know that the $n$ nodes overall will iterate over all edges (call this number $E$), once per edge. 
This means that the overall complexity for Dijkstra is $\mathcal{O}(n\log{}n + E)$ All Pairs Shortest Paths Floyd Warshall Algorithm End of explanation """ sp_matrix = floyd_warshall(graph_matrix) for sp in solution_list: start, destination, cost = sp assert sp_matrix[start, destination] == cost """ Explanation: Testing End of explanation """
mmadsen/experiment-seriation-classification
analysis/sc-1/sc-1-seriation-classification-analysis.ipynb
apache-2.0
import numpy as np import networkx as nx import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import cPickle as pickle from copy import deepcopy from sklearn.metrics import classification_report, accuracy_score, confusion_matrix train_graphs = pickle.load(open("train-freq-graphs.pkl",'r')) train_labels = pickle.load(open("train-freq-labels.pkl",'r')) """ Explanation: Table of Contents <p><div class="lev1"><a href="#Seriation-Classification:--sc-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Seriation Classification: sc-1</a></div><div class="lev2"><a href="#Initial-Classification-Attempt"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Initial Classification Attempt</a></div><div class="lev2"><a href="#Leave-One-Out-Cross-Validation-for-Selecting-Optimal-K"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Leave-One-Out Cross Validation for Selecting Optimal K</a></div> # Seriation Classification: sc-1 # The goal of experiment `sc-1` is to validate that the Laplacian eigenvalue spectral distance can be useful in k-Nearest Neighbor classifiers for seriation output. In this experiment, I take a supervised learning approach, starting with two regional metapopulation models, simulating unbiased cultural transmission with 50 replicates across each model, sampling and time averaging the resulting cultural trait distributions in archaeologically realistic ways, and then seriating the results using our IDSS algorithm. Each seriation resulting from this procedure is thus "labeled" as to the regional metapopulation model from which it originated, so we can assess the accuracy of predicting that label based upon the graph spectral similarity. End of explanation """ import sklearn_mmadsen.graphs as skm """ Explanation: sklearn-mmadsen is a python package of useful machine learning tools that I'm accumulating for research and commercial work. You can find it at http://github.com/mmadsen/sklearn-mmadsen. 
End of explanation """ gclf = skm.GraphEigenvalueNearestNeighbors(n_neighbors=5) def leave_one_out_cv(ix, train_graphs, train_labels): """ Simple LOO data sets for kNN classification, given an index, returns a train set, labels, with the left out graph and label as test_graph, test_label """ test_graph = train_graphs[ix] test_label = train_labels[ix] train_loo_graphs = deepcopy(train_graphs) train_loo_labels = deepcopy(train_labels) del train_loo_graphs[ix] del train_loo_labels[ix] return (train_loo_graphs, train_loo_labels, test_graph, test_label) test_pred = [] for ix in range(0, len(train_graphs)): train_loo_graphs, train_loo_labels, test_graph, test_label = leave_one_out_cv(ix, train_graphs, train_labels) gclf.fit(train_loo_graphs, train_loo_labels) test_pred.append(gclf.predict([test_graph])[0]) cm = confusion_matrix(train_labels, test_pred) cmdf = pd.DataFrame(cm) cmdf.columns = map(lambda x: 'predicted {}'.format(x), cmdf.columns) cmdf.index = map(lambda x: 'actual {}'.format(x), cmdf.index) print cmdf print(classification_report(train_labels, test_pred)) print("Accuracy on test: %0.3f" % accuracy_score(train_labels, test_pred)) sns.heatmap(cm.T, square=True, annot=True, fmt='d', cbar=False) """ Explanation: Initial Classification Attempt Let's just see if the graph spectral distance does anything useful at all, or whether I'm barking up the wrong tree. I imagine that we want a few neighbors (to rule out relying on a single neighbor which might be anomalous), but not too many. So let's start with k=5. The approach here is to essentially do a "leave one out" strategy on the dataset. The KNN model isn't really "trained" in the usual sense of the term, so we don't need to separate a test and train set, we just need to make sure that the target graph we're trying to predict is not one of the "training" graphs that we calculate spectral distances to, otherwise the self-matching of the graph will always predict zero distance. 
So we first define a simple function which splits a graph out of the training set and returns the rest. I'd use scikit-learn functions for this, but our "data" is really a list of NetworkX objects, not a numeric matrix. End of explanation """ knn = [1, 3, 5, 7, 9, 11, 15] for nn in knn: gclf = skm.GraphEigenvalueNearestNeighbors(n_neighbors=nn) test_pred = [] for ix in range(0, len(train_graphs)): train_loo_graphs, train_loo_labels, test_graph, test_label = leave_one_out_cv(ix, train_graphs, train_labels) gclf.fit(train_loo_graphs, train_loo_labels) test_pred.append(gclf.predict([test_graph])[0]) print("Accuracy on test for %s neighbors: %0.3f" % (nn, accuracy_score(train_labels, test_pred))) """ Explanation: For a first try, this is pretty darned good, I think. Almost 77% of the time, we can correctly predict whether a seriation solution from one of two models belongs to the correct model. It would be nice to get that accuracy to near perfect if possible, however, because the goal here is to examine the fit between an empirical solution and a number of models, and the empirical solution will never have arisen from one of our pure theoretical models. Leave-One-Out Cross Validation for Selecting Optimal K Before working on more complex approaches, let's simply make sure we're choosing the optimal number of neighbors for the k-Nearest Neighbors classifier. End of explanation """
fionapigott/Data-Science-45min-Intros
k-means-101/K-means-Clustering.ipynb
unlicense
# Import some python libraries that we'll need import matplotlib.pyplot as plt import random import math import sys %matplotlib inline def make_data(n_points, n_clusters=2, dim=2, sigma=1): x = [[] for i in range(dim)] for i in range(n_clusters): for d in range(dim): x[d].extend([random.gauss(i*3,sigma) for j in range(n_points)]) return x # make our synthetic data num_clusters = 2 num_points = 100 data_sample = make_data(num_points, num_clusters) # plot our synthetic data fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(*data_sample) ax.set_title("Sample dataset, {} points per cluster, {} clusters".format(num_points,num_clusters)) """ Explanation: An Introduction to K-Means Clustering by Scott Hendrickson & Fiona Pigott K-Means is for learning unknown categories K-means is a machine learning technique for learning unknown categories--in other words, a technique for unsupervised learning. K-means tries to group n-dimensional data into clusters, where the actual position of those clusters is unknown. Basic Idea From the Wikipedia article on k-means clustering: "k-means clustering aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster" Basically, k-means assumes that for some sensible distance metric, it's possible to partition data into groups around the "center" ("centroid") of different naturally separated clusters in the data. This concept can be very useful for separating datasets that came from separate generative processes, where the location of each dataset is pretty much unknown. It only works well if there is an expectation that the datasets are clustered around their means, and that the means would reasonably be different. A classic example of where k-means would not separate datasets well is when the datasets have different distibutions, but a similar mean. 
Not a good problem to apply k-means to: <img src="files/ring_clusters.png" style="width: 300px;"> Good problem to apply k-means to: <img src="files/kmeans_clusters.jpg" style="width: 300px;"> Question: Is there an underlying structure to my data? Does my data have defined categories that I don't know about? How can I identify which datapoint belongs to which category? Solution: For a selection of centers (centroids) of data clusters (and we'll talk about how to choose centroids), for each data point, label that data point with the centroid it is closest to. Algorithm 0) Have a dataset that you want to sort into clusters 1) Choose a number of clusters that you're going to look for (there are ways to optimize this, but you have to fix it for the next step) 2) Guess at cluster membership for each data point (basically, for each data point, randomly assign it to a cluster) 3) Find the center ("centroid") of each cluster (with the data points that you've assigned to it) 4) For each centroid, find which data points are closest to it, and assign those data points to its cluster 5) Repeat 3 & 4 (re-evaluate centroids based on new cluster membership, then re-assign clusters based on new centroids) 0) A cloud of data in two dimensions Setting up an example of data that could be separated by k-means: First, we'll generate a synthetic dataset from two different spherical gaussian distributions, setting the spacing so that clouds of data overlap a little. End of explanation """ # plot our synthetic data fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(data_sample[0][0:100], data_sample[1][0:100]) ax.scatter(data_sample[0][100:200], data_sample[1][100:200]) ax.set_title("Sample dataset, {} points per cluster, {} clusters".format(num_points,num_clusters)) """ Explanation: 1) How might we identify the two clusters? 
We're going to set $k = 2$ before trying to use k-means to separate the clusters We happen to know that $k = 2$ because we just made up this data with two distributions. I'll talk a little at the end about how to guess $k$ for a real-world dataset. Because we created this example, we know the "truth" We know which data came from which distribution (this is what k-means is trying to discover). Here's the truth, just to compare: End of explanation """ # each cluster membership is going to have a color label ("red" cluster, "orange" cluster, etc) co = ["red", "orange", "yellow", "green", "purple", "blue", "black","brown"] def guess_clusters(x, n_clusters): # req co list of identifiers for i in range(len(x[0])): return [ co[random.choice(range(n_clusters))] for i in range(len(x[0]))] # now guess the cluster membership--simply by randomly assigning a cluster label # "orange" or "red" to each of the data points membership_2 = guess_clusters(data_sample,2) fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(*data_sample, color=membership_2) ax.set_title("Data set drawn from 2 different 2D Gaussian distributions") """ Explanation: 2) Start by guessing the cluster membership In this case, guessing means "randomly assign cluster membership." There are other heuristics that you could use to make an initial guess, but we won't get into those here. 
End of explanation """ def centroid(x): return [[sum(col)/float(len(x[0]))] for col in x] # function to select members of only one cluster def select_members(x, membership, cluster): return [ [i for i,label in zip(dim, membership) if label == cluster] for dim in x ] fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(*select_members(data_sample, membership_2, "red"), color="red") ax.scatter(*centroid(select_members(data_sample, membership_2, "red")), color="black", marker="*", s = 100) ax.set_title("Centroid of the 'red' cluster (black star)") fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(*select_members(data_sample, membership_2, "orange"), color="orange") ax.scatter(*centroid(select_members(data_sample, membership_2, "orange")), color="black", marker="*", s = 100) ax.set_title("Centroid of the 'orange' cluster (black star)") """ Explanation: 3) Find the center of a set of data points We'll need a way of determining the center of a set of data, after we make a guess at cluster membership. In this case, we'll find the centers of the two clusters that we guessed about. End of explanation """ def distance(p1, p2): # odd... 
vectors are lists of lists with only 1 element in each dim return math.sqrt(sum([(i[0]-j[0])**2 for i,j in zip(p1, p2)])) # here's the distance between two points, just to show how it works print("Distance between (-1,-1) and (2,3): {}".format(distance([[-1],[-1]],[[2],[3]]))) def reassign(x, centriods): membership = [] for idx in range(len(x[0])): min_d = sys.maxsize cluster = "" for c, vc in centriods.items(): dist = distance(vc, [[t[idx]] for t in x]) if dist < min_d: min_d = dist cluster = c membership.append(cluster) return membership cent_2 = {i:centroid(select_members(data_sample, membership_2, i)) for i in co[:2]} membership_2 = reassign(data_sample, cent_2) fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(*data_sample, color=membership_2) ax.scatter(*cent_2["red"], color="black", marker="*", s = 360) ax.scatter(*cent_2["orange"], color="black", marker="*", s = 360) ax.scatter(*cent_2["red"], color="red", marker="*", s = 200) ax.scatter(*cent_2["orange"], color="orange", marker="*", s = 200) """ Explanation: 4) Update membership of points to closest centroid Find distances (will use to find distances to the centroid, in this case): End of explanation """ # function def get_centroids(x, membership): return {i:centroid(select_members(x, membership, i)) for i in set(membership)} # redefine with total distance measure def reassign(x, centroids): membership, scores = [], {} # step through all the vectors for idx in range(len(x[0])): min_d, cluster = sys.maxsize, None # set the min distance to a large number (we're about to minimize it) for c, vc in centroids.items(): # get the sum of the distances from each point in the cluster to the centroids dist = distance(vc, [[t[idx]] for t in x]) if dist < min_d: min_d = dist cluster = c # score is the minumum distance from each point in a cluster to the centroid of that cluster scores[cluster] = min_d + scores.get(cluster, 0) membership.append(cluster) # retrun the membership 
& the sum of all the score over all of the clusters return membership, sum(scores.values())/float(len(x[0])) def k_means(data, k): # start with random distribution membership = guess_clusters(data, k) score, last_score = 0.0, sys.maxsize while abs(last_score - score) > 1e-7: last_score = score c = get_centroids(data, membership) membership, score = reassign(data, c) #print(last_score - score) return membership, c, score mem, cl, s = k_means(data_sample, 2) fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.scatter(*data_sample, color = mem) for i, pt in cl.items(): ax.scatter(*pt, color="black", marker="*", s = 16*8) ax.set_title("Clustering from k-means") """ Explanation: 5) Put it all together so that we can iterate Now we're going to iterate--assign clusters, finda centroid, reassign clusters--until the centroid positions stop changing very much. End of explanation """ err = [] trial_ks = range(1,5) results = {} for k in trial_ks: mem_2, cl_2, s_2 = k_means(data_sample, k) results[k] = mem_2 err.append(s_2) f, axes = plt.subplots(1, len(trial_ks), sharey=True, figsize = (18,4)) for i,k in enumerate(trial_ks): axes[i].set_aspect('equal') axes[i].set_title("k-means results with k = {} \n error = {:f}".format(k, err[i])) axes[i].scatter(*data_sample, color = results[k]) # plot the error as a function of the number of clusters fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.plot(trial_ks,err,'o--') ax.set_title("Error as a funtion of k") ax.xaxis.set_ticks(trial_ks) _ = ax.set_xlabel("number of clusters (k)") # a different example, this time with 4 clusters ex4 = make_data(200, 4) err4 = [] trial_ks_4 = range(1,9) results_4 = {} for k in trial_ks_4: mem_ex4, cl_ex4, s_ex4 = k_means(ex4, k) results_4[k] = mem_ex4 err4.append(s_ex4) f, axes = plt.subplots(2, int(len(trial_ks_4)/2), sharey=True, figsize = (18,11)) for i,k in enumerate(trial_ks_4): axes[int(i >= 4)][i%4].set_aspect('equal') 
axes[int(i >= 4)][i%4].set_title("k-means results with k = {} \n error = {:f}".format(k, err4[i])) axes[int(i >= 4)][i%4].scatter(*ex4, color = results_4[k]) # plot the error as a function of the number of clusters fig = plt.figure(figsize = [6,6]) ax = fig.add_subplot(111) ax.set_aspect('equal') ax.plot(trial_ks_4,err4,'o--') ax.set_title("Error as a funtion of k") ax.xaxis.set_ticks(trial_ks_4) _ = ax.set_xlabel("number of clusters (k)") """ Explanation: K-means with real data Figuring out how many clusters to look for (that pesky "Step 1") Now, one thing we haven't covered yet is how to decide on the number of clusters to look for in the first place. There are several different heuristics that we can use to figure out what the "best" number of clusters is (we go into this more in https://github.com/DrSkippy/Data-Science-45min-Intros/tree/master/choosing-k-in-kmeans). The one heuristic that we're going to talk about here is finding the "knee" in the k-means error function. The error function: In this case, the error function is simply the sum of all of the distances from each data point to its assigned cluster, summed over all of the clusters. The further each data point is from its assigned cluster, the larger this error score is. Look for the "knee": When I say "knee" I mean to look for a bend in the graph of the error score vs $k$. The idea is to find the place where you get a smaller decrease in the error (distance from each data point to a centroid) for every increase in the number of clusters ($k$). End of explanation """
mayank-johri/LearnSeleniumUsingPython
Section 1 - Core Python/Chapter 11 - Exceptions/Chapter13_Exceptions.ipynb
gpl-3.0
print (10/0) """ Explanation: Chapter 13: Exceptions When a failure occurs in the program (such as division by zero, for example) at runtime, an exception is generated. If the exception is not handled, it will be propagated through function calls to the main program module, interrupting execution. End of explanation """ try: print (1/0) except ZeroDivisionError: print ('Error trying to divide by zero.') try: print (1/0) except: print ('Error trying to divide by zero.') try: print (1/0) except Exception: # Please put the ex.message in some logfile instead of on the console print ('Error trying to divide by zero.', Exception) """ Explanation: The try instruction allows exception handling in Python. If an exception occurs in a block marked by try, it is possible to handle the exception through the instruction except. It is possible to have many except blocks for the same try block. End of explanation """ import sys try: print("... TESTing.. ") with open('myfile.txt', "w") as myFile: for a in ["a", "b", "c"]: myFile.write(str(a)) for a in [1,2,3,4,5,"6"]: myFile.write(str(a)) f = open('myfile.txt') s = f.readline() i = int(s.strip()) # raise Exception("Test Exception") except OSError as err: print("OS error: {0}".format(err)) except ValueError: print("Could not convert data to an integer.") raise except: print("Unexpected error:", sys.exc_info()) try: print(1/0) except: print("Hallo, Ja") raise int("2A") # -*- coding: utf-8 -*- """ Created on Fri Aug 5 08:50:42 2016 @author: mayankjohri@gmail.com """ import traceback # Try to get a file name try: fn = input('File Name (temp.txt): ').strip() # Numbering lines for i, s in enumerate(open(fn)): print( i + 1,"> ", s,) # If an error happens except: # Show it on the screen trace = traceback.format_exc() # And save it on a file print ('An error happened:\n', trace) with open("trace_asd.log", "a+") as file: file.write(trace) # file('trace_asd.log', 'a').write(trace) # end the program # raise SystemExit """ Explanation: If 
except receives the name of an exception, only that exception will be handled. If no exception name is passed as a parameter, all exceptions will be handled. Example: End of explanation """ def do_some_stuff(): print("Doing some stuff") def do_some_stuff_e(): print("Doing some stuff and will now raise error") raise ValueError('A very specific bad thing happened') def rollback(): print("reverting the changes") def commit(): print("commiting the changes") print("Testing") try: # do_some_stuff() do_some_stuff_e() except: rollback() # raise else: commit() finally: print("Exiting out") # #### ERROR Condtion # Testing # try block # Doing some stuff and will now raise error # except block # reverting the changes # Finally block # Exiting out # NO ERROR # Testing # Try block # Doing some stuff # else block # commiting the changes # finally block # Exiting out """ Explanation: The module traceback offers functions for dealing with error messages. The function format_exc() returns the output of the last exception formatted in a string. The handling of exceptions may have an else block, which will be executed when no exception occurs and a finally block, which will be executed anyway, whether an exception occurred or <span class="note" title="The finally declaration may be used for freeing resources that were used in the try block, such as database connections or open files.">not</span>. New types of exceptions may be defined through inheritance of the class Exception. Since version 2.6, the instruction with is available, that may replace the combination of try / finally in many situations. It is possible to define an object that will be used during the with block execution. The object will support the context management protocol, which means that it will need to have an __enter__() method, which will be executed at the beginning of the block, and another called __exit__(), which will be called at the end of the block. 
Example: End of explanation """ class HostNotFound(Exception): def __init__( self, host ): self.host = host Exception.__init__(self, 'Host Not Found exception: missing %s' % host) try: raise HostNotFound("gitpub.com") except HostNotFound as hcf: # Handle exception. print (hcf) # -> 'Host Not Found exception: missing taoriver.net' print (hcf.host) # -> 'gitpub.net' try: fh = open("nonexisting.txt", "r") try: fh.write("This is my test file for exception handling!!") print(1/0) except: print("Caught error message") finally: print ("Going to close the file") fh.close() except IOError: print ("Error: can\'t find file or read data") try: # fh = open("nonexisting.txt", "r") try: fh.write("This is my test file for exception handling!!") print(1/0) except: print("Caught error message") raise finally: print ("Going to close the file") fh.close() except: print ("Error: can\'t find file or read data") try: # fh = open("nonexisting.txt", "r") try: # fh.write("This is my test file for exception handling!!") print(1/0) except: print("Caught error message") finally: print ("Going to close the file") # fh.close()print(1/0) print(1/0) except : print ("Error: can\'t find file or read data") raise """ Explanation: Writing Exception Classes End of explanation """ import inspect inspect.getclasstree(inspect.getmro(Exception)) # https://stackoverflow.com/questions/18296653/print-the-python-exception-error-hierarchy def classtree(cls, indent=0): print ('.' 
* indent, cls.__name__) for subcls in cls.__subclasses__(): classtree(subcls, indent + 3) classtree(BaseException) """ Explanation: Exception hierarchy The class hierarchy for built-in exceptions is: ``` BaseException +-- SystemExit +-- KeyboardInterrupt +-- GeneratorExit +-- Exception +-- StopIteration +-- StopAsyncIteration +-- ArithmeticError | +-- FloatingPointError | +-- OverflowError | +-- ZeroDivisionError +-- AssertionError +-- AttributeError +-- BufferError +-- EOFError +-- ImportError +-- LookupError | +-- IndexError | +-- KeyError +-- MemoryError +-- NameError | +-- UnboundLocalError +-- OSError | +-- BlockingIOError | +-- ChildProcessError | +-- ConnectionError | | +-- BrokenPipeError | | +-- ConnectionAbortedError | | +-- ConnectionRefusedError | | +-- ConnectionResetError | +-- FileExistsError | +-- FileNotFoundError | +-- InterruptedError | +-- IsADirectoryError | +-- NotADirectoryError | +-- PermissionError | +-- ProcessLookupError | +-- TimeoutError +-- ReferenceError +-- RuntimeError | +-- NotImplementedError | +-- RecursionError +-- SyntaxError | +-- IndentationError | +-- TabError +-- SystemError +-- TypeError +-- ValueError | +-- UnicodeError | +-- UnicodeDecodeError | +-- UnicodeEncodeError | +-- UnicodeTranslateError +-- Warning +-- DeprecationWarning +-- PendingDeprecationWarning +-- RuntimeWarning +-- SyntaxWarning +-- UserWarning +-- FutureWarning +-- ImportWarning +-- UnicodeWarning +-- BytesWarning +-- ResourceWarning ``` End of explanation """
tBuLi/symfit
docs/examples/ex_mexican_hat.ipynb
mit
from symfit import Parameter, Variable, Model, Fit, solve, diff, N, re from symfit.core.minimizers import DifferentialEvolution, BFGS import numpy as np import matplotlib.pyplot as plt """ Explanation: Global minimization: Skewed Mexican hat In this example we will demonstrate the ease of performing global minimization using symfit. In order to do this we will have a look at a simple skewed mexican hat potential, which has a local minimum and a global minimum. We will then use DifferentialEvolution to find the global minimum. End of explanation """ x = Parameter('x') x.min, x.max = -100, 100 y = Variable('y') model = Model({y: x**4 - 10 * x**2 + 5 * x}) # Skewed Mexican hat print(model) """ Explanation: First we define a model for the skewed mexican hat. End of explanation """ xdata = np.linspace(-4, 4, 201) ydata = model(x=xdata).y plt.axhline(0, color='black') plt.axvline(0, color='black') plt.plot(xdata, ydata, label=r'$f(x)$') plt.xlabel('x') plt.ylabel('f(x)') plt.ylim(1.1 * ydata.min(), 1.1 * ydata.max()) plt.legend() """ Explanation: Let us visualize what this potential looks like. End of explanation """ sol = solve(diff(model[y], x), x) # Give numerical value sol = [re(N(s)) for s in sol] sol """ Explanation: Using sympy, it is easy to solve the solution analytically, by finding the places where the gradient is zero. End of explanation """ fit = Fit(model) fit_result = fit.execute() print('exact value', sol[1]) print('num value ', fit_result.value(x)) """ Explanation: Without providing any initial guesses, symfit finds the local minimum. This is because the initial guess is set to 1 by default. End of explanation """ fit = Fit(model, minimizer=DifferentialEvolution) fit_result = fit.execute() print('exact value', sol[2]) print('num value ', fit_result.value(x)) """ Explanation: Let's use DifferentialEvolution instead. 
End of explanation """ fit = Fit(model, minimizer=[DifferentialEvolution, BFGS]) fit_result = fit.execute() print('exact value', sol[2]) print('num value ', fit_result.value(x)) """ Explanation: Using DifferentialEvolution, we find the correct global minimum. However, it is not exactly the same as the analytical solution. This is because DifferentialEvolution is expensive to perform, and therefore does not solve to high precision by default. We could demand a higher precission from DifferentialEvolution, but this isn't worth the high computational cost. Instead, we will just tell symfit to perform DifferentialEvolution, followed by BFGS. End of explanation """
PhonologicalCorpusTools/PolyglotDB
examples/tutorial/tutorial_1_first_steps.ipynb
mit
from polyglotdb import CorpusContext import polyglotdb.io as pgio corpus_root = '/mnt/e/Data/pg_tutorial' """ Explanation: Tutorial 1: First steps Downloading the tutorial corpus The tutorial corpus used here is a version of the LibriSpeech test-clean subset, forced aligned with the Montreal Forced Aligner (tutorial corpus download link). Extract the files to somewhere on your local machine. Importing the tutorial corpus We begin by importing the necessary classes and functions from polyglotdb as well as defining variables. Change the path to reflect where the tutorial corpus was extracted to on your local machine. End of explanation """ parser = pgio.inspect_mfa(corpus_root) parser.call_back = print # To show progress output with CorpusContext('pg_tutorial') as c: c.load(parser, corpus_root) """ Explanation: The import statements get the necessary classes and functions for importing, namely the CorpusContext class and the polyglot IO module. CorpusContext objects are how all interactions with the database are handled. The CorpusContext is created as a context manager in Python (the with ... as ... pattern), so that clean up and closing of connections are automatically handled both on successful completion of the code as well as if errors are encountered. The IO module handles all import and export functionality in polyglotdb. The principle functions that a user will encounter are the inspect_X functions that generate parsers for corpus formats. In the above code, the MFA parser is used because the tutorial corpus was aligned using the MFA. See Importing corpora for more information on the inspect functions and parser objects they generate for various formats. 
Once the proper path to the tutorial corpus is set, it can be imported via the following code: End of explanation """ with CorpusContext('pg_tutorial') as c: c.reset() """ Explanation: Important If during the running of the import code, a neo4j.exceptions.ServiceUnavailable error is raised, then double check that the pgdb database is running. Once polyglotdb is installed, simply call pgdb start, assuming pgdb install has already been called. See the relevant documentation for more information. Resetting the corpus If at any point there's some error or interruption in import or other stages of the tutorial, the corpus can be reset to a fresh state via the following code: End of explanation """ with CorpusContext('pg_tutorial') as c: print('Speakers:', c.speakers) print('Discourses:', c.discourses) q = c.query_lexicon(c.lexicon_phone) q = q.order_by(c.lexicon_phone.label) q = q.columns(c.lexicon_phone.label.column_name('phone')) results = q.all() print(results) """ Explanation: Warning Be careful when running this code as it will delete any and all information in the corpus. For smaller corpora such as the one presented here, the time to set up is not huge, but for larger corpora this can result in several hours worth of time to reimport and re-enrich the corpus. 
Testing some simple queries To ensure that data import completed successfully, we can print the list of speakers, discourses, and phone types in the corpus, via: End of explanation """ from polyglotdb.query.base.func import Count, Average with CorpusContext('pg_tutorial') as c: q = c.query_graph(c.phone).group_by(c.phone.label.column_name('phone')) results = q.aggregate(Count().column_name('count'), Average(c.phone.duration).column_name('average_duration')) for r in results: print('The phone {} had {} occurrences and an average duration of {}.'.format(r['phone'], r['count'], r['average_duration'])) """ Explanation: A more interesting summary query is perhaps looking at the count and average duration of different phone types across the corpus, via: End of explanation """
landlab/landlab
notebooks/tutorials/hillslope_geomorphology/taylor_diffuser/taylor_diffuser.ipynb
mit
import numpy as np import matplotlib.pyplot as plt from landlab import RasterModelGrid from landlab.components import TaylorNonLinearDiffuser """ Explanation: <a href="http://landlab.github.io"><img style="float: left" src="../../../landlab_header.png"></a> Component Overview: TaylorNonLinearDiffuser <hr> <small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small> <hr> Introduction and background This tutorial introduces the TaylorNonLinearDiffuser component, which we'll refer to here as "TNLD". The TNLD component models the process of downslope soil creep and its role in modifying topography. Inspired by Ganti et al. (2012), it uses a slope-dependent flux law with a user-specified number of terms in a Taylor expansion described in that paper. The component is described (as one element in the terrainBento package) in Barnhart et al. (2019), which is the appropriate paper to cite for it. Theory Consider a topographic surface in which the elevation at any time $t$ and horizontal position $(x,y)$ is $\eta (x,y,t)$. Let $\mathbf{q}_s$ be a 2D vector that represents the rate of soil volume flow per unit slope width (with dimensions of length squared per time; we'll assume that $\mathbf{q}_s$ represents a "bulk" flux that includes pore spaces between soil grains). In the absence of any "local" input sources (such as weathering of rock) or output (such as removal by wash erosion), conservation of mass dictates that: $$\frac{\partial \eta}{\partial t} = -\nabla \cdot \mathbf{q}_s$$ The TNLD component represents the soil flux as: $$\mathbf{q}_s = D \mathbf{S} [ 1 + (S/S_c)^2 + (S/S_c)^4 + ... + (S/S_c)^2(n-1) ]$$ where $\mathbf{S} = -\nabla \eta$ is the downslope topographic gradient, and $S$ is its magnitude. Parameter $D$ is a diffusion-like coefficient with dimensions of length squared per time. 
The above can be written slightly more compactly: $$\mathbf{q}s = D \mathbf{S} \left[1 + \sum{i=1}^N \left( \frac{S}{S_c}\right)^{2i}\right]$$ where $i$ is the number of additional terms desired. If $i=0$, the expression reduces to plain old linear diffusion (see LinearDiffuser). The use of a truncated Taylor series is meant to approximate the Andrews-Bucknam transport function (e.g., Roering et al., 1999) while avoiding that equation's blow-up at $S=S_c$; the idea of using a truncated Taylor series comes from Ganti et al. (2012). Numerical implementation The component uses an explicit finite-volume solution method. Soil flux values are calculated from the gradient values on the active links, using the grid method calc_grad_at_link. Flux divergence is then calculated using the grid method calc_flux_div_at_node. The code then updates the elevation field. An optional dynamic timestep capability will check the local Courant condition (which can vary in time and space when nonlinear terms are included) and sub-divide the user-specified time step as needed to ensure stability. Examples Needed imports Like all Landlab components, TNLD requires a grid object on which to operate, so for this example we'll import RasterModelGrid as well as the component itself. 
End of explanation """ # define parameters L = 50.0 # distance from base to ridgeline, m dx = 2.0 # node spacing, m D = 0.01 # diffusion-like coefficient, m2/y U = 0.0001 # uplift rate, m/y H = 100.0 # initial soil thickness, m num_steps = 20000 # number of time steps # time step size (calculate using Courant condition for linear diffusion) dt = 0.1 * dx * dx / D # prediction predicted_crest_height = 0.5 * (U / D) * L * L print("Crest height should be " + str(predicted_crest_height)) # create grid grid = RasterModelGrid((3, 51), xy_spacing=dx) grid.set_closed_boundaries_at_grid_edges(False, True, False, True) # create elevation field elev = grid.add_zeros("topographic__elevation", at="node") # this is eta # instantiate component tnld = TaylorNonLinearDiffuser(grid, linear_diffusivity=D, nterms=1) # run the model in a time loop with uplift applied for i in range(num_steps): elev[grid.core_nodes] += U * dt tnld.run_one_step(dt) midrow = np.arange(51, 102, dtype=int) plt.plot(grid.x_of_node[midrow], elev[midrow]) plt.xlabel("Distance (m)") plt.ylabel("Elevation (m)") print(np.amax(elev)) """ Explanation: Example 1: equilibrium hillslope profile with linear diffusion For the first example, we'll use a long and skinny grid to effectively create a 1D domain. We'll test the ability of TNLD to reduce to a simple linear, depth-independent diffusive model when $i=0$. We'll impose (relative) rock uplift by raising the interior of the domain at a specified rate $U$ relative to the fixed boundary nodes on either side. The expectation is that: $$\frac{d\eta}{dx} = -\frac{U}{D}x$$ where $x$ is distance from the ridge top (because the ridge top will form in the middle of the domain, $x<0$ on the left and $x>0$ on the right). Integrating this, we get $$\eta = -\frac{U}{2D} x^2 + C$$ We can evaluate the integration constant by noting that $\eta = 0$ at $x = \pm L$, where $L$ is the distance from base to crest. 
Therefore, $$\boxed{\eta = \frac{U}{2D} \left( L^2 - x^2 \right)}$$ We'll test this using a hill that is 100 m long (51 nodes, two of which are fixed boundaries, with 2 m spacing between them; 50 m from base to crest on each side), a soil layer that is much thicker than the characteristic decay depth $H^*$, a transport coefficient of 0.01 m$^2$/y, and an uplift rate of 0.0001 m/y. With these parameters, the predicted ridge height (at $x=0$) is calculated below. End of explanation """ U = 0.0005 # uplift rate, m/yr Sc = 0.6 # critical slope gradient, m/m num_steps = 2000 # number of time steps # create grid grid = RasterModelGrid((3, 51), xy_spacing=dx) grid.set_closed_boundaries_at_grid_edges(False, True, False, True) # create fields elev = grid.add_zeros("topographic__elevation", at="node") # this is eta # instantiate component tnld = TaylorNonLinearDiffuser( grid, linear_diffusivity=D, slope_crit=Sc, dynamic_dt=True, nterms=2 ) # run the model in a time loop with uplift applied for i in range(num_steps): elev[grid.core_nodes] += U * dt tnld.run_one_step(dt) plt.plot(grid.x_of_node[midrow], elev[midrow]) plt.xlabel("Distance (m)") plt.ylabel("Elevation (m)") """ Explanation: Example 2: Nonlinear behavior When we include nonlinear terms in the transport law, we expect to see slopes that become more planar in character. We'll test this by setting a critical slope value $S_c = 0.6$ (about 31$^\circ$), and using a higher uplift rate. We'll have two terms, one linear and one cubic. We will also invoke the dynamic_dt option, which allows the component to subdivide each "global" timestep if needed for numerical stability: a useful thing to do because now our Courant condition varies according to slope gradient. 
End of explanation """ active_link_midpts = ( grid.x_of_node[grid.node_at_link_tail[grid.active_links]] + 0.5 * dx ) plt.plot(active_link_midpts, grid.at_link["soil__flux"][grid.active_links]) plt.grid(True) plt.xlabel("Distance (m)") plt.ylabel("Soil flux (m2/yr)") """ Explanation: The resulting hill is taller (due to the higher uplift rate) and no longer has uniform convexity. How do we know whether it has reached equilibrium? One way is to inspect the soil flux: it should increase linearly with $x$, and be zero at the crest. The values at the base of the slope should equal slope length times uplift rate, or 50 m x 0.0005 m/yr = 0.025 m$^2$/yr. End of explanation """
makism/dyfunconn
tutorials/EEG - 4 - Dynamic Connectivity (Group Analysis).ipynb
bsd-3-clause
import numpy as np import tqdm raw_eeg_eyes_open = np.load("data/eeg_eyes_opened.npy") raw_eeg_eyes_closed = np.load("data/eeg_eyes_closed.npy") num_trials, num_channels, num_samples = np.shape(raw_eeg_eyes_open) read_trials = 10 eeg_eyes_open = raw_eeg_eyes_open[0:read_trials, ...] eeg_eyes_closed = raw_eeg_eyes_closed[0:read_trials, ...] """ Explanation: In this short tutorial, we will build and expand on the previous tutorials by computing the dynamic connectivity, using Time-Varying Functional Connectivity Graphs. In the near future, the standard method of "sliding window" will be supported. Load data End of explanation """ import warnings warnings.simplefilter(action='ignore', category=FutureWarning) from dyconnmap import tvfcg from dyconnmap.fc import IPLV fb = [7.0, 13.0] cc = 4.0 fs = 160.0 step = 80 estimator = IPLV(fb, fs) """ Explanation: Dynamic connectivity Prepare and configure the estimator object End of explanation """ X = np.squeeze(eeg_eyes_open[0]) fcgs = tvfcg(X, estimator, fb, fs, cc, step) fcgs_eyes_open = np.array(np.real(fcgs)) for i in tqdm.tqdm(range(1, read_trials)): X = np.squeeze(eeg_eyes_open[i]) fcgs = tvfcg(X, estimator, fb, fs, cc, step) fcgs_eyes_open = np.vstack([fcgs_eyes_open, np.real(fcgs)]) """ Explanation: Process condition "eyes open" End of explanation """ X = np.squeeze(eeg_eyes_closed[0]) fcgs = tvfcg(X, estimator, fb, fs, cc, step) fcgs_eyes_closed = np.array(np.real(fcgs)) for i in tqdm.tqdm(range(1, read_trials)): X = np.squeeze(eeg_eyes_closed[i]) fcgs = tvfcg(X, estimator, fb, fs, cc, step) fcgs_eyes_closed = np.vstack([fcgs_eyes_closed, np.real(fcgs)]) """ Explanation: Process condition "eyes closed" End of explanation """ from dyconnmap.cluster import NeuralGas num_fcgs_eo, _, _ = np.shape(fcgs_eyes_open) num_fcgs_ec, _, _ = np.shape(fcgs_eyes_closed) fcgs = np.vstack([fcgs_eyes_open, fcgs_eyes_closed]) num_fcgs, num_channels, num_channels = np.shape(fcgs) triu_ind = np.triu_indices_from(np.squeeze(fcgs[0, ...]), 
k=1) fcgs = fcgs[:, triu_ind[0], triu_ind[1]] rng = np.random.RandomState(0) mdl = NeuralGas(n_protos=5, rng=rng).fit(fcgs) encoding, symbols = mdl.encode(fcgs) """ Explanation: FCμstates / Clustering End of explanation """ grp_dist_eo = symbols[0:num_fcgs_eo] grp_dist_ec = symbols[num_fcgs_eo:] """ Explanation: Separate the encoded symbols based on their original groupings End of explanation """ h_grp_dist_eo = np.histogram(grp_dist_eo, bins=mdl.n_protos, normed=True) h_grp_dist_ec = np.histogram(grp_dist_ec, bins=mdl.n_protos, normed=True) import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(12, 6)) ind = np.arange(mdl.n_protos) p1 = ax.bar(ind - 0.125, h_grp_dist_ec[0], 0.25, label='Eyes Closed') p2 = ax.bar(ind + 0.125, h_grp_dist_eo[0], 0.25, label='Eyes Open') ax.legend() ax.set_xlabel('Symbol Index') ax.set_ylabel('Hits %') ax.set_xticks(np.arange(mdl.n_protos)) plt.show() """ Explanation: Plot End of explanation """ protos_mtx = np.zeros((mdl.n_protos, 64, 64)) for i in range(mdl.n_protos): symbol_state = np.zeros((64, 64)) symbol_state[triu_ind] = mdl.protos[i, :] symbol_state = symbol_state + symbol_state.T np.fill_diagonal(symbol_state, 1.0) protos_mtx[i, :, :] = symbol_state mtx_min = np.min(protos_mtx) mtx_max = np.max(protos_mtx) f, ax = plt.subplots(ncols=mdl.n_protos, figsize=(12, 12)) for i in range(mdl.n_protos): cax = ax[i].imshow(np.squeeze(protos_mtx[i,...]), vmin=mtx_min, vmax=mtx_max, cmap=plt.cm.Spectral) ax[i].set_title('#{0}'.format(i)) # move the colorbar to the side ;) f.subplots_adjust(right=0.8) cbar_ax = f.add_axes([0.82, 0.445, 0.0125, 0.115]) cb = f.colorbar(cax, cax=cbar_ax) cb.set_label('Imaginary PLV') """ Explanation: Convert state prototypes to symmetric matrices and plot them End of explanation """ grp_sym_eo = np.array_split(grp_dist_eo, 10, axis=0) grp_sym_ec = np.array_split(grp_dist_ec, 10, axis=0) """ Explanation: Separate symbols per subject Now we would like to analyze the symbols per subject, per group. 
End of explanation """ subj1_eyes_open = grp_sym_eo[0] subj1_eyes_closed = grp_sym_ec[0] from dyconnmap.ts import markov_matrix markov_matrix_eo = markov_matrix(subj1_eyes_open) markov_matrix_ec = markov_matrix(subj1_eyes_closed) from mpl_toolkits.axes_grid1 import ImageGrid f = plt.figure(figsize=(8, 6)) grid = ImageGrid(f, 111, nrows_ncols=(1,2), axes_pad=0.15, share_all=True, cbar_location="right", cbar_mode="single", cbar_size="7%", cbar_pad=0.15, ) im = grid[0].imshow(markov_matrix_eo, vmin=0.0, vmax=1.0, cmap=plt.cm.Spectral) grid[0].set_xlabel('Prototype') grid[0].set_ylabel('Prototype') grid[0].set_title('Eyes Open') im = grid[1].imshow(markov_matrix_ec, vmin=0.0, vmax=1.0, cmap=plt.cm.Spectral) grid[1].set_xlabel('Prototype') grid[1].set_ylabel('Prototype') grid[1].set_title('Eyes Close') cb = grid[1].cax.colorbar(im) cax = grid.cbar_axes[0] axis = cax.axis[cax.orientation] axis.label.set_text("Transition Probability") plt.show() from dyconnmap.ts import transition_rate, occupancy_time tr_eo = transition_rate(subj1_eyes_open) tr_ec = transition_rate(subj1_eyes_closed) print(f""" Transition rate =============== Eyes open: {tr_eo:.3f} Eyes closed: {tr_ec:.3f} """) occ_eo = occupancy_time(subj1_eyes_open)[0] occ_ec = occupancy_time(subj1_eyes_closed)[0] print(""" Occupancy time ============== State \t 0 \t 1 \t 2 \t 3 \t 4 ----- Eyes open \t {0:.3f} \t {1:.3f} \t {2:.3f} \t {3:.3f} \t {4:.3f} Eyes closed \t {5:.3f} \t {6:.3f} \t {7:.3f} \t {8:.3f} \t {9:.3f} """.format(*occ_eo, *occ_ec)) """ Explanation: Examine the first subject End of explanation """
JeffAbrahamson/MLWeek
practicum/04_features/tokenizing.ipynb
gpl-3.0
from sklearn.feature_extraction.text import CountVectorizer corpus = [ "Il est nuit. La cabane est pauvre, mais bien close.", "Le logis est plein d'ombre et l'on sent quelque chose", "Qui rayonne à travers ce crépuscule obscur.", "Des filets de pêcheur sont accrochés au mur.", "Au fond, dans l'encoignure où quelque humble vaisselle", "Aux planches d'un bahut vaguement étincelle,", "On distingue un grand lit aux longs rideaux tombants.", "Tout près, un matelas s'étend sur de vieux bancs,", "Et cinq petits enfants, nid d'âmes, y sommeillent", "La haute cheminée où quelques flammes veillent", "Rougit le plafond sombre, et, le front sur le lit,", "Une femme à genoux prie, et songe, et pâlit.", "C'est la mère. Elle est seule. Et dehors, blanc d'écume,", "Au ciel, aux vents, aux rocs, à la nuit, à la brume,", "Le sinistre océan jette son noir sanglot.", ] corpus2 = corpus[0:2] vectorizer = CountVectorizer() corpus_encoded = vectorizer.fit_transform(corpus2) print(corpus_encoded.todense()) print('----------------------------------------------------------------') print(vectorizer.vocabulary_) """ Explanation: CountVectorizer ...compte le nombre d'instances de mots. La représentation dense nous montre le vecteur de critères correspondant à chaque phrase dans corpus. Sinon, la représentation est creuse. Essaiez avec le corpus entier pour voir ce que donne un document plus important. À essayer et expliquer : * corpus_encoded.shape * corpus_encoded[0].todense() * corpus_encoded[0].data et print(corpus_encoded[0]). 
* corpus_encoded.todense()[0] * vectorizer.inverse_transform(corpus_encoded.todense()[0]) * vectorizer.transform('The dog runs quickly towards the cat.') * vectorizer.transform(['The dog runs quickly towards the cat.']) * vectorizer2 = CountVectorizer(binary=True) End of explanation """ from sklearn.metrics.pairwise import euclidean_distances print(euclidean_distances(corpus_encoded[0], corpus_encoded[1])) print(euclidean_distances(corpus_encoded[0], corpus_encoded[0])) jour = vectorizer.transform(['Il est jour. La cabane est pauvre, mais bien close.']) print(euclidean_distances(corpus_encoded[0], jour)) """ Explanation: Mots vides Ceci ne marche pas : vectorizer2 = CountVectorizer(stop_words='francais') Découvrez un peu plus. (Astuce : qu'est-ce qui est l'erreur si vous appelez fit_transform()?) Distance Les vecteurs et matrices sont creuse. Pourquoi? Quel problème aurons-nous avec la distance euclidienne? End of explanation """ from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vectorizer = TfidfVectorizer() print(corpus2[0]) print(corpus2[1]) print(tfidf_vectorizer.fit_transform(corpus2).todense()) print('--------------------------------------------------------------') print(vectorizer.fit_transform(corpus2).todense()) """ Explanation: TF-IDF End of explanation """ from sklearn.feature_extraction.text import HashingVectorizer hash_vectorizer = HashingVectorizer(n_features=6, norm=None) corpus1 = corpus[:1] print(corpus1) print(hash_vectorizer.transform(corpus1).todense()) # Trouvons les indexes non-zéro. scipy.sparse.find(hash_vectorizer.transform(corpus1)) """ Explanation: HashingVectorizer() Pourquoi disons-nous transform([corpus0]) au lieu de transform(corpus0)? Expliquez l'option norm Pourquoi n_features=6? Et si on essaie n_features=10? n_features=50? 
End of explanation """ from sklearn import datasets digits = datasets.load_digits() print('Digit:', digits.target[0]) print(digits.images[0]) print('Feature vector:\n', digits.images[0].reshape(-1, 64)) plt.figure(1, figsize=(3, 3)) plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest') plt.show() """ Explanation: Computer Vision ROC : reconnaissance optique de caractères (OCR : optical characater recognition) End of explanation """ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt import math # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, metrics # The digits dataset digits = datasets.load_digits() # The data that we are interested in is made of 8x8 images of digits, let's # have a look at the first 3 images, stored in the `images` attribute of the # dataset. If we were working from image files, we could load them using # pylab.imread. Note that each image must have the same size. For these # images, we know which digit they represent: it is given in the 'target' of # the dataset. 
images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i' % label) # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier classifier = svm.SVC(gamma=0.001) # We learn the digits on the first half of the digits num_training = int(math.floor(n_samples / 2)) num_test = int(math.ceil(n_samples / 2)) classifier.fit(data[:num_training], digits.target[:num_training]) # Now predict the value of the digit on the second half: expected = digits.target[num_test:] predicted = classifier.predict(data[num_test:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[num_test:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2, 4, index + 5) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' % prediction) plt.show() """ Explanation: Tout en même temps Ici nous prenons un exemple de sklearn. Nous chargeons toutes les images du dataset digits, nous construisons un classifieur SVM (à venir, pour l'instant, c'est juste un classifieur), et nous apprenons comment classifier les chiffres sur la première moitié des données. Puis nous testons notre classifieur avec la deuxième moitié, où nous savons toujours le target (vérité, ground truth). Étudiez (et executez) l'exemple. Comprenez tout ce que vous pouvez. Puis nous allons le discuter ensemble. 
End of explanation """ from skimage.filters import roberts, sobel, scharr, prewitt from skimage.color import rgb2gray from skimage.data import camera import skimage.io as io # Au choix : #image = camera() image = rgb2gray(io.imread('victor.jpg')) edge_roberts = roberts(image) edge_sobel = sobel(image) fig, (ax0, ax1) = plt.subplots(ncols=2) ax0.imshow(edge_roberts, cmap=plt.cm.gray) ax0.set_title('Roberts Edge Detection') ax0.axis('off') ax1.imshow(edge_sobel, cmap=plt.cm.gray) ax1.set_title('Sobel Edge Detection') ax1.axis('off') plt.tight_layout() """ Explanation: Feature extraction Nous revenons aux techniques que nous comprenons. Commençons par la détection de contours. End of explanation """ from skimage.feature import corner_harris, corner_peaks from skimage.color import rgb2gray from skimage.exposure import equalize_hist def show_corners(corners, image): """Show corners on image.""" fig = plt.figure() plt.gray() plt.imshow(image) y_corner, x_corner = zip(*corners) plt.plot(x_corner, y_corner, 'or') plt.xlim(0, image.shape[1]) plt.ylim(image.shape[0], 0) fig.set_size_inches(np.array(fig.get_size_inches()) * 1.5) plt.show() victor = io.imread('victor.jpg') plt.imshow(victor) victor = equalize_hist(rgb2gray(victor)) corners = corner_peaks(corner_harris(victor), min_distance=2) show_corners(corners, victor) """ Explanation: Et maintenant procédons à la détection de coins (corners). End of explanation """ import mahotas as mh from mahotas.features import surf image = mh.imread('victor.jpg', as_grey=True) print('The first SURF descriptor:\n{img}'.format(img=surf.surf(image)[0])) print('Extracted {num} SURF descriptors'.format(num=len(surf.surf(image)))) from sklearn import preprocessing X = np.random.rand(4,4) * 100 print(X) print('\n') print(preprocessing.scale(X)) """ Explanation: SURF End of explanation """
mne-tools/mne-tools.github.io
0.19/_downloads/2fc30e4810d35d643811cc11759b3b9a/plot_resample.ipynb
bsd-3-clause
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com> # # License: BSD (3-clause) from matplotlib import pyplot as plt import mne from mne.datasets import sample """ Explanation: Resampling data When performing experiments where timing is critical, a signal with a high sampling rate is desired. However, having a signal with a much higher sampling rate than is necessary needlessly consumes memory and slows down computations operating on the data. This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold reduction in data size, at the cost of an equal loss of temporal resolution. End of explanation """ data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' raw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data() """ Explanation: Setting up data paths and loading raw data (skip some data for speed) End of explanation """ events = mne.find_events(raw) epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True) # Downsample to 100 Hz print('Original sampling rate:', epochs.info['sfreq'], 'Hz') epochs_resampled = epochs.copy().resample(100, npad='auto') print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz') # Plot a piece of data to see the effects of downsampling plt.figure(figsize=(7, 3)) n_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data plt.plot(epochs.times[:n_samples_to_plot], epochs.get_data()[0, 0, :n_samples_to_plot], color='black') n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq']) plt.plot(epochs_resampled.times[:n_samples_to_plot], epochs_resampled.get_data()[0, 0, :n_samples_to_plot], '-o', color='red') plt.xlabel('time (s)') plt.legend(['original', 'downsampled'], loc='best') plt.title('Effect of downsampling') mne.viz.tight_layout() """ Explanation: Since downsampling reduces the timing precision of events, we recommend first extracting epochs and downsampling the Epochs object: End of explanation """ # Resample to 300 Hz 
raw_resampled_300 = raw.copy().resample(300, npad='auto') """ Explanation: When resampling epochs is unwanted or impossible, for example when the data doesn't fit into memory or your analysis pipeline doesn't involve epochs at all, the alternative approach is to resample the continuous data. This can only be done on loaded or pre-loaded data. End of explanation """ print('Number of events before resampling:', len(mne.find_events(raw))) # Resample to 100 Hz (suppress the warning that would be emitted) raw_resampled_100 = raw.copy().resample(100, npad='auto', verbose='error') print('Number of events after resampling:', len(mne.find_events(raw_resampled_100))) # To avoid losing events, jointly resample the data and event matrix events = mne.find_events(raw) raw_resampled, events_resampled = raw.copy().resample( 100, npad='auto', events=events) print('Number of events after resampling:', len(events_resampled)) """ Explanation: Because resampling also affects the stim channels, some trigger onsets might be lost in this case. While MNE attempts to downsample the stim channels in an intelligent manner to avoid this, the recommended approach is to find events on the original data before downsampling. End of explanation """
tpin3694/tpin3694.github.io
python/seaborn_pandas_timeseries_plot.ipynb
mit
import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns data = {'date': ['2014-05-01 18:47:05.069722', '2014-05-01 18:47:05.119994', '2014-05-02 18:47:05.178768', '2014-05-02 18:47:05.230071', '2014-05-02 18:47:05.230071', '2014-05-02 18:47:05.280592', '2014-05-03 18:47:05.332662', '2014-05-03 18:47:05.385109', '2014-05-04 18:47:05.436523', '2014-05-04 18:47:05.486877'], 'deaths_regiment_1': [34, 43, 14, 15, 15, 14, 31, 25, 62, 41], 'deaths_regiment_2': [52, 66, 78, 15, 15, 5, 25, 25, 86, 1], 'deaths_regiment_3': [13, 73, 82, 58, 52, 87, 26, 5, 56, 75], 'deaths_regiment_4': [44, 75, 26, 15, 15, 14, 54, 25, 24, 72], 'deaths_regiment_5': [25, 24, 25, 15, 57, 68, 21, 27, 62, 5], 'deaths_regiment_6': [84, 84, 26, 15, 15, 14, 26, 25, 62, 24], 'deaths_regiment_7': [46, 57, 26, 15, 15, 14, 26, 25, 62, 41]} df = pd.DataFrame(data, columns = ['date', 'battle_deaths', 'deaths_regiment_1', 'deaths_regiment_2', 'deaths_regiment_3', 'deaths_regiment_4', 'deaths_regiment_5', 'deaths_regiment_6', 'deaths_regiment_7']) df = df.set_index(df.date) """ Explanation: Title: Creating A Time Series Plot With Seaborn And Pandas Slug: seaborn_pandas_timeseries_plot Summary: Creating A Time Series Plot With Seaborn And Pandas Date: 2016-05-01 12:00 Category: Python Tags: Data Visualization Authors: Chris Albon Preliminaries End of explanation """ sns.tsplot([df.deaths_regiment_1, df.deaths_regiment_2, df.deaths_regiment_3, df.deaths_regiment_4, df.deaths_regiment_5, df.deaths_regiment_6, df.deaths_regiment_7], color="indianred") """ Explanation: Time Series Plot End of explanation """ sns.tsplot([df.deaths_regiment_1, df.deaths_regiment_2, df.deaths_regiment_3, df.deaths_regiment_4, df.deaths_regiment_5, df.deaths_regiment_6, df.deaths_regiment_7], err_style="ci_bars", interpolate=False) """ Explanation: Time Series Splot With Confidence Interval Lines But No Lines End of explanation """
adityaka/misc_scripts
python-scripts/data_analytics_learn/link_pandas/Ex_Files_Pandas_Data/Exercise Files/02_01/Final/.ipynb_checkpoints/Object Creation-checkpoint.ipynb
bsd-3-clause
import pandas as pd import numpy as np """ Explanation: Rapid Overview build intuition about pandas details later documentation: http://pandas.pydata.org/pandas-docs/stable/10min.html End of explanation """ my_series = pd.Series([1,3,5,np.nan,6,8]) my_series """ Explanation: Basic series; default integer index documentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html End of explanation """ my_dates_index = pd.date_range('20160101', periods=6) my_dates_index """ Explanation: datetime index documentation: http://pandas.pydata.org/pandas-docs/stable/timeseries.html End of explanation """ sample_numpy_data = np.array(np.arange(24)).reshape((6,4)) sample_numpy_data """ Explanation: sample NumPy data End of explanation """ sample_df = pd.DataFrame(sample_numpy_data, index=my_dates_index, columns=list('ABCD')) sample_df """ Explanation: sample data frame, with column headers; uses our dates_index documentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html End of explanation """ df_from_dictionary = pd.DataFrame({ 'float' : 1., 'time' : pd.Timestamp('20160825'), 'series' : pd.Series(1,index=list(range(4)),dtype='float32'), 'array' : np.array([3] * 4,dtype='int32'), 'categories' : pd.Categorical(["test","train","taxes","tools"]), 'dull' : 'boring data' }) df_from_dictionary """ Explanation: data frame from a Python dictionary End of explanation """ df_from_dictionary.dtypes """ Explanation: pandas retains data type for each column End of explanation """ sample_df.head() sample_df.tail(2) """ Explanation: head and tail; default is 5 rows End of explanation """ sample_df.values sample_df.index sample_df.columns """ Explanation: underlying data: values, index and columns End of explanation """ sample_df.describe() """ Explanation: describe(): a quick statistical summary notice: integer data summarized with floating point numbers End of explanation """ pd.set_option('display.precision', 2) sample_df.describe() """ 
Explanation: control precision of floating point numbers for options and settings, please see: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.set_option.html End of explanation """ sample_df.T """ Explanation: transpose rows and columns End of explanation """ sample_df.sort_index(axis=1, ascending=False) """ Explanation: sort by axis End of explanation """ sample_df.sort_values(by='B', ascending=False) """ Explanation: sort by data within a column (our data was already sorted) End of explanation """
MetabolicEngineeringGroupCBMA/Cunha_et_al_2017
notebooks/pMEC9001-2-3.ipynb
bsd-3-clause
from pydna.parsers import parse_primers p1,p2 = parse_primers(''' >P1 TTATCTTCATCACCGCCATAC >P2 ACAAGAGAAACTTTTGGGTAAAATG ''') """ Explanation: Construction of the pMEC9001, 2 and 3 vectors The vector pMEC1049 vector was used in Romaní et al. 2014 The pMEC1049 expresses a D-xylose metabolic pathway and has a hygromycin selectable marker. The details of the construction of pMEC1049 can be found here. This document describe the construction of the pMEC9001, 2 and 3 vectors. The pMEC9001 is the pMEC1049 with an additional expression cassette for the Saccharomyces cerevisiae gene HAA1(YPR008W). The pMEC9002 is the pMEC1049 with an additional expression cassette for S. cerevisiae PRS3(YHL011C) and pMEC9003 has both of them. | Vector | Relevant property | |----------|--------------------------------------------------------------| | pMEC9001 | HAA1 | | pMEC9002 | PRS3 | | pMEC9003 | HAA1 & PRS3 | Normally, this would be done following the yeast pathway kit strategy by adding genes with a set of new promoters and terminators, but in this case a requirement was to retain the native promoters and terminators for HAA1 and PRS3. The strategy involves linearizing the vector in two locations (before and after the xylose pathway) and adding the HAA1 and PRS3 expression cassettes amplified using tailed primers. The PRS3 construct The PRS3 cassette was previously cloned according to the description below in vector YEpJCP according to this description: "obtained by PCR amplification of fragment carrying PRS3 from Saccharomyces cerevisiae CEN.PK113-7D genomic DNA using appropriate primers and insertion into plasmid pGEM-T Easy. Cloning into YEplac195KanMX using EcoRI digestion sites." Cunha et al. 2015 Primers: P1: TTATCTTCATCACCGCCATAC P2: ACAAGAGAAACTTTTGGGTAAAATG The exact same PRS3 fragment will be cloned in pMEC9002. 
End of explanation """ from pygenome import saccharomyces_cerevisiae as sg from pydna.dseqrecord import Dseqrecord PRS3_locus = Dseqrecord(sg.stdgenes["PRS3"].locus()) """ Explanation: We gain access to the S. cerevisiae genome through the pygenome module. End of explanation """ PRS3_locus from pydna.amplify import pcr PRS3_product = pcr(p1, p2, PRS3_locus) PRS3_product PRS3_product.figure() """ Explanation: The PRS3_locus contain the DNA from the end of the upstream ORF to the beginning of the downstream ORF. End of explanation """ from pydna.parsers import parse_primers h1,h2 = parse_primers(''' >HAA1hc_fw GTC GAC CCC ATT TCC CCT TTC TTT TCC >HAA1hc_rev GGA TCC ATA CCT CAT CTC TGC GTG TTC G ''') HAA1_locus = Dseqrecord(sg.stdgenes["HAA1"].locus()) HAA1_locus HAA1_product = pcr(h1, h2, HAA1_locus) HAA1_product HAA1_product.figure() """ Explanation: The primers anneal perfectly to the template, so this is the PCR product we want. HAA1 construct We will now do the same with the HAA1 cassette. Vector: BHUM1737 Construction: obtained by PCR amplification of a SalI/BamHI fragment carrying HAA1 from yeast genomic DNA using appropriate primers and subsequent insertion into plasmid YEplac195. Primers described in Malcher et al. 2011 supporting information, Table S1 HAA1hc_fw: GTC GAC CCC ATT TCC CCT TTC TTT TCC HAA1hc_rev: GGA TCC ATA CCT CAT CTC TGC GTG TTC G End of explanation """ h1 = h1[6:] h2 = h2[6:] HAA1_product = pcr(h1, h2, HAA1_locus) HAA1_product.figure() HAA1_product.seq """ Explanation: These primers are tailed, but we have no reason to include these tails (containing restriction sites). 
We therefore cut six bp from the beginning and six bp from the end of the sequence: End of explanation """ from pydna.readers import read pMEC1049 = read("pMEC1049.gb") pMEC1049 from Bio.Restriction import XhoI, AleI, OliI pMEC1049_xho = pMEC1049.linearize(XhoI) """ Explanation: Now we will have to design tailed primers for the HAA1_product and the PRS3_product sequences so that we can add them to pMEC1049 by gap repair. First we have to decide with which restriction enzymes we should open the pMEC1049 vector. The restriction enzymes below are candidates for linearizing the pMEC1049 before an after the cassette. XhoI AleI OliI These enzymes are also unique in the pYPK0 based vectors, so we can use tha same strategy to create vectors expressing only the pRS3 and/or HAA1 but without the xylose pathway if needed. End of explanation """ from pydna.design import assembly_fragments fragments = assembly_fragments( [Dseqrecord(pMEC1049_xho.seq.mung()), HAA1_product, pMEC1049_xho] ) HAA1_product.seq Hfw = fragments[1].forward_primer Hrv = fragments[1].reverse_primer Hfw.id = "Hfw" Hrv.id = "Hrv" Hfw = Hfw[1:] Hrv = Hrv[:-1] Hfw = Hfw[:50] # we limit the length to 50 bp since these are less expensive from our provider Hrv = Hrv[:50] print( Hfw.format("tab") ) print( Hrv.format("tab") ) HAA1_recombination_product = pcr(Hfw, Hrv, HAA1_locus) HAA1_recombination_product HAA1_recombination_product.figure() from pydna.assembly import Assembly asm_haa1 = Assembly((pMEC1049_xho, HAA1_recombination_product)) asm_haa1 candidate = asm_haa1.assemble_circular()[0] candidate.figure() pMEC9001 = candidate.synced(pMEC1049) pMEC9001.stamp() pMEC9001.locus="pMEC9001" """ Explanation: We design gap repair primers using the pydna assembly primers function End of explanation """ pMEC9001.write("pMEC9001.gb") """ Explanation: The pMEC9001 is the pMEC1049 with HAA1. The sequence can be downloaded using the link below. 
End of explanation """ pMEC1049_oli = pMEC1049.linearize(OliI) """ Explanation: PRS3 We will now make a pMEC1049 with PRS3 called pMEC9002. End of explanation """ fragments2 = assembly_fragments((pMEC1049_oli, PRS3_product, pMEC1049_oli)) Pfw = fragments2[1].forward_primer Prv = fragments2[1].reverse_primer Pfw.id = "Pfw" Prv.id = "Prv" Prv=Prv[:-2] Pfw = Pfw[:51] Prv = Prv[:51] print( Pfw.format("tab")) print( Prv.format("tab")) PRS3_recombination_product = pcr(Pfw, Prv, PRS3_locus) PRS3_recombination_product """ Explanation: The integration site was chosen to be the uniqie OliI site. End of explanation """ pMEC1049_ale = pMEC1049.linearize(AleI) asm_prs3 = Assembly((pMEC1049_ale, PRS3_recombination_product)) asm_prs3 candidate = asm_prs3.assemble_circular()[0] candidate pMEC9002 = candidate.synced(pMEC1049) pMEC9002.locus = "pMEC9002" pMEC9002.stamp() """ Explanation: The recombination was designed for OliI but AleI was used. End of explanation """ pMEC9002.write("pMEC9002.gb") """ Explanation: The pMEC9002 vector is the pMEC1049 with PRS3 End of explanation """ pMEC1049_9kbp, pMEC1049_6kb = pMEC1049.cut(XhoI, AleI) pMEC1049_6kb pMEC1049_9kbp pMEC1049_6kb.locus = "pMEC1049_6kb" pMEC1049_9kbp.locus = "pMEC1049_9kbp" asm_prs_haa = Assembly((pMEC1049_6kb, pMEC1049_9kbp, HAA1_recombination_product, PRS3_recombination_product)) asm_prs_haa candidate = asm_prs_haa.assemble_circular()[0] candidate pMEC9003 = candidate.synced(pMEC1049) pMEC9003.stamp() pMEC9003.locus="pMEC9003" """ Explanation: pMEC9003 The HAA1 and PRS3 cassettes were added to the plasmid in one step to the plasmid digested with both XhoI and AleI. Cutting with XhoI and AleI makes two fragments about 6 and 9 kb. End of explanation """ pMEC9003 pMEC9003.write("pMEC9003.gb") """ Explanation: pMEC9003 is the pMEC1049 with both HAA1 and PRS3. The sequence can be downloaded from the link below. End of explanation """
ampl/amplpy
notebooks/quickstart.ipynb
bsd-3-clause
!pip install -q amplpy ampltools pandas bokeh """ Explanation: AMPLPY: Setup & Quick Start Documentation: http://amplpy.readthedocs.io GitHub Repository: https://github.com/ampl/amplpy PyPI Repository: https://pypi.python.org/pypi/amplpy Jupyter Notebooks: https://github.com/ampl/amplpy/tree/master/notebooks Setup Install from the PiPY repository: $ python -m pip install amplpy Note: For Windows, Linux, and macOS, the amplpy package comes with 33 binary wheels for Python 2.7, 3.5, 3.6, 3.7, 3.8, and 3.9. Please make sure that you are using the latest version of pip before installing amplpy (upgrade using "python -m pip install pip --upgrade"). If a binary wheel for your platform is not available, a C++ compiler and python development libraries will be required. Aditional packages In this tutorial, we will also use Pandas and Bokeh. You can install these packages using "pip install pandas bokeh" or "python -m pip install pandas bokeh". Note that Bokeh is not mandatory for this tutorial. We also recommend the use of Jupyter Notebook, which was used to create this tutorial. You can install Jupyter using "pip install jupyter" or "python -m pip install jupyter". 
Setup End of explanation """ MODULES=['ampl', 'gurobi'] from ampltools import cloud_platform_name, ampl_notebook from amplpy import AMPL, register_magics if cloud_platform_name() is None: ampl = AMPL() # Use local installation of AMPL else: ampl = ampl_notebook(modules=MODULES) # Install AMPL and use it register_magics(ampl_object=ampl) # Evaluate %%ampl_eval cells with ampl.eval() """ Explanation: Google Colab & Kaggle interagration End of explanation """ import pandas as pd """ Explanation: Quick start Step 1: Import some packages that we will use Import Pandas: End of explanation """ from bokeh.layouts import row from bokeh.plotting import figure, show """ Explanation: Import Bokeh (do not run if you do not have Bokeh installed): End of explanation """ from bokeh.io import output_notebook output_notebook() """ Explanation: For Jupyter Notebooks only (do not run if you are not using Bokeh and Jupyter): End of explanation """ from amplpy import AMPL, Environment, DataFrame """ Explanation: Step 2: Import the amplpy components that we will use End of explanation """ ampl = AMPL() ampl.eval('option version;') """ Explanation: Step 3: Create an AMPL object End of explanation """ ampl.setOption('solver', 'gurobi') """ Explanation: If the AMPL installation directory is not in the system search path, you should create the AMPL object as follows instead: python ampl = AMPL(Environment('full path to the AMPL installation directory')) Note that you may need to use raw strings (e.g., r'C:\ampl\ampl.mswin64') or escape the slashes (e.g., 'C:\\ampl\\ampl.mswin64') if the path includes backslashes. 
Step 4: Select the solver End of explanation """ ampl.eval(''' set NUTR; set FOOD; param cost {FOOD} > 0; param f_min {FOOD} >= 0; param f_max {j in FOOD} >= f_min[j]; param n_min {NUTR} >= 0; param n_max {i in NUTR} >= n_min[i]; param amt {NUTR,FOOD} >= 0; var Buy {j in FOOD} >= f_min[j], <= f_max[j]; minimize Total_Cost: sum {j in FOOD} cost[j] * Buy[j]; subject to Diet {i in NUTR}: n_min[i] <= sum {j in FOOD} amt[i,j] * Buy[j] <= n_max[i]; ''') """ Explanation: Step 5: Define the model End of explanation """ foods = ['BEEF', 'CHK', 'FISH', 'HAM', 'MCH', 'MTL', 'SPG', 'TUR'] nutrients = ['A', 'C', 'B1', 'B2', 'NA', 'CAL'] """ Explanation: Note: Alternatively you can read the model from a file using "ampl.read(filename)". Step 6: Define the initial data End of explanation """ ampl.getSet('FOOD').setValues(foods) ampl.getSet('NUTR').setValues(nutrients) """ Explanation: Define AMPL sets fom python lists End of explanation """ ampl.setData(DataFrame( index=[('FOOD', foods)], columns=[ ('cost', [3.59, 2.59, 2.29, 2.89, 1.89, 1.99, 1.99, 2.49]), ('f_min', [2, 2, 2, 2, 2, 2, 2, 2]), ('f_max', [10, 10, 10, 10, 10, 10, 10, 10]) ] )) """ Explanation: Define data using an amplpy DataFrame End of explanation """ df = pd.DataFrame({ 'n_min': [700, 700, 700, 700, 0, 16000], 'n_max': [20000, 20000, 20000, 20000, 50000, 24000] }, index=nutrients ) ampl.setData(DataFrame.fromPandas(df)) """ Explanation: Define data using a Pandas DataFrame End of explanation """ amounts = [ [ 60, 8, 8, 40, 15, 70, 25, 60], [ 20, 0, 10, 40, 35, 30, 50, 20], [ 10, 20, 15, 35, 15, 15, 25, 15], [ 15, 20, 10, 10, 15, 15, 15, 10], [928, 2180, 945, 278, 1182, 896, 1329, 1397], [295, 770, 440, 430, 315, 400, 379, 450] ] df = DataFrame(('NUTR', 'FOOD'), 'amt') df.setValues({ (nutrient, food): amounts[i][j] for i, nutrient in enumerate(nutrients) for j, food in enumerate(foods) }) ampl.setData(df) """ Explanation: Define data using a python dictionary End of explanation """ ampl.solve() """ Explanation: 
Step 7: Solve the model End of explanation """ ampl.getVariable('Buy').getValues().toPandas() """ Explanation: Step 8: Create a Pandas DataFrame with the values of the variable 'Buy' End of explanation """ totalcost = ampl.getObjective('Total_Cost') print("Objective is:", totalcost.value()) """ Explanation: Step 9: Display the objective value End of explanation """ cost = ampl.getParameter('cost') cost.setValues({'BEEF': 5.01, 'HAM': 4.55}) print("Increased costs of beef and ham.") """ Explanation: Step 10: Increase the costs of beef and ham End of explanation """ ampl.solve() """ Explanation: Step 11: Solve the model with the new costs End of explanation """ print("New objective value:", totalcost.value()) """ Explanation: Step 12: Display the new objective value End of explanation """ Buy = ampl.getVariable('Buy') print("Buy['BEEF'].val = {}".format(Buy['BEEF'].value())) """ Explanation: Step 13: Display the value of Buy['BEEF'] End of explanation """ diet = ampl.getConstraint('Diet') for nutr in nutrients: print("Diet['{}'].dual = {}".format(nutr, diet[nutr].dual())) """ Explanation: Step 14: Display the dual value of each diet constraint End of explanation """ rows = [tuple(row) for row in Buy.getValues()] factors = [index for index, value in rows] x = [value for index, value in rows] dot = figure( title="Categorical Dot Plot", tools='', toolbar_location=None, y_range=factors, x_range=[0,12] ) dot.segment(0, factors, x, factors, line_width=2, line_color='green') dot.circle(x, factors, size=15, fill_color='orange', line_color='green', line_width=3) show(dot) """ Explanation: Step 15: Display the values of the variable 'Buy' using Bokeh End of explanation """
jmhsi/justin_tinker
data_science/courses/temp/courses/dl1/lesson5-movielens.ipynb
apache-2.0
%reload_ext autoreload %autoreload 2 %matplotlib inline from fastai.learner import * from fastai.column_data import * """ Explanation: Movielens End of explanation """ path='data/ml-latest-small/' """ Explanation: Data available from http://files.grouplens.org/datasets/movielens/ml-latest-small.zip End of explanation """ ratings = pd.read_csv(path+'ratings.csv') ratings.head() """ Explanation: We're working with the movielens data, which contains one rating per row, like this: End of explanation """ movies = pd.read_csv(path+'movies.csv') movies.head() """ Explanation: Just for display purposes, let's read in the movie names too. End of explanation """ g=ratings.groupby('userId')['rating'].count() topUsers=g.sort_values(ascending=False)[:15] g=ratings.groupby('movieId')['rating'].count() topMovies=g.sort_values(ascending=False)[:15] top_r = ratings.join(topUsers, rsuffix='_r', how='inner', on='userId') top_r = top_r.join(topMovies, rsuffix='_r', how='inner', on='movieId') pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum) """ Explanation: Create subset for Excel We create a crosstab of the most popular movies and most movie-addicted users which we'll copy into Excel for creating a simple example. This isn't necessary for any of the modeling below however. End of explanation """ val_idxs = get_cv_idxs(len(ratings)) wd=2e-4 n_factors = 50 cf = CollabFilterDataset.from_csv(path, 'ratings.csv', 'userId', 'movieId', 'rating') learn = cf.get_learner(n_factors, val_idxs, 64, opt_fn=optim.Adam) learn.fit(1e-2, 2, wds=wd, cycle_len=1, cycle_mult=2, use_wd_sched=True) """ Explanation: Collaborative filtering End of explanation """ math.sqrt(0.776) """ Explanation: Let's compare to some benchmarks. Here's some benchmarks on the same dataset for the popular Librec system for collaborative filtering. They show best results based on RMSE of 0.91. We'll need to take the square root of our loss, since we use plain MSE. 
End of explanation """ preds = learn.predict() y=learn.data.val_y sns.jointplot(preds, y, kind='hex', stat_func=None); """ Explanation: Looking good - we've found a solution better than any of those benchmarks! Let's take a look at how the predictions compare to actuals for this model. End of explanation """ movie_names = movies.set_index('movieId')['title'].to_dict() g=ratings.groupby('movieId')['rating'].count() topMovies=g.sort_values(ascending=False).index.values[:3000] topMovieIdx = np.array([cf.item2idx[o] for o in topMovies]) m=learn.model; m.cuda() """ Explanation: Analyze results Movie bias End of explanation """ movie_bias = to_np(m.ib(V(topMovieIdx))) movie_bias movie_ratings = [(b[0], movie_names[i]) for i,b in zip(topMovies,movie_bias)] """ Explanation: First, we'll look at the movie bias term. Here, our input is the movie id (a single id), and the output is the movie bias (a single float). End of explanation """ sorted(movie_ratings, key=lambda o: o[0])[:15] sorted(movie_ratings, key=itemgetter(0))[:15] sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15] """ Explanation: Now we can look at the top and bottom rated movies. These ratings are corrected for different levels of reviewer sentiment, as well as different types of movies that different reviewers watch. End of explanation """ movie_emb = to_np(m.i(V(topMovieIdx))) movie_emb.shape """ Explanation: Embedding interpretation We can now do the same thing for the embeddings. End of explanation """ from sklearn.decomposition import PCA pca = PCA(n_components=3) movie_pca = pca.fit(movie_emb.T).components_ movie_pca.shape fac0 = movie_pca[0] movie_comp = [(f, movie_names[i]) for f,i in zip(fac0, topMovies)] """ Explanation: Because it's hard to interpret 50 embeddings, we use PCA to simplify them down to just 3 vectors. 
End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] fac1 = movie_pca[1] movie_comp = [(f, movie_names[i]) for f,i in zip(fac1, topMovies)] """ Explanation: Here's the 1st component. It seems to be 'easy watching' vs 'serious'. End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] """ Explanation: Here's the 2nd component. It seems to be 'CGI' vs 'dialog driven'. End of explanation """ idxs = np.random.choice(len(topMovies), 50, replace=False) X = fac0[idxs] Y = fac1[idxs] plt.figure(figsize=(15,15)) plt.scatter(X, Y) for i, x, y in zip(topMovies[idxs], X, Y): plt.text(x,y,movie_names[i], color=np.random.rand(3)*0.7, fontsize=11) plt.show() """ Explanation: We can draw a picture to see how various movies appear on the map of these components. This picture shows the first two components. End of explanation """ a = T([[1.,2],[3,4]]) b = T([[2.,2],[10,10]]) a,b a*b (a*b).sum(1) class DotProduct(nn.Module): def forward(self, u, m): return (u*m).sum(1) model=DotProduct() model(a,b) """ Explanation: Collab filtering from scratch Dot product example End of explanation """ u_uniq = ratings.userId.unique() user2idx = {o:i for i,o in enumerate(u_uniq)} ratings.userId = ratings.userId.apply(lambda x: user2idx[x]) m_uniq = ratings.movieId.unique() movie2idx = {o:i for i,o in enumerate(m_uniq)} ratings.movieId = ratings.movieId.apply(lambda x: movie2idx[x]) n_users=int(ratings.userId.nunique()) n_movies=int(ratings.movieId.nunique()) class EmbeddingDot(nn.Module): def __init__(self, n_users, n_movies): super().__init__() self.u = nn.Embedding(n_users, n_factors) self.m = nn.Embedding(n_movies, n_factors) self.u.weight.data.uniform_(0,0.05) self.m.weight.data.uniform_(0,0.05) def forward(self, cats, conts): users,movies = cats[:,0],cats[:,1] u,m = self.u(users),self.m(movies) return (u*m).sum(1) x = ratings.drop(['rating', 
'timestamp'],axis=1) y = ratings['rating'].astype(np.float32) data = ColumnarModelData.from_data_frame(path, val_idxs, x, y, ['userId', 'movieId'], 64) wd=1e-5 model = EmbeddingDot(n_users, n_movies).cuda() opt = optim.SGD(model.parameters(), 1e-1, weight_decay=wd, momentum=0.9) fit(model, data, 3, opt, F.mse_loss) set_lrs(opt, 0.01) fit(model, data, 3, opt, F.mse_loss) """ Explanation: Dot product model End of explanation """ min_rating,max_rating = ratings.rating.min(),ratings.rating.max() min_rating,max_rating def get_emb(ni,nf): e = nn.Embedding(ni, nf) e.weight.data.uniform_(-0.01,0.01) return e class EmbeddingDotBias(nn.Module): def __init__(self, n_users, n_movies): super().__init__() (self.u, self.m, self.ub, self.mb) = [get_emb(*o) for o in [ (n_users, n_factors), (n_movies, n_factors), (n_users,1), (n_movies,1) ]] def forward(self, cats, conts): users,movies = cats[:,0],cats[:,1] um = (self.u(users)* self.m(movies)).sum(1) res = um + self.ub(users).squeeze() + self.mb(movies).squeeze() res = F.sigmoid(res) * (max_rating-min_rating) + min_rating return res wd=2e-4 model = EmbeddingDotBias(cf.n_users, cf.n_items).cuda() opt = optim.SGD(model.parameters(), 1e-1, weight_decay=wd, momentum=0.9) fit(model, data, 3, opt, F.mse_loss) set_lrs(opt, 1e-2) fit(model, data, 3, opt, F.mse_loss) """ Explanation: Bias End of explanation """ class EmbeddingNet(nn.Module): def __init__(self, n_users, n_movies, nh=10, p1=0.05, p2=0.5): super().__init__() (self.u, self.m) = [get_emb(*o) for o in [ (n_users, n_factors), (n_movies, n_factors)]] self.lin1 = nn.Linear(n_factors*2, nh) self.lin2 = nn.Linear(nh, 1) self.drop1 = nn.Dropout(p1) self.drop2 = nn.Dropout(p2) def forward(self, cats, conts): users,movies = cats[:,0],cats[:,1] x = self.drop1(torch.cat([self.u(users),self.m(movies)], dim=1)) x = self.drop2(F.relu(self.lin1(x))) return F.sigmoid(self.lin2(x)) * (max_rating-min_rating+1) + min_rating-0.5 wd=1e-5 model = EmbeddingNet(n_users, n_movies).cuda() opt = 
optim.Adam(model.parameters(), 1e-3, weight_decay=wd) fit(model, data, 3, opt, F.mse_loss) set_lrs(opt, 1e-3) fit(model, data, 3, opt, F.mse_loss) """ Explanation: Mini net End of explanation """
goldmanm/tools
cookbook.ipynb
mit
import cantera_tools as ctt import numpy as np from scipy import integrate import cantera as ct import pandas as pd import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Table of Contents <p><div class="lev1 toc-item"><a href="#Cookbook-for-cantera_tools-module" data-toc-modified-id="Cookbook-for-cantera_tools-module-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Cookbook for cantera_tools module</a></div><div class="lev2 toc-item"><a href="#better-names-for-RMG-mechanisms" data-toc-modified-id="better-names-for-RMG-mechanisms-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>better names for RMG mechanisms</a></div><div class="lev2 toc-item"><a href="#reducing-a-mechanism-by-reactions" data-toc-modified-id="reducing-a-mechanism-by-reactions-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>reducing a mechanism by reactions</a></div><div class="lev2 toc-item"><a href="#running-a-simulation" data-toc-modified-id="running-a-simulation-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>running a simulation</a></div><div class="lev3 toc-item"><a href="#run_simulation-example" data-toc-modified-id="run_simulation-example-131"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span><code>run_simulation</code> example</a></div><div class="lev3 toc-item"><a href="#run_simulation_till_conversion-example" data-toc-modified-id="run_simulation_till_conversion-example-132"><span class="toc-item-num">1.3.2&nbsp;&nbsp;</span><code>run_simulation_till_conversion</code> example</a></div><div class="lev3 toc-item"><a href="#find_ignition_delay-example" data-toc-modified-id="find_ignition_delay-example-133"><span class="toc-item-num">1.3.3&nbsp;&nbsp;</span><code>find_ignition_delay</code> example</a></div><div class="lev3 toc-item"><a href="#set-specific-state-variables-with-time" data-toc-modified-id="set-specific-state-variables-with-time-134"><span class="toc-item-num">1.3.4&nbsp;&nbsp;</span>set specific state variables with time</a></div><div class="lev2 
toc-item"><a href="#analyzing-data" data-toc-modified-id="analyzing-data-14"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>analyzing data</a></div><div class="lev3 toc-item"><a href="#obtaining-reaction-and-species-data" data-toc-modified-id="obtaining-reaction-and-species-data-141"><span class="toc-item-num">1.4.1&nbsp;&nbsp;</span>obtaining reaction and species data</a></div><div class="lev3 toc-item"><a href="#looking-at-a-list-of-reactions-consuming/producing-a-molecule" data-toc-modified-id="looking-at-a-list-of-reactions-consuming/producing-a-molecule-142"><span class="toc-item-num">1.4.2&nbsp;&nbsp;</span>looking at a list of reactions consuming/producing a molecule</a></div><div class="lev3 toc-item"><a href="#view-branching-ratio" data-toc-modified-id="view-branching-ratio-143"><span class="toc-item-num">1.4.3&nbsp;&nbsp;</span>view branching ratio</a></div><div class="lev3 toc-item"><a href="#creating-flux-diagrams" data-toc-modified-id="creating-flux-diagrams-144"><span class="toc-item-num">1.4.4&nbsp;&nbsp;</span>creating flux diagrams</a></div> # Cookbook for cantera_tools module This notebook describes some of the methods in this package and how they can be used. End of explanation """ ctt.obtain_cti_file_nicely_named('cookbook_files/',original_ck_file='chem.inp') """ Explanation: better names for RMG mechanisms Many RMG models have poorly-named species, due in part to restrictions of CHEMKIN names. Cantera have fewer restrictions, so mechanisms produced with it can have more understandable names. This example converts an RMG CHEMKIN file to a Cantera file which uses SMILES to names species. 
This method will place an input_nicely_named.cti and a species_dictionary_nicely_named.txt into the folder specified in the method End of explanation """ model_link = 'cookbook_files/model.cti' desired_reactions = ['CH3OH + O2 <=> CH2OH(29) + HO2(12)', 'C3H8 + O2 <=> C3H7(61) + HO2(12)', 'C3H8 + O2 <=> C3H7(60) + HO2(12)', 'CH3OH + OH(10) <=> CH2OH(29) + H2O(11)', 'C3H8 + OH(10) <=> C3H7(60) + H2O(11)', 'C3H8 + OH(10) <=> C3H7(61) + H2O(11)', 'CH3OH + HO2(12) <=> CH2OH(29) + H2O2(13)', 'C3H8 + HO2(12) <=> C3H7(61) + H2O2(13)', 'C3H8 + HO2(12) <=> C3H7(60) + H2O2(13)', 'C3H7(60) + O2 <=> C3H7O2(78)', 'C3H7(61) + O2 <=> C3H7O2(80)',] # make the reduced mechanism using the full mechanism `.cti` file. solution_reduced = ctt.create_mechanism(model_link, kept_reaction_equations=desired_reactions) # NOTE: this cantera Solution object can now be used like any other """ Explanation: reducing a mechanism by reactions The modules can create a reduced mechanism given a list of desired reaction strings, using how cantera represents the reaction strings (this can be found by solution.reaction_equations()). It will remove any unused species as well. 
End of explanation """ model_link = 'cookbook_files/model.cti' # creates the cantera Solution object solution = ctt.create_mechanism(model_link) #initial mole fractions mole_fractions = {'N2':5, 'O2':1, 'C3H8': 0.3} # set initial conditions of solution in kelvin pascals and mole fractions conditions = 800, 10**6, mole_fractions solution.TPX = conditions # store 100 times between 10^-8s and 1s, with an initial point at t=0 times = np.logspace(-8,0,num=100) times = np.insert(times,0,0) # run the simulation outputs = ctt.run_simulation(solution, times, condition_type = 'constant-temperature-and-pressure', output_reactions = True, output_directional_reactions = True, output_rop_roc=True) # you can combine outputs how you would like with pd.concat result = pd.concat([outputs['conditions'], outputs['species'], outputs['directional_reactions']], axis = 'columns') # data can be saved to avoid rerunning the simulation for data analysis (in most cases). these can be loaded using pandas.from_pickle() and pandas.from_csv() result.to_pickle('cookbook_files/{}.pic'.format('run_simulation_example')) result.to_csv('cookbook_files/{}.csv'.format('run_simulation_example')) """ Explanation: running a simulation Simulations can be run in the following ways: run_simulation - you give the method times which you want data saved, and it saves data at each time. run_simulation_till_conversion - this method will run a simulation until the specified conversion is reached for a target species. find_ignition_delay - you give this method the initial conditions and it outputs the ignition delay determined by the maximum of $\frac{dT}{dt}$, as well as simulation data given every so many iterator steps. These methods currently work for constant temperature and pressure or adiabatic constant volume. It's also possible to adapt these methods to your specific situation. 
If you think your adaption will be useful for others, consider talking with the author (posting a issue or in person) or just making a pull request. run_simulation example End of explanation """ model_link = 'cookbook_files/model.cti' # creates the cantera Solution object solution = ctt.create_mechanism(model_link) # finds initial mole fraction for a fuel-air ratio mole_fractions = ctt.get_initial_mole_fractions(stoich_ratio = 1, fuel_mole_ratios=[1], oxygen_per_fuel_at_stoich_list = [5], fuels = ['C3H8']) # set initial conditions of solution in kelvin pascals and mole fractions conditions = 950, 10**6, mole_fractions solution.TPX = conditions # run simulation output_till_conversion = ctt.run_simulation_till_conversion(solution, species='C3H8', conversion=0.5, condition_type = 'constant-temperature-and-pressure', output_species = True, output_reactions = True, output_directional_reactions = True, output_rop_roc = True, skip_data = 25) """ Explanation: run_simulation_till_conversion example End of explanation """ model_link = 'cookbook_files/model.cti' # creates the cantera Solution object solution = ctt.create_mechanism(model_link) # finds initial mole fraction for a fuel-air ratio of 1 with 30%/70% methanol/propane blend # for non-combustion conditions, this can be replaced by a dictionary of values {'CH3OH': 0.3, 'C3H8':0.7} mole_fractions = ctt.get_initial_mole_fractions(stoich_ratio = 1, fuel_mole_ratios = [.3,.7], oxygen_per_fuel_at_stoich_list = [1.5,5], fuels = ['CH3OH','C3H8']) # set initial conditions of solution in kelvin pascals and mole fractions conditions = 750, 10**6, mole_fractions # run simulation outputs = ctt.find_ignition_delay(solution, conditions, output_profile = True, output_directional_reactions = True, skip_data = 1000) # obtain the ignition delays ignition_delay = outputs['ignition_delay'] """ Explanation: find_ignition_delay example End of explanation """ model_link = 'cookbook_files/model.cti' # creates the cantera Solution object 
solution = ctt.create_mechanism(model_link) #initial mole fractions mole_fractions = {'N2':5, 'O2':1, 'C3H8': 0.3} # set initial conditions of solution in kelvin pascals and mole fractions conditions = 800, 10**6, mole_fractions solution.TPX = conditions # store 100 times between 10^-8s and 0.01s, with an initial point at t=0 times = np.logspace(-8,-2,num=100) times = np.insert(times,0,0) # set a linear ramp temperature from 800 to 1000 at 1e-5s followed by constant temperature ramp_temperatures = 800 + 2000000 * times[:50] constant_temperatures = np.ones(51) * 1000 temperatures = np.concatenate((ramp_temperatures,constant_temperatures)) # run the simulation outputs = ctt.run_simulation(solution, times, condition_type = 'specified-temperature-constant-volume', output_reactions = True, output_directional_reactions = True, output_rop_roc= False, temperature_values = temperatures) """ Explanation: set specific state variables with time Specific state variables (like temperature) can be set across a simulation. To use this, change the condition_type to the string that describes the situation (list of acceptable strings is described in the docstring of run_simulation. Typically you also need to supply a list of the state variable to change which corresponds with the times in the times variable. 
End of explanation """ # this outputs a dataframe of just species species = outputs['species'] reactions = outputs['net_reactions'] forward_and_reverse_reactions = outputs['directional_reactions'] net_observables = outputs['conditions'] # obtain reactions with a specific molecule reactions_with_propane = ctt.find_reactions(df=reactions, solution=solution, species = 'C3H8') """ Explanation: analyzing data obtaining reaction and species data End of explanation """ species['C3H8'].plot() """ Explanation: viewing species concentrations species concentration with time can be accessed from the dataframe contained in outputs['species'] End of explanation """ propane_production = ctt.consumption_pathways(df=reactions, solution=solution, species = 'C3H8') f, ax = plt.subplots() reactions_with_propane.plot.line(ax=ax) import plot_tools as ptt ptt.place_legend_outside_plot(axis=ax) ax.set_ylabel('production rate (kmol/m3s)') """ Explanation: view reactions consuming/producing a molecule Negative values indicate that the reaction consumes the molecule. Positive values indicate that the reaction produces the molecule. 
End of explanation """ # this outputs the branching ratio of propane branching = ctt.branching_ratios(df=reactions, solution=solution, compound='C3H8') f, ax = plt.subplots() # plot only the top 6 branching ratios branching.iloc[:,:6].plot.area(ax=ax) import plot_tools as ptt ptt.place_legend_outside_plot(axis=ax) ax.set_ylabel('branching ratio') """ Explanation: view branching ratio End of explanation """ model_link = 'cookbook_files/model.cti' solution = ctt.create_mechanism(model_link) mole_fractions = {'N2':5, 'O2':1, 'C3H8': 0.3} conditions = 800, 10**6, mole_fractions solution.TPX = conditions #only specify the times you want a flux diagram at times = np.logspace(-8,0,num=3) # run the simulation & create flux diagrams outputs = ctt.save_flux_diagrams(solution, times, condition_type = 'constant-temperature-and-pressure', path='cookbook_files/', filename='cookbook_fluxes', filetype = 'svg', element='C') """ Explanation: creating flux diagrams The method save_flux_diagrams, shown below, runs a simulation saving the diagrams at various times. The method save_flux_diagram can be integrated into another simulation solver. End of explanation """
AllenDowney/ModSimPy
soln/chap17soln.ipynb
mit
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * """ Explanation: Modeling and Simulation in Python Chapter 17 Copyright 2017 Allen Downey License: Creative Commons Attribution 4.0 International End of explanation """ data = pd.read_csv('data/glucose_insulin.csv', index_col='time') """ Explanation: Data We have data from Pacini and Bergman (1986), "MINMOD: a computer program to calculate insulin sensitivity and pancreatic responsivity from the frequently sampled intravenous glucose tolerance test", Computer Methods and Programs in Biomedicine, 23: 113-122.. End of explanation """ plot(data.glucose, 'bo', label='glucose') decorate(xlabel='Time (min)', ylabel='Concentration (mg/dL)') """ Explanation: Here's what the glucose time series looks like. End of explanation """ plot(data.insulin, 'go', label='insulin') decorate(xlabel='Time (min)', ylabel='Concentration ($\mu$U/mL)') """ Explanation: And the insulin time series. End of explanation """ subplot(2, 1, 1) plot(data.glucose, 'bo', label='glucose') decorate(ylabel='Concentration (mg/dL)') subplot(2, 1, 2) plot(data.insulin, 'go', label='insulin') decorate(xlabel='Time (min)', ylabel='Concentration ($\mu$U/mL)') savefig('figs/chap17-fig01.pdf') """ Explanation: For the book, I put them in a single figure, using subplot End of explanation """ I = interpolate(data.insulin) """ Explanation: Interpolation We have measurements of insulin concentration at discrete points in time, but we need to estimate it at intervening points. We'll use interpolate, which takes a Series and returns a function: The return value from interpolate is a function. End of explanation """ I(7) """ Explanation: We can use the result, I, to estimate the insulin level at any point in time. 
End of explanation """ t_0 = get_first_label(data) t_end = get_last_label(data) ts = linrange(t_0, t_end, endpoint=True) I(ts) type(ts) """ Explanation: I can also take an array of time and return an array of estimates: End of explanation """ plot(data.insulin, 'go', label='insulin data') plot(ts, I(ts), color='green', label='interpolated') decorate(xlabel='Time (min)', ylabel='Concentration ($\mu$U/mL)') savefig('figs/chap17-fig02.pdf') """ Explanation: Here's what the interpolated values look like. End of explanation """ # Solution I = interpolate(data.insulin, kind='cubic') plot(data.insulin, 'go', label='insulin data') plot(ts, I(ts), color='green', label='interpolated') decorate(xlabel='Time (min)', ylabel='Concentration ($\mu$U/mL)') """ Explanation: Exercise: Read the documentation of scipy.interpolate.interp1d. Pass a keyword argument to interpolate to specify one of the other kinds of interpolation, and run the code again to see what it looks like. End of explanation """ # Solution G = interpolate(data.glucose) plot(data.glucose, 'bo', label='gluciose data') plot(ts, G(ts), color='blue', label='interpolated') decorate(xlabel='Time (min)', ylabel='Concentration (mg/dL)') """ Explanation: Exercise: Interpolate the glucose data and generate a plot, similar to the previous one, that shows the data points and the interpolated curve evaluated at the time values in ts. End of explanation """ source_code(interpolate) """ Explanation: Under the hood End of explanation """
jhonatancasale/graduation-pool
disciplines/SME0819 - Matrices for Applied Statistics/0x00_Fundamentals/Matrices - Fundamentals.ipynb
apache-2.0
import numpy as np # for array, dot and so on """ Explanation: Fundamentos de Matrizes | Matrix Fundamentals: Uma forma organizada de representar os dados numéricos. O tamanho ou a dimensão da matriz (nro linhas) X (nro colunas), por exemplo $2x3$ O elemento que ocupa a i-ésima linha e a j-ésima coluna é denotado por $a_{ij}$ Examplo de uma Matriz $2x3$ | Example of a $2x3$ Matrix $$A_{2x3} = \begin{pmatrix} a_{11} & a_{12} & a_{13} \ a_{21} & a_{22} & a_{23} \end{pmatrix}$$ Exemplo numérico de uma Matriz $2x3$ | Numeric example of a $2x3$ Matrix $$A_{2x3} = \begin{pmatrix} -1 & 42 & 10 \ 12 & 0 & 9 \end{pmatrix}$$ Alguns exemplos em Python3 | Some examples in Python3 End of explanation """ B = np.arange(9).reshape(3, 3) print(B) A = np.array([ [-1, 42, 10], [12, 0, 9] ]) print(A) # inspecting the matrices print(A.shape) # 2 x 3 print(B.shape) # 3 x 3 # We have 2 dimensions `X1` and `X2` print(A.ndim) print(B.ndim) Zeros = np.zeros((2, 3)) print(Zeros) Ones = np.ones((3, 3)) print(Ones) Empty = np.empty((4, 4)) print(Empty) """ Explanation: Matrix creation End of explanation """ print(np.arange(5, 30, 7)) print(np.arange(10, 13, .3)) print(np.linspace(0, 2, 13)) """ Explanation: Vector creation End of explanation """ print(np.arange(10000)) print(np.arange(10000).reshape(100,100)) """ Explanation: np.arange bahevior to large numbers End of explanation """ A = np.array([10, 20, 30, 40, 50, -1]) B = np.linspace(0, 1, A.size) print("{} + {} -> {}".format(A, B, A + B)) print("{} - {} -> {}".format(A, B, A - B)) """ Explanation: Basic Operations $$A_{mxn} \pm B_{mxn} \mapsto C_{mxn}$$ $$u_{1xn} \pm v_{1xn} \mapsto w_{1xn} \quad (u_n \pm v_n \mapsto w_n)$$ End of explanation """ print("{} ** 2 -> {}".format(A, A ** 2)) """ Explanation: $$f:M_{mxn} \to M_{mxn}$$ $$a_{ij} \mapsto a_{ij}^2$$ End of explanation """ print("2 * sin({}) -> {}".format(A, 2 * np.sin(A))) """ Explanation: $$f:M_{mxn} \to M_{mxn}$$ $$a_{ij} \mapsto 2\sin(a_{ij})$$ End of explanation """ print(A > 
30) """ Explanation: $$f:M_{mxn} \to M_{mxn}$$ $$ \forall \quad i, j: \quad i < m, j < n \qquad a_{ij} = \left{ \begin{array}{ll} \text{True} & \quad se \quad a_{ij} > 30 \ \text{False} & \quad \text{c.c} \end{array} \right. $$ End of explanation """ print(A[A > 30]) """ Explanation: Usando um vetor de Bools como Indexador End of explanation """ print("{} * {} -> {}".format(A, B, A * B)) """ Explanation: $$A_{mxn} * B_{mxn} \mapsto C_{mxn}$$ $$c_{ij} = a_{ij} * b_{ij}$$ $$\forall \quad i, j: \quad i < m, j < n$$ End of explanation """ print("{}.{} -> {}".format(A, B, A.dot(B))) print("{}.{} -> {}".format(A, B, np.dot(A, B))) print(np.ones(10) * 12) M = np.linspace(-1, 1, 16).reshape(4, 4) print(M) print("sum(A) -> {}".format(M.sum())) print("max(A) -> {} | min(A) -> {}" .format(M.max(), M.min())) N = np.arange(16).reshape(4, 4) print(N) print(N.sum(axis=0)) # sum by column print(N.sum(axis=1)) #sum by row print(N.min(axis=1)) print(N.cumsum(axis=0)) print(N) for column in range(N.shape[1]): print(N[:,column]) print(N.T) print(N) print(N.transpose()) print(N) I = np.eye(2) print(I) I2 = I * 2 I2_inv = np.linalg.inv(I2) print(I2_inv) print(np.dot(I2, I2_inv)) dir(np.linalg) print(np.trace(I2)) Prod = np.dot(I2, I2) print(Prod) print(np.linalg.eig(Prod)) """ Explanation: End of explanation """ A = np.linspace(1, 4, 4).reshape(2, 2) print(A) y = np.array([5., 7.]) x = np.linalg.solve(A, y) print(x) print(np.dot(A, x.T)) x = np.arange(0, 10, 2) y = np.arange(5) print(np.vstack([x, y])) print(np.hstack([x, y])) print(np.hsplit(x, [2])) print(np.hsplit(x, [2, 4])) print(np.vsplit(np.eye(3), range(1, 3))) """ Explanation: $$Ax = y$$ End of explanation """
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive/03_tensorflow/b_estimator.ipynb
apache-2.0
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst # Ensure the right version of Tensorflow is installed. !pip freeze | grep tensorflow==2.6 import tensorflow as tf import pandas as pd import numpy as np import shutil print(tf.__version__) """ Explanation: <h1>2b. Machine Learning using tf.estimator </h1> In this notebook, we will create a machine learning model using tf.estimator and evaluate its performance. The dataset is rather small (7700 samples), so we can do it all in-memory. We will also simply pass the raw data in as-is. End of explanation """ # In CSV, label is the first column, after the features, followed by the key CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key'] FEATURES = CSV_COLUMNS[1:len(CSV_COLUMNS) - 1] LABEL = CSV_COLUMNS[0] df_train = pd.read_csv('./taxi-train.csv', header = None, names = CSV_COLUMNS) df_valid = pd.read_csv('./taxi-valid.csv', header = None, names = CSV_COLUMNS) df_test = pd.read_csv('./taxi-test.csv', header = None, names = CSV_COLUMNS) """ Explanation: Read data created in the previous chapter. 
End of explanation """ def make_train_input_fn(df, num_epochs): return tf.compat.v1.estimator.inputs.pandas_input_fn( x = df, y = df[LABEL], batch_size = 128, num_epochs = num_epochs, shuffle = True, queue_capacity = 1000 ) def make_eval_input_fn(df): return tf.compat.v1.estimator.inputs.pandas_input_fn( x = df, y = df[LABEL], batch_size = 128, shuffle = False, queue_capacity = 1000 ) """ Explanation: <h2> Train and eval input functions to read from Pandas Dataframe </h2> End of explanation """ def make_prediction_input_fn(df): return tf.compat.v1.estimator.inputs.pandas_input_fn( x = df, y = None, batch_size = 128, shuffle = False, queue_capacity = 1000 ) """ Explanation: Our input function for predictions is the same except we don't provide a label End of explanation """ def make_feature_cols(): input_columns = [tf.feature_column.numeric_column(k) for k in FEATURES] return input_columns """ Explanation: Create feature columns for estimator End of explanation """ tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) OUTDIR = 'taxi_trained' shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time model = tf.estimator.LinearRegressor( feature_columns = make_feature_cols(), model_dir = OUTDIR) model.train(input_fn = make_train_input_fn(df_train, num_epochs = 10)) """ Explanation: <h3> Linear Regression with tf.Estimator framework </h3> End of explanation """ def print_rmse(model, df): metrics = model.evaluate(input_fn = make_eval_input_fn(df)) print('RMSE on dataset = {}'.format(np.sqrt(metrics['average_loss']))) print_rmse(model, df_valid) """ Explanation: Evaluate on the validation data (we should defer using the test data to after we have selected a final model). End of explanation """ predictions = model.predict(input_fn = make_prediction_input_fn(df_test)) for items in predictions: print(items) """ Explanation: This is nowhere near our benchmark (RMSE of $6 or so on this data), but it serves to demonstrate what TensorFlow code looks like. 
Let's use this model for prediction. End of explanation """ tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time model = tf.estimator.DNNRegressor(hidden_units = [32, 8, 2], feature_columns = make_feature_cols(), model_dir = OUTDIR) model.train(input_fn = make_train_input_fn(df_train, num_epochs = 100)); print_rmse(model, df_valid) """ Explanation: This explains why the RMSE was so high -- the model essentially predicts the same amount for every trip. Would a more complex model help? Let's try using a deep neural network. The code to do this is quite straightforward as well. <h3> Deep Neural Network regression </h3> End of explanation """ from google.cloud import bigquery import numpy as np import pandas as pd def create_query(phase, EVERY_N): """ phase: 1 = train 2 = valid """ base_query = """ SELECT (tolls_amount + fare_amount) AS fare_amount, EXTRACT(DAYOFWEEK FROM pickup_datetime) * 1.0 AS dayofweek, EXTRACT(HOUR FROM pickup_datetime) * 1.0 AS hourofday, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers, CONCAT(CAST(pickup_datetime AS STRING), CAST(pickup_longitude AS STRING), CAST(pickup_latitude AS STRING), CAST(dropoff_latitude AS STRING), CAST(dropoff_longitude AS STRING)) AS key FROM `nyc-tlc.yellow.trips` WHERE trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 """ if EVERY_N == None: if phase < 2: # Training query = "{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 4)) < 2".format(base_query) else: # Validation query = "{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 4)) = {1}".format(base_query, phase) else: 
query = "{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), {1})) = {2}".format(base_query, EVERY_N, phase) return query query = create_query(2, 100000) df = bigquery.Client().query(query).to_dataframe() print_rmse(model, df) """ Explanation: We are not beating our benchmark with either model ... what's up? Well, we may be using TensorFlow for Machine Learning, but we are not yet using it well. That's what the rest of this course is about! But, for the record, let's say we had to choose between the two models. We'd choose the one with the lower validation error. Finally, we'd measure the RMSE on the test data with this chosen model. <h2> Benchmark dataset </h2> Let's do this on the benchmark dataset. End of explanation """
aleph314/K2
Foundations/Python CS/Activity 07.ipynb
gpl-3.0
n = 1000000 x = np.random.rand(n) y = np.random.rand(n) %time z = x + y """ Explanation: Exercise 07.1 (indexing and timing) Create two very long NumPy arrays x and y and sum the arrays using: The NumPy addition syntax, z = x + y; and A for loop that computes the sum entry-by-entry Compare the time required for the two approaches for vectors of different lengths. The values of the array entries are not important for this test. Hint: To loop over an array using indices, try a construction like: python x = np.ones(100) y = np.ones(len(x)) for i in range(len(x)): print(x[i]*y[i]) Timing NumPy addition for 1 million elements arrays End of explanation """ def sum_vec(x, y): "Sum two vectors entry by entry" z = np.zeros(n) for i in range(n): z[i] = x[i] + y[i] return z %time w = sum_vec(x, y) """ Explanation: Timing 1 million elements arrays addition using an entry-by-entry function End of explanation """ # Test scores scores = np.array([58.0, 35.0, 24.0, 42, 7.8]) """ Explanation: Exercise 07.2 (member functions and slicing) Anonymised scores (out of 60) for an examination are stored in a NumPy array. Write: A function that takes a NumPy array of the raw scores and returns the scores as a percentage sorted from lowest to highest (try using scores.sort(), where scores is a NumPy array holding the scores). A function that returns the maximum, minimum and mean of the raw scores as a dictionary with the keys 'min', 'max' and 'mean'. Use the NumPy array functions min(), max() and mean() to do the computation, e.g. max = scores.max(). Modify your function for the min, max and mean to optionally exclude the highest and lowest scores from the computation of the min, max and mean. Hint: sort the array of scores and use array slicing to exclude the first and the last entries. 
Use the scores python scores = np.array([58.0, 35.0, 24.0, 42, 7.8]) End of explanation """ def percentages(scores): "Calculate percentages (max score = 60) from a list of scores and returns them sorted" sorted_scores = scores / 60 sorted_scores.sort() return sorted_scores print(percentages(scores)) """ Explanation: Function that takes a NumPy array of the raw scores and returns the scores as a percentage sorted from lowest to highest End of explanation """ def max_min_mean(scores): "Return a dictionary with max, min and mean score from a list of scores" out = {} out['min'] = scores.min() out['max'] = scores.max() out['mean'] = scores.mean() return out print(max_min_mean(scores)) """ Explanation: Function that returns the maximum, minimum and mean of the raw scores as a dictionary End of explanation """ def max_min_mean2(scores, exclude_extreme): "Return a dictionary with max, min and mean score from a list of scores excluding extremes if exclude_extreme = 1" out = {} # If extremes are excluded filter the sorted list from second to second to last element if exclude_extreme == 1: scores.sort() scores_filtered = scores[1:-1] # Else use the entire list elif exclude_extreme == 0: scores_filtered = scores # If exclude_extreme is not 0 nor 1 return a message else: return 'The second parameter should be either 0 (to include extremes) or 1 (to exclude them)' out['min'] = scores_filtered.min() out['max'] = scores_filtered.max() out['mean'] = scores_filtered.mean() return out print(max_min_mean2(scores, 1)) """ Explanation: Modify your function for the min, max and mean to optionally exclude the highest and lowest scores End of explanation """ A = np.array([[4.0, 7.0, -2.43, 67.1], [-4.0, 64.0, 54.7, -3.33], [2.43, 23.2, 3.64, 4.11], [1.2, 2.5, -113.2, 323.22]]) print(A) """ Explanation: Exercise 07.3 (slicing) For the two-dimensional array End of explanation """ print(A[:,2]) """ Explanation: use array slicing to Extract the third column as a 1D array Extract the first two 
rows as a 2D sub-array Extract the bottom-right $2 \times 2$ block as a 2D sub-array Sum the last column Print the results to the screen to check. Try to use array slicing such that your code would still work if the dimensions of A were enlarged. Also, compute the tranpose of A (search online to find the function/syntax to do this). Third column as a 1D array End of explanation """ print(A[:2,:]) """ Explanation: First two rows as a 2D sub-array End of explanation """ print(A[-2:,-2:]) """ Explanation: Bottom right $2 \times 2$ block as a 2D sub-array End of explanation """ print(A[:,-1].sum()) """ Explanation: Sum of the last column End of explanation """ print(A.transpose()) """ Explanation: Transpose of A End of explanation """ def f(x): return x**3 - 6*x**2 + 4*x + 12 #return x**2 + x - 20 # Roots = -5, 4 def compute_root(f, x0, x1, tol, max_it): """Computes the root of f between x0 and x1 using bisection, stops if the value of f at the root is under tol or if max_it is reached and returns the root, the value of f at the root and the number of iterations""" for i in range(max_it): # Compute x_mid x_mid = (x0 + x1) / 2 # Compute f for the three values f_0, f_1, f_mid = f(x0), f(x1), f(x_mid) # Check the value of f_0*f_mid to determine how to update the endpoints if f_0*f_mid < 0: x1 = x_mid else: x0 = x_mid # Check if f is under tol if abs(f_mid) < tol: return x_mid, f_mid, i+1 # Return the approximate root in case max_it is reached return x_mid, f_mid, i+1 # Test for the function f %time x, f_x, num_it = compute_root(f, x0=3, x1=6, tol=1.0e-6, max_it=1000) print('Approximate root:', x) print('Value of f:', f_x) print('Number of iterations:', num_it) """ Explanation: Exercise 07.4 (optional extension) In a previous exercise you implemented the bisection algorithm to find approximate roots of a mathematical function. 
Use the SciPy bisection function optimize.bisect (http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.bisect.html) to find roots of the mathematical function that was used in the previous exercise. Compare the results computed by SciPy and your program from the earlier exercise, and compare the computational time (using %time). End of explanation """ from scipy.optimize import bisect as bisect # Compute the root of f using scipy function %time x0 = bisect(f, a=3, b=6) print('Approximate root:', x0) print('Value of f:', f(x0)) """ Explanation: Below we compute the same root using scipy function bisect: the wall time for both functions is very low but the scipy function is considerably better: End of explanation """
hpi-epic/pricewars-merchant
docs/Building a merchant using PricewarsMerchant.ipynb
mit
import sys

# Make the package in the repository root importable from this notebook
sys.path.append('../')
""" Explanation: PricewarsMerchant A fast way to build your own merchant is to subclass the PricewarsMerchant and build your own functionality on top of it. PricewarsMerchant is an abstract base class that implements most tasks of a merchant. It also provides a server component that processes incoming requests from the marketplace. Let's build our own merchant! The following step is specific for this notebook. It is not necessary if your merchant is in the repository root. End of explanation """
from pricewars_merchant import PricewarsMerchant


class ExampleMerchant(PricewarsMerchant):
    """Minimal subclass: inherits everything, overrides nothing (yet)."""
    pass


port = 5010
token = None  # None -> the merchant registers itself and obtains a token
marketplace_url = 'http://marketplace:8080'
producer_url = 'http://producer:3050'
name = 'Example Merchant'
try:
    merchant = ExampleMerchant(port, token, marketplace_url, producer_url, name)
except TypeError as e:
    # PricewarsMerchant is abstract, so instantiation fails until the
    # pricing method is implemented; show the explanatory error
    print(e)
""" Explanation: Our merchant inherits from the PricewarsMerchant. It requires the arguments: port, token, marketplace_url, producer_url and name. Token can be None. If that is the case, the merchant will automatically register after it is started and get a new token. End of explanation """


class ExampleMerchant(PricewarsMerchant):
    """Merchant with the simplest possible pricing strategy."""

    def calculate_price(self, offer_id, market_situation):
        # Ignore the market situation and always charge the same price
        return 25


merchant = ExampleMerchant(port, token, marketplace_url, producer_url, name)
""" Explanation: There is only one more thing to do, to have a fully working merchant. Override the pricing method called calculate_price to set your own prices. This method is called with the current market situation and an offer id. The method should return the new price for this offer id. Let's keep it simple for now and always return the same price. End of explanation """
merchant.settings
""" Explanation: You can start the merchant with merchant.run() Changing merchant settings It is possible to change the merchant's settings in the management UI. This can be done for all settings that are in the settings object. End of explanation """
merchant.settings['order threshold'] = 3
merchant.settings['restock limit'] = 30
merchant.settings
""" Explanation: You can override these settings. For example change order threshold and restock limit to change the order behavior. End of explanation """


class ExampleMerchant(PricewarsMerchant):
    """Merchant whose selling price is editable in the management UI."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Entries placed in self.settings show up (and can be changed)
        # in the management UI
        self.settings['selling price'] = 25

    def calculate_price(self, offer_id, market_situation):
        return self.settings['selling price']
""" Explanation: In the same way, you can add new settings and make them editable in the management UI. Example for a fixed but configurable selling price: End of explanation """


class ExampleMerchant(PricewarsMerchant):
    """Merchant that reacts to its own sale events."""

    def calculate_price(self, offer_id, market_situation):
        return 25

    def sold_offer(self, offer):
        # Called by the marketplace whenever one of our offers sells
        print(offer)
        # do fancy stuff
""" Explanation: If you run this merchant, it is possible to change its selling price in the management UI: React on sale event The merchant gets a message from the marketplace, whenever it sells a product. The PricewarsMerchant only prints a message when this happens. But you can override the sold_offer method to do something else. E.g. change the price or order new products. End of explanation """
probml/pyprobml
notebooks/book1/15/entailment_attention_mlp_torch.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import math from IPython import display try: import torch except ModuleNotFoundError: %pip install -qq torch import torch from torch import nn from torch.nn import functional as F from torch.utils import data import collections import re import random import os import requests import zipfile import tarfile import hashlib import time np.random.seed(seed=1) torch.manual_seed(1) !mkdir figures # for saving plots """ Explanation: Please find jax implementation of this notebook here: https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1/15/entailment_attention_mlp_jax.ipynb <a href="https://colab.research.google.com/github/Nirzu97/pyprobml/blob/entailment_attention_mlp_torch/notebooks/entailment_attention_mlp_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Textual entailment classifier using an MLP plus attention In textual entailment, the input is 2 sentences (premise and hypothesis), and the output is a label, specifying if P entails H, P contradicts H, or neither. (This is also called "natural language inference".) We use attention to align hypothesis to premise and vice versa, then compare the aligned words to estimate similarity between the sentences, and pass the weighted similarities to an MLP. Based on sec 15.5 of http://d2l.ai/chapter_natural-language-processing-applications/natural-language-inference-attention.html End of explanation """ # Required functions for downloading data def download(name, cache_dir=os.path.join("..", "data")): """Download a file inserted into DATA_HUB, return the local filename.""" assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}." 
url, sha1_hash = DATA_HUB[name] os.makedirs(cache_dir, exist_ok=True) fname = os.path.join(cache_dir, url.split("/")[-1]) if os.path.exists(fname): sha1 = hashlib.sha1() with open(fname, "rb") as f: while True: data = f.read(1048576) if not data: break sha1.update(data) if sha1.hexdigest() == sha1_hash: return fname # Hit cache print(f"Downloading {fname} from {url}...") r = requests.get(url, stream=True, verify=True) with open(fname, "wb") as f: f.write(r.content) return fname def download_extract(name, folder=None): """Download and extract a zip/tar file.""" fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == ".zip": fp = zipfile.ZipFile(fname, "r") elif ext in (".tar", ".gz"): fp = tarfile.open(fname, "r") else: assert False, "Only zip/tar files can be extracted." fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir DATA_HUB = dict() DATA_HUB["SNLI"] = ("https://nlp.stanford.edu/projects/snli/snli_1.0.zip", "9fcde07509c7e87ec61c640c1b2753d9041758e4") data_dir = download_extract("SNLI") def read_snli(data_dir, is_train): """Read the SNLI dataset into premises, hypotheses, and labels.""" def extract_text(s): # Remove information that will not be used by us s = re.sub("\\(", "", s) s = re.sub("\\)", "", s) # Substitute two or more consecutive whitespace with space s = re.sub("\\s{2,}", " ", s) return s.strip() label_set = {"entailment": 0, "contradiction": 1, "neutral": 2} file_name = os.path.join(data_dir, "snli_1.0_train.txt" if is_train else "snli_1.0_test.txt") with open(file_name, "r") as f: rows = [row.split("\t") for row in f.readlines()[1:]] premises = [extract_text(row[1]) for row in rows if row[0] in label_set] hypotheses = [extract_text(row[2]) for row in rows if row[0] in label_set] labels = [label_set[row[0]] for row in rows if row[0] in label_set] return premises, hypotheses, labels """ Explanation: Data We use SNLI (Stanford Natural Language Inference) dataset 
described in sec 15.4 of http://d2l.ai/chapter_natural-language-processing-applications/natural-language-inference-and-dataset.html. End of explanation """ train_data = read_snli(data_dir, is_train=True) for x0, x1, y in zip(train_data[0][:3], train_data[1][:3], train_data[2][:3]): print("premise:", x0) print("hypothesis:", x1) print("label:", y) test_data = read_snli(data_dir, is_train=False) for data in [train_data, test_data]: print([[row for row in data[2]].count(i) for i in range(3)]) def tokenize(lines, token="word"): """Split text lines into word or character tokens.""" if token == "word": return [line.split() for line in lines] elif token == "char": return [list(line) for line in lines] else: print("ERROR: unknown token type: " + token) class Vocab: """Vocabulary for text.""" def __init__(self, tokens=None, min_freq=0, reserved_tokens=None): if tokens is None: tokens = [] if reserved_tokens is None: reserved_tokens = [] # Sort according to frequencies counter = count_corpus(tokens) self.token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True) # The index for the unknown token is 0 self.unk, uniq_tokens = 0, ["<unk>"] + reserved_tokens uniq_tokens += [token for token, freq in self.token_freqs if freq >= min_freq and token not in uniq_tokens] self.idx_to_token, self.token_to_idx = [], dict() for token in uniq_tokens: self.idx_to_token.append(token) self.token_to_idx[token] = len(self.idx_to_token) - 1 def __len__(self): return len(self.idx_to_token) def __getitem__(self, tokens): if not isinstance(tokens, (list, tuple)): return self.token_to_idx.get(tokens, self.unk) return [self.__getitem__(token) for token in tokens] def to_tokens(self, indices): if not isinstance(indices, (list, tuple)): return self.idx_to_token[indices] return [self.idx_to_token[index] for index in indices] def count_corpus(tokens): """Count token frequencies.""" # Here `tokens` is a 1D list or 2D list if len(tokens) == 0 or isinstance(tokens[0], list): # Flatten a list of 
token lists into a list of tokens tokens = [token for line in tokens for token in line] return collections.Counter(tokens) class SNLIDataset(torch.utils.data.Dataset): """A customized dataset to load the SNLI dataset.""" def __init__(self, dataset, num_steps, vocab=None): self.num_steps = num_steps all_premise_tokens = tokenize(dataset[0]) all_hypothesis_tokens = tokenize(dataset[1]) if vocab is None: self.vocab = Vocab(all_premise_tokens + all_hypothesis_tokens, min_freq=5, reserved_tokens=["<pad>"]) else: self.vocab = vocab self.premises = self._pad(all_premise_tokens) self.hypotheses = self._pad(all_hypothesis_tokens) self.labels = torch.tensor(dataset[2]) print("read " + str(len(self.premises)) + " examples") def _pad(self, lines): return torch.tensor([truncate_pad(self.vocab[line], self.num_steps, self.vocab["<pad>"]) for line in lines]) def __getitem__(self, idx): return (self.premises[idx], self.hypotheses[idx]), self.labels[idx] def __len__(self): return len(self.premises) def load_data_snli(batch_size, num_steps=50): """Download the SNLI dataset and return data iterators and vocabulary.""" num_workers = 4 data_dir = download_extract("SNLI") train_data = read_snli(data_dir, True) test_data = read_snli(data_dir, False) train_set = SNLIDataset(train_data, num_steps) test_set = SNLIDataset(test_data, num_steps, train_set.vocab) train_iter = torch.utils.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) test_iter = torch.utils.data.DataLoader(test_set, batch_size, shuffle=False, num_workers=num_workers) return train_iter, test_iter, train_set.vocab def truncate_pad(line, num_steps, padding_token): """Truncate or pad sequences.""" if len(line) > num_steps: return line[:num_steps] # Truncate return line + [padding_token] * (num_steps - len(line)) train_iter, test_iter, vocab = load_data_snli(128, 50) len(vocab) for X, Y in train_iter: print(X[0].shape) print(X[1].shape) print(Y.shape) break """ Explanation: Show first 3 training 
examples and their labels (“0”, “1”, and “2” correspond to “entailment”, “contradiction”, and “neutral”, respectively ). End of explanation """ def mlp(num_inputs, num_hiddens, flatten): net = [] net.append(nn.Dropout(0.2)) net.append(nn.Linear(num_inputs, num_hiddens)) net.append(nn.ReLU()) if flatten: net.append(nn.Flatten(start_dim=1)) net.append(nn.Dropout(0.2)) net.append(nn.Linear(num_hiddens, num_hiddens)) net.append(nn.ReLU()) if flatten: net.append(nn.Flatten(start_dim=1)) return nn.Sequential(*net) """ Explanation: Model The model is described in the book. Below we just give the code. Attending We define attention weights $$ e_{ij} = f(a_i)^T f(b_j) $$ where $a_i \in R^E$ is the embedding of the $i$'th token from the premise, $b_j \in R^E$ is the embedding of the $j$'th token from the hypothesis, and $f: R^E \rightarrow R^H$ is an MLP that maps from the embedding space to another hidden space. End of explanation """ class Attend(nn.Module): def __init__(self, num_inputs, num_hiddens, **kwargs): super(Attend, self).__init__(**kwargs) self.f = mlp(num_inputs, num_hiddens, flatten=False) def forward(self, A, B): # Shape of `A`/`B`: (`batch_size`, no. of words in sequence A/B, # `embed_size`) # Shape of `f_A`/`f_B`: (`batch_size`, no. of words in sequence A/B, # `num_hiddens`) f_A = self.f(A) f_B = self.f(B) # Shape of `e`: (`batch_size`, no. of words in sequence A, # no. of words in sequence B) e = torch.bmm(f_A, f_B.permute(0, 2, 1)) # Shape of `beta`: (`batch_size`, no. of words in sequence A, # `embed_size`), where sequence B is softly aligned with each word # (axis 1 of `beta`) in sequence A beta = torch.bmm(F.softmax(e, dim=-1), B) # Shape of `alpha`: (`batch_size`, no. 
of words in sequence B, # `embed_size`), where sequence A is softly aligned with each word # (axis 1 of `alpha`) in sequence B alpha = torch.bmm(F.softmax(e.permute(0, 2, 1), dim=-1), A) return beta, alpha """ Explanation: The $i$'th word in A computes a weighted average of "relevant" words in B, and vice versa, as follows: $$ \begin{align} \beta_i &= \sum_{j=1}^n \frac{\exp(e_{ij})}{\sum_{k=1}^n \exp(e_{ik})} b_j \ \alpha_j &= \sum_{i=1}^m \frac{\exp(e_{ij})}{\sum_{k=1}^m \exp(e_{kj})} a_i \end{align} $$ End of explanation """ class Compare(nn.Module): def __init__(self, num_inputs, num_hiddens, **kwargs): super(Compare, self).__init__(**kwargs) self.g = mlp(num_inputs, num_hiddens, flatten=False) def forward(self, A, B, beta, alpha): V_A = self.g(torch.cat([A, beta], dim=2)) V_B = self.g(torch.cat([B, alpha], dim=2)) return V_A, V_B """ Explanation: Comparing We concatenate word $i$ in A, $a_i$, with its "soft counterpart" in B, $\beta_i$, and vice versa, and then pass this through another MLP $g$ to get a "comparison vector" for each input location. $$ \begin{align} v_{A,i} &= g([a_i, \beta_i]), \; i=1,\ldots, m \ v_{B,j} &= g([b_j, \alpha_j]), \; j=1,\ldots, n \end{align} $$ End of explanation """ class Aggregate(nn.Module): def __init__(self, num_inputs, num_hiddens, num_outputs, **kwargs): super(Aggregate, self).__init__(**kwargs) self.h = mlp(num_inputs, num_hiddens, flatten=True) self.linear = nn.Linear(num_hiddens, num_outputs) def forward(self, V_A, V_B): # Sum up both sets of comparison vectors V_A = V_A.sum(dim=1) V_B = V_B.sum(dim=1) # Feed the concatenation of both summarization results into an MLP Y_hat = self.linear(self.h(torch.cat([V_A, V_B], dim=1))) return Y_hat """ Explanation: Aggregation We sum-pool the "comparison vectors" for each input sentence, and then pass the pair of poolings to yet another MLP $h$ to generate the final classification. 
$$ \begin{align} v_A &= \sum_{i=1}^m v_{A,i} \ v_B &= \sum_{j=1}^n v_{B,j} \ \hat{y} &= h([v_A, v_B]) \end{align} $$ End of explanation """ class DecomposableAttention(nn.Module): def __init__( self, vocab, embed_size, num_hiddens, num_inputs_attend=100, num_inputs_compare=200, num_inputs_agg=400, **kwargs ): super(DecomposableAttention, self).__init__(**kwargs) self.embedding = nn.Embedding(len(vocab), embed_size) self.attend = Attend(num_inputs_attend, num_hiddens) self.compare = Compare(num_inputs_compare, num_hiddens) # There are 3 possible outputs: entailment, contradiction, and neutral self.aggregate = Aggregate(num_inputs_agg, num_hiddens, num_outputs=3) def forward(self, X): premises, hypotheses = X A = self.embedding(premises) B = self.embedding(hypotheses) beta, alpha = self.attend(A, B) V_A, V_B = self.compare(A, B, beta, alpha) Y_hat = self.aggregate(V_A, V_B) return Y_hat class TokenEmbedding: """Token Embedding.""" def __init__(self, embedding_name): self.idx_to_token, self.idx_to_vec = self._load_embedding(embedding_name) self.unknown_idx = 0 self.token_to_idx = {token: idx for idx, token in enumerate(self.idx_to_token)} def _load_embedding(self, embedding_name): idx_to_token, idx_to_vec = ["<unk>"], [] data_dir = download_extract(embedding_name) # GloVe website: https://nlp.stanford.edu/projects/glove/ # fastText website: https://fasttext.cc/ with open(os.path.join(data_dir, "vec.txt"), "r") as f: for line in f: elems = line.rstrip().split(" ") token, elems = elems[0], [float(elem) for elem in elems[1:]] # Skip header information, such as the top row in fastText if len(elems) > 1: idx_to_token.append(token) idx_to_vec.append(elems) idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec return idx_to_token, torch.tensor(idx_to_vec) def __getitem__(self, tokens): indices = [self.token_to_idx.get(token, self.unknown_idx) for token in tokens] vecs = self.idx_to_vec[torch.tensor(indices)] return vecs def __len__(self): return len(self.idx_to_token) def 
try_all_gpus(): """Return all available GPUs, or [cpu(),] if no GPU exists.""" devices = [torch.device(f"cuda:{i}") for i in range(torch.cuda.device_count())] return devices if devices else [torch.device("cpu")] DATA_URL = "http://d2l-data.s3-accelerate.amazonaws.com/glove.6B.100d.zip" DATA_HUB["glove.6b.100d"] = (DATA_URL, "cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a") embed_size, num_hiddens, devices = 100, 200, try_all_gpus() net = DecomposableAttention(vocab, embed_size, num_hiddens) # get pre-trained GloVE embeddings of size 100 glove_embedding = TokenEmbedding("glove.6b.100d") embeds = glove_embedding[vocab.idx_to_token] net.embedding.weight.data.copy_(embeds); """ Explanation: Putting it altogether We use a pre-trained embedding of size E=100. The $f$ (attend) function maps from $E=100$ to $H=200$ hiddens. The $g$ (compare) function maps $2E=200$ to $H=200$. The $h$ (aggregate) function maps $2H=400$ to 3 outputs. End of explanation """ class Animator: """For plotting data in animation.""" def __init__( self, xlabel=None, ylabel=None, legend=None, xlim=None, ylim=None, xscale="linear", yscale="linear", fmts=("-", "m--", "g-.", "r:"), nrows=1, ncols=1, figsize=(3.5, 2.5), ): # Incrementally plot multiple lines if legend is None: legend = [] display.set_matplotlib_formats("svg") self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize) if nrows * ncols == 1: self.axes = [ self.axes, ] # Use a lambda function to capture arguments self.config_axes = lambda: set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend) self.X, self.Y, self.fmts = None, None, fmts def add(self, x, y): # Add multiple data points into the figure if not hasattr(y, "__len__"): y = [y] n = len(y) if not hasattr(x, "__len__"): x = [x] * n if not self.X: self.X = [[] for _ in range(n)] if not self.Y: self.Y = [[] for _ in range(n)] for i, (a, b) in enumerate(zip(x, y)): if a is not None and b is not None: self.X[i].append(a) self.Y[i].append(b) self.axes[0].cla() for x, 
y, fmt in zip(self.X, self.Y, self.fmts): self.axes[0].plot(x, y, fmt) self.config_axes() display.display(self.fig) display.clear_output(wait=True) class Timer: """Record multiple running times.""" def __init__(self): self.times = [] self.start() def start(self): """Start the timer.""" self.tik = time.time() def stop(self): """Stop the timer and record the time in a list.""" self.times.append(time.time() - self.tik) return self.times[-1] def avg(self): """Return the average time.""" return sum(self.times) / len(self.times) def sum(self): """Return the sum of time.""" return sum(self.times) def cumsum(self): """Return the accumulated time.""" return np.array(self.times).cumsum().tolist() class Accumulator: """For accumulating sums over `n` variables.""" def __init__(self, n): self.data = [0.0] * n def add(self, *args): self.data = [a + float(b) for a, b in zip(self.data, args)] def reset(self): self.data = [0.0] * len(self.data) def __getitem__(self, idx): return self.data[idx] def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend): """Set the axes for matplotlib.""" axes.set_xlabel(xlabel) axes.set_ylabel(ylabel) axes.set_xscale(xscale) axes.set_yscale(yscale) axes.set_xlim(xlim) axes.set_ylim(ylim) if legend: axes.legend(legend) axes.grid() def accuracy(y_hat, y): """Compute the number of correct predictions.""" if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = torch.argmax(y_hat, axis=1) cmp_ = y_hat.type(y.dtype) == y return float(cmp_.type(y.dtype).sum()) def evaluate_accuracy_gpu(net, data_iter, device=None): """Compute the accuracy for a model on a dataset using a GPU.""" if isinstance(net, torch.nn.Module): net.eval() # Set the model to evaluation mode if not device: device = next(iter(net.parameters())).device # No. of correct predictions, no. 
of predictions metric = Accumulator(2) for X, y in data_iter: if isinstance(X, list): # Required for BERT Fine-tuning X = [x.to(device) for x in X] else: X = X.to(device) y = y.to(device) metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_batch(net, X, y, loss, trainer, devices): if isinstance(X, list): # Required for BERT Fine-tuning X = [x.to(devices[0]) for x in X] else: X = X.to(devices[0]) y = y.to(devices[0]) net.train() trainer.zero_grad() pred = net(X) l = loss(pred, y) l.sum().backward() trainer.step() train_loss_sum = l.sum() train_acc_sum = accuracy(pred, y) return train_loss_sum, train_acc_sum def train(net, train_iter, test_iter, loss, trainer, num_epochs, devices=try_all_gpus()): timer, num_batches = Timer(), len(train_iter) animator = Animator( xlabel="epoch", xlim=[1, num_epochs], ylim=[0, 1], legend=["train loss", "train acc", "test acc"] ) net = nn.DataParallel(net, device_ids=devices).to(devices[0]) for epoch in range(num_epochs): # Store training_loss, training_accuracy, num_examples, num_features metric = Accumulator(4) for i, (features, labels) in enumerate(train_iter): timer.start() l, acc = train_batch(net, features, labels, loss, trainer, devices) metric.add(l, acc, labels.shape[0], labels.numel()) timer.stop() if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (metric[0] / metric[2], metric[1] / metric[3], None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc)) print(f"loss {metric[0] / metric[2]:.3f}, train acc " f"{metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}") print(f"{metric[2] * num_epochs / timer.sum():.1f} examples/sec on " f"{str(devices)}") lr, num_epochs = 0.001, 4 trainer = torch.optim.Adam(net.parameters(), lr=lr) loss = nn.CrossEntropyLoss(reduction="none") train(net, train_iter, test_iter, loss, trainer, num_epochs, devices) """ Explanation: Training End of explanation """ def 
try_gpu(i=0): """Return gpu(i) if exists, otherwise return cpu().""" if torch.cuda.device_count() >= i + 1: return torch.device(f"cuda:{i}") return torch.device("cpu") def predict_snli(net, vocab, premise, hypothesis): net.eval() premise = torch.tensor(vocab[premise], device=try_gpu()) hypothesis = torch.tensor(vocab[hypothesis], device=try_gpu()) label = torch.argmax(net([premise.reshape((1, -1)), hypothesis.reshape((1, -1))]), dim=1) return "entailment" if label == 0 else "contradiction" if label == 1 else "neutral" predict_snli(net, vocab, ["he", "is", "good", "."], ["he", "is", "bad", "."]) predict_snli(net, vocab, ["he", "is", "very", "naughty", "."], ["he", "is", "bad", "."]) predict_snli(net, vocab, ["he", "is", "awful", "."], ["he", "is", "bad", "."]) predict_snli(net, vocab, ["he", "is", "handsome", "."], ["he", "is", "bad", "."]) """ Explanation: Testing End of explanation """ predict_snli( net, vocab, ["a", "person", "on", "a", "horse", "jumps", "over", "a", "log" "."], ["a", "person", "is", "outdoors", "on", "a", "horse", "."], ) predict_snli( net, vocab, ["a", "person", "on", "a", "horse", "jumps", "over", "a", "log" "."], ["a", "person", "is", "at", "a", "diner", "ordering", "an", "omelette", "."], ) predict_snli( net, vocab, ["a", "person", "on", "a", "horse", "jumps", "over", "a", "log" "."], ["a", "person", "is", "training", "a", "horse", "for", "a", "competition", "."], ) """ Explanation: Examples from training set End of explanation """
ireapps/cfj-2017
completed/12. Web scraping (Part 2).ipynb
mit
from bs4 import BeautifulSoup
import csv
""" Explanation: Let's scrape a practice table The latest Mountain Goats album is called Goths. (It's good!) I made a simple HTML table with the track listing -- let's scrape it into a CSV. Import the modules we'll need End of explanation """
# in a with block, open the HTML file
with open('mountain-goats.html', 'r') as html_file:
    # .read() in the contents of a file -- it'll be a string
    html_code = html_file.read()
    # print the string to see what's there
    print(html_code)
""" Explanation: Read in the file, see what we're working with We'll use the read() method to get the contents of the file. End of explanation """
with open('mountain-goats.html', 'r') as html_file:
    html_code = html_file.read()
    # use the type() function to see what kind of object `html_code` is
    print(type(html_code))
    # feed the file's contents (the string of HTML) to BeautifulSoup
    # will complain if you don't specify the parser
    soup = BeautifulSoup(html_code, 'html.parser')
    # use the type() function to see what kind of object `soup` is
    print(type(soup))
""" Explanation: Parse the table with BeautifulSoup Right now, Python isn't interpreting our table as data -- it's just a string. We need to use BeautifulSoup to parse that string into data objects that Python can understand. Once the string is parsed, we'll be working with a "tree" of data that we can navigate. End of explanation """
with open('mountain-goats.html', 'r') as html_file:
    html_code = html_file.read()
    soup = BeautifulSoup(html_code, 'html.parser')

    # by position on the page
    # find_all returns a list of matching elements, and we want the second ([1]) one
    # song_table = soup.find_all('table')[1]

    # by class name
    # => with `find`, you can pass a dictionary of element attributes to match on
    # song_table = soup.find('table', {'class': 'song-table'})

    # by ID
    # song_table = soup.find('table', {'id': 'my-cool-table'})

    # by style
    song_table = soup.find('table', {'style': 'width: 95%;'})

    print(song_table)
""" Explanation: Decide how to target the table BeautifulSoup has several methods for targeting elements -- by position on the page, by attribute, etc. Right now we just want to find the correct table. End of explanation """
with open('mountain-goats.html', 'r') as html_file:
    html_code = html_file.read()
    soup = BeautifulSoup(html_code, 'html.parser')
    song_table = soup.find('table', {'style': 'width: 95%;'})

    # find the rows in the table
    # slice to skip the header row
    song_rows = song_table.find_all('tr')[1:]

    # loop over the rows
    for row in song_rows:
        # get the table cells in the row
        song = row.find_all('td')
        # assign them to variables
        track, title, duration, artist, album = song
        # use the .string attribute to get the text in the cell
        print(track.string, title.string)
""" Explanation: Looping over the table rows Let's print a list of track numbers and song titles. Look at the structure of the table -- a table has rows represented by the tag tr, and within each row there are cells represented by td tags. The find_all() method returns a list. And we know how to iterate over lists: with a for loop. Let's do that. End of explanation """
# open the output CSV with newline='' -- the csv module does its own
# newline translation; without it, files written on Windows get a blank
# row after every record (per the csv module documentation)
with open('mountain-goats.html', 'r') as html_file, open('mountain-goats.csv', 'w', newline='') as outfile:
    html_code = html_file.read()
    soup = BeautifulSoup(html_code, 'html.parser')
    song_table = soup.find('table', {'style': 'width: 95%;'})
    song_rows = song_table.find_all('tr')[1:]

    # set up a writer object
    writer = csv.DictWriter(outfile, fieldnames=['track', 'title', 'duration', 'artist', 'album'])
    writer.writeheader()

    for row in song_rows:
        # get the table cells in the row
        song = row.find_all('td')
        # assign them to variables
        track, title, duration, artist, album = song
        # write out the dictionary to file
        writer.writerow({
            'track': track.string,
            'title': title.string,
            'duration': duration.string,
            'artist': artist.string,
            'album': album.string
        })
""" Explanation: Write data to file Let's put it all together and open a file to write the data to. End of explanation """
GoogleCloudPlatform/cloudml-samples
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
apache-2.0
# Replace <PROJECT_ID> and <BUCKET_ID> with proper Project and Bucket ID's: %env PROJECT_ID <PROJECT_ID> %env BUCKET_ID <BUCKET_ID> %env JOB_DIR gs://<BUCKET_ID>/xgboost_job_dir %env REGION us-central1 %env TRAINER_PACKAGE_PATH ./auto_mpg_hp_tuning %env MAIN_TRAINER_MODULE auto_mpg_hp_tuning.train %env RUNTIME_VERSION 1.9 %env PYTHON_VERSION 3.5 %env HPTUNING_CONFIG hptuning_config.yaml ! mkdir auto_mpg_hp_tuning """ Explanation: XGBoost HP Tuning on AI Platform This notebook trains a model on Ai Platform using Hyperparameter Tuning to predict a car's Miles Per Gallon. It uses Auto MPG Data Set from UCI Machine Learning Repository. Citation: Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. How to train your model on AI Platform with HP tuning. Using HP Tuning for training can be done in a few steps: 1. Create your python model file 1. Add argument parsing for the hyperparameter values. (These values are chosen for you in this notebook) 1. Add code to download your data from Google Cloud Storage so that AI Platform can use it 1. Add code to track the performance of your hyperparameter values. 1. Add code to export and save the model to Google Cloud Storage once AI Platform finishes training the model 1. Prepare a package 1. Submit the training job Prerequisites Before you jump in, let’s cover some of the different tools you’ll be using to get HP tuning up and running on AI Platform. Google Cloud Platform lets you build and host applications and websites, store data, and analyze data on Google's scalable infrastructure. AI Platform is a managed service that enables you to easily build machine learning models that work on any type of data, of any size. Google Cloud Storage (GCS) is a unified object storage for developers and enterprises, from live data serving to data analytics/ML to data archiving. 
Cloud SDK is a command line tool which allows you to interact with Google Cloud products. In order to run this notebook, make sure that Cloud SDK is installed in the same environment as your Jupyter kernel. Overview of Hyperparameter Tuning - Hyperparameter tuning takes advantage of the processing infrastructure of Google Cloud Platform to test different hyperparameter configurations when training your model. Part 0: Setup Create a project on GCP Create a Google Cloud Storage Bucket Enable AI Platform Training and Prediction and Compute Engine APIs Install Cloud SDK Install XGBoost [Optional: used if running locally] Install pandas [Optional: used if running locally] Install cloudml-hypertune [Optional: used if running locally] These variables will be needed for the following steps. * TRAINER_PACKAGE_PATH &lt;./auto_mpg_hp_tuning&gt; - A packaged training application that will be staged in a Google Cloud Storage location. The model file created below is placed inside this package path. * MAIN_TRAINER_MODULE &lt;auto_mpg_hp_tuning.train&gt; - Tells AI Platform which file to execute. This is formatted as follows <folder_name.python_file_name> * JOB_DIR &lt;gs://$BUCKET_ID/xgboost_learn_job_dir&gt; - The path to a Google Cloud Storage location to use for job output. * RUNTIME_VERSION &lt;1.9&gt; - The version of AI Platform to use for the job. If you don't specify a runtime version, the training service uses the default AI Platform runtime version 1.0. See the list of runtime versions for more information. * PYTHON_VERSION &lt;3.5&gt; - The Python version to use for the job. Python 3.5 is available with runtime version 1.4 or greater. If you don't specify a Python version, the training service uses Python 2.7. * HPTUNING_CONFIG &lt;hptuning_config.yaml&gt; - Path to the job configuration file. Replace: * PROJECT_ID &lt;YOUR_PROJECT_ID&gt; - with your project's id. Use the PROJECT_ID that matches your Google Cloud Platform project. 
* BUCKET_ID &lt;YOUR_BUCKET_ID&gt; - with the bucket id you created above. * JOB_DIR &lt;gs://YOUR_BUCKET_ID/xgboost_job_dir&gt; - with the bucket id you created above. * REGION &lt;REGION&gt; - select a region from here or use the default 'us-central1'. The region is where the model will be deployed. End of explanation """ %%writefile ./auto_mpg_hp_tuning/train.py import argparse import datetime import os import pandas as pd import subprocess import pickle from google.cloud import storage import hypertune import xgboost as xgb from random import shuffle def split_dataframe(dataframe, rate=0.8): indices = dataframe.index.values.tolist() length = len(dataframe) shuffle(indices) train_size = int(length * rate) train_indices = indices[:train_size] test_indices = indices[train_size:] return dataframe.iloc[train_indices], dataframe.iloc[test_indices] """ Explanation: The data The Auto MPG Data Set that this sample uses for training is provided by the UC Irvine Machine Learning Repository. We have hosted the data on a public GCS bucket gs://cloud-samples-data/ml-engine/auto_mpg/. The data has been pre-processed to remove rows with incomplete data so as not to create additional steps for this notebook. Training file is auto-mpg.data Note: Your typical development process with your own data would require you to upload your data to GCS so that AI Platform can access that data. However, in this case, we have put the data on GCS to avoid the steps of having you download the data from UC Irvine and then upload the data to GCS. Citation: Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. Disclaimer This dataset is provided by a third party. Google provides no representation, warranty, or other guarantees about the validity or any other aspects of this dataset. 
Part 1: Create your python model file First, we'll create the python model file (provided below) that we'll upload to AI Platform. This is similar to your normal process for creating a XGBoost model. However, there are a few key differences: 1. Downloading the data from GCS at the start of your file, so that AI Platform can access the data. 1. Exporting/saving the model to GCS at the end of your file, so that you can use it for predictions. 1. Define a command-line argument in your main training module for each tuned hyperparameter. 1. Use the value passed in those arguments to set the corresponding hyperparameter in your application's XGBoost code. 1. Use cloudml-hypertune to track your training jobs metrics. The code in this file first handles the hyperparameters passed to the file from AI Platform. Then it loads the data into a pandas DataFrame that can be used by XGBoost. Then the model is fit against the training data and the metrics for that data are shared with AI Platform. Lastly, Python's built in pickle library is used to save the model to a file that can be uploaded to AI Platform's prediction service. Note: In normal practice you would want to test your model locally on a small dataset to ensure that it works, before using it with your larger dataset on AI Platform. This avoids wasted time and costs. Setup the imports and helper functions End of explanation """ %%writefile -a ./auto_mpg_hp_tuning/train.py parser = argparse.ArgumentParser() parser.add_argument( '--job-dir', # handled automatically by AI Platform help='GCS location to write checkpoints and export models', required=True ) parser.add_argument( '--max_depth', # Specified in the config file help='Maximum depth of the XGBoost tree. default: 3', default=3, type=int ) parser.add_argument( '--n_estimators', # Specified in the config file help='Number of estimators to be created. 
default: 100',
    default=100,
    type=int
)
parser.add_argument(
    '--booster',  # Specified in the config file
    help='which booster to use: gbtree, gblinear or dart. default: gbtree',
    default='gbtree',
    type=str
)

args = parser.parse_args()
"""
Explanation: Load the hyperparameter values that are passed to the model during training. In this tutorial, the XGBoost regressor is used, because it has several parameters that can be used to help demonstrate how to choose HP tuning values. (The range of values are set below in the configuration file for the HP tuning values.)
End of explanation
"""

%%writefile -a ./auto_mpg_hp_tuning/train.py

# Public bucket holding the auto mpg data
bucket = storage.Client().bucket('cloud-samples-data')

# Path to the data inside the public bucket
blob = bucket.blob('ml-engine/auto_mpg/auto-mpg.data')
# Download the data
blob.download_to_filename('auto-mpg.data')

# ---------------------------------------
# This is where your model code would go. Below is an example model using the auto mpg dataset.
# ---------------------------------------

# Define the format of your input data including unused columns
# (These are the columns from the auto-mpg data files)
COLUMNS = [
    'mpg',
    'cylinders',
    'displacement',
    'horsepower',
    'weight',
    'acceleration',
    'model-year',
    'origin',
    'car-name'
]

FEATURES = [
    'cylinders',
    'displacement',
    'horsepower',
    'weight',
    'acceleration',
    'model-year',
    'origin'
]

TARGET = 'mpg'

# Load the training auto mpg dataset
with open('./auto-mpg.data', 'r') as train_data:
    raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS, delim_whitespace=True)
raw_training_data = raw_training_data[FEATURES + [TARGET]]

train_df, test_df = split_dataframe(raw_training_data, 0.8)
"""
Explanation: Add code to download the data from GCS
In this case, using the publicly hosted data, AI Platform will then be able to use the data when training your model. 
End of explanation
"""

%%writefile -a ./auto_mpg_hp_tuning/train.py

# Create the regressor, here we will use an XGBoost regressor to demonstrate the use of HP Tuning.
# Here is where we set the variables used during HP Tuning from
# the parameters passed into the python script
regressor = xgb.XGBRegressor(max_depth=args.max_depth,
                             n_estimators=args.n_estimators,
                             booster=args.booster
                             )

# Transform the features and fit them to the regressor
regressor.fit(train_df[FEATURES], train_df[TARGET])
"""
Explanation: Use the Hyperparameters
Use the Hyperparameter values passed in those arguments to set the corresponding hyperparameters in your application's XGBoost code.
End of explanation
"""

%%writefile -a ./auto_mpg_hp_tuning/train.py

# Score the model on the held-out test split.
# NOTE(review): for sklearn-style regressors such as XGBRegressor, .score()
# returns R^2 (coefficient of determination), not "mean accuracy" -- so the
# tuning objective reported below is R^2.
score = regressor.score(test_df[FEATURES], test_df[TARGET])

# The default name of the metric is training/hptuning/metric.
# We recommend that you assign a custom name. The only functional difference is that
# if you use a custom name, you must set the hyperparameterMetricTag value in the
# HyperparameterSpec object in your job request to match your chosen name.
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#HyperparameterSpec
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
    hyperparameter_metric_tag='my_metric_tag',
    metric_value=score,
    global_step=1000)
"""
Explanation: Report the mean accuracy as hyperparameter tuning objective metric. 
End of explanation """ %%writefile -a ./auto_mpg_hp_tuning/train.py # Export the model to a file model_filename = 'model.pkl' with open(model_filename, "wb") as f: pickle.dump(regressor, f) # Example: job_dir = 'gs://BUCKET_ID/xgboost_job_dir/1' job_dir = args.job_dir.replace('gs://', '') # Remove the 'gs://' # Get the Bucket Id bucket_id = job_dir.split('/')[0] # Get the path bucket_path = job_dir[len('{}/'.format(bucket_id)):] # Example: 'xgboost_job_dir/1' # Upload the model to GCS bucket = storage.Client().bucket(bucket_id) blob = bucket.blob('{}/{}'.format( bucket_path, model_filename)) blob.upload_from_filename(model_filename) """ Explanation: Export and save the model to GCS End of explanation """ %%writefile ./auto_mpg_hp_tuning/__init__.py #!/usr/bin/env python # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Note that __init__.py can be an empty file. """ Explanation: Part 2: Create Trainer Package with Hyperparameter Tuning Next we need to build the Trainer Package, which holds all your code and dependencies need to train your model on AI Platform. First, we create an empty __init__.py file. End of explanation """ %%writefile ./hptuning_config.yaml #!/usr/bin/env python # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # hyperparam.yaml trainingInput: hyperparameters: goal: MAXIMIZE maxTrials: 30 maxParallelTrials: 5 hyperparameterMetricTag: my_metric_tag enableTrialEarlyStopping: TRUE params: - parameterName: max_depth type: INTEGER minValue: 3 maxValue: 8 - parameterName: n_estimators type: INTEGER minValue: 50 maxValue: 200 - parameterName: booster type: CATEGORICAL categoricalValues: [ "gbtree", "gblinear", "dart" ] """ Explanation: Next, we need to set the hp tuning values used to train our model. Check HyperparameterSpec for more info. In this config file several key things are set: * maxTrials - How many training trials should be attempted to optimize the specified hyperparameters. * maxParallelTrials: 5 - The number of training trials to run concurrently. * params - The set of parameters to tune.. These are the different parameters to pass into your model and the specified ranges you wish to try. * parameterName - The parameter name must be unique amongst all ParameterConfigs * type - The type of the parameter. [INTEGER, DOUBLE, ...] * minValue & maxValue - The range of values that this parameter could be. * scaleType - How the parameter should be scaled to the hypercube. Leave unset for categorical parameters. Some kind of scaling is strongly recommended for real or integral parameters (e.g., UNIT_LINEAR_SCALE). End of explanation """ %%writefile ./setup.py #!/usr/bin/env python # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import find_packages from setuptools import setup REQUIRED_PACKAGES = ['cloudml-hypertune'] setup( name='auto_mpg_hp_tuning', version='0.1', install_requires=REQUIRED_PACKAGES, packages=find_packages(), include_package_data=True, description='Auto MPG XGBoost HP tuning training application' ) """ Explanation: Lastly, we need to install the dependencies used in our model. Check adding_standard_pypi_dependencies for more info. To do this, AI Platform uses a setup.py file to install your dependencies. End of explanation """ ! gcloud config set project $PROJECT_ID """ Explanation: Part 3: Submit Training Job Next we need to submit the job for training on AI Platform. We'll use gcloud to submit the job which has the following flags: job-name - A name to use for the job (mixed-case letters, numbers, and underscores only, starting with a letter). In this case: auto_mpg_hp_tuning_$(date +"%Y%m%d_%H%M%S") job-dir - The path to a Google Cloud Storage location to use for job output. package-path - A packaged training application that is staged in a Google Cloud Storage location. If you are using the gcloud command-line tool, this step is largely automated. module-name - The name of the main module in your trainer package. The main module is the Python file you call to start the application. If you use the gcloud command to submit your job, specify the main module name in the --module-name argument. Refer to Python Packages to figure out the module name. region - The Google Cloud Compute region where you want your job to run. 
You should run your training job in the same region as the Cloud Storage bucket that stores your training data. Select a region from here or use the default 'us-central1'. runtime-version - The version of AI Platform to use for the job. If you don't specify a runtime version, the training service uses the default AI Platform runtime version 1.0. See the list of runtime versions for more information. python-version - The Python version to use for the job. Python 3.5 is available with runtime version 1.4 or greater. If you don't specify a Python version, the training service uses Python 2.7. scale-tier - A scale tier specifying the type of processing cluster to run your job on. This can be the CUSTOM scale tier, in which case you also explicitly specify the number and type of machines to use. config - Path to the job configuration file. This file should be a YAML document (JSON also accepted) containing a Job resource as defined in the API Note: Check to make sure gcloud is set to the current PROJECT_ID End of explanation """ ! gcloud ml-engine jobs submit training auto_mpg_hp_tuning_$(date +"%Y%m%d_%H%M%S") \ --job-dir $JOB_DIR \ --package-path $TRAINER_PACKAGE_PATH \ --module-name $MAIN_TRAINER_MODULE \ --region $REGION \ --runtime-version=$RUNTIME_VERSION \ --python-version=$PYTHON_VERSION \ --scale-tier basic \ --config $HPTUNING_CONFIG """ Explanation: Submit the training job. End of explanation """ ! gsutil ls $JOB_DIR/* """ Explanation: [Optional] StackDriver Logging You can view the logs for your training job: 1. Go to https://console.cloud.google.com/ 1. Select "Logging" in left-hand pane 1. In left-hand pane, go to "AI Platform" and select Jobs 1. In filter by prefix, use the value of $JOB_NAME to view the logs On the logging page of your model, you can view the different results for each HP tuning job. 
Example: { "trialId": "15", "hyperparameters": { "booster": "dart", "max_depth": "7", "n_estimators": "102" }, "finalMetric": { "trainingStep": "1000", "objectiveValue": 0.9259230441279733 } } [Optional] Verify Model File in GCS View the contents of the destination model folder to verify that all 30 model files have indeed been uploaded to GCS. Note: The model can take a few minutes to train and show up in GCS. End of explanation """
J535D165/recordlinkage
docs/guides/link_two_dataframes.ipynb
bsd-3-clause
import recordlinkage from recordlinkage.datasets import load_febrl4 """ Explanation: Link two datasets Introduction This example shows how two datasets with data about persons can be linked. We will try to link the data based on attributes like first name, surname, sex, date of birth, place and address. The data used in this example is part of Febrl and is fictitious. First, start with importing the recordlinkage module. The submodule recordlinkage.datasets contains several datasets that can be used for testing. For this example, we use the Febrl datasets 4A and 4B. These datasets can be loaded with the function load_febrl4. End of explanation """ dfA, dfB = load_febrl4() dfA """ Explanation: The datasets are loaded with the following code. The returned datasets are of type pandas.DataFrame. This makes it easy to manipulate the data if desired. For details about data manipulation with pandas, see their comprehensive documentation http://pandas.pydata.org/. End of explanation """ indexer = recordlinkage.Index() indexer.full() pairs = indexer.index(dfA, dfB) """ Explanation: Make record pairs It is very intuitive to compare each record in DataFrame dfA with all records of DataFrame dfB. In fact, we want to make record pairs. Each record pair should contain one record of dfA and one record of dfB. This process of making record pairs is also called "indexing". With the recordlinkage module, indexing is easy. First, load the index.Index class and call the .full method. This object generates a full index on a .index(...) call. In case of deduplication of a single dataframe, one dataframe is sufficient as argument. End of explanation """ print (len(dfA), len(dfB), len(pairs)) """ Explanation: With the method index, all possible (and unique) record pairs are made. The method returns a pandas.MultiIndex. The number of pairs is equal to the number of records in dfA times the number of records in dfB. 
End of explanation """ indexer = recordlinkage.Index() indexer.block("given_name") candidate_links = indexer.index(dfA, dfB) len(candidate_links) """ Explanation: Many of these record pairs do not belong to the same person. In case of one-to-one matching, the number of matches should be no more than the number of records in the smallest dataframe. In case of full indexing, min(len(dfA), len(N_dfB)) is much smaller than len(pairs). The recordlinkage module has some more advanced indexing methods to reduce the number of record pairs. Obvious non-matches are left out of the index. Note that if a matching record pair is not included in the index, it can not be matched anymore. One of the most well known indexing methods is named blocking. This method includes only record pairs that are identical on one or more stored attributes of the person (or entity in general). The blocking method can be used in the recordlinkage module. End of explanation """ compare_cl = recordlinkage.Compare() compare_cl.exact("given_name", "given_name", label="given_name") compare_cl.string("surname", "surname", method="jarowinkler", threshold=0.85, label="surname") compare_cl.exact("date_of_birth", "date_of_birth", label="date_of_birth") compare_cl.exact("suburb", "suburb", label="suburb") compare_cl.exact("state", "state", label="state") compare_cl.string("address_1", "address_1", threshold=0.85, label="address_1") features = compare_cl.compute(candidate_links, dfA, dfB) """ Explanation: The argument "given_name" is the blocking variable. This variable has to be the name of a column in dfA and dfB. It is possible to parse a list of columns names to block on multiple variables. Blocking on multiple variables will reduce the number of record pairs even further. Another implemented indexing method is Sorted Neighbourhood Indexing (recordlinkage.index.SortedNeighbourhood). This method is very useful when there are many misspellings in the string were used for indexing. 
In fact, sorted neighbourhood indexing is a generalisation of blocking. See the documentation for details about sorted neighbourhood indexing.
Compare records
Each record pair is a candidate match. To classify the candidate record pairs into matches and non-matches, compare the records on all attributes both records have in common. The recordlinkage module has a class named Compare. This class is used to compare the records. The following code shows how to compare attributes.
End of explanation
"""
compare_cl = recordlinkage.Compare()

compare_cl.exact("given_name", "given_name", label="given_name")
compare_cl.string("surname", "surname", method="jarowinkler", threshold=0.85, label="surname")
compare_cl.exact("date_of_birth", "date_of_birth", label="date_of_birth")
compare_cl.exact("suburb", "suburb", label="suburb")
compare_cl.exact("state", "state", label="state")
compare_cl.string("address_1", "address_1", threshold=0.85, label="address_1")

features = compare_cl.compute(candidate_links, dfA, dfB)
"""
Explanation: The comparing of record pairs starts when the compute method is called. All attribute comparisons are stored in a DataFrame with horizontally the features and vertically the record pairs.
End of explanation
"""
features

features.describe()
"""
Explanation: The last step is to decide which records belong to the same person. In this example, we keep it simple:
End of explanation
"""
features.sum(axis=1).value_counts().sort_index(ascending=False)

features[features.sum(axis=1) > 3]
"""
Explanation: Full code
End of explanation
"""
import recordlinkage
from recordlinkage.datasets import load_febrl4

dfA, dfB = load_febrl4()

# Indexation step
indexer = recordlinkage.Index()
indexer.block("given_name")
candidate_links = indexer.index(dfA, dfB)

# Comparison step
compare_cl = recordlinkage.Compare()

compare_cl.exact("given_name", "given_name", label="given_name")
compare_cl.string("surname", "surname", method="jarowinkler", threshold=0.85, label="surname")
compare_cl.exact("date_of_birth", "date_of_birth", label="date_of_birth")
compare_cl.exact("suburb", "suburb", label="suburb")
compare_cl.exact("state", "state", label="state")
compare_cl.string("address_1", "address_1", threshold=0.85, label="address_1")

features = compare_cl.compute(candidate_links, dfA, dfB)

# Classification step
matches = features[features.sum(axis=1) > 3]
print(len(matches))
"""
Explanation: 
gVallverdu/cookbook
mpl_seaborn_styles.ipynb
gpl-2.0
import matplotlib import matplotlib.pyplot as plt %matplotlib inline seaborn_style = [style for style in matplotlib.style.available if "seaborn" in style] seaborn_style """ Explanation: Seaborn style in matplotlib Gemain Salvato-Vallverdu germain.vallverdu@univ-pau.fr Matplotlib provides several styles in oder to produce aesthetic plots. Among them, there is a serie of seaborn styles from the python library of the same name. End of explanation """ import numpy as np """ Explanation: Actually, seaborn styles can be divided into 3 categories : color palette seaborn-bright seaborn-colorblind seaborn-dark-palette seaborn-deep seaborn-muted seaborn-pastel themes mainly about elements' frame seaborn-dark(grid) => dark theme with or without grid seaborn-white(grid) => white theme with or without grid seaborn-ticks => white theme with ticks * fonts and sizes seaborn-notebook seaborn-paper * seaborn-poster seaborn-talk Thus one should select one of each category to have a full theme. End of explanation """ colors = { "seaborn-bright": ['003FFF', '03ED3A', 'E8000B', '8A2BE2', 'FFC400', '00D7FF'], "seaborn-colorblind": ['0072B2', '009E73', 'D55E00', 'CC79A7', 'F0E442', '56B4E9'], "seaborn-dark-palette": ['001C7F', '017517', '8C0900', '7600A1', 'B8860B', '006374'], "seaborn-deep": ['4C72B0', '55A868', 'C44E52', '8172B2', 'CCB974', '64B5CD'], "seaborn-muted": ['4878CF', '6ACC65', 'D65F5F', 'B47CC7', 'C4AD66', '77BEDB'], "seaborn-pastel": ['92C6FF', '97F0AA', 'FF9F9A', 'D0BBFF', 'FFFEA3', 'B0E0E6'] } f, axes = plt.subplots(2, 3, figsize=(12, 6), sharex=True, sharey=True) x = np.linspace(0, 1, 100) for ax, cname in zip(axes.flat, colors): for i, color in enumerate(colors[cname]): ax.plot(x, (i+1)*x**2 + i, color="#" + color, linewidth=5) ax.set_title(cname) """ Explanation: Color palettes Hereafter, the 6 available color palettes are used in order to plot 6 lines. 
End of explanation """ base = "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/matplotlib/mpl-data/stylelib/" styles = ["seaborn-notebook", "seaborn-paper", "seaborn-poster", "seaborn-talk"] f, axes = plt.subplots(2, 2, figsize=(6, 6)) x = np.linspace(0, 1, 100) for ax, style in zip(axes.flat, styles): rc = matplotlib.style.core.rc_params_from_file(base + style + ".mplstyle") matplotlib.rcParams.update(rc) for i in range(6): ax.plot(x, (i+1)*x**2 + i, linewidth=5) ax.set_title(style) plt.tight_layout() """ Explanation: Themes about fonts and sizes The four themes : poster, talk, notebook and paper, mainly modify the sizes of fonts, lines, figure, ticks etc ... The poster theme set the largest sizes and the paper theme the smallest. On the examples below, the main differences are from the fontsizes. End of explanation """ matplotlib.style.use(["seaborn-darkgrid", "seaborn-colorblind", "seaborn-notebook"]) x = np.linspace(0, 1, 100) for i in range(6): plt.plot(x, (i+1)*x**2 + i) plt.title("With the darkgrid, colorblind and notebook styles") """ Explanation: Complete themes Now we will plot the same plots using either the dark theme either the white theme with one of the color palette. Using the dark theme End of explanation """ matplotlib.style.use(["seaborn-whitegrid", "seaborn-deep", "seaborn-notebook"]) x = np.linspace(0, 1, 100) for i in range(6): plt.plot(x, (i+1)*x**2 + i) plt.title("With the whitegrid, deep and notebook styles") """ Explanation: Using the white theme The ticks style add the ticks to the white style. Nevertheless, whitegrid and ticks style are not consistant as the former remove the ticks and add a grid and the latter remove the grid and add the ticks. 
End of explanation """ matplotlib.style.use(["seaborn-ticks", "seaborn-deep", "seaborn-paper"]) x = np.linspace(0, 1, 100) for i in range(6): plt.plot(x, (i+1)*x**2 + i) plt.title("With the white, ticks, deep and notebook styles") """ Explanation: Same as above, without the grid but with ticks and paper size. End of explanation """
shareactorIO/pipeline
source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/HvassLabsTutorials/08_Transfer_Learning.ipynb
apache-2.0
from IPython.display import Image, display Image('images/08_transfer_learning_flowchart.png') """ Explanation: TensorFlow Tutorial #08 Transfer Learning by Magnus Erik Hvass Pedersen / GitHub / Videos on YouTube Introduction We saw in the previous Tutorial #07 how to use the pre-trained Inception model for classifying images. Unfortunately the Inception model seemed unable to classify images of people. The reason was the data-set used for training the Inception model, which had some confusing text-labels for classes. The Inception model is actually quite capable of extracting useful information from an image. So we can instead train the Inception model using another data-set. But it takes several weeks using a very powerful and expensive computer to fully train the Inception model on a new data-set. We can instead re-use the pre-trained Inception model and merely replace the layer that does the final classification. This is called Transfer Learning. This tutorial builds on the previous tutorials so you should be familiar with Tutorial #07 on the Inception model, as well as earlier tutorials on how to build and train Neural Networks in TensorFlow. A part of the source-code for this tutorial is located in the inception.py file. Flowchart The following chart shows how the data flows when using the Inception model for Transfer Learning. First we input and process an image with the Inception model. Just prior to the final classification layer of the Inception model, we save the so-called Transfer Values to a cache-file. The reason for using a cache-file is that it takes a long time to process an image with the Inception model. My laptop computer with a Quad-Core 2 GHz CPU can process about 3 images per second using the Inception model. If each image is processed more than once then we can save a lot of time by caching the transfer-values. The transfer-values are also sometimes called bottleneck-values, but that is a confusing term so it is not used here. 
When all the images in the new data-set have been processed through the Inception model and the resulting transfer-values saved to a cache file, then we can use those transfer-values as the input to another neural network. We will then train the second neural network using the classes from the new data-set, so the network learns how to classify images based on the transfer-values from the Inception model. In this way, the Inception model is used to extract useful information from the images and another neural network is then used for the actual classification. End of explanation """ %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np import time from datetime import timedelta import os # Functions and classes for loading and using the Inception model. import inception # We use Pretty Tensor to define the new classifier. import prettytensor as pt """ Explanation: Imports End of explanation """ tf.__version__ """ Explanation: This was developed using Python 3.5.2 (Anaconda) and TensorFlow version: End of explanation """ import cifar10 """ Explanation: Load Data for CIFAR-10 End of explanation """ from cifar10 import num_classes """ Explanation: The data dimensions have already been defined in the cifar10 module, so we just need to import the ones we need. End of explanation """ # cifar10.data_path = "data/CIFAR-10/" """ Explanation: Set the path for storing the data-set on your computer. End of explanation """ cifar10.maybe_download_and_extract() """ Explanation: The CIFAR-10 data-set is about 163 MB and will be downloaded automatically if it is not located in the given path. End of explanation """ class_names = cifar10.load_class_names() class_names """ Explanation: Load the class-names. End of explanation """ images_train, cls_train, labels_train = cifar10.load_training_data() """ Explanation: Load the training-set. 
This returns the images, the class-numbers as integers, and the class-numbers as One-Hot encoded arrays called labels. End of explanation """ images_test, cls_test, labels_test = cifar10.load_test_data() """ Explanation: Load the test-set. End of explanation """ print("Size of:") print("- Training-set:\t\t{}".format(len(images_train))) print("- Test-set:\t\t{}".format(len(images_test))) """ Explanation: The CIFAR-10 data-set has now been loaded and consists of 60,000 images and associated labels (i.e. classifications of the images). The data-set is split into 2 mutually exclusive sub-sets, the training-set and the test-set. End of explanation """ def plot_images(images, cls_true, cls_pred=None, smooth=True): assert len(images) == len(cls_true) # Create figure with sub-plots. fig, axes = plt.subplots(3, 3) # Adjust vertical spacing. if cls_pred is None: hspace = 0.3 else: hspace = 0.6 fig.subplots_adjust(hspace=hspace, wspace=0.3) # Interpolation type. if smooth: interpolation = 'spline16' else: interpolation = 'nearest' for i, ax in enumerate(axes.flat): # There may be less than 9 images, ensure it doesn't crash. if i < len(images): # Plot image. ax.imshow(images[i], interpolation=interpolation) # Name of the true class. cls_true_name = class_names[cls_true[i]] # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true_name) else: # Name of the predicted class. cls_pred_name = class_names[cls_pred[i]] xlabel = "True: {0}\nPred: {1}".format(cls_true_name, cls_pred_name) # Show the classes as the label on the x-axis. ax.set_xlabel(xlabel) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show() """ Explanation: Helper-function for plotting images Function used to plot at most 9 images in a 3x3 grid, and writing the true and predicted classes below each image. End of explanation """ # Get the first images from the test-set. 
images = images_test[0:9] # Get the true classes for those images. cls_true = cls_test[0:9] # Plot the images and labels using our helper-function above. plot_images(images=images, cls_true=cls_true, smooth=False) """ Explanation: Plot a few images to see if data is correct End of explanation """ # inception.data_dir = 'inception/' """ Explanation: Download the Inception Model The Inception model is downloaded from the internet. This is the default directory where you want to save the data-files. The directory will be created if it does not exist. End of explanation """ inception.maybe_download() """ Explanation: Download the data for the Inception model if it doesn't already exist in the directory. It is 85 MB. See Tutorial #07 for more details. End of explanation """ model = inception.Inception() """ Explanation: Load the Inception Model Load the Inception model so it is ready for classifying images. Note the deprecation warning, which might cause the program to fail in the future. End of explanation """ from inception import transfer_values_cache """ Explanation: Calculate Transfer-Values Import a helper-function for caching the transfer-values of the Inception model. End of explanation """ file_path_cache_train = os.path.join(cifar10.data_path, 'inception_cifar10_train.npy') file_path_cache_test = os.path.join(cifar10.data_path, 'inception_cifar10_test.npy') print("Processing Inception transfer-values for training-images ...") # Scale images because Inception needs pixels to be between 0 and 255, # while the CIFAR-10 functions return pixels between 0.0 and 1.0 images_scaled = images_train * 255.0 # If transfer-values have already been calculated then reload them, # otherwise calculate them and save them to a cache-file. 
transfer_values_train = transfer_values_cache(file_path=file_path_cache_train, images=images_scaled, model=model) print("Processing Inception transfer-values for test-images ...") # Scale images because Inception needs pixels to be between 0 and 255, # while the CIFAR-10 functions return pixels between 0.0 and 1.0 images_scaled = images_test * 255.0 # If transfer-values have already been calculated then reload them, # otherwise calculate them and save them to a cache-file. transfer_values_test = transfer_values_cache(file_path=file_path_cache_test, images=images_scaled, model=model) """ Explanation: Set the file-paths for the caches of the training-set and test-set. End of explanation """ transfer_values_train.shape """ Explanation: Check the shape of the array with the transfer-values. There are 50,000 images in the training-set and for each image there are 2048 transfer-values. End of explanation """ transfer_values_test.shape """ Explanation: Similarly, there are 10,000 images in the test-set with 2048 transfer-values for each image. End of explanation """ def plot_transfer_values(i): print("Input image:") # Plot the i'th image from the test-set. plt.imshow(images_test[i], interpolation='nearest') plt.show() print("Transfer-values for the image using Inception model:") # Transform the transfer-values into an image. img = transfer_values_test[i] img = img.reshape((32, 64)) # Plot the image for the transfer-values. plt.imshow(img, interpolation='nearest', cmap='Reds') plt.show() plot_transfer_values(i=16) plot_transfer_values(i=17) """ Explanation: Helper-function for plotting transfer-values End of explanation """ from sklearn.decomposition import PCA """ Explanation: Analysis of Transfer-Values using PCA Use Principal Component Analysis (PCA) from scikit-learn to reduce the array-lengths of the transfer-values from 2048 to 2 so they can be plotted. 
End of explanation """ pca = PCA(n_components=2) """ Explanation: Create a new PCA-object and set the target array-length to 2. End of explanation """ transfer_values = transfer_values_train[0:3000] """ Explanation: It takes a while to compute the PCA so the number of samples has been limited to 3000. You can try and use the full training-set if you like. End of explanation """ transfer_values.shape """ Explanation: Check that the array has 3000 samples and 2048 transfer-values for each sample. End of explanation """ transfer_values_reduced = pca.fit_transform(transfer_values) """ Explanation: Use PCA to reduce the transfer-value arrays from 2048 to 2 elements. End of explanation """ transfer_values_reduced.shape """ Explanation: Check that it is now an array with 3000 samples and 2 values per sample. End of explanation """ def plot_scatter(values): # Create a color-map with a different color for each class. import matplotlib.cm as cm cmap = cm.rainbow(np.linspace(0.0, 1.0, num_classes)) # Get the color for each sample. colors = cmap[cls_train] # Extract the x- and y-values. x = values[:, 0] y = values[:, 1] # Plot it. plt.scatter(x, y, color=colors) plt.show() """ Explanation: Helper-function for plotting the reduced transfer-values. End of explanation """ plot_scatter(transfer_values_reduced) """ Explanation: Plot the transfer-values that have been reduced using PCA. There are 10 different colors for the different classes in the CIFAR-10 data-set. The colors are grouped together but with very large overlap. This may be because PCA cannot properly separate the transfer-values. End of explanation """ from sklearn.manifold import TSNE """ Explanation: Analysis of Transfer-Values using t-SNE End of explanation """ pca = PCA(n_components=50) transfer_values_50d = pca.fit_transform(transfer_values) """ Explanation: Another method for doing dimensionality reduction is t-SNE. 
Unfortunately, t-SNE is very slow so we first use PCA to reduce the transfer-values from 2048 to 50 elements. End of explanation """ tsne = TSNE(n_components=2) """ Explanation: Create a new t-SNE object for the final dimensionality reduction and set the target to 2-dim. End of explanation """ transfer_values_reduced = tsne.fit_transform(transfer_values_50d) """ Explanation: Perform the final reduction using t-SNE. The current implemenation of t-SNE in scikit-learn cannot handle data with many samples so this might crash if you use the full training-set. End of explanation """ transfer_values_reduced.shape """ Explanation: Check that it is now an array with 3000 samples and 2 transfer-values per sample. End of explanation """ plot_scatter(transfer_values_reduced) """ Explanation: Plot the transfer-values that have been reduced to 2-dim using t-SNE, which shows better separation than the PCA-plot above. This means the transfer-values from the Inception model appear to contain enough information to separate the CIFAR-10 images into classes, although there is still some overlap so the separation is not perfect. End of explanation """ transfer_len = model.transfer_len """ Explanation: New Classifier in TensorFlow Now we will create another neural network in TensorFlow. This network will take as input the transfer-values from the Inception model and output the predicted classes for CIFAR-10 images. It is assumed that you are already familiar with how to build neural networks in TensorFlow, otherwise see e.g. Tutorial #03. Placeholder Variables First we need the array-length for transfer-values which is stored as a variable in the object for the Inception model. End of explanation """ x = tf.placeholder(tf.float32, shape=[None, transfer_len], name='x') """ Explanation: Now create a placeholder variable for inputting the transfer-values from the Inception model into the new network that we are building. 
The shape of this variable is [None, transfer_len] which means it takes an input array with an arbitrary number of samples as indicated by the keyword None and each sample has 2048 elements, equal to transfer_len. End of explanation """ y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true') """ Explanation: Create another placeholder variable for inputting the true class-label of each image. These are so-called One-Hot encoded arrays with 10 elements, one for each possible class in the data-set. End of explanation """ y_true_cls = tf.argmax(y_true, dimension=1) """ Explanation: Calculate the true class as an integer. This could also be a placeholder variable. End of explanation """ # Wrap the transfer-values as a Pretty Tensor object. x_pretty = pt.wrap(x) with pt.defaults_scope(activation_fn=tf.nn.relu): y_pred, loss = x_pretty.\ fully_connected(size=1024, name='layer_fc1').\ softmax_classifier(class_count=num_classes, labels=y_true) """ Explanation: Neural Network Create the neural network for doing the classification on the CIFAR-10 data-set. This takes as input the transfer-values from the Inception model which will be fed into the placeholder variable x. The network outputs the predicted class in y_pred. See Tutorial #03 for more details on how to use Pretty Tensor to construct neural networks. End of explanation """ global_step = tf.Variable(initial_value=0, name='global_step', trainable=False) """ Explanation: Optimization Method Create a variable for keeping track of the number of optimization iterations performed. End of explanation """ optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss, global_step) """ Explanation: Method for optimizing the new neural network. End of explanation """ y_pred_cls = tf.argmax(y_pred, dimension=1) """ Explanation: Classification Accuracy The output of the network y_pred is an array with 10 elements. The class number is the index of the largest element in the array. 
End of explanation """ correct_prediction = tf.equal(y_pred_cls, y_true_cls) """ Explanation: Create an array of booleans whether the predicted class equals the true class of each image. End of explanation """ accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) """ Explanation: The classification accuracy is calculated by first type-casting the array of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers. End of explanation """ session = tf.Session() """ Explanation: TensorFlow Run Create TensorFlow Session Once the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph. End of explanation """ session.run(tf.initialize_all_variables()) """ Explanation: Initialize Variables The variables for the new network must be initialized before we start optimizing them. End of explanation """ train_batch_size = 64 """ Explanation: Helper-function to get a random training-batch There are 50,000 images (and arrays with transfer-values for the images) in the training-set. It takes a long time to calculate the gradient of the model using all these images (transfer-values). We therefore only use a small batch of images (transfer-values) in each iteration of the optimizer. If your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to perform more optimization iterations. End of explanation """ def random_batch(): # Number of images (transfer-values) in the training-set. num_images = len(transfer_values_train) # Create a random index. idx = np.random.choice(num_images, size=train_batch_size, replace=False) # Use the random index to select random x and y-values. # We use the transfer-values instead of images as x-values. 
x_batch = transfer_values_train[idx] y_batch = labels_train[idx] return x_batch, y_batch """ Explanation: Function for selecting a random batch of transfer-values from the training-set. End of explanation """ def optimize(num_iterations): # Start-time used for printing time-usage below. start_time = time.time() for i in range(num_iterations): # Get a batch of training examples. # x_batch now holds a batch of images (transfer-values) and # y_true_batch are the true labels for those images. x_batch, y_true_batch = random_batch() # Put the batch into a dict with the proper names # for placeholder variables in the TensorFlow graph. feed_dict_train = {x: x_batch, y_true: y_true_batch} # Run the optimizer using this batch of training data. # TensorFlow assigns the variables in feed_dict_train # to the placeholder variables and then runs the optimizer. # We also want to retrieve the global_step counter. i_global, _ = session.run([global_step, optimizer], feed_dict=feed_dict_train) # Print status to screen every 100 iterations (and last). if (i_global % 100 == 0) or (i == num_iterations - 1): # Calculate the accuracy on the training-batch. batch_acc = session.run(accuracy, feed_dict=feed_dict_train) # Print status. msg = "Global Step: {0:>6}, Training Batch Accuracy: {1:>6.1%}" print(msg.format(i_global, batch_acc)) # Ending time. end_time = time.time() # Difference between start and end-times. time_dif = end_time - start_time # Print the time-usage. print("Time usage: " + str(timedelta(seconds=int(round(time_dif))))) """ Explanation: Helper-function to perform optimization This function performs a number of optimization iterations so as to gradually improve the variables of the neural network. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations. 
End of explanation """ def plot_example_errors(cls_pred, correct): # This function is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # correct is a boolean array whether the predicted class # is equal to the true class for each image in the test-set. # Negate the boolean array. incorrect = (correct == False) # Get the images from the test-set that have been # incorrectly classified. images = images_test[incorrect] # Get the predicted classes for those images. cls_pred = cls_pred[incorrect] # Get the true classes for those images. cls_true = cls_test[incorrect] n = min(9, len(images)) # Plot the first n images. plot_images(images=images[0:n], cls_true=cls_true[0:n], cls_pred=cls_pred[0:n]) """ Explanation: Helper-Functions for Showing Results Helper-function to plot example errors Function for plotting examples of images from the test-set that have been mis-classified. End of explanation """ # Import a function from sklearn to calculate the confusion-matrix. from sklearn.metrics import confusion_matrix def plot_confusion_matrix(cls_pred): # This is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # Get the confusion matrix using sklearn. cm = confusion_matrix(y_true=cls_test, # True class for test-set. y_pred=cls_pred) # Predicted class. # Print the confusion matrix as text. for i in range(num_classes): # Append the class-name to each line. class_name = "({}) {}".format(i, class_names[i]) print(cm[i, :], class_name) # Print the class-numbers for easy reference. class_numbers = [" ({0})".format(i) for i in range(num_classes)] print("".join(class_numbers)) """ Explanation: Helper-function to plot confusion matrix End of explanation """ # Split the data-set in batches of this size to limit RAM usage. batch_size = 256 def predict_cls(transfer_values, labels, cls_true): # Number of images. 
num_images = len(transfer_values) # Allocate an array for the predicted classes which # will be calculated in batches and filled into this array. cls_pred = np.zeros(shape=num_images, dtype=np.int) # Now calculate the predicted classes for the batches. # We will just iterate through all the batches. # There might be a more clever and Pythonic way of doing this. # The starting index for the next batch is denoted i. i = 0 while i < num_images: # The ending index for the next batch is denoted j. j = min(i + batch_size, num_images) # Create a feed-dict with the images and labels # between index i and j. feed_dict = {x: transfer_values[i:j], y_true: labels[i:j]} # Calculate the predicted class using TensorFlow. cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict) # Set the start-index for the next batch to the # end-index of the current batch. i = j # Create a boolean array whether each image is correctly classified. correct = (cls_true == cls_pred) return correct, cls_pred """ Explanation: Helper-functions for calculating classifications This function calculates the predicted classes of images and also returns a boolean array whether the classification of each image is correct. The calculation is done in batches because it might use too much RAM otherwise. If your computer crashes then you can try and lower the batch-size. End of explanation """ def predict_cls_test(): return predict_cls(transfer_values = transfer_values_test, labels = labels_test, cls_true = cls_test) """ Explanation: Calculate the predicted class for the test-set. End of explanation """ def classification_accuracy(correct): # When averaging a boolean array, False means 0 and True means 1. # So we are calculating: number of True / len(correct) which is # the same as the classification accuracy. # Return the classification accuracy # and the number of correct classifications. 
return correct.mean(), correct.sum() """ Explanation: Helper-functions for calculating the classification accuracy This function calculates the classification accuracy given a boolean array whether each image was correctly classified. E.g. classification_accuracy([True, True, False, False, False]) = 2/5 = 0.4. The function also returns the number of correct classifications. End of explanation """ def print_test_accuracy(show_example_errors=False, show_confusion_matrix=False): # For all the images in the test-set, # calculate the predicted classes and whether they are correct. correct, cls_pred = predict_cls_test() # Classification accuracy and the number of correct classifications. acc, num_correct = classification_accuracy(correct) # Number of images being classified. num_images = len(correct) # Print the accuracy. msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})" print(msg.format(acc, num_correct, num_images)) # Plot some examples of mis-classifications, if desired. if show_example_errors: print("Example errors:") plot_example_errors(cls_pred=cls_pred, correct=correct) # Plot the confusion matrix, if desired. if show_confusion_matrix: print("Confusion Matrix:") plot_confusion_matrix(cls_pred=cls_pred) """ Explanation: Helper-function for showing the classification accuracy Function for printing the classification accuracy on the test-set. It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function. End of explanation """ print_test_accuracy(show_example_errors=False, show_confusion_matrix=False) """ Explanation: Results Performance before any optimization The classification accuracy on the test-set is very low because the model variables have only been initialized and not optimized at all, so it just classifies the images randomly. 
End of explanation """ optimize(num_iterations=10000) print_test_accuracy(show_example_errors=True, show_confusion_matrix=True) """ Explanation: Performance after 10,000 optimization iterations After 10,000 optimization iterations, the classification accuracy is about 90% on the test-set. Compare this to the basic Convolutional Neural Network from Tutorial #06 which had less than 80% accuracy on the test-set. End of explanation """ # This has been commented out in case you want to modify and experiment # with the Notebook without having to restart it. # model.close() """ Explanation: Close TensorFlow Session We are now done using TensorFlow, so we close the session to release its resources. Note that the TensorFlow-session is inside the model-object, so we close the session through that object. End of explanation """
AISpace2/AISpace2
notebooks/search/search.ipynb
gpl-3.0
# Run this to import pre-defined problems from aipython.searchProblem import search_simple1, search_simple2, search_cyclic_delivery, search_acyclic_delivery, search_tree, search_extended_tree, search_cyclic, search_vancouver_neighbour, search_misleading_heuristic, search_multiple_path_pruning, search_module_4_graph, search_module_5_graph, search_bicycle_courier_acyclic, search_bicycle_courier_cyclic """ Explanation: 3. Searching for Solutions About This chapter casts the problem of an agent deciding how to solve a goal as the problem of searching to find a path in a graph. You can run each cell by selecting it and pressing Ctrl+Enter in Windows or Shift+Return in MacOS. Alternatively, you can click the Play button in the toolbar, to the left of the stop button. For more information, check out our AISpace2 Tutorial. Feel free to modify our codes either in this notebook or somewhere outside (e.g. python files in /aipython/). If you want to modify our codes outside, you might find this helpful for how your changes can take effect. You need to run the following command to import our pre-defined problems. End of explanation """ # Run this to import utilities that support self-defined problems from aipython.searchProblem import Arc, Search_problem_from_explicit_graph """ Explanation: You can also define your own problems (how?). You need to run the following command to import utilities that support your self-defined problems. 
End of explanation """ from aipython.searchGeneric import Searcher s = Searcher(problem=search_simple2) # Visualization options # For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options s.sleep_time = 0.2 # The time, in seconds, between each step in auto solving s.line_width = 2.0 # The thickness of edges s.text_size = 13 # The fontsize of the text s.detail_level = 2 # 0=no text, 1=truncated text, 2=full text s.show_edge_costs = True s.show_node_heuristics = False # Controls the layout engine used. Either "force" for force layout, or "tree". s.layout_method = "force" # s.layout_method = "tree" # Display the widget display(s) s.search() """ Explanation: 3.5.2 Depth-First Search Implementation Details (page 39) In depth-first search, the frontier acts like a LIFO (last-in, first-out) stack of paths. This means that the path selected and removed from the frontier at any time is the last path that was added. Depth-first search is appropriate when space is restricted, or when there are many solutions. On the other hand, depth-first search is not appropriate if it is possible to get stuck into infinite paths or if solutions exist at shallow depths. 
End of explanation """ from aipython.searchGeneric import AStarSearcher s_astar = AStarSearcher(problem=search_simple1) # Visualization options # For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options s_astar.sleep_time = 0.2 # The time, in seconds, between each step in auto solving s_astar.line_width = 2.0 # The thickness of edges s_astar.text_size = 13 # The fontsize of the text s_astar.detail_level = 2 # 0=no text, 1=truncated text, 2=full text s_astar.show_edge_costs = True s_astar.show_node_heuristics = True # Display the widget display(s_astar) s_astar.search() """ Explanation: 3.6.1 A* Search Implementation Details (page 41) A* search uses both path cost and heuristic information in its selection of which path to expand. For each path on the frontier, A* uses an estimate of the total path cost from the start node to a goal node constrained to follow that path initially. The estimated total path cost is the sum of the cost of the path found $\text{c⁢o⁢s⁢t}⁢(p)$ and the heuristic function $h(p)$, which estimates the cost from the end of $p$ to the goal. End of explanation """ from aipython.searchMPP import SearcherMPP s_mpp = SearcherMPP(problem=search_simple1) # Visualization options # For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options s_mpp.sleep_time = 0.2 # The time, in seconds, between each step in auto solving s_mpp.line_width = 2.0 # The thickness of edges s_mpp.text_size = 13 # The fontsize of the text s_mpp.detail_level = 1 # 0=no text, 1=truncated text, 2=full text s_mpp.show_edge_costs = True s_mpp.show_node_heuristics = True # Display the widget display(s_mpp) s_mpp.search() """ Explanation: 3.7.2 A* Search with Multiple Path Pruning Implementation Details (page 43) There is often more than one path to a node. 
If only one path is required, a search algorithm can prune from the frontier any path that leads to a node to which it has already found a path. Multiple-path pruning is implemented by maintaining an explored set (traditionally called closed list) of nodes that are at the end of paths that have been expanded. The explored set is initially empty. When a path $⟨n_0,…,n_k⟩$ is selected , if $n_k$ is already in the explored set, the path can be discarded. Otherwise, $n_k$ is added to the explored set, and the algorithm proceeds as before. End of explanation """ from aipython.searchBranchAndBound import DF_branch_and_bound s_dfbb = DF_branch_and_bound(problem=search_simple1) # Visualization options # For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options s_dfbb.sleep_time = 0.2 # The time, in seconds, between each step in auto solving s_dfbb.line_width = 2.0 # The thickness of edges s_dfbb.text_size = 13 # The fontsize of the text s_dfbb.detail_level = 2 # 0=no text, 1=truncated text, 2=full text s_dfbb.show_edge_costs = True s_dfbb.show_node_heuristics = True # Display the widget display(s_dfbb) s_dfbb.search() """ Explanation: 3.8.1 Branch-and-bound Search Implementation Details (page 44) Depth-first branch-and-bound search is a way to combine the space saving of depth-first search with heuristic information for finding optimal paths. It is particularly applicable when there are many paths to a goal. As in A* search, the heuristic function h$⁢(n)$ is non-negative and less than or equal to the cost of a lowest-cost path from n to a goal node. The idea of a branch-and-bound search is to maintain the lowest cost $b$ ("bound") of a path to a goal found so far. If the search encounters a path $p$ such that $\text{c⁢o⁢s⁢t}⁢(p)+h⁢(p)≥b$, path $p$ can be pruned. If a non-pruned path to a goal is found, it must be better than the previous best path. 
This new solution is remembered and the bound $b$ is set to the cost of this new solution. The searcher then proceeds to search for a better solution.
End of explanation
"""
jorgedominguezchavez/dlnd_first_neural_network
Your_first_neural_network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. 
End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. End of explanation """ # Save data for approximately the last 21 days test_data = data[-21*24:] # Now remove the test data from the data set data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. 
End of explanation """ # Hold out the last 60 days or so of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, (self.input_nodes, self.hidden_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) self.lr = learning_rate #### TODO: Set self.activation_function to your implemented sigmoid function #### # # Note: in Python, you can define a function with a lambda expression, # as shown below. self.activation_function = lambda x : 1/(1+np.exp(-x)) # Replace 0 with sigmoid calculation. DONE ### If the lambda code above is not something you're familiar with, # You can uncomment out the following three lines and put your # implementation there instead. # #def sigmoid(x): # return 0 # Replace 0 with your sigmoid calculation here #self.activation_function = sigmoid def train(self, features, targets): ''' Train the network on batch of features and targets. 
Arguments --------- features: 2D array, each row is one data record, each column is a feature targets: 1D array of target values ''' n_records = features.shape[0] delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape) delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape) for X, y in zip(features, targets): #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer - Replace these values with your calculations. hidden_inputs = np.dot(X, self.weights_input_to_hidden ) # signals into hidden layer DONE hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer DONE # TODO: Output layer - Replace these values with your calculations. final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer final_outputs = final_inputs # signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error - Replace this value with your calculations. error = y - final_outputs # Output layer error is the difference between desired target and actual output. # TODO: Backpropagated error terms - Replace these values with your calculations. output_error_term = error # TODO: Calculate the hidden layer's contribution to the error hidden_error = np.dot(self.weights_hidden_to_output, output_error_term) # TODO: Backpropagated error terms - Replace these values with your calculations. hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs) # Weight step (input to hidden) delta_weights_i_h += hidden_error_term * X[:, None] # Weight step (hidden to output) delta_weights_h_o += output_error_term * hidden_outputs[:, None] # TODO: Update the weights - Replace these values with your calculations. 
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step def run(self, features): ''' Run a forward pass through the network with input features Arguments --------- features: 1D array of feature values ''' #### Implement the forward pass here #### # TODO: Hidden layer - replace these values with the appropriate calculations. hidden_inputs = np.dot(features, self.weights_input_to_hidden ) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer # TODO: Output layer - Replace these values with the appropriate calculations. final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer final_outputs = final_inputs # signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. <img src="assets/neural_network.png" width=300px> The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. 
This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. End of explanation """ import unittest inputs = np.array([[0.5, -0.2, 0.1]]) targets = np.array([[0.4]]) test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]]) test_w_h_o = np.array([[0.3], [-0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, 
targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328], [-0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, -0.20185996], [0.39775194, 0.50074398], [-0.29887597, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Unit tests Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project. End of explanation """ import sys ### Set the hyperparameters here ### iterations = 40000 learning_rate = 0.5 hidden_nodes = 35 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for ii in range(iterations): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt'] network.train(X, y) # Printing out the training progress train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values) val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values) sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... 
Validation loss: " + str(val_loss)[:5]) sys.stdout.flush() losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() _ = plt.ylim() """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of iterations This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. 
Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features).T*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """
ucsd-ccbb/jupyter-genomics
notebooks/tcrSeq/TCR-seq.ipynb
mit
!perl demultiplex_fastq_TCRplates.pl Sample_S1_L001_R1_001.fastq Sample_S1_L001_R2_001.fastq !ls *[A,B].fastq """ Explanation: TCR-seq protocol By Roman Sasik (rsasik@ucsd.edu) This Notebook describes the sequence of commands used in TCR-seq analysis. The multiplexing barcodes are assumed to follow the design described in this paper: "Linking T-cell receptor sequence to functional phenotype at the single-cell level", A Han, J Glanville and MD Davis, Nature Biotechnology, 2014, 32 (7), p.684-92 In addition to original perl scripts below, you need to install the superfast TCR repertoir processing java program mitcr.jar, which can be downloaded at http://mitcr.milaboratory.com/. The relevant paper is MiTCR: software for T-cell receptor sequencing data analysis by DA Bolotin et al., Nature Methods 10, 813-814 (2013). Perl and java are assumed to be installed. Demultiplexing TCR reads Processing starts with demultiplexing the reads from a single pair of large fastq files: End of explanation """ #!/usr/bin/perl $fileR1 = $ARGV[0]; $fileR2 = $ARGV[1]; open(F1,$fileR1); open(F2,$fileR2); %plate = ( "GCAGA" => "01", #uncomment this line if plate code 01 is among the sequences to be demultiplexed # "TCGAA" => "02", # "AACAA" => "03", # "GGTGC" => "04", # "TTGGT" => "05", # "CATTC" => "06", # "ATTGG" => "07", # "CGGTT" => "08", # "ATCCT" => "09", # "ATGTC" => "10", # "TCACG" => "11", # "AGACC" => "12", # "CCCCA" => "13", # "GCGCT" => "14", # "TCCTT" => "15", # "TATAT" => "16", # "CGTAA" => "17", # "AAGGT" => "18", # "AGCTC" => "19", # "CTTGC" => "20", # "GTATC" => "21", # "TATGA" => "22", # "CACAC" => "23", # "ACACT" => "24", # "ACTAC" => "25", # "GTTAC" => "26", ); %row = ( #if you want output for all rows, leave them all uncommented "TAAGC" => "A", "TGCAC" => "B", "CTCAG" => "C", "GGAAT" => "D", "CGAGG" => "E", "AGGAG" => "F", "TGTTG" => "G", "CAACT" => "H", ); %col = ( #if you want output for all columns, leave them all uncommented "GTTCA" => "01", "CAGGA" => "02", "TTATA" 
=> "03", "CCTGT" => "04", "ACCGC" => "05", "ACTTA" => "06", "GCTAG" => "07", "GACGT" => "08", "GGCTA" => "09", "GAATG" => "10", "CCAAC" => "11", "GAGAC" => "12", ); %TCR = ( "GTCAC" => "A", # TCRA "GAGAT" => "B", ); foreach $plateID (keys(%plate)) { foreach $rowID (keys(%row)) { foreach $colID (keys(%col)) { foreach $TCRID (keys(%TCR)) { $fh = $plate{$plateID}.$row{$rowID}.$col{$colID}.$TCR{$TCRID}; open $fh, '>', $fh.".fastq"; #open file for writing at the end } } } } while($A1 = <F1>) { #read 4 lines from R1 and 4 lines from R2 $A2 = <F1>; $A3 = <F1>; $A4 = <F1>; $B1 = <F2>; $B2 = <F2>; $B3 = <F2>; $B4 = <F2>; $ID = substr($A2, 2, 5); #plate ID barcode # now find what the true bar code should have been if imperfect match $score = 0; $trueID = ""; foreach $key (keys(%plate)) { my $count = ($ID^$key) =~ tr/\0//; if ($count > $score) { $score = $count; $trueID = $key } } if ($score >= 4) {#accept $true_plateID as the true plate ID $rowID = $trueID; } else {#leave $plateID blank - sequence won't be output $rowID = "" } $ID = substr($B2, 2, 5); #column ID # now find what the true bar code should have been if imperfect match $score = 0; $trueID = ""; foreach $key (keys(%col)) { my $count = ($ID^$key) =~ tr/\0//; if ($count > $score) { $score = $count; $trueID = $key } } if ($score >= 4) {#accept $true_plateID as the true plate ID $colID = $trueID; } else {#leave $plateID blank - sequence won't be output $colID = "" } $ID = substr($B2, 7, 5); #TCR ID # now find what the true bar code should have been if imperfect match $score = 0; $trueID = ""; foreach $key (keys(%TCR)) { my $count = ($ID^$key) =~ tr/\0//; if ($count > $score) { $score = $count; $trueID = $key } } if ($score >= 4) { $TCRID = $trueID; } else { $TCRID = "" } if (exists $plate{$plateID} and exists $row{$rowID} and exists $col{$colID} and exists $TCR{$TCRID}) { $fh = $plate{$plateID}.$row{$rowID}.$col{$colID}.$TCR{$TCRID}; print $fh $A1.$A2.$A3.$A4.$B1.$B2.$B3.$B4; }; } close(F1); close(F2); """ 
Explanation: This script demultiplexes reads multiplexed in a single pair of large fastq files and saves them into separate fastq files whose names indicate Plate, Well, and TCR isoform (A or B), for instance 01H12B.fastq. Up to one mismatch is allowed in any of the Plate, Well Row, Well Column, and TCR Isoform barcodes. It will create 2x96 files (one per TCR isoform) per each Plate (a lot of files!) This script will ignore all reads from plates whose code is commented out (see below in source code). This is useful when there is a mixture of TCR genotyping reads and phenotyping reads. There is a separate demultiplex script for the phenotyping reads (see below). This is demultiplex_fastq_TCRplates.pl: End of explanation """ !perl analyze_wells.pl !ls *_result.txt """ Explanation: Analyzing demultiplexed fastq files for TCRA/B species After demultiplexing, each individual fastq file will be processed by mitcr. The output is a separate result file for each well, e.g., 01A06A_result.txt. The example below will produce reports for plate 01, row A and columns 06 through 09 (see source code below). End of explanation """ #!/usr/bin/perl %plate = ( "GCAGA" => "01", # "TCGAA" => "02", # "AACAA" => "03", # "GGTGC" => "04", # "TTGGT" => "05", # "CATTC" => "06", # "ATTGG" => "07", # "CGGTT" => "08", # "ATCCT" => "09", # "ATGTC" => "10", # "TCACG" => "11", # "AGACC" => "12", # "CCCCA" => "13", # "GCGCT" => "14", # "TCCTT" => "15", # "TATAT" => "16", # "CGTAA" => "17", # "AAGGT" => "18", # "AGCTC" => "19", # "CTTGC" => "20", # "GTATC" => "21", # "TATGA" => "22", # "CACAC" => "23", # "ACACT" => "24", # "ACTAC" => "25", # "GTTAC" => "26", ); %row = ( #uncomment line if you want output for row A, etc. "TAAGC" => "A", # "TGCAC" => "B", # "CTCAG" => "C", # "GGAAT" => "D", # "CGAGG" => "E", # "AGGAG" => "F", # "TGTTG" => "G", # "CAACT" => "H", ); %col = ( #uncomment line if you want output for column 01, etc. 
# "GTTCA" => "01", # "CAGGA" => "02", # "TTATA" => "03", # "CCTGT" => "04", # "ACCGC" => "05", "ACTTA" => "06", "GCTAG" => "07", "GACGT" => "08", "GGCTA" => "09", # "GAATG" => "10", # "CCAAC" => "11", # "GAGAC" => "12", ); %TCR = ( "GTCAC" => "A", # TCRA "GAGAT" => "B", ); foreach $plateID (sort (keys(%plate))) { foreach $rowID (sort (keys(%row))) { foreach $colID (sort (keys(%col))) { foreach $TCRID (sort (keys(%TCR))) { $fh = $plate{$plateID}.$row{$rowID}.$col{$colID}.$TCR{$TCRID}; print "$fh\n"; system("java -Xmx10g -jar ./mitcr.jar -pset flex -gene TR$TCR{$TCRID} $fh.fastq $fh\_result.txt") } } } } """ Explanation: The output is a tab-delimited file whose main components are these (this is the content of file 01A06A_result.txt): <img src = "files/TCRA.png"> The first column is the number of times this sequence is seen; the second column is the fraction (not a percentage) of the total count of sequences in the well. This is especially useful when there are two species of TCRA expressed in a single cell (as in this case). It does not happen with TCRB.The v- j- and d- alleles of the TCR are listed. The last two lines (a tiny fraction of the number of reads) are a result of sequencing/PCR errors. The program mitcr has an error-checking algorithm that reduces these calls. For details see MiTCR: software for T-cell receptor sequencing data analysis by DA Bolotin et al., Nature Methods 10, 813-814 (2013). This is the source of analyze_wells.pl: End of explanation """ !perl demultiplex_fastq_phenoplates.pl Sample_S1_L001_R1_001.fastq Sample_S1_L001_R2_001.fastq !ls 03*.fastq """ Explanation: Demultiplexing phenotyping reads The following command demultiplexes phenotyping reads multiplexed in a single pair of large fastq files and saves them into separate fastq files whose names indicate Plate, Well, and "R1" or "R2" for left or right read, for instance 03H12R1.fastq. Up to one mismatch is allowed in any of the Plate, Well Row, or Well Column barcodes. 
It will create 2x96 files per each Plate. This script will ignore all reads from plates whose code is commented out (see below in source code). This is useful when there is a mixture of TCR genotyping reads and phenotyping reads. End of explanation """ #!/usr/bin/perl $fileR1 = $ARGV[0]; $fileR2 = $ARGV[1]; open(F1,$fileR1); open(F2,$fileR2); %plate = ( # "GCAGA" => "01", # "TCGAA" => "02", "AACAA" => "03", # "GGTGC" => "04", # "TTGGT" => "05", # "CATTC" => "06", ); %row = ( "TAAGC" => "A", "TGCAC" => "B", "CTCAG" => "C", "GGAAT" => "D", "CGAGG" => "E", "AGGAG" => "F", "TGTTG" => "G", "CAACT" => "H", ); %col = ( "GTTCA" => "01", "CAGGA" => "02", "TTATA" => "03", "CCTGT" => "04", "ACCGC" => "05", "ACTTA" => "06", "GCTAG" => "07", "GACGT" => "08", "GGCTA" => "09", "GAATG" => "10", "CCAAC" => "11", "GAGAC" => "12", ); foreach $plateID (keys(%plate)) { foreach $rowID (keys(%row)) { foreach $colID (keys(%col)) { $fh = $plate{$plateID}.$row{$rowID}.$col{$colID}; $fh1 = $plate{$plateID}.$row{$rowID}.$col{$colID}."1"; $fh2 = $plate{$plateID}.$row{$rowID}.$col{$colID}."2"; open $fh1, '>', $fh."R1.fastq"; open $fh2, '>', $fh."R2.fastq"; } } } while($A1 = <F1>) { #read 4 lines from R1 and 4 lines from R2 $A2 = <F1>; $A3 = <F1>; $A4 = <F1>; $B1 = <F2>; $B2 = <F2>; $B3 = <F2>; $B4 = <F2>; # now find out if the bar codes make sense $ID = substr($A2, 2, 5); #plate ID # now find what the true bar code should have been if imperfect match $score = 0; $trueID = ""; foreach $key (keys(%plate)) { my $count = ($ID^$key) =~ tr/\0//; if ($count > $score) { $score = $count; $trueID = $key } } if ($score >= 4) {#accept $true_plateID as the true plate ID $plateID = $trueID; } else {#leave $plateID blank - sequence won't be output $plateID = "" } $ID = substr($A2, 9, 5); #row ID # now find what the true bar code should have been if imperfect match $score = 0; $trueID = ""; foreach $key (keys(%row)) { my $count = ($ID^$key) =~ tr/\0//; if ($count > $score) { $score = $count; $trueID = $key } } 
if ($score >= 4) { $rowID = $trueID; } else { $rowID = "" } $ID = substr($B2, 2, 5); #column ID # now find what the true bar code should have been if imperfect match $score = 0; $trueID = ""; foreach $key (keys(%col)) { my $count = ($ID^$key) =~ tr/\0//; if ($count > $score) { $score = $count; $trueID = $key } } if ($score >= 4) { $colID = $trueID; } else { $colID = "" } if (exists $plate{$plateID} and exists $row{$rowID} and exists $col{$colID} ) { $fh1 = $plate{$plateID}.$row{$rowID}.$col{$colID}."1"; $fh2 = $plate{$plateID}.$row{$rowID}.$col{$colID}."2"; print $fh1 $A1.$A2.$A3.$A4; print $fh2 $B1.$B2.$B3.$B4; }; } close(F1); close(F2); """ Explanation: The source code of demultiplex_fastq_phenoplates.pl is here (in this example, Plate 03 contains phenotyping reads): End of explanation """ !perl count_cytokines.pl !ls *.count """ Explanation: Analyze demultiplexed phenotyping fastq files for expression levels of 17 cytokines and transcription factors The following command will produce expression counts for all 17 cytokines and TF's, separately for each well: End of explanation """ #!/usr/bin/perl %plate = ( # "GCAGA" => "01", # "TCGAA" => "02", "AACAA" => "03", # "GGTGC" => "04", # "TTGGT" => "05", # "CATTC" => "06", ); %row = ( "TAAGC" => "A", "TGCAC" => "B", "CTCAG" => "C", "GGAAT" => "D", "CGAGG" => "E", "AGGAG" => "F", "TGTTG" => "G", "CAACT" => "H", ); %col = ( "GTTCA" => "01", "CAGGA" => "02", "TTATA" => "03", "CCTGT" => "04", "ACCGC" => "05", "ACTTA" => "06", "GCTAG" => "07", "GACGT" => "08", "GGCTA" => "09", "GAATG" => "10", "CCAAC" => "11", "GAGAC" => "12", ); %cyt = ( "GCCGGAGGAGGTGGATGTGC" => "GATA3", "CCCAACACAGGAGCGCACTG" => "TBET", "GGCAGCCAAGGCCCTGTCGT" => "FOXP3", "AGAGGAAGTCCATGTGGGAG" => "RORC", "GCGAGCTGGTGCGCACCGAC" => "RUNX1", "GGACCACGCAGGCGAGCTCG" => "RUNX3", "CCTACACGGCCCCACCTGCC" => "BCL6", "CCACAGAACTGAAACATCTT" => "IL2", "CCCAAGCTGAGAACCAAGAC" => "IL10", "AGACCTCTTTTATGATGGCC" => "IL12A", "GGTATGGAGCATCAACCTGA" => "IL13", 
"CAACCTGAACATCCATAACC" => "IL17A", "GGGTTCTCTTGGCTGTTACT" => "IFNG", "GGAGGCGCTCCCCAAGAAGA" => "TNFA", "CCGAGAAGCGGTACCTGAAC" => "TGFB", "GCCAACTTTGCAGCCCAGAA" => "PRF1", "CCACAATATCAAAGAACAGG" => "GZMB", ); foreach $plateID (sort (keys(%plate))) { foreach $rowID (sort (keys(%row))) { foreach $colID (sort (keys(%col))) { $fh = $plate{$plateID}.$row{$rowID}.$col{$colID}; open(F1,$fh."R1.fastq"); open $fh, '>', $fh."R1.count"; print $fh "\t$fh\n"; #print header # zero out counters foreach $key (keys(%cyt)) {$count{$cyt{$key}} = 0}; while($A1 = <F1>) { #read 4 lines from R1 and 4 lines from R2 $A2 = <F1>; $A3 = <F1>; $A4 = <F1>; # now find out if the bar codes make sense $seq = substr($A2, 36, 20); if (exists $cyt{$seq}) {$count{$cyt{$seq}}++}; #add to count }; foreach $key (keys(%cyt)) { print $fh $cyt{$key}."\t".$count{$cyt{$key}}."\n" }; close(F1); close($fh); } } } """ Explanation: The output is a set of tab-delimited files such as 03F03R1.count. Only the R1 read is used for counting; the R2 read is redundant (and lower quality anyway). The content of this file looks something close to this: <img src = "files/counts.png"> The source code of count_cytokines.pl is here (Plate 03 has pheno reads): End of explanation """ !rm 0* """ Explanation: Cleanup after exercize: End of explanation """
crystalzhaizhai/cs207_yi_zhai
homeworks/HW6/HW6_P1_AnswerKey.ipynb
mit
from enum import Enum class AccountType(Enum): SAVINGS = 1 CHECKING = 2 """ Explanation: Problem 1: Bank Account Revisited We are going to rewrite the bank account closure problem we had a few assignments ago, only this time developing a formal class for a Bank User and Bank Account to use in our closure (recall previously we just had a nonlocal variable amount that we changed). First we are going to define two types of bank accounts, run the code below: End of explanation """ AccountType.SAVINGS """ Explanation: An Enum stands for an enumeration, it's a convenient way for you to define lists of things. Typing: End of explanation """ AccountType.SAVINGS == AccountType.SAVINGS AccountType.SAVINGS == AccountType.CHECKING """ Explanation: returns a Python representation of an enumeration. You can compare these account types: End of explanation """ AccountType.SAVINGS.name """ Explanation: To get a string representation of an Enum, you can use: End of explanation """ class BankAccount: def __init__(self, owner, accountType): self.owner = owner self.accountType = accountType self.balance = 0 def withdraw(self, amount): if self.balance >= amount and amount >= 0: self.balance -= amount return True else: return False def deposit(self, amount): if amount >= 0: self.balance += amount return True else: return False def __str__(self): return self.owner + "'s " + self.accountType.name + " Account" def __len__(self): return self.balance """ Explanation: Create a BankAccount class with the following specification: Constructor is BankAccount(self, owner, accountType) where owner is a string representing the name of the account owner and accountType is one of the AccountType enums Methods withdraw(self, amount) and deposit(self, amount) to modify the account balance of the account Override methods __str__ to write an informative string of the account owner and the type of account, and __len__ to return the balance of the account End of explanation """ class BankUser: def 
__init__(self, owner): self.owner = owner self.accounts = {} def addAccount(self, accountType): if accountType.name in self.accounts: return False else: self.accounts[accountType.name] = BankAccount(self.owner, accountType) return True def getBalance(self, accountType): return len(self.accounts[accountType.name]) if accountType.name in self.accounts else -1 def deposit(self, accountType, amount): if accountType.name in self.accounts: return self.accounts[accountType.name].deposit(amount) else: return False def withdraw(self, accountType, amount): if accountType.name in self.accounts: return self.accounts[accountType.name].withdraw(amount) else: return False def __str__(self): s = self.owner + "'s Accounts:\n" for t, a in self.accounts.items(): s += t + ": " + str(len(a)) + "\n" return s """ Explanation: Write some simple tests to make sure the BankAccount is working as expected. Next, write a class BankUser with the following specification: Constructor BankUser(self, owner) where owner is the name of the account. Method addAccount(self, accountType) - to start, a user will have no accounts when the BankUser object is created. addAccount will add a new account to the user of the accountType specified. Only one savings/checking account per user, return appropriate error otherwise Methods getBalance(self, accountType), deposit(self, accountType, amount), and withdraw(self, accountType, amount) for a specific AccountType. Override __str__ to have an informative summary of user's accounts End of explanation """ b = BankUser("Charles Liu") print(b.deposit(AccountType.SAVINGS, 100)) print(b.addAccount(AccountType.SAVINGS)) print(b.deposit(AccountType.SAVINGS, 50)) print(str(b)) """ Explanation: Write some simple tests to make sure this is working. Think of edge scenarios a user might try to do. 
End of explanation """ def ATMSession(bankUser): def Interface(): option = 0 while option != 1: try: option = int(input('Enter Option:\n1)Exit\n2)Create Account\n3)Check Balance\n4)Deposit\n5)WithDraw\n\n')) if option > 1: accountOption = 0 while accountOption != 1 and accountOption != 2: accountOption = int(input('Select Account Type:\n1) Checking\n2) Savings\n')) if accountOption != 1 and accountOption != 2: print('Invalid Account Specified\n') accountType = AccountType.SAVINGS if accountOption == 2 else AccountType.CHECKING if option == 2: if bankUser.addAccount(accountType): print('Account Created\n') else: print('Account Already Exists\n') elif option == 3: balance = bankUser.getBalance(accountType) if balance < 0: print('Account Not Found\n') else: print('Balance:{}'.format(balance)) else: amount = -1 while amount < 0: amount = int(input('Enter integer amount, cannot be negative\n')) if amount < 0: print('Invalid Amount Entered\n') bankFunc = bankUser.deposit if option == 4 else bankUser.withdraw if bankFunc(accountType, amount): print('Transaction was successful\n') else: balance = bankUser.getBalance(accountType) if balance >= 0: print('Insufficient Funds\n') else: print('No Account Found\n') print(str(bankUser)) except ValueError: print('Invalid Entry') option=1 return Interface interface = ATMSession(b) interface() """ Explanation: ATM Closure Finally, we are going to rewrite a closure to use our bank account. We will make use of the input function which takes user input to decide what actions to take. Write a closure called ATMSession(bankUser) which takes in a BankUser object. 
Return a method called Interface that when called, would provide the following interface: First screen for user will look like: Enter Option: 1)Exit 2)Create Account 3)Check Balance 4)Deposit 5)Withdraw Pressing 1 will exit, any other option will show the options: Enter Option: 1)Checking 2)Savings If a deposit or withdraw was chosen, then there must be a third screen: Enter Integer Amount, Cannot Be Negative: This is to keep the code relatively simple, if you'd like you can also curate the options depending on the BankUser object (for example, if user has no accounts then only show the Create Account option), but this is up to you. In any case, you must handle any input from the user in a reasonable way that an actual bank would be okay with, and give the user a proper response to the action specified End of explanation """
therealAJ/python-sandbox
data-science/learning/ud1/DataScience/MatPlotLib.ipynb
gpl-3.0
%matplotlib inline from scipy.stats import norm import matplotlib.pyplot as plt import numpy as np x = np.arange(-3, 3, 0.001) plt.plot(x, norm.pdf(x)) plt.show() """ Explanation: MatPlotLib Basics Draw a line graph End of explanation """ plt.plot(x, norm.pdf(x)) plt.plot(x, norm.pdf(x, 1.0, 0.5)) plt.show() """ Explanation: Mutiple Plots on One Graph End of explanation """ plt.plot(x, norm.pdf(x)) plt.plot(x, norm.pdf(x, 1.0, 0.5)) plt.savefig('C:\\Users\\Frank\\MyPlot.png', format='png') """ Explanation: Save it to a File End of explanation """ axes = plt.axes() axes.set_xlim([-5, 5]) axes.set_ylim([0, 1.0]) axes.set_xticks([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) axes.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) plt.plot(x, norm.pdf(x)) plt.plot(x, norm.pdf(x, 1.0, 0.5)) plt.show() """ Explanation: Adjust the Axes End of explanation """ axes = plt.axes() axes.set_xlim([-5, 5]) axes.set_ylim([0, 1.0]) axes.set_xticks([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) axes.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) axes.grid() plt.plot(x, norm.pdf(x)) plt.plot(x, norm.pdf(x, 1.0, 0.5)) plt.show() """ Explanation: Add a Grid End of explanation """ axes = plt.axes() axes.set_xlim([-5, 5]) axes.set_ylim([0, 1.0]) axes.set_xticks([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) axes.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) axes.grid() plt.plot(x, norm.pdf(x), 'b-') plt.plot(x, norm.pdf(x, 1.0, 0.5), 'r:') plt.show() """ Explanation: Change Line Types and Colors End of explanation """ axes = plt.axes() axes.set_xlim([-5, 5]) axes.set_ylim([0, 1.0]) axes.set_xticks([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) axes.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) axes.grid() plt.xlabel('Greebles') plt.ylabel('Probability') plt.plot(x, norm.pdf(x), 'b-') plt.plot(x, norm.pdf(x, 1.0, 0.5), 'r:') plt.legend(['Sneetches', 'Gacks'], loc=4) plt.show() """ Explanation: Labeling Axes and Adding a Legend End of 
explanation """ plt.xkcd() fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') plt.xticks([]) plt.yticks([]) ax.set_ylim([-30, 10]) data = np.ones(100) data[70:] -= np.arange(30) plt.annotate( 'THE DAY I REALIZED\nI COULD COOK BACON\nWHENEVER I WANTED', xy=(70, 1), arrowprops=dict(arrowstyle='->'), xytext=(15, -10)) plt.plot(data) plt.xlabel('time') plt.ylabel('my overall health') """ Explanation: XKCD Style :) End of explanation """ # Remove XKCD mode: plt.rcdefaults() values = [12, 55, 4, 32, 14] colors = ['r', 'g', 'b', 'c', 'm'] explode = [0, 0, 0.2, 0, 0] labels = ['India', 'United States', 'Russia', 'China', 'Europe'] plt.pie(values, colors= colors, labels=labels, explode = explode) plt.title('Student Locations') plt.show() """ Explanation: Pie Chart End of explanation """ values = [12, 55, 4, 32, 14] colors = ['r', 'g', 'b', 'c', 'm'] plt.bar(range(0,5), values, color= colors) plt.show() """ Explanation: Bar Chart End of explanation """ from pylab import randn X = randn(500) Y = randn(500) plt.scatter(X,Y) plt.show() """ Explanation: Scatter Plot End of explanation """ incomes = np.random.normal(27000, 15000, 10000) plt.hist(incomes, 50) plt.show() """ Explanation: Histogram End of explanation """ uniformSkewed = np.random.rand(100) * 100 - 40 high_outliers = np.random.rand(10) * 50 + 100 low_outliers = np.random.rand(10) * -50 - 100 data = np.concatenate((uniformSkewed, high_outliers, low_outliers)) plt.boxplot(data) plt.show() """ Explanation: Box & Whisker Plot Useful for visualizing the spread & skew of data. The red line represents the median of the data, and the box represents the bounds of the 1st and 3rd quartiles. So, half of the data exists within the box. The dotted-line "whiskers" indicate the range of the data - except for outliers, which are plotted outside the whiskers. Outliers are 1.5X or more the interquartile range. 
This example below creates uniformly distributed random numbers between -40 and 60, plus a few outliers above 100 and below -100: End of explanation """
palrogg/foundations-homework
08/Homework8-passengers.ipynb
mit
print("Q1: Which Swiss railway station is the most frequented?") print("A: The most frequented station is Zürich HB:") df[['Station', 'DTV']].sort_values(by='DTV', ascending=False).head(1) print("Q2: Which stations have a higher average daily circulation on Saturday and Sunday?") print("A: These 21 stations:") df[df['DTV'] > df['DWV']] print("Q3: Print a comma-separated list of all the comments in the Comments column. Escape them with the “\"” character and don't include any empty cell.") comments_list = df[df['Comments'] == df['Comments']]['Comments'].tolist() print("A: The comments are:", '"' + str.join('","',comments_list) + '".') print("Q4: How many rows contains another year than 2014?") print("A: I counted", len(df[df['Year'] != 2014]), "rows containing another year than 2014.") print("Q5: What is the size (rows, columns) of the data?") print("A: There is", df.shape[0], "rows and", df.shape[1], "columns.") df[df['Station'] == 'Zürich HB'] print("Q6: How many stations have a name starting with A?") import re a_stations = df[df['Station'].str.match('^A')] print("A: There is", len(a_stations), "“A stations”. Here they are:") a_stations print("Q7: Which are the least frequented stations during the work days? And the full week?") print("A(a): During the work days:") df[['Station', 'DWV']].sort_values(by='DWV').head(10) print("A(b): During the full week:") df[['Station', 'DTV']].sort_values(by='DTV').head(10) print("Q8: Take the most frequented and the least frequented stations. 
How many times more passengers has the most frequented one?") most_freq = df[['Station', 'DTV']].sort_values(by='DTV', ascending=False).head(1) least_freq = df[['Station', 'DTV']].sort_values(by='DTV').head(1) most_freq[['Station', 'DTV']] print("A:", most_freq['Station'].tolist()[0], "has", most_freq['DTV'].tolist()[0], "average daily passengers and", least_freq['Station'].tolist()[0], str(least_freq['DTV'].tolist()[0]) + ".") ratio = most_freq['DTV'].tolist()[0] / least_freq['DTV'].tolist()[0] print("This means that Zurich HB has", ratio, "times more daily passengers than Oron.") print("Q9: Which stations have far more passengers during work days than during the full week? Group them in a subset.") work_days = df[df['DWV'] >= 1.35 * df['DTV']] print("A: These", len(work_days), "stations have at least 35% more passengers during the work days:") work_days print("Q10: Find a crazy station name. Is its average frequency near to the mean average frequency of all stations?") # Let's try to find a very long name... longnames = df[df['Station'].str.match('.{25,}')] longnames # … We'll pick “Geneveys-sur-Coffrane, Les”. This is an pretty long name. meanDTV = df['DTV'].mean() GeneveysDTV = df[df['Code'] == 'GEC']['DTV'].values print("A: “Geneveys-sur-Coffran, Les” has an average daily frequency of", str(GeneveysDTV[0]) + ".") print("This is far less than", str(meanDTV) + ", the mean average frequency of all stations.") print("However, the _median_ frequency of all stations is only", str(df['DTV'].median()) + ".") print("Q11: Who else than the SBB CFF FFS (Federal Railways) owns stations? 
Make a list of them (remove any duplicate).") other_owner = df[(df['Owner'] != 'CFF') & (df['Owner'] != 'SBB') & (df['Owner'] != 'FFS')] list_owners = other_owner['Owner'].tolist() print("A:", str.join(", ", set(list_owners))) print("Q12: Print how many stations each owner has.") print("A: Here is how many stations they have:\n" + str(df['Owner'].value_counts())) """ Explanation: Data set 1: Passengers frequence in the Swiss railway stations Source and documentation: http://data.sbb.ch/explore/dataset/passagierfrequenz/ DTV = Durchschnittlicher täglicher Verkehr (Montag bis Sonntag) = average daily circulation (including the weekend) DWV = Durchschnittlicher werktäglicher Verkehr (Montag bis Freitag) = average daily circulation Mo-Friday End of explanation """ import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') standard = df[(df['DWV'] > 300) & (df['DWV'] < 2300) ] standard.plot(kind='scatter', x='DWV', y='DTV') print("These are the stations in Q2 and Q3 and their average daily passengers during the full week vs. the work days:") plt.style.use('ggplot') least_frequented = df.sort_values(by='DWV').head(20) least_frequented.plot(kind='barh', x='Station', y='DTV').invert_yaxis() print("These are the 20 least frequented stations, in average daily passengers:") q1_freq = df[df['DWV'] <= 340] q2_freq = df[(df['DWV'] <= 915) & (df['DWV'] > 340)] q3_freq = df[(df['DWV'] <= 2700) & (df['DWV'] > 915)] plt.scatter(y=q1_freq["DWV"], x=q1_freq["DTV"], c='c', alpha=0.75, marker='1') plt.scatter(y=q2_freq["DWV"], x=q2_freq["DTV"], c='y', alpha=0.75, marker='2') plt.scatter(y=q3_freq["DWV"], x=q3_freq["DTV"], c='m', alpha=0.75, marker='3') print("Q1, Q2 and Q3 of average daily circulation; x axis = DTV, y axis = DWV") plt.xlim(-15,2500) plt.ylim(-30,2800) """ Explanation: Graphics End of explanation """
tensorflow/docs-l10n
site/en-snapshot/probability/examples/Factorial_Mixture.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); End of explanation """ import tensorflow as tf import numpy as np import tensorflow_probability as tfp import matplotlib.pyplot as plt import seaborn as sns tfd = tfp.distributions # Use try/except so we can easily re-execute the whole notebook. try: tf.enable_eager_execution() except: pass """ Explanation: Factorial Mixture <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/probability/examples/Factorial_Mixture"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Factorial_Mixture.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Factorial_Mixture.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Factorial_Mixture.ipynb"><img 
src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In this notebook we show how to use TensorFlow Probability (TFP) to sample from a factorial Mixture of Gaussians distribution defined as: $$p(x_1, ..., x_n) = \prod_i p_i(x_i)$$ where: $$\begin{align} p_i &\equiv \frac{1}{K}\sum_{k=1}^K \pi_{ik}\,\text{Normal}\left(\text{loc}=\mu_{ik},\, \text{scale}=\sigma_{ik}\right)\1&=\sum_{k=1}^K\pi_{ik}, \forall i.\hphantom{MMMMMMMMMMM}\end{align}$$ Each variable $x_i$ is modeled as a mixture of Gaussians, and the joint distribution over all $n$ variables is a product of these densities. Given a dataset $x^{(1)}, ..., x^{(T)}$, we model each dataponit $x^{(j)}$ as a factorial mixture of Gaussians: $$p(x^{(j)}) = \prod_i p_i (x_i^{(j)})$$ Factorial mixtures are a simple way of creating distributions with a small number of parameters and a large number of modes. End of explanation """ num_vars = 2 # Number of variables (`n` in formula). var_dim = 1 # Dimensionality of each variable `x[i]`. num_components = 3 # Number of components for each mixture (`K` in formula). sigma = 5e-2 # Fixed standard deviation of each component. # Choose some random (component) modes. component_mean = tfd.Uniform().sample([num_vars, num_components, var_dim]) factorial_mog = tfd.Independent( tfd.MixtureSameFamily( # Assume uniform weight on each component. mixture_distribution=tfd.Categorical( logits=tf.zeros([num_vars, num_components])), components_distribution=tfd.MultivariateNormalDiag( loc=component_mean, scale_diag=[sigma])), reinterpreted_batch_ndims=1) """ Explanation: Build the Factorial Mixture of Gaussians using TFP End of explanation """ plt.figure(figsize=(6,5)) # Compute density. nx = 250 # Number of bins per dimension. x = np.linspace(-3 * sigma, 1 + 3 * sigma, nx).astype('float32') vals = tf.reshape(tf.stack(np.meshgrid(x, x), axis=2), (-1, num_vars, var_dim)) probs = factorial_mog.prob(vals).numpy().reshape(nx, nx) # Display as image. 
from matplotlib.colors import ListedColormap cmap = ListedColormap(sns.color_palette("Blues", 256)) p = plt.pcolor(x, x, probs, cmap=cmap) ax = plt.axis('tight'); # Plot locations of means. means_np = component_mean.numpy().squeeze() for mu_x in means_np[0]: for mu_y in means_np[1]: plt.scatter(mu_x, mu_y, s=150, marker='*', c='r', edgecolor='none'); plt.axis(ax); plt.xlabel('$x_1$') plt.ylabel('$x_2$') plt.title('Density of factorial mixture of Gaussians'); """ Explanation: Notice our use of tfd.Independent. This "meta-distribution" applies a reduce_sum in the log_prob calculation over the rightmost reinterpreted_batch_ndims batch dimensions. In our case, this sums out the variables dimension leaving only the batch dimension when we compute log_prob. Note that this does not affect sampling. Plot the Density Compute the density on a grid of points, and show the locations of the modes with red stars. Each mode in the factorial mixture corresponds to a pair of modes from the underlying individual-variable mixture of Gaussians. We can see 9 modes in the plot below, but we only needed 6 parameters (3 to specify the locations of the modes in $x_1$, and 3 to specify the locations of the modes in $x_2$). In contrast, a mixture of Gaussians distribution in the 2d space $(x_1, x_2)$ would require 2 * 9 = 18 parameters to specify the 9 modes. End of explanation """ samples = factorial_mog.sample(1000).numpy() g = sns.jointplot( x=samples[:, 0, 0], y=samples[:, 1, 0], kind="scatter", marginal_kws=dict(bins=50)) g.set_axis_labels("$x_1$", "$x_2$"); """ Explanation: Plot samples and marginal density estimates End of explanation """
deepfield/ibis
docs/source/notebooks/tutorial/1-Intro-and-Setup.ipynb
apache-2.0
import ibis import os """ Explanation: Impala/HDFS intro and Setup Getting started You're going to want to make sure you can import ibis End of explanation """ hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070) hdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port) """ Explanation: If you have WebHDFS available, connect to HDFS with according to your WebHDFS config. For kerberized or more complex HDFS clusters please look at http://hdfscli.readthedocs.org/en/latest/ for info on connecting. You can use a connection from that library instead of using hdfs_connect End of explanation """ con = ibis.impala.connect('quickstart.cloudera', hdfs_client=hdfs) con """ Explanation: Finally, create the Ibis client End of explanation """
IST256/learn-python
content/lessons/01-Intro/LAB-Intro.ipynb
mit
your_name = input("What is your name? ") print('Hello there',your_name) """ Explanation: Class Coding Lab: Introduction to Programming The goals of this lab are to help you to understand: How to turn in your lab and homework the Jupyter programming environments basic Python Syntax variables and their use how to sequence instructions together into a cohesive program the input() function for input and print() function for output Let's start with an example: Hello, world! This program asks for your name as input, then says hello to you as output. Most often it's the first program you write when learning a new programming language. TO RUN THIS CODE: Click in the cell below and click the run cell button. NOTE: After the code executes, you will see a sequence number next to the code and output below the code itself. This is your indication the code in the cell has run. You must run all code cells in the notebook for full credit. End of explanation """ # Here's an example of variable assignment. country = 'USA' """ Explanation: Believe it or not there's a lot going on in this simple two-line program, so let's break it down. The first line: Asks you for input, prompting you with What is your Name? It then stores your input in the variable your_name The second line: prints out the following text: Hello there then prints out the contents of the variable your_name At this point you might have a few questions. What is a variable? Why do I need it? Why is this two lines? Etc... All will be revealed in time. Variables Variables are names in our code which store values. I think of variables as cardboard boxes. Boxes hold things. Variables hold things. The name of the variable is on the ouside of the box (that way you know which box it is), and value of the variable represents the contents of the box. Variable Assignment Assignment is an operation where we store data in our variable. It's like packing something up in the box. 
In this example we assign the value "USA" to the variable country End of explanation """ country # Run this cell. Itshould say 'USA' """ Explanation: Variable Access What good is storing data if you cannot retrieve it? Lucky for us, retrieving the data in variable is as simple as calling its name: End of explanation """ country = 'Canada' """ Explanation: At this point you might be thinking: Can I overwrite a variable? The answer, of course, is yes! Just re-assign it a different value: End of explanation """ country, country, country """ Explanation: You can also access a variable multiple times. Each time it simply gives you its value: End of explanation """ input("What is your name? ") print('Hello there') """ Explanation: The Purpose Of Variables Variables play an vital role in programming. Computer instructions have no memory of each other. That is one line of code has no idea what is happening in the other lines of code. The only way we can "connect" what happens from one line to the next is through variables. For example, if we re-write the Hello, World program at the top of the page without variables, we get the following: End of explanation """ # TODO: Write your code here """ Explanation: When you execute this program, notice there is no longer a connection between the input and the output. In fact, the input on line 1 doesn't matter because the output on line 2 doesn't know about it. It cannot because we never stored the results of the input into a variable! 1.1 You Code Re-write the program above to input a name and then say hello there, name. It will need to store the first line in a variable so that it can be printed on the 2nd line. End of explanation """ y = input("Enter your city: ") x = input("Enter your state: ") print(x,y,'is a nice place to live') """ Explanation: What's in a name? Um, EVERYTHING Computer code serves two equally important purposes: To solve a problem (obviously) To communicate how you solved problem to another person (hmmm... 
I didn't think of that!) If our code does something useful, like land a rocket, predict the weather, or calculate month-end account balances then the chances are 100% certain that someone else will need to read and understand our code. Therefore it's just as important we develop code that is easily understood by both the computer and our colleagues. This starts with the names we choose for our variables. Consider the following program: End of explanation """ city = input("Enter your city: ") state = input("Enter your state: ") print(city, state, 'is a nice place to live') """ Explanation: What do x and y represent? Is there a semantic (design) error in this program? You might find it easy to figure out the answers to these questions, but consider this more human-friendly version: End of explanation """ # TODO: Debug this code here. name = input "Enter your name: " foo = input("Enter your age: ") print(name, "is" ) """ Explanation: Do the aptly-named variables make it easier to find the semantic errors in this second version? OF COURSE THEY DO!!! 1.2 You Code Debug the program below (remove errors to get it working). When it is correct it should input your name and your age and the print name and age on a single line. Make sure you use aptly-named variables!!! Example of the Program running: Enter your name: Mike Enter your age: 25 Mike is 25 In the above example Mike was the entered name, and 25 was the entered age. End of explanation """ # TODO: write your code here """ Explanation: 1.3 You Code Now try to write a program which asks for two separate inputs: your first name and your last name. The program should then output Hello with your first name and last name. For example if you enter Mike for the first name and Fudge for the last name the program should output Hello Mike Fudge HINTS Use appropriate variable names. If you need to create a two word variable name use an underscore in place of the space between the words. eg. 
two_words You will need a separate set of inputs for each name. End of explanation """ prefix = "re" suffix = "ment" root = input("Enter a root word, like 'ship': ") print( prefix + root + suffix) first = input("Enter first name: ") last = input("enter last name: ") name_last_first = last + "," + first print(name_last_first) """ Explanation: Variable Concatenation: Your First Operator The + symbol is used to combine to variables containing text values together. Consider the following example: End of explanation """ # TODO: write your code here """ Explanation: 1.4 You Code Write a program to prompt for three colors as input, then outputs those three colors in order they were entered, informing me which one was the middle (2nd entered) color. For example if you were to input red then green then blue the program would output: Your colors are: red, green, and blue. The middle color is green. HINTS you'll need three variables one for each input you should try to make the program output like my example. This includes commas and the word and. name your variables appropriately! use the + operator. End of explanation """ name = "Mary" major = "Data Science" gpa = "4.0" print(f"{name} is a {major} major. Her gpa is {gpa}") """ Explanation: F-Strings In Python 3.7, f-strings were introduced to make it easier to format string literals in the print() statement. Here's how it works: Put an f in front of the string literal, like this: f" For any variable you want to print, enclose in {curly braces} within the string literal. At run-time the variable in {curly braces} is replaced with its value! This is called string interpolation. For example: End of explanation """ # TODO: write your code here """ Explanation: 1.5 You Code Re-write the last program (1.4 You Code) to print using f-strings! As good practice, do not copy and paste code, instead re-write it. This will result in fewer bugs (mistakes) in your code. End of explanation """ # run this code to turn in your work! 
from coursetools.submission import Submission Submission().submit() """ Explanation: Metacognition Rate your comfort level with this week's material so far. 1 ==> I don't understand this at all yet and need extra help. If you choose this please try to articulate that which you do not understand to the best of your ability in the questions and comments section below. 2 ==> I can do this with help or guidance from other people or resources. If you choose this level, please indicate HOW this person helped you in the questions and comments section below. 3 ==> I can do this on my own without any help. 4 ==> I can do this on my own and can explain/teach how to do it to others. --== Double-Click Here then Enter a Number 1 through 4 Below This Line ==-- Questions And Comments Record any questions or comments you have about this lab that you would like to discuss in your recitation. It is expected you will have questions if you did not complete the code sections correctly. Learning how to articulate what you do not understand is an important skill of critical thinking. Write them down here so that you remember to ask them in your recitation. We expect you will take responsilbity for your learning and ask questions in class. --== Double-click Here then Enter Your Questions Below this Line ==-- How Do I hand in my Work? FIRST AND FOREMOST: Save Your work! Yes, it auto-saves, but you should get in the habit of saving before submitting. From the menu, choose File --> Save Notebook. Or you can use the shortcut keys CTRL+S Handing in your Homework and Labs is easy! All you need to do is run the code cell below and follow the directions. This code sends your assignment to a private cloud where your instructor can download a copy of it at the time of submission. Once the assignment is graded, you will see a grade and feedback / comments in Blackboard. End of explanation """
cosmodesi/desibgsdev
Redshift_Efficiency_Study/BGS_z-efficiency_uniform-sampling.ipynb
bsd-3-clause
import os import numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from astropy.table import Table, vstack from astropy.io import fits from desispec.io.util import write_bintable from desiutil.log import get_logger, DEBUG log = get_logger() from desitarget.cuts import isBGS_bright, isBGS_faint ## Following not yet available in the master branch from desitarget.mock.mockmaker import BGSMaker from desitarget.mock.mockmaker import SKYMaker import multiprocessing nproc = multiprocessing.cpu_count() // 2 import seaborn as sns sns.set(style='white', font_scale=1.1, palette='deep') # Specify if using this from command line as a .py or as an ipynb using_py = False class arg: pass simnames = ['sim46']#['sim13','sim14','sim16','sim17','sim18'] #'sim12', if using_py: import argparse parser = argparse.ArgumentParser() parser.add_argument('--sim', type=int, default=None, help='Simulation number (see documentation)') parser.add_argument('--part', type=str, default=None, help='Which part of the simulation to run. Options are all, newexp, group, zfit') args = parser.parse_args() if args.sim is None: parser.print_help() sys.exit(1) else: %matplotlib inline %load_ext autoreload %autoreload 2 args = arg() args.sim = 1 args.part = 'all' """ Explanation: BGS Signal-to-Noise Ratio and Redshift Efficiency The goal of this notebook is to assess the signal-to-noise ratio and redshift efficiency of BGS targets observed in "nominal" observing conditions (which are defined here and discussed here, among other places). Specifically, the nominal BGS observing conditions we adopt (note the 5-minute exposure time is with the moon down!) are: python {'AIRMASS': 1.0, 'EXPTIME': 300, 'SEEING': 1.1, 'MOONALT': -60, 'MOONFRAC': 0.0, 'MOONSEP': 180} During the survey itself, observations with the moon up (i.e., during bright time) will be obtained with longer exposure times according to the bright-time exposure-time model (see here). 
Because we fix the observing conditions, we only consider how redshift efficiency depends on galaxy properties (apparent magnitude, redshift, 4000-A break, etc.). However, note that the code is structured such that we could (now or in the future) explore variations in seeing, exposure time, and lunar parameters. For code to generate large numbers of spectra over significant patches of sky and to create a representative DESI dataset (with parallelism), see desitarget/bin/select_mock_targets and desitarget.mock.build.targets_truth. Finally, note that the various python Classes instantiated here (documented in desitarget.mock.mockmaker) are easily extensible to other mock catalogs and galaxy/QSO/stellar physics. End of explanation """ simdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'sim', 'bgs', 'kremin', 'flat_priors') if not os.path.exists(simdir): os.makedirs(simdir) seed = 626 """ Explanation: Establish the I/O path, random seed, and path to the dust maps and desired healpixel. End of explanation """ overwrite_spectra = True #overwrite_templates = overwrite_spectra overwrite_redshifts = True overwrite_results = True """ Explanation: All or none of the output files can be overwritten using these keywords. End of explanation """ rand = np.random.RandomState(seed) """ Explanation: Initialize random state End of explanation """ from desistudy import get_predefined_sim_dict, get_predefined_obs_dict all_sims = [] all_obsconds = [] for simname in simnames: all_sims.append(get_predefined_sim_dict(simname)) all_obsconds.append(get_predefined_obs_dict(simname)) print(all_obsconds) sims = np.atleast_1d(all_sims) conditions = np.atleast_1d(all_obsconds) """ Explanation: Set up the simulation parameters. Here we use the mock to capture the correct distribution of apparent magnitudes, galaxy properties, and redshifts. Note that if use_mock=False then rmagmin, rmagmax, zmin, and zmax are required. 
For example, here's another possible simulation of 1000 spectra in which the magnitude (r=19.5) and redshift (z=0.2) are held fixed while moonfrac and moonsep are varied (as well as intrinsic galaxy properties): python sim2 = dict(suffix='sim02', use_mock=False, nsim=10, nspec=100, seed=22, zmin=0.2, zmax=0.2, rmagmin=19.5, rmagmax=19.5, moonfracmin=0.0, moonfracmax=1.0, moonsepmin=0.0, moonsepmax=120.0, ) End of explanation """ from desistudy import bgs_sim_spectra if overwrite_spectra: for sim,cond in zip(sims,conditions): log.info("\n\n\n\nNow performing sim {}".format(sim['suffix'])) bgs_sim_spectra(sim, cond, simdir, verbose=False, overwrite=overwrite_spectra) log.info("\n\nFinished simulating templates\n\n") """ Explanation: Generate Spectra End of explanation """ from desistudy import bgs_redshifts if overwrite_redshifts: for sim in sims: log.info("\n\n\n\nNow performing sim {}".format(sim['suffix'])) bgs_redshifts(sim, simdir=simdir, overwrite=overwrite_redshifts) log.info("\n\n\n\n\nFinished redshift fitting\n\n\n") """ Explanation: Fit the redshifts. This step took ~1.8 seconds per spectrum, ~3 minutes per 100 spectra, or ~30 minutes for all 1000 spectra with my 4-core laptop. End of explanation """ from desistudy import bgs_gather_results if overwrite_results: for sim in sims: log.info("\n\n\n\nNow performing sim {}".format(sim['suffix'])) bgs_gather_results(sim, simdir=simdir, overwrite=overwrite_results) log.info("Finished gathering results") """ Explanation: Gather the results. 
End of explanation """ # from desistudy import bgs_sim_spectra # from desistudy import bgs_redshifts # from desistudy import bgs_gather_results # for sim,cond in zip(sims,conditions): # log.info("\n\n\n\nNow performing sim {}".format(sim['suffix'])) # if overwrite_spectra: # bgs_sim_spectra(sim, cond, simdir, verbose=False, overwrite=overwrite_spectra) # log.info("Finished simulating templates") # if overwrite_redshifts: # bgs_redshifts(sim, simdir=simdir, overwrite=overwrite_redshifts) # log.info("Finished redshift fitting") # if overwrite_results: # bgs_gather_results(sim, simdir=simdir, overwrite=overwrite_results) # log.info("Finished gathering results") """ Explanation: Do everything in one cell End of explanation """
CarlosGrohmann/hypsometric
hypsometric_analysis.ipynb
mit
import sys, os import numpy as np import math as math import numpy.ma as ma from matplotlib import cm from matplotlib.colors import LightSource from scipy import ndimage import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap %matplotlib inline # import osgeo libs after basemap, so it # won't cause conflicts (Assertion failed..) # with mannualy-installed GEOS import gdal, ogr import shapefile as shpf """ Explanation: Hypsometric analysis of Mountain Ranges Carlos H. Grohmann Institute of Energy and Environment University of São Paulo, São Paulo, Brazil guano -at- usp -dot- br Hypsometry Hypsometric analysis as the study of land elevations about a given datum can be traced back to the works of German Geographer Albrecht Penck$^1$, although its modern implementation is usually related to a seminal paper by A.N.Strahler$^2$. The area-altitude distribution can be shown as hypsographic or hypsometric curves. The hypsographic curve uses absolute units of measure, where elevation is plotted on the ordinate and the area above a given elevation on the abscissa. The hypsometric curve uses adimensional axes to show the relation of area an elevation of a point about the total area and maximum elevation of a region$^{3,4,5}$ (Supplementary Figure 1A). One important point is that both representations are cumulative curves, not simple histograms of elevation distribution. The Empirical Cumulative Distribution Function (ECDF) of a DEM can be used to calculate the accumulated area (or relative area) per elevation and used to construct a hypsometric curve (as in the R package hydroTSM$^6$). To plot the hypsographic curve, on the other hand, the real area of pixels is needed, and the ECDF cannot be used. The area of the pixels of a DEM in a 'Latitude-Longitude' projection will decrease towards the pole as the length of an arc of longitude tends to zero at $90^\circ$, and this variation must be considered for a proper hypsometric analysis. 
This could be achieved by calculating the size of the pixels (as shown in the code below) or by using tools such as the R package raster$^7$, or the GRASS-GIS$^8$ module r.stats. Natural Earth Data Elsen & Tingley used a data set of "182 expert-delineated mountain ranges" available from Natural Earth. The authors misinterpreted the metadata and stated that the data set is "roughly accurate to 50m". That is not the case. Natural Earth distributes geographical data at three scales: "1:10m" (1:10,000,000), "1:50m" (1:50,000,000) and "1:250m" (1:250,000,000). Despite the use of a lower case "m" to indicate the 1:1,000,000 scale, the documentation is clear: "Primarily derived from Patterson’s Physical Map of the World. Polygons defined by international team of volunteers. The boundaries of physical regions should be taken with a grain of salt. They are roughly accurate to 50m scale, although the number of features included is to the 10m scale. Use these polygons to for map algebra operations at your own risk!" The README file for this dataset is available at http://www.naturalearthdata.com/downloads/10m-physical-vectors/10m-physical-labels/ and Tom Patterson's Map can be accessed at http://www.shadedrelief.com/world/index.html. The maps in Figure 1(B-F) show Natural Earth polygons (in black) and polygons delimiting the same mountain ranges at larger scales (in red) for five of the mountain ranges analysed by Elsen & Tingley: Alps (range #09 of Elsen & Tingley) Blue Ridge (range #30) Ibiapaba (range #136) Cachimbo (range #140) Espinhaco (range #141) The differences between the boundaries are considered to be large enough to influence the results obtained by Elsen & Tingley, as it can be seen in the graphics of Supplementary Figure 1(B-F). Computer Code In this Supplementary Information I intent to show how the "1:50m" boundaries used to delineate the mountain ranges will influence on the hypsometric analysis. 
Additionally, Python code is presented for the calculation of hypsographic and hypsometric curves, and for the "hypsographic histograms" used by Elsen & Tingley. The code is presented as an IPython (Jupyter) Notebook, available at GitHub (https://github.com/CarlosGrohmann/hypsometric), where the data directory contains all necessary GeoTIFFs and shapefiles. The plots shown in the code are low-resolution examples of the results obtained. The reader is referred to the Supplementary Figure 1 for the final plots of each mountain range analysed here. Data The data used in this supplementary information was acquired from the following sources: Natural Earth - Boundaries of mountain ranges derived from Patterson’s Physical Map of the World and used by Elsen & Tingley. Scale 1:50,000,000. Available at http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/physical/ne_10m_geography_regions_polys.zip (Last access: 2015-06-17) Alps - Boundary of the Alps from the "Eco-pedological Map for the Alpine Territory" project (ECALP). No scale indicated. Available at http://eusoils.jrc.ec.europa.eu/projects/alpsis/Ecalp_data.html (Last access: 2015-06-17) Blue Ridge (USA) - Boundary of Blue Ridge range at 1:7,000,000. From: Fenneman, N.M., and Johnson, D.W., 1946, Physiographic Divisions of the United States, U.S. Geological Survey (USGS), Washington, D.C.; Available at http://water.usgs.gov/GIS/metadata/usgswrd/XML/physio.xml (Last access: 2015-06-17) Cachimbo, Ibiapaba and Espinhaco Ranges (Brazil) - From: IBGE (Brazilian Institute of Geography and Statistics), 2006. Map of Landscape Units of Brazil at 1:5,000,000 (Instituto Brasileiro de Geografia e Estatística, 2006. Mapa de unidades de relevo do Brasil 1:5.000.000). Available at ftp://geoftp.ibge.gov.br/mapas_tematicos/mapas_murais/shapes/relevo/ (Last access: 2015-06-17) Supplementary References 1 - Penck, A., 1894, Morphologie der Erdoberfläche, Stuttgart, J. Engelhorn, 2 vols. 2 - Strahler, A.N., 1952. 
Hypsometric (area-altitude) analysis of erosional topography. Bulletin of the Geological Society of America, 63, 1117-1142. 3 - Péguy, C.P., 1942. Principes de morphométrie alpine. Revue de Géographie Alpine, 30, 453-486. 4 - Langbein, W.B., 1947. Topographic characteristics of drainage basin. U.S. Geological Survey, Water Supply Paper 968-C, 125-157. 5 - Luo, W., 1998. Hypsometric analysis with a Geographic Information System. Computers & Geosciences, 24, 815-821. 6 - Zambrano-Bigiarini, M., 2014. hydroTSM: Time series management, analysis and interpolation for hydrological modelling. R package. Available at: http://cran.r-project.org/web/packages/hydroTSM/index.html. (Last access: 2015-06-17) 7 - Hijmans, R. J., 2015. raster: Geographic Data Analysis and Modeling. R package. Available at: http://cran.r-project.org/web/packages/raster/index.html. (Last access: 2015-06-17 8 - Neteler, M., Bowman, M.H., Landa, M., Metz, M., 2012. GRASS GIS: A multi-purpose open source GIS. Environmental Modelling & Software, 31, 124-130. Python code Import required packages End of explanation """ # auxiliar functions def roundBase(x, base=5): return int(base * round(float(x)/base)) def roundUp(x, base=50): return int(base * np.ceil(float(x)/base)) def roundDown(x, base=50): return int(base * np.floor(float(x)/base)) def haversine(lon1, lat1, lon2, lat2, r=6371.009): R = r # Earth radius in kilometers dLat = math.radians(lat2 - lat1) dLon = math.radians(lon2 - lon1) lat1 = math.radians(lat1) lat2 = math.radians(lat2) a = math.sin(dLat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dLon/2)**2 c = 2 * math.asin(math.sqrt(a)) return R * c """ Explanation: Define functions The haversine formula is used to calculate the distance between two points on a spherical approximation of the Earth. Adapted from http://rosettacode.org/wiki/Haversine_formula#Python. Latitude and Longitude must be in decimal degrees. 
The value used here is the Mean Radius for Earth as defined by the International Union of Geodesy and Geophysics (IUGG). End of explanation """ # files dataDir = './data/' mountain = 'cachimbo' # 'alps', 'blueRidge', 'espinhaco', 'cachimbo', 'ibiapaba' mtn = mountain + '.shp' mtn_NE = mountain + '_NEarth.shp' tiff = mountain + '.tif' # label for 5M/7M boundaries source = 'IBGE'# brazilian maps # source = 'ECALP' # Alps # source = 'Fenneman & Johnson 1946' # USA Physiographic Provinces """ Explanation: Define variables for shapefiles and GeoTIFF End of explanation """ rast = gdal.Open(tiff) rast_band = rast.GetRasterBand(1) rast_array = rast.ReadAsArray() rast_stats = rast_band.GetStatistics( True, True ) rast_min = rast_stats[0] rast_max = rast_stats[1] """ Explanation: Import GeoTIFF End of explanation """ w_lon, xdim, rot1, n_lat, rot2, ydim = rast.GetGeoTransform() e_lon = w_lon + xdim * rast.RasterXSize s_lat = n_lat + ydim * rast.RasterYSize """ Explanation: Get GeoTransformation parameters, calculate image extents End of explanation """ bound_5M = shpf.Reader(mtn) bound_5M_lonlat = np.array(bound_5M.shape().points) bound_NE = shpf.Reader(mtn_NE) bound_NE_lonlat = np.array(bound_NE.shape().points) """ Explanation: Load shapefiles (for plotting only) End of explanation """ m = Basemap(projection='merc', llcrnrlat=s_lat, urcrnrlat=n_lat, llcrnrlon=w_lon, \ urcrnrlon=e_lon, resolution='c') ls = LightSource(azdeg=135,altdeg=25) rgb = ls.shade(rast_array,plt.cm.Greys) m_shade = m.imshow(rgb, origin='upper') m_color = m.imshow(rast_array, origin='upper',cmap=plt.cm.terrain, alpha=0.8, vmin=-150) bounds = range(0, roundUp(rast_max), 50) cbar = m.colorbar(size='3%', boundaries=bounds) cbar.ax.tick_params(labelsize=8) m.drawmapscale(lon=e_lon-0.8, lat=s_lat+0.5, lon0=e_lon, lat0=s_lat, length=100) xticks = np.arange(roundBase(w_lon), roundBase(e_lon), 2) yticks = np.arange(roundBase(s_lat), roundBase(n_lat), 2) m.drawparallels(yticks, linewidth=0.2, labels=[1,0,0,0], 
fontsize=9) # draw parallels m.drawmeridians(xticks, linewidth=0.2, labels=[0,0,1,0], fontsize=9) # draw meridians m.plot(bound_NE_lonlat[:,0], bound_NE_lonlat[:,1], c='k', label='Natural Earth', latlon=True) m.plot(bound_5M_lonlat[:,0], bound_5M_lonlat[:,1], c='r', label=source, latlon=True) lg = plt.legend(loc='upper right', fontsize=9) lg.get_frame().set_alpha(.8) # A little transparency # plt.show() # plt.savefig(mtn + '.pdf', dpi=600, bbox_inches='tight') # plt.clf() """ Explanation: Create basemap with shaded relief image and mountain range boundaries End of explanation """ # 5M limits out_mtn = dataDir + mountain + '_clip_5M.tif' os.system('gdalwarp -overwrite -dstnodata -9999 -cutline %s %s %s' %(mtn, tiff, out_mtn)) # Natural Earth out_NE = dataDir + mountain + '_clip_NE.tif' os.system('gdalwarp -overwrite -dstnodata -9999 -cutline %s %s %s' %(mtn_NE, tiff, out_NE)) """ Explanation: Mask original raster with shapefiles Uses external gdalwarp utility. Pixels outside the boundary polygon will be assigned a -9999 value. End of explanation """ # 5M rast_clip = gdal.Open(out_mtn) clip_bd = rast_clip.GetRasterBand(1) clip_array = rast_clip.ReadAsArray() clip_mask = ma.masked_where(clip_array == -9999, clip_array) # NatEarth rast_clip_NE = gdal.Open(out_NE) clip_NE_bd = rast_clip_NE.GetRasterBand(1) clip_NE_array = rast_clip_NE.ReadAsArray() clip_NE_mask = ma.masked_where(clip_NE_array == -9999, clip_NE_array) """ Explanation: Load clipped rasters The -9999 value is set to NaN (Not a Number), in a masked Numpy array. End of explanation """ if ydim < 0: yres = ydim * -1.0 """ Explanation: Set yres to a positive value Used to calculate the area of each pixel. 
End of explanation """ dy = haversine(0, 0, 0, ydim, r=6371.009) """ Explanation: Calculate pixel size (in km) along the N-S direction This value (dy) does not change with Latitutde End of explanation """ # array with indices rows, cols = np.indices(rast_array.shape) nrows = rast_array.shape[0] ncols = rast_array.shape[1] # new array for area values area_array = np.empty(rast_array.shape) # nested loop to create array with area values for row in range(nrows): for col in range(ncols): y = row lat = n_lat - ((y - 0.5) * yres) dx = haversine(0, lat, xdim, lat, r=6371.009) area_array[row,col] = dx * dy """ Explanation: Calculate pixel size along the E-W direction, create array with area values E-W dimension (dx) of pixels change with latitude. The haversine function is used to calculate it and area is approximated as (dx * dy). End of explanation """ # elevation 5M stats_clip = clip_bd.GetStatistics( True, True ) clip_min = stats_clip[0] clip_max = stats_clip[1] # heigh of point/contour above base of basin clip_array_comp = ma.compressed(clip_mask) h_clip = clip_array_comp - clip_min # total height of basin H_clip = clip_max - clip_min # normalize elev for hypsometric curve elevNorm_clip = h_clip / H_clip # elevation NatEarth stats_clip_NE = clip_NE_bd.GetStatistics( True, True ) clip_NE_min = stats_clip_NE[0] clip_NE_max = stats_clip_NE[1] clip_array_NE_comp = ma.compressed(clip_NE_mask) h_clip_NE = clip_array_NE_comp - clip_min H_clip_NE = clip_NE_max - clip_NE_min elevNorm_clip_NE = h_clip_NE / H_clip_NE """ Explanation: Get base statistics for clipped rasters and calculate Elevation values used in hypsometric analysis End of explanation """ # cell area 5M area_clip = ma.masked_where(clip_array == -9999, area_array) # total area of basin/area area_clip_sum = np.sum(area_clip) # cumulative area for hypsographyc curve area_clip_csum = np.cumsum(ma.compressed(area_clip)) # normalized area for hypsometric curve area_norm_clip = area_clip / area_clip_sum area_norm_csum = 
np.cumsum(ma.compressed(area_norm_clip)) # cell area NatEarth area_clip_NE = ma.masked_where(clip_NE_array == -9999, area_array) area_clip_sum_NE = np.sum(area_clip_NE) area_clip_csum_NE = np.cumsum(ma.compressed(area_clip_NE)) area_norm_clip_NE = area_clip_NE / area_clip_sum_NE area_norm_csum_NE = np.cumsum(ma.compressed(area_norm_clip_NE)) """ Explanation: Make a masked array of cell area and calculate Area values used in hypsometric analysis End of explanation """ # 5M plt.plot(area_clip_csum[::-1], np.sort(ma.compressed(clip_mask)), c='r', label=source) # NatEarth plt.plot(area_clip_csum_NE[::-1], np.sort(ma.compressed(clip_NE_mask)), c='k', \ label='Natural Earth') # decorations plt.ylabel('Elevation') plt.xlabel('Area km^2') plt.title('Hypsographic curve for ' + mountain) # plt.ylim(0.0, 5000.0) lg = plt.legend(loc='upper right', fontsize=9) # fighist = mountain + '_hypsographic.pdf' # plt.savefig(fighist) # plt.clf() """ Explanation: Plot hypsographic (absolute values) curve End of explanation """ # 5M plt.plot(area_norm_csum[::-1], np.sort(ma.compressed(elevNorm_clip)), c='r', label=source) # NatEarth plt.plot(area_norm_csum_NE[::-1], np.sort(ma.compressed(elevNorm_clip_NE)), c='k', \ label='Natural Earth') # decorations plt.xlim(0.0,1.0) plt.ylim(0.0,1.0) plt.ylabel('Elevation: h/H') plt.xlabel('Area: a/A') plt.title('Hypsometric curve for ' + mountain) lg = plt.legend(loc='upper right', fontsize=9) # fighist = mountain + '_hypsometric.pdf' # plt.savefig(fighist) # plt.clf() """ Explanation: Plot hypsometric (normalized values) curve End of explanation """ # define bins for all histograms binsize = 50 # 5M bins_clip = range(0, roundUp(clip_max), binsize) bincenters = [i + binsize/2 for i in bins_clip] # Nat Earth bins_clip_NE = range(0, roundUp(clip_NE_max), binsize) bincenters_NE = [i + binsize/2 for i in bins_clip_NE] """ Explanation: Make histograms Histograms of DEM can be of frequency (cell count per elevation) or of area per elevation. 
End of explanation """ # 5M vals, edges = np.histogram(clip_array_comp, bins=bins_clip) plt.plot(bincenters[:-1], vals, c='r', label='IBGE') # NatEarth vals_NE, edges_NE = np.histogram(clip_array_NE_comp, bins=bins_clip_NE) plt.plot(bincenters_NE[:-1], vals_NE, c='k', label='Natural Earth') # decorations plt.ylabel('Elevation frequency (counts)') plt.xlabel('Elevation (m)') plt.title('Frequency histograms for ' + mountain) lg = plt.legend(loc='upper right', fontsize=9) # plt.show() # fighist = mountain + '_histogram_frequency.pdf' # plt.savefig(fighist) # plt.clf() """ Explanation: Simple frequency (cell count) histograms End of explanation """ # i) approximating area by mean cell size mean_area_clip = np.mean(area_clip) mean_area_clip_NE = np.mean(area_clip_NE) # 5M vals, edges = np.histogram(clip_array_comp, bins=bins_clip) plt.plot(bincenters[:-1], vals * mean_area_clip, c='r', label='IBGE') # NatEarth vals_NE, edges_NE = np.histogram(clip_array_NE_comp, bins=bins_clip_NE) plt.plot(bincenters_NE[:-1], vals_NE * mean_area_clip_NE, c='k', label='Natural Earth') # decorations plt.ylabel('Area km2 (approx)') plt.xlabel('Elevation (m)') plt.title('Area (approx) histograms for ' + mountain) lg = plt.legend(loc='upper right', fontsize=9) # plt.show() # fighist = mountain + '_histogram_area_approx.pdf' # plt.savefig(fighist) # plt.clf() """ Explanation: Histograms of area per elevation These can be calculated by: Approximating the area by the mean cell size, where total area = cell count * mean area of pixels Calculating area per elevation End of explanation """ # ii) calculating area per elevation # 5M data clip_range = np.arange(0, int(clip_max)+1) sum_area_clip = ndimage.sum(area_array, clip_array, clip_range) # sum the values of areas in each bin bins_sum = [] for i in bincenters: low = i - (binsize / 2) up = i + (binsize / 2) b_sum = np.sum(sum_area_clip[low:up]) bins_sum.append(b_sum) # Natural Earth clip_range_NE = np.arange(0, int(clip_NE_max)+1) sum_area_clip = 
ndimage.sum(area_array, clip_NE_array, clip_range_NE) bins_sum_NE = [] for i in bincenters_NE: low = i - (binsize / 2) up = i + (binsize / 2) b_sum = np.sum(sum_area_clip[low:up]) bins_sum_NE.append(b_sum) """ Explanation: To calculate the area of pixels per elevation, we use the ndimage function from SciPy. It sums the values in one array (area) based on occurence a second array (elevation). A third array is used as an index (from 0 to max+1). End of explanation """ # 5M plt.plot(bincenters, bins_sum, c='r', label='IBGE') # Natural Earth plt.plot(bincenters_NE, bins_sum_NE, c='k', label='Natural Earth') # decorations plt.ylabel('Area km2 (calc)') plt.xlabel('Elevation (m)') plt.title('Area (calc) histograms for ' + mountain) lg = plt.legend(loc='upper right', fontsize=9) # plt.show() # fighist = mountain + '_histogram_area_calc.pdf' # plt.savefig(fighist) # plt.clf() """ Explanation: Plot histograms End of explanation """ # 5M area - calculated plt.plot(bincenters, bins_sum, c='r', label='calculated') #5M area - approximated plt.plot(bincenters[:-1], vals * mean_area_clip, 'o', c='k', ms=4, label='approximated') # plt.plot(bins_sum[:-1],vals * mean_area_clip, 'ko-') # decorations plt.ylabel('Area km2') plt.xlabel('Elevation (m)') plt.title('Area histograms for ' + mountain) lg = plt.legend(loc='upper right', fontsize=9) """ Explanation: We can compare both methods and see that approximating the area of pixels by the mean cell size gives results very close to those obtained by calculating the area of each pixel. End of explanation """
henchc/Rediscovering-Text-as-Data
10-Metadata/03-Bonus-Moretti/03-Metadata.ipynb
mit
%pylab inline from datascience import * metadata_tb = Table.read_table("fiction_metadata.csv") metadata_tb # Remove rows that contain duplicate titles # Sets are specially designed to handle unique elements and check for duplicates efficiently titles = set() indexes = [] for i in range(len(metadata_tb['title'])): if metadata_tb['title'][i] not in titles: indexes.append(i) titles.add(metadata_tb['title'][i]) singlevol_tb = metadata_tb.take(indexes) # Inspect annual distribution of books singlevol_tb.hist('date') # Limit to Moretti's date range date_mask = (singlevol_tb['date'] > 1750) & (singlevol_tb['date'] < 1850) singlevol_tb = singlevol_tb.where(date_mask) # EX. Plot the distribution of page counts ('totalpages'). # Should we remove any entries from our metadata? Why or why not? # EX. Plot the distribution of confidence values that given texts are fiction('prob80precise'). # Should we remove any entries from our metadata? Why or why not? """ Explanation: This notebook revisits some of the literary historical trends found by Franco Moretti in his article "Style, Inc." (<i>Critical Inquiry</i>, 36.1 (2009), 134-158). See especially his Figures 1 (p 135) and 18 (p 155). Note that the dataset used in this notebook is not Moretti's bibliograpy of novels, but Hathi Trust's catalog of fiction texts (https://sharc.hathitrust.org/genre). 
Metadata <li>Inspecting & Cleaning</li> <li>Trends</li> Detecting Word Patterns <li>Intro to Regex</li> <li>A Fortunate Formula</li> Inspecting & Cleaning End of explanation """ import numpy as np singlevol_tb = singlevol_tb.select(['title', 'date']) # Determine length of each title title_tokens = [x.split() for x in singlevol_tb['title']] title_length = [len(x) for x in title_tokens] singlevol_tb['title_len'] = title_length singlevol_tb # Determine average title length per year mean_table = singlevol_tb.group('date', collect=np.mean) mean_table mean_table.scatter('date','title_len mean') # Does the pattern hold when we treat individual titles as data points? singlevol_tb.scatter('date', 'title_len') singlevol_tb.scatter('date', 'title_len', fit_line=True) # EX. Moretti also produces graphs for the median and standard deviation # of title lengths by year. Create graphs that represent these data. """ Explanation: Trends End of explanation """ import re # Example from previous lesson for line in open('lecture notes 09-22-15.txt'): for word in line.split(): if word.endswith('ing'): print(word) # Reproduced using regex for line in open('lecture notes 09-22-15.txt'): for word in line.split(): if re.search(r'ing$', word): # only change from above print(word) # EX. Remove the "$" from the code above. How does it change the output? Why? word = 'Having' re.search(r'ing$', word) word = 'Ideas' re.search(r'ing$', word) # assign list of words to variable, so we don't have to read in the file each time with open('lecture notes 09-22-15.txt') as file_in: lec_notes = file_in.read() word_list = lec_notes.split() [word for word in word_list if re.search(r'^..t..$', word)] # EX. What do you think the "^" and "." metacharacters do in the code? [word for word in word_list if re.search(r'^a.*t', word)] # EX. What do you think the "*" metacharacter does in the code? 
poe = "While I nodded, nearly napping, suddenly there came a tapping,\ As of someone gently rapping, rapping at my chamber door." re.findall(r'.apping', poe) re.findall(r'.(?=apping)', poe) re.findall(r"(?<=ly ).apping", poe) re.findall(r"(?<=ly ).(?=apping)", poe) # EX. Find a list of "-apping" words that are followed by a comma in the line from Poe # -- but make sure the comma doesn't appear in your list entries! """ Explanation: Intro to Regex (Regular Expressions) End of explanation """ def istheXofY(text): return re.search(r'the .* of .*', text.lower())!=None and len(text.split())<=4 print(istheXofY('The Castle of Otronto')) print(istheXofY('The Castle in which there are some people of Otronto and other places')) # Graph the frequency of "The X of Y" titles per decade singlevol_tb['theXofY'] = singlevol_tb.apply(istheXofY, 'title') singlevol_tb['decade'] = singlevol_tb['date']//10*10 singlevol_tb.group('decade', collect=np.mean).scatter('decade', 'theXofY mean') # Create table containing only "The X of Y" titles theXofY_tb = singlevol_tb.where('theXofY').drop('theXofY') def gettheX(text): X = re.findall(r'(?<=the ).*(?= of )', text.lower())[0] return X def gettheY(text): Y = re.findall(r'(?<= of ).*', text.lower())[0] return Y print(gettheX('The Castle of Otronto')) print(gettheY('The Castle of Otronto')) print() print(gettheX('The castle in which there are some people of Otronto and other places')) print(gettheY('The castle in which there are some people of Otronto and other places')) # Create new columns containing on the the Y and Y from each title theXofY_tb['theX'] = theXofY_tb.apply(gettheX, 'title') theXofY_tb['ofY'] = theXofY_tb.apply(gettheY, 'title') theXofY_tb from collections import Counter Xs = Counter(theXofY_tb['theX']) Ys = Counter(theXofY_tb['ofY']) Xs.most_common(10) Ys.most_common(10) # EX. In Moretti's study, he gives examples of titles using the formula "The X of Y" # with lengths of up to seven words. 
If we tweak our function istheXofY()to allow # for longer titles, how does this change our findings? Why? """ Explanation: A Fortunate Formula End of explanation """
mayankjohri/LetsExplorePython
Section 2 - Advance Python/Chapter S2.02 - XML/Working with xml - Reading.ipynb
gpl-3.0
from lxml import etree
"""
Explanation: 1. Working with xml : reading
1.1 Introduction
Extensible Markup Language (XML) is a simple, very flexible text format derived from SGML (ISO 8879). Originally designed to meet the challenges of large-scale electronic publishing, XML is also playing an increasingly important role in the exchange of a wide variety of data on the Web and elsewhere. It has been defined at https://www.w3.org/XML/.
Several schema systems exist to aid in the definition of XML-based languages, while programmers have developed many application programming interfaces (APIs) to aid the processing of XML data.
1.2 Parsing XML with Python
As for querying the web, Python has many libraries for playing with xml. You will most likely encounter the following during your Pythonic journey :
lxml, which we will use for this course. A clean, quite fast, strict library for dealing with xml resources. It's the most accepted library for this kind of request. If IBM writes tutorials for it, it should be good. It supports xpath and xslt.
BeautifulSoup. Flexible, average speed. The good thing is if your xml markup is messed up, it will try to correct it. It's perfect for dealing with web-scraped data in HTML formats. For clean xml, it might be too slow.
xml : the native integration in Python. Fast, clean but no good sides such as xpath and xslt.
Read about others on the Python official wiki
Based on my experience, lxml will meet most of your needs when dealing with clean data. Clean is the key word here : do not expect lxml to play well with bad html or bad xml. It will just throw errors at you until you give up or fix it by hand. We can import lxml.etree the same way we imported requests earlier. 
End of explanation
"""
# We open our file
with open("data/books.xml") as file:
    # etree.parse() is a function (not a property): it reads the open file
    # object and returns an _ElementTree wrapping the document root.
    parsed = etree.parse(file)
# We print the tree object itself (not its serialised content)
print(parsed)
"""
Explanation: As you can see, we obtained an instance of type lxml.etree._ElementTree. It means the xml markup has been transformed into something Python understands. The parse function of etree does not take many arguments. One way to customize its behaviour is to give it a home configured or homemade xml parser :
End of explanation
"""
# We initiate a new parser from etree, asking it to remove nodes of text which are empty
parser = etree.XMLParser(remove_blank_text=True)
# We re-open the same file
with open("data/books.xml") as file:
    # And we parse it again, this time through the custom parser
    parsed = etree.parse(file, parser)
# We print the tree object again
print(parsed)
# NOTE(review): stray comment left over from the notebook conversion.
"""
Explanation: From the documentation of the XMLParser function, here are some arguments that might be useful for you :
attribute_defaults : Use DTD (if available) to add the default attributes
dtd_validation : Validate against DTD while parsing
load_dtd : Load and parse the DTD while parsing
ns_clean : Clean up redundant namespace declarations
recover : Try to fix ill-formed xml
remove_blank_text : Removes blank text nodes
resolve_entities : Replace entities by their value (Default : on)
You can then create a new parser according to its standards or clean namespace attribute. 
In this context, ns_clean would transform &lt;root xmlns:a="xmlns1" xmlns:b="xmlns2"&gt;&lt;tag xmlns:c="xmlns3" /&gt;&lt;tag xmlns:a="xmlns1" /&gt;&lt;tag /&gt;&lt;/root&gt; into &lt;root xmlns:a="xmlns1" xmlns:b="xmlns2"&gt;&lt;tag xmlns:c="xmlns3" /&gt;&lt;tag/&gt;&lt;tag /&gt;&lt;/root&gt; 1.3.1 From string to XML object lxml parses strings in the same way that it parses files. The syntax differs, but is quite simple : End of explanation """ # Put your code here """ Explanation: DIY Can you parse a xml document made of one tag "humanities" with two children "field" named "classics" and "history"? End of explanation """ xml = """ <fileDesc> <titleStmt> <title>Aeneid</title> <title type="sub">Machine readable text</title> <author n="Verg.">P. Vergilius Maro</author> <editor role="editor" n="Greenough">J. B. Greenough</editor> </titleStmt> <extent>about 505Kb</extent> <!-- &Perseus.publish;--> <sourceDesc> <biblStruct> <monogr> <author>Vergil</author> <title>Bucolics, Aeneid, and Georgics Of Vergil</title> <editor role="editor">J. B. Greenough</editor> <imprint> <pubPlace>Boston</pubPlace> <publisher>Ginn &amp; Co.</publisher> <date>1900</date> </imprint> </monogr> </biblStruct> </sourceDesc> </fileDesc>""" etree.fromstring(xml) """ Explanation: 1.3.2 Errors and understanding them Previouly, we have said that lxml was quite strict about xml validity. Let's see an example : End of explanation """ #Write your xml in xml variable # invalid xml = """ """ # xml2 = """ <start>this is a text</start> """ # xml3 = """ <start attr="test"/> """ etree.fromstring(xml3) """ Explanation: What error did we raise trying to parse this XML ? We got an XMLSyntaxError. It can happen for various reasons, including when entities cannot be parsed. Can you try to find another way to raise an XMLSyntaxError ? 
End of explanation """ # With no namespace print(etree.fromstring("<root />")) # With namespace print(etree.fromstring("<root xmlns='http://localhost' />")) """ Explanation: As you can see, errors are detailed enough so you can correct your own XML, at least manually. 1.3.3 Node properties and methods Quick explanation : Methods and properties are something special in Python and other programming languages. Unlike traditional functions (len()) and keys of dictionaries (a["b"]), they are part of something bigger. Methods : Ever seen something such as a.method() ? Yes, you did with .split(), .join(), etc. Functions following a variable with a dot are called methods because they are an extension of the variable type. eh split() and join() are extensions of string objects, and they use their value as argument. Properties or Attributes : Such as dictionary keys, properties are indexed values of an object, but instead of using the syntax made of square brackets, you just put the name of the key after a dot : a.property Warning : namespaces : In lxml, namespaces are expressed using the Clark notation. This mean that, if a namespace defines a node, this node will be named using the following syntax "{namespace}tagname. 
Here is an example : End of explanation """ # First, we will need some xml xml = """ <div type="Book" n="1"> <l n="1">Arma virumque cano, Troiae qui primus ab oris</l> <tei:l n="2" xmlns:tei="http://www.tei-c.org/ns/1.0">Italiam, fato profugus, Laviniaque venit</tei:l> <l n="3">litora, multum ille et terris iactatus et alto</l> <l n="4">vi superum saevae memorem Iunonis ob iram;</l> <l n="5">multa quoque et bello passus, dum conderet urbem,</l> <l n="6">inferretque deos Latio, genus unde Latinum,</l> <l n="7">Albanique patres, atque altae moenia Romae.</l> </div> """ div = etree.fromstring(xml) print(parsed) """ Explanation: You can do plenty of things using lxml and access properties or methods of nodes, here is an overview of reading functionalities offered by lxml : Let's see what that means in real life : End of explanation """ type_div = div.get("type") print(type_div) print(div.get("n")) # If we want a dictionary of attributes print(div.attrib) attributes_div = dict(div.attrib) print(attributes_div) # Of if we want a list list_attributes_div = div.items() print(list_attributes_div) """ Explanation: If we want to retrieve the attributes of our div, we can do as follow : End of explanation """ children = div.getchildren() print(children) line_1 = children[0] # Because it's a list we can access children through index print(line_1) """ Explanation: Great ! We accessed our first information using lxml ! Now, how about getting somewhere other than the root tag ? To do so, there are two ways : getchildren() will returns a list of children tags, such as div. list(div) will transform div in a list of children. Both syntaxes return the same results, so it's up to you to decide which one you prefer. 
End of explanation """ print(line_1.text) """ Explanation: Now that we have access to our children, we can have access to their text : End of explanation """ # <tei:l n="2" xmlns:tei="http://www.tei-c.org/ns/1.0">Italiam, fato profugus, Laviniaque venit</tei:l> line_2 = children[1] print(line_2.nsmap) print(line_2.prefix) print(line_2.tag) """ Explanation: Ok, we are now able to get some stuff done. Remember the namespace naming ? Sometimes it's useful to retrieve namespaces and their prefix : End of explanation """ # We generate some xml and parse it ## TODO xml = """<div> <l n="1"> <p>Text</p> <p>new p</p> followed <test> <p>p3</p> </test> </l> <l n="2"> by line two </l> <p>test</p> <p><l n="3"> line 3</l></p> </div>""" div = etree.fromstring(xml) print(div) # When doing an xpath, the results will be a list print("-"*20) ps = div.xpath("/div/l") for p in ps: print(p) print("-"*20) # print(ps) print([value.values()[0] for value in ps]) print(ps[0].text == "Text") """ Explanation: What you've learned : How to parse a xml file or a string representing xml through etree.parse() and etree.fromstring() How to configure the way xml is parsed with etree.XMLParser() What is an attribute and a method Properties and methods of a node XMLParseError handling Clark's notation for namespaces and tags. 1.4 . XPath and XSLT with lxml 1.4.1 XPath XPath is a powerful tool for traversing an xml tree. XML is made of nodes such as tags, comments, texts. These nodes have attributes that can be used to identify them. For example, with the following xml : &lt;div&gt;&lt;l n="1"&gt;&lt;p&gt;Text&lt;/p&gt; followed&lt;/l&gt;&lt;l n="2"&gt;by line two&lt;/div&gt; the node p will be accessible by /div/l[@n="1"]/p. LXML has great support for complex XPath, which makes it the best friend of Humanists dealing with xml : End of explanation """ print(div.xpath("//l")) """ Explanation: As you can see, the xpath returns a list. 
This behaviour is intended, since an xpath can retrieve more than one item : End of explanation """ # We assign our first line to a variable line_1 = div.xpath("//l")[0] #print(dir(line_1)) print(line_1.attrib['n']) # We look for p print(line_1.xpath("p")) # This works print(line_1.xpath("./p")) # This works too print(line_1.xpath(".//p")) # This still works print(line_1.xpath("//p")) # entire doc """ Explanation: You see ? The xpath //l returns two elements, just like python does in a list. Now, let's apply some xpath to the children and see what happens : End of explanation """ root.xpath("wrong:xpath:never:works") """ Explanation: As you can see, you can do xpath from any node in lxml. One important thing though : xpath //tagname will return to the root if you do not add a dot in front of it such as .//tagname. This is really important to remember, because most xpath resolvers do not behave this way. Another point to kepe in mind : if you write your xpath incorrectly, Python will raise an *XPathEvalError * error End of explanation """ # We create a valid xml object xml = """<root> <tag xmlns="http://localhost">Text</tag> <tei:tag xmlns:tei="http://www.tei-c.org/ns/1.0">Other text</tei:tag> <teiTwo:tag xmlns:teiTwo="http://www.tei-c.org/ns/2.0">Other text</teiTwo:tag> </root>""" root = etree.fromstring(xml) # We register every namespaces in a dictionary using prefix as keys : ns = { "local" : "http://localhost", # Even if this namespace had no prefix, we can register one for it "tei" : "http://www.tei-c.org/ns/1.0", "two": "http://www.tei-c.org/ns/2.0" } print([d.text for namespace in ns for d in root.xpath("//{namespace}:tag".format(namespace=namespace), namespaces=ns) ]) """ Explanation: Xpath with namespaces and prefix As you've seen, lxml use Clark's naming convention for expressing namespaces. 
This is extremely important regarding xpath, because you will be able to retrieve a node using it under certain conditions : End of explanation """ # Here is an xml containing an xsl: for each text node of an xml file in the xpath /humanities/field, # this will return a node <name> with the text inside xslt_root = etree.fromstring(""" <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:template match="/"> <fields><xsl:apply-templates /></fields> </xsl:template> <xsl:template match="/humanities/field"> <name><xsl:value-of select="./text()" /></name> </xsl:template> </xsl:stylesheet>""") # We transform our document to an xsl xslt = etree.XSLT(xslt_root) # We create some xml we need to change xml = """<humanities> <field>History</field> <field>Classics</field> <field>French</field> <field>German</field> </humanities>""" parsed_xml = etree.fromstring(xml) # And now we process our xml : transformed = xslt(parsed_xml) print(transformed) """ Explanation: What you have learned : Each node and xml document has an .xpath() method which takes as its first parameter xpath Method xpath() always returns a list, even for a single result Method xpath() will return to the root when you don't prefix your // with a dot. An incorrect XPath will issue a XPathEvalError Method xpath() accepts a namespaces argument : you should enter a dictionary where keys are prefixes and values namespaces Unlike findall(), xpath() does not accept Clark's notation 1.4.2 XSLT XSLT stands for Extensible Stylesheet Language Transformations. It's an xml-based language made for transforming xml documents to xml or other formats such as LaTeX and HTML. XSLT is really powerful when dealing with similarly formated data. It's far easier to transform 100 documents with the exact same structure via XSLT than in Python or any other language. 
While Python is great at dealing with weird transformations of xml, the presence of XSLT in Python allows you to create production chains without leaving your favorite IDE. To do some XSL, lxml needs two things : first, an xml document representing the xsl that will be parsed and entered into the function etree.XSLT(), and second, a document to transform. End of explanation """ print(type(transformed)) print(type(parsed_xml)) """ Explanation: Did you see what happened ? We used xslt(xml). etree.XSLT() transforms a xsl document into a function, which then takes one parameter (in this case an xml document). But can you figure out what this returns ? Let's ask Python : End of explanation """ print(transformed.xpath("//name")) """ Explanation: The result is not of the same type of element we usually have, even though it does share most of its methods and attributes : End of explanation """ string_result = str(transformed) print(string_result) """ Explanation: And has something more : you can change its type to string ! 
End of explanation """ # Here is an xml containing an xsl: for each text node of an xml file in the xpath /humanities/field, # this will return a node <name> with the text inside xslt_root = etree.fromstring(""" <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:param name="n" /> <xsl:template match="/humanities"> <fields> <xsl:attribute name="n"> <xsl:value-of select="$n"/> </xsl:attribute> <xsl:apply-templates select="field"/> </fields> </xsl:template> <xsl:template match="/humanities/field"> <name><xsl:value-of select="./text()" /></name> </xsl:template> </xsl:stylesheet>""") # We transform our document to an xsl xslt = etree.XSLT(xslt_root) # We create some xml we need to change xml = """<humanities> <category>Humanities</category> <field>History</field> <field>Classics</field> <field>French</field> <field>German</field> </humanities>""" parsed_xml = etree.fromstring(xml) # And now we process our xml : transformed = xslt(parsed_xml, n="'Humanities'") # Note that for a string, we encapsulate it within single quotes print(transformed) # Be aware that you can use xpath as a value for the argument, though it can be rather complex sometimes transformed = xslt(parsed_xml, n=etree.XPath("//category/text()")) print(transformed) """ Explanation: XSLT is more complex than just inputing xml. You can do XSLT using parameters as well. In this case, your parameters will be accessibles as a named argument to the generated function. If your XSL has a name xsl-param, the function given back by etree.XSLT will have a name argument : End of explanation """ from xml.etree import ElementTree with open('data/books.xml', 'rt') as f: tree = ElementTree.parse(f) print(tree) """ Explanation: 2. 
Using ElementTree End of explanation """ from xml.etree import ElementTree with open('data/books.xml', 'r') as f: tree = ElementTree.parse(f) # print(dir(tree)) for node in tree.iter(): print (node.tag, node.attrib) print("-----") # from xml.etree import ElementTree # with open('data/books.xml', 'r') as f: # tree = ElementTree.parse(f) # # print(dir(tree)) # for node in tree.iter(): # print (node.tag, node.attrib) # print("-----") ### To print only the groups of names and feed URLs for the podcasts, # leaving out of all of the data in the header section by iterating # over only the outline nodes and print the text and xmlUrl attributes. from xml.etree import ElementTree with open('data/podcasts.opml', 'rt') as f: tree = ElementTree.parse(f) print(len( list(tree.iter('outline')))) for node in tree.iter('outline'): name = node.attrib.get('text') url = node.attrib.get('xmlUrl') if name and url: print ('\t%s :: %s' % (name, url)) else: print (name) """ Explanation: 2.1 Traversing the Parsed Tree To visit all of the children in order, use iter() to create a generator that iterates over the ElementTree instance. End of explanation """ for node in tree.findall('.//outline'): url = node.attrib.get('xmlUrl') if url: print( url) else: print(node.attrib.get("text")) print(dir(tree)) print(tree.getroot) """ Explanation: 2.1.1 Finding Nodes in a Document¶ Walking the entire tree like this searching for relevant nodes can be error prone. The example above had to look at each outline node to determine if it was a group (nodes with only a text attribute) or podcast (with both text and xmlUrl). To produce a simple list of the podcast feed URLs, without names or groups, for a podcast downloader application, the logic could be simplified using findall() to look for nodes with more descriptive search characteristics. As a first pass at converting the above example, we can construct an XPath argument to look for all outline nodes. 
End of explanation """ for node in tree.findall('.//outline/outline'): url = node.attrib.get('xmlUrl') print (url) """ Explanation: Another version can take advantage of the fact that the outline nodes are only nested two levels deep. Changing the search path to .//outline/outline mean the loop will process only the second level of outline nodes. End of explanation """ from xml.etree import ElementTree with open('data/data.xml', 'rt') as f: tree = ElementTree.parse(f) node = tree.find('./with_attributes') print (node.tag) for name, value in sorted(node.attrib.items()): print (' %-4s = "%s"' % (name, value)) for path in [ './child', './child_with_tail' ]: node = tree.find(path) print(node.tag) print (' child node text:', node.text) print (' and tail text :', node.tail) """ Explanation: 2.1.2 Parsed Node Attributes The items returned by findall() and iter() are Element objects, each representing a node in the XML parse tree. Each Element has attributes for accessing data pulled out of the XML. 
This can be illustrated with a somewhat more contrived example input file, data.xml: End of explanation """ from xml.etree.ElementTree import XML parsed = XML(''' <root> <group> <child id="a">This is child "a".</child> <child id="b">This is child "b".</child> </group> <group> <child id="c">This is child "c".</child> </group> </root> ''') print ('parsed =', parsed) for elem in parsed: print (elem.tag) if elem.text is not None and elem.text.strip(): print (' text: "%s"' % elem.text) if elem.tail is not None and elem.tail.strip(): print (' tail: "%s"' % elem.tail) for name, value in sorted(elem.attrib.items()): print(' %-4s = "%s"' % (name, value)) print from xml.etree.ElementTree import Element, tostring top = Element('top') children = [ Element('child', num=str(i)) for i in range(3) ] top.extend(children) print(top) """ Explanation: 2.1.3 Parsing Strings To work with smaller bits of XML text, especially string literals as might be embedded in the source of a program, use XML() and the string containing the XML to be parsed as the only argument. End of explanation """
owlas/magpy
docs/source/notebooks/.archive/magpy-equilibrium-tests-fix-combine0.ipynb
bsd-3-clause
import numpy as np # dipole interaction energy def dd(t1, t2, p1, p2, nu): return -nu*(2*np.cos(t1)*np.cos(t2) - np.sin(t1)*np.sin(t2)*np.cos(p1-p2)) # anisotropy energy def anis(t1, t2, sigma): return sigma*(np.sin(t1)**2 + np.sin(t2)**2) # total energy def tot(t1, t2, p1, p2, nu, sigma): return dd(t1, t2, p1, p2, nu) + anis(t1, t2, sigma) # numerator of the Boltzmann distribution (i.e. ignoring the partition function Z) def p_unorm(t1,t2,p1,p2,nu,sigma): return np.sin(t1)*np.sin(t2)*np.exp(-tot(t1, t2, p1, p2, nu, sigma)) # non interacting from scipy.integrate import nquad sigma, nu = 1.0, 0.0 Z = nquad( lambda t1, t2, p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma), ranges=[(0, np.pi), (0, np.pi), (0, 2*np.pi), (0, 2*np.pi)] ) print(Z[0]) Z = nquad( lambda t1, t2: p_unorm(t1, t2, 0, 0, nu, sigma), ranges=[(0, np.pi), (0, np.pi)] ) print(Z[0] * 4 * np.pi**2) """ Explanation: Thermal equilibrium An ensemble of trajectories obtained from simulating Langevin dynamics will tend to a stable distribution: the Boltzmann distribution. Problem setup Two identical magnetic nanoparticles, aligned along their anisotropy axes. The system has 6 degrees of freedom (x,y,z components of magnetisation for each particle) but the energy is defined by the two angles $\theta_1,\theta_2$ alone. Boltzmann distribution The Boltzmann distribution represents the probabability that the system will be found within certain sets (i.e. the angles of magnetisation). The distribution is parameterised by the temperature of the system and the energy landscape of the problem. Note the sine terms appear because the distribution is over the solid angles. In other words, the distribution is over the surface of a unit sphere. The sine terms project these solid angles onto a simple elevation angle between the magnetisation and the anisotropy axis ($\theta$). 
$$p\left(\theta_1,\theta_2,\phi_1,\phi_2\right) = \frac{\sin(\theta_1)\sin(\theta_2)e^{-E\left(\theta_1,\theta_2,\phi_1,\phi_2\right)/\left(K_BT\right)}}{Z}$$ Stoner-Wohlfarth model The energy function for a single domain magnetic nanoparticle is given by the Stoner-Wohlfarth equation: $$\frac{E\left(\theta_1,\theta_2,\phi_1,\phi_2\right)}{K_BT}=\sigma\left(\sin^2\theta_1+\sin^2\theta_2\right) -\nu\left[2\cos\theta_1\cos\theta_2 - \sin\theta_1\sin\theta_2\cos\left(\phi_1-\phi_2\right)\right]$$ $$\sigma=\frac{KV}{K_BT}$$ $$\nu=\frac{\mu_0V^2M_s^2}{2\pi R^3K_BT}$$ $\sigma,\nu$ are the normalised anisotropy and interaction strength respectively. Functions for analytic solution End of explanation """ import magpy as mp """ Explanation: Magpy non-interacting case Using magpy we initialise the dimers with both magnetisation vectors aligned along their anisotropy axes. We allow the system to relax. End of explanation """ K = 1e5 r = 7e-9 T = 330 Ms=400e3 R=9e-9 kdir = [0, 0, 1] location1 = np.array([0, 0, 0], dtype=np.float) location2 = np.array([0, 0, R], dtype=np.float) direction = np.array([0, 0, 1], dtype=np.float) alpha = 1.0 """ Explanation: System properties Set up the parameters of the dimers. 
They are identical and aligned along their anisotropy axes End of explanation """ base_model = mp.Model( anisotropy=np.array([K, K], dtype=np.float), anisotropy_axis=np.array([kdir, kdir], dtype=np.float), damping=alpha, location=np.array([location1, location2], dtype=np.float), magnetisation=Ms, magnetisation_direction=np.array([direction, direction], dtype=np.float), radius=np.array([r, r], dtype=np.float), temperature=T ) ensemble = mp.EnsembleModel(50000, base_model) """ Explanation: Magpy model and simulation Build a magpy model of the dimer End of explanation """ res = ensemble.simulate(end_time=1e-9, time_step=1e-12, max_samples=500, random_state=1002, n_jobs=8, implicit_solve=True, interactions=False) m_z0 = np.array([state['z'][0] for state in res.final_state()])/Ms m_z1 = np.array([state['z'][1] for state in res.final_state()])/Ms theta0 = np.arccos(m_z0) theta1 = np.arccos(m_z1) """ Explanation: Simulate an ensemble of 10,000 dimers without interactions. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline plt.plot(res.results[0].time, res.ensemble_magnetisation()) plt.title('Non-interacting dimer ensemble magnetisation'); """ Explanation: System magnetisation shows that the system has relaxed into the local minima (we could relax the system globally but it would take much longer to run since the energy barrier must be overcome). 
End of explanation """ # Dimensionless parameters V = 4./3*np.pi*r**3 sigma = K*V/mp.core.get_KB()/T nu = 0 print('Sigma: {:.3f}'.format(sigma)) print(' Nu: {:.3f}'.format(nu)) """ Explanation: Compare to analytic thermal equilibrium Compute the expected boltzmann distribution End of explanation """ Z = nquad( lambda t1, t2, p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma), ranges=[(0, np.pi/2), (0, np.pi/2), (0, 2*np.pi), (0, 2*np.pi)] ) print(Z[0]) Z=Z[0] ts = np.linspace(min(theta0), max(theta0), 100) bdist = [[ nquad(lambda p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma)/Z, ranges=[(0, 2*np.pi), (0, 2*np.pi)])[0] for t1 in ts] for t2 in ts] plt.hist2d(theta0, theta1, bins=30, normed=True); plt.contour(ts, ts, bdist, cmap='Greys') plt.title('Joint distribution') plt.xlabel('$\\theta_1$'); plt.ylabel('$\\theta_2$'); """ Explanation: The joint distribution for both angles are computed analytically and compared with the numerical result. The resulting distrubiton is symmetric because both particles are independent and identically distributed. End of explanation """ b_marginal = [nquad( lambda t2, p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma)/Z, ranges=[(0, np.pi/2), (0, 2*np.pi), (0, 2*np.pi)])[0] for t1 in ts] plt.hist(theta0, bins=50, normed=True) plt.plot(ts, np.array(b_marginal)) """ Explanation: We can also compute the marginal distribution (i.e. the equilibrium of just 1 particle). It is easier to see the alignment of the two distributions. End of explanation """ # Dimensionless parameters V = 4./3*np.pi*r**3 sigma = K*V/mp.core.get_KB()/T nu = mp.core.get_mu0() * V**2 * Ms**2 / 2.0 / np.pi**2 / R**3 / mp.core.get_KB() / T print('Sigma: {:.3f}'.format(sigma)) print(' Nu: {:.3f}'.format(nu)) """ Explanation: Magpy interacting case We now simulate the exact same ensemble of dimers but with the interactions enabled. We can compute the dimensionless parameters to understand the strength of the interactions (vs. the anisotropy strength). 
End of explanation """ res = ensemble.simulate(end_time=1e-9, time_step=1e-13, max_samples=500, random_state=1001, n_jobs=8, implicit_solve=False, interactions=True, renorm=True) m_z0i = np.array([state['z'][0] for state in res.final_state()])/Ms m_z1i = np.array([state['z'][1] for state in res.final_state()])/Ms theta0i = np.arccos(m_z0i) theta1i = np.arccos(m_z1i) """ Explanation: The interaction strength is very strong (actually the particles are impossibly close). The following command is identical to above except that interactions=True End of explanation """ plt.plot(res.results[0].time, res.ensemble_magnetisation()) plt.title('Interacting dimer ensemble magnetisation'); """ Explanation: System relaxation The system quickly relaxes into the first minima again, as before. End of explanation """ # Dimensionless parameters V = 4./3*np.pi*r**3 sigma = K*V/mp.core.get_KB()/T nu = 1.0 * mp.core.get_mu0() * V**2 * Ms**2 / 2.0 / np.pi / np.pi / R**3 / mp.core.get_KB() / T print('Sigma: {:.3f}'.format(sigma)) print(' Nu: {:.3f}'.format(nu)) Z = nquad( lambda t1, t2, p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma), ranges=[(0, np.pi/2), (0, np.pi/2), (0, 2*np.pi), (0, 2*np.pi)] )[0] ts = np.linspace(min(theta0), max(theta0), 100) bdist = [[ nquad(lambda p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma)/Z, ranges=[(0, 2*np.pi), (0, 2*np.pi)])[0] for t1 in ts] for t2 in ts] plt.hist2d(theta0i, theta1i, bins=30, normed=True); # ts = np.linspace(min(theta0), max(theta0), 100) # b = boltz_2d(ts, nu, sigma) plt.contour(ts, ts, bdist, cmap='Greys') plt.title('Joint distribution') plt.xlabel('$\\theta_1$'); plt.ylabel('$\\theta_2$'); """ Explanation: Thermal equilibrium The stationary distributions align BUT: introduced fudge factor of $\pi$ into the denominator of the interaction constant $\nu$. In other words we use $$\nu_\textrm{fudge}=\frac{\nu}{\pi}$$ This factor of $1/\pi$ could come from integrating somewhere. I think this is an error with my analytic calculations and not the code. 
This is because the code certainly uses the correct term for interaction strength, whereas I derived this test myself. End of explanation """ b_marginal = [nquad( lambda t2, p1, p2: p_unorm(t1, t2, p1, p2, nu, sigma)/Z, ranges=[(0, np.pi/2), (0, 2*np.pi), (0, 2*np.pi)])[0] for t1 in ts] plt.hist(theta0i, bins=50, normed=True, alpha=0.6, label='Magpy + inter.') plt.hist(theta0, bins=50, normed=True, alpha=0.6, label='Magpy + no inter.') plt.plot(ts, b_marginal, label='Analytic') plt.legend(); """ Explanation: We use the marginal distribution again to check the convergence. We also compare to the interacting case End of explanation """ import pymc3 as pm with pm.Model() as model: z1 = pm.Uniform('z1', 0, 1) theta1 = pm.Deterministic('theta1', np.arccos(z1)) z2 = pm.Uniform('z2', 0, 1) theta2 = pm.Deterministic('theta2', np.arccos(z2)) phi1 = pm.Uniform('phi1', 0, 2*np.pi) phi2 = pm.Uniform('phi2', 0, 2*np.pi) energy = tot(theta1, theta2, phi1, phi2, nu, sigma) like = pm.Potential('energy', -energy) with model: step = pm.NUTS() trace = pm.sample(500000, step=step) pm.traceplot(trace) plt.hist(trace['theta1'], bins=200, normed=True); plt.plot(ts, b_marginal, label='Analytic') plt.hist(trace['theta1'], bins=200, normed=True, alpha=0.6); plt.hist(theta0i, bins=50, normed=True, alpha=0.6, label='Magpy + inter.'); """ Explanation: Possible sources of error Implementation of interaction strength in magpy (but there are many tests against the true equations) Analytic calculations for equilibrium One way to test this could be to simulate a 3D system. If another factor of $\pi$ appears then it is definitely something missing from my analytic calculations. 
End of explanation
"""

# NOTE(review): unfinished scratch cell — it does not run as written:
#   * the signature has `my3` where `mz2` was almost certainly intended, so
#     the `np.arccos(mz2)` below raises NameError;
#   * `inter = -nu*(3*np.dot)` never *calls* np.dot — it multiplies a number
#     by the function object itself, which raises TypeError;
#   * the function falls through with no return statement.
def cart_energy(mx1, my1, mz1, mx2, my2, my3, nu, sigma):
    # Polar angles recovered from the z components of the (unit) moments.
    t1 = np.arccos(mz1)
    t2 = np.arccos(mz2)  # NameError: `mz2` is not a parameter (see note above)
    # Anisotropy energy sigma*(sin^2 t1 + sin^2 t2), matching anis() earlier.
    anis = sigma*np.sin(t1)**2 + sigma*np.sin(t2)**2
    # Dipole-dipole term left incomplete (np.dot is not applied to anything).
    inter = -nu*(3*np.dot)

# Build (without sampling) the same pymc3 model as in the previous cell:
# angles whose unnormalised log-probability is minus the Stoner-Wohlfarth
# energy tot(...) at the current nu and sigma, i.e. the Boltzmann distribution.
import pymc3 as pm
with pm.Model() as model:
    # Sampling z = cos(theta) uniformly on [0, 1] is the uniform measure on
    # the upper hemisphere, which supplies the sin(theta) solid-angle factor.
    z1 = pm.Uniform('z1', 0, 1)
    theta1 = pm.Deterministic('theta1', np.arccos(z1))
    z2 = pm.Uniform('z2', 0, 1)
    theta2 = pm.Deterministic('theta2', np.arccos(z2))
    # Azimuthal angles are uniform on [0, 2*pi).
    phi1 = pm.Uniform('phi1', 0, 2*np.pi)
    phi2 = pm.Uniform('phi2', 0, 2*np.pi)
    # Reduced energy (units of k_B*T); pm.Potential adds -energy to the logp.
    energy = tot(theta1, theta2, phi1, phi2, nu, sigma)
    like = pm.Potential('energy', -energy)
"""
Explanation: Cartesian
End of explanation
"""
turbomanage/training-data-analyst
courses/machine_learning/deepdive2/how_google_does_ml/inclusive_ml/solution/inclusive_ml_solution.ipynb
apache-2.0
import os import warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd import witwidget from witwidget.notebook.visualization import ( WitWidget, WitConfigBuilder, ) pd.options.display.max_columns = 50 """ Explanation: Inclusive ML - Understanding Bias Learning Objectives Invoke the What-if Tool against a deployed Model Explore attributes of the dataset Examine aspects of bias in model results Evaluate how the What-if Tool provides suggestions to remediate bias Introduction This notebook shows use of the What-If Tool inside of a Jupyter notebook. The What-If Tool, among many other things, allows you to explore the impacts of Fairness in model design and deployment. The notebook invokes a previously deployed XGBoost classifier model on the UCI census dataset which predicts whether a person earns more than $50K based on their census information. You will then visualize the results of the trained classifier on test data using the What-If Tool. First, you will import various libaries and settings that are required to complete the lab. 
End of explanation """ PROJECT = "<YOUR PROJECT>" BUCKET = "gs://{project}".format(project=PROJECT) MODEL = 'xgboost_model' VERSION = 'v1' MODEL_DIR = os.path.join(BUCKET, MODEL) os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['MODEL'] = MODEL os.environ['VERSION'] = VERSION os.environ['MODEL_DIR'] = MODEL_DIR """ Explanation: In the next cell, replace &lt;YOUR PROJECT&gt; (inside the double-quotes) with your GCP project id (for example qwiklabs-gcp-fe367d9e174dfbd3): End of explanation """ %%bash gcloud config set project $PROJECT gsutil mb $BUCKET gsutil cp gs://cloud-training-demos/mlfairness/model.bst $MODEL_DIR/model.bst gcloud ai-platform models list | grep $MODEL || gcloud ai-platform models create $MODEL gcloud ai-platform versions list --model $MODEL | grep $VERSION || gcloud ai-platform versions create $VERSION \ --model=$MODEL \ --framework='XGBOOST' \ --runtime-version=1.14 \ --origin=$MODEL_DIR \ --python-version=3.5 \ --project=$PROJECT """ Explanation: Set up the notebook environment First you must perform a few environment and project configuration steps. These steps may take 8 to 10 minutes, please wait until you see the following response before proceeding: "Creating version (this might take a few minutes)......done." End of explanation """ %%bash gsutil cp gs://cloud-training-demos/mlfairness/income.pkl . gsutil cp gs://cloud-training-demos/mlfairness/x_test.npy . gsutil cp gs://cloud-training-demos/mlfairness/y_test.npy . features = pd.read_pickle('income.pkl') x_test = np.load('x_test.npy') y_test = np.load('y_test.npy') """ Explanation: Finally, download the data and arrays needed to use the What-if Tool. End of explanation """ features.head() """ Explanation: Now take a quick look at the data. The ML model type used for this analysis is XGBoost. XGBoost is a machine learning framework that uses decision trees and gradient boosting to build predictive models. 
It works by ensembling multiple decision trees together based on the score associated with different leaf nodes in a tree. XGBoost requires all values to be numeric so the orginial dataset was slightly modified. The biggest change made was to assign a numeric value to Sex. The originial dataset only had the values "Female" and "Male" for Sex. The decision was made to assign the value "1" to Female and "2" to Male. As part of the data prepartion effort the Pandas function "get_dummies" was used to convert the remaining domain values into numerical equivalent. For instance the "Education" column was turned into several sub-columns named after the value in the column. For instance the "Education_HS-grad" has a value of "1" for when that was the orginial categorical value and a value of "0" for other cateogries. End of explanation """ # Combine the features and labels into one array for the What-if Tool num_wit_examples = 2000 test_examples = np.hstack(( x_test[:num_wit_examples], y_test[:num_wit_examples].reshape(-1, 1) )) """ Explanation: To connect the What-if Tool to an AI Platform model, you need to pass it a subset of your test examples. The commannd below will create a Numpy array of 2000 from our test examples. End of explanation """ # TODO 1 FEATURE_NAMES = features.columns.tolist() + ['income_prediction'] def adjust(pred): return [1 - pred, pred] config_builder = ( WitConfigBuilder(test_examples.tolist(), FEATURE_NAMES) .set_ai_platform_model(PROJECT, MODEL, VERSION, adjust_prediction=adjust) .set_target_feature('income_prediction') .set_label_vocab(['low', 'high']) ) WitWidget(config_builder, height=800) """ Explanation: Instantiating the What-if Tool is as simple as creating a WitConfigBuilder object and passing it the AI Platform model desired to be analyzed. The optional "adjust_prediction" parameter is used because the What-if Tool expects a list of scores for each class in our model (in this case 2). 
Since the model only returns a single value from 0 to 1, it must be transformed to the correct format in this function. Lastly, the name 'income_prediction' is used as the ground truth label. It may take 1 to 2 minutes for the What-if Tool to load and render the visualization palette, please be patient. End of explanation """
phoebe-project/phoebe2-docs
2.1/tutorials/constraints.ipynb
gpl-3.0
!pip install -I "phoebe>=2.1,<2.2" """ Explanation: Constraints Setup Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release). End of explanation """ import phoebe from phoebe import u # units import numpy as np import matplotlib.pyplot as plt logger = phoebe.logger() b = phoebe.default_binary() """ Explanation: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details. End of explanation """ b.filter(context='constraint') """ Explanation: What are Constraints? Constraints live in their own context of the Bundle, and many are created by default - either when you add a component or when you set the system hierarchy. Let's look at all the existing constraints for our binary system. End of explanation """ b['constraint']['primary']['mass'] """ Explanation: To see what all of these constraints do, see the 'Built-in Constraints' section below. For now let's look at a single constraint by accessing a ConstraintParameter. End of explanation """ print b.get_value('mass@primary@component') """ Explanation: Here we see the equation used to derive the mass of the primary star from its orbit, as well as the current value If we access the Parameter that it is constraining we can see that it is automatically kept up-to-date. End of explanation """ print b['mass@primary@component'] """ Explanation: The parameter is aware that it's being constrained and all the relevant linking parameters. End of explanation """ b['asini@constraint'] """ Explanation: If you change the hierarchy, built-in cross-object constraints (like mass that depends on its parent orbit), will be adjusted to reflect the new hierarchy. See the 'Changing Hierarchies' section below for more details. Built-in Constraints There are a number of built-in constraints that can be applied to our system. 
Those added by default are all listed below: asini This constraint handles computing the projected semi-major axis along the line of sight and can be automatically inverted to solve for either 'asini', 'sma', or 'incl'. End of explanation """ b['esinw@constraint'] b['ecosw@constraint'] """ Explanation: esinw, ecosw These constraints handle computing the projected eccentricity which can be helpful in that they are better representations of the geometry of a light curve and result in symmetric posteriors for near-circular orbits. Both can be inverted to also automatically solve for 'ecc' or 'per0'. End of explanation """ b['t0_perpass@constraint'] """ Explanation: t0 This constraint handles converting between different t0 conventions - namely providing a reference time at periastron passage (t0_perpass) and at superior conjunction (t0_supconj). Currently, this constraint only supports inverting to be solved for 't0_supconj' (ie you cannot automatically invert this constraint to constraint phshift or per0). End of explanation """ b['freq@constraint'] b['freq@binary@constraint'] b['freq@primary@constraint'] """ Explanation: freq This constraint handles the simple conversion to frequency from period - whether that be rotational or orbital - and does support inversion to solve for 'period'. End of explanation """ b['mass@constraint'] b['mass@primary@constraint'] """ Explanation: mass This constraint handles solving for the mass of a component by obeying Kepler's third law within the parent orbit. It can be inverted to solve for 'sma' or 'period' (in addition to 'mass'), but not 'q'. End of explanation """ b['sma@constraint'] b['sma@primary@constraint'] """ Explanation: component sma This constraint handles computing the semi-major axis of a component about the center of mass of its parent orbit. Note that this is not the same as the semi-major axis of the parent orbit. This currently can be inverted to solve for 'sma' of the parent orbit, but not 'q'. 
End of explanation """ b['requiv_max@constraint'] b['requiv_max@primary@constraint'] """ Explanation: requiv_max NEW IN PHOEBE 2.1 This constraint handles solving for the maxium equivalent radius (for a detached system). End of explanation """ b['period@constraint'] b['period@primary@constraint'] """ Explanation: rotation period This constraint handles computing the rotation period of a star given its synchronicity parameter (syncpar). It can be inverted to solve for any of the three parameters 'period' (both rotational and orbital) and 'syncpar'. End of explanation """ b['incl@constraint'] b['incl@primary@constraint'] b['long_an@constraint'] b['long_an@primary@constraint'] """ Explanation: pitch/yaw (incl/long_an) NEW IN PHOEBE 2.1 pitch constrains the relation between the orbital and rotational inclination whereas yaw constrains the relation between the orbital and rotational long_an. When pitch and yaw are set to 0, the system is aligned. End of explanation """ print b['mass@primary@component'].constrained_by print b['value@mass@primary@component'], b['value@mass@secondary@component'], b['value@period@orbit@component'] b.flip_constraint('mass@primary', 'period') b['mass@primary@component'] = 1.0 print b['value@mass@primary@component'], b['value@mass@secondary@component'], b['value@period@orbit@component'] """ Explanation: Re-Parameterizing NOTE: this is an experimental feature. When re-parameterizing, please be careful and make sure all results and parameters make sense. As we've just seen, the mass is a constrained (ie derived) parameter. But let's say that you would rather provide masses for some reason (perhaps that was what was provided in a paper). You can choose to provide mass and instead have one of its related parameters constrained by calling flip_constraint. 
End of explanation """ print b['constraint'] b['period@constraint@binary'] b['period@constraint@binary'].meta """ Explanation: You'll see that when we set the primary mass, the secondary mass has also changed (because the masses are related through q) and the period has changed (based on resolving the Kepler's third law constraint). Note that the tags for the constraint are based on those of the constrained parameter, so to switch the parameterization back, we'll have to use a slightly different twig. End of explanation """ b.flip_constraint('period@binary', 'mass') """ Explanation: Notice that the qualifier tag has changed from 'mass' to 'period' and the 'component' tag has changed from 'primary' to 'binary' (since sma is in the binary). End of explanation """ b.set_value('q', 0.8) """ Explanation: Changing Hierarchies Some of the built-in constraints depend on the system hierarchy, and will automatically adjust to reflect changes to the hierarchy. For example, the masses depend on the period and semi-major axis of the parent orbit but also depend on the mass-ratio (q) which is defined as the primary mass over secondary mass. For this reason, changing the roles of the primary and secondary components should be reflected in the masses (so long as q remains fixed). In order to show this example, let's set the mass-ratio to be non-unity. End of explanation """ print "M1: {}, M2: {}".format(b.get_value('mass@primary@component'), b.get_value('mass@secondary@component')) """ Explanation: Here the star with component tag 'primary' is actually the primary component in the hierarchy, so should have the LARGER mass (for a q < 1.0). 
End of explanation """ b.set_hierarchy('orbit:binary(star:secondary, star:primary)') print b.get_value('q') print "M1: {}, M2: {}".format(b.get_value('mass@primary@component'), b.get_value('mass@secondary@component')) """ Explanation: Now let's flip the hierarchy so that the star with the 'primary' component tag is actually the secondary component in the system (and so takes the role of numerator in q = M2/M1). For more information on the syntax for setting hierarchies, see the Building a System Tutorial. End of explanation """ print "M1: {}, M2: {}, period: {}, q: {}".format(b.get_value('mass@primary@component'), b.get_value('mass@secondary@component'), b.get_value('period@binary@component'), b.get_value('q@binary@component')) b.flip_constraint('mass@secondary@constraint', 'period') print "M1: {}, M2: {}, period: {}, q: {}".format(b.get_value('mass@primary@component'), b.get_value('mass@secondary@component'), b.get_value('period@binary@component'), b.get_value('q@binary@component')) b.set_value('mass@secondary@component', 1.0) print "M1: {}, M2: {}, period: {}, q: {}".format(b.get_value('mass@primary@component'), b.get_value('mass@secondary@component'), b.get_value('period@binary@component'), b.get_value('q@binary@component')) """ Explanation: Even though under-the-hood the constraints are being rebuilt from scratch, they will remember if you have flipped them to solve for some other parameter. To show this, let's flip the constraint for the secondary mass to solve for 'period' and then change the hierarchy back to its original value. End of explanation """
rebeccabilbro/titanic
titanic_wrangling.ipynb
mit
import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas.io.sql as pd_sql import sqlite3 as sql %matplotlib inline """ Explanation: TITANIC: Wrangling the Passenger Manifest Exploratory Analysis with Pandas This tutorial is based on the Kaggle Competition, "Predicting Survival Aboard the Titanic" https://www.kaggle.com/c/titanic Be sure to read the README before you begin! See also: http://www.analyticsvidhya.com/blog/2014/08/baby-steps-python-performing-exploratory-analysis-python/ http://www.analyticsvidhya.com/blog/2014/09/data-munging-python-using-pandas-baby-steps-python/ End of explanation """ con = sql.connect("titanic.db") """ Explanation: Here's a sqlite database for you to store the data once it's ready: End of explanation """ # Use pandas to open the csv. # You'll have to put in the filepath # It should look something like "../titanic/data/train.csv" df = """ Explanation: =>YOUR TURN! Use pandas to open up the csv. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html End of explanation """ # Use pandas to view the first 10 rows. """ Explanation: Exploring the Tabular Data The file we'll be exploring today, train.csv, is the training set -- it represents a subset of the full passenger manifest dataset. The rest of the data is in another file called test.csv - we'll use that later (when we get to Machine Learning). Let's take a look... =>YOUR TURN! Use pandas to view the "head" of the file with the first 10 rows. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html End of explanation """ # Use pandas to get the summary statistics. """ Explanation: What do you see? - Are there any missing values? - What kinds of values/numbers/text are there? - Are the values continuous or categorical? - Are some variables more sparse than others? - Are there multiple values in a single column? =>YOUR TURN! 
Use pandas to run summary statistics on the data. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.describe.html End of explanation """ # Use pandas to get the median age. """ Explanation: What can we infer from the summary statistics? - How many missing values does the 'Age' column have? - What's the age distribution? - What percent of the passengers survived? - How many passengers belonged to Class 3? - Are there any outliers in the 'Fare' column? =>YOUR TURN! Use pandas to get the median for the Age column. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.median.html End of explanation """ # Use pandas to count the number of unique Ticket values. """ Explanation: =>YOUR TURN! Use pandas to find the number of unique values in the Ticket column. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.nunique.html End of explanation """ fig = plt.figure() ax = fig.add_subplot(111) ax.hist(df['Age'], bins = 10, range = (df['Age'].min(),df['Age'].max())) plt.title('Age distribution') plt.xlabel('Age') plt.ylabel('Count of Passengers') plt.show() """ Explanation: Visually Exploring the Data Let's look at a histogram of the age distribution. What can you tell from the graph? End of explanation """ fig = plt.figure() ax = fig.add_subplot(111) ax.hist(df['Fare'], bins = 10, range = (df['Fare'].min(),df['Fare'].max())) plt.title('Fare distribution') plt.xlabel('Fare') plt.ylabel('Count of Passengers') plt.show() """ Explanation: Now let's look at a histogram of the fares. What does it tell you? End of explanation """ # Use pandas to sum the null Cabin values. """ Explanation: Dealing with Missing Values Part of data wrangling is figuring out how to deal with missing values. But before you decide, think about which variables are likely to be predictive of survival. 
Which ones do you think will be the best predictors? Age Age is likely to play a role, so we'll probably want to estimate or 'impute' the missing values in some way. Fare There are a lot of extremes on the high end and low end for ticket fares. How should we handle them? Other Variables What do YOU think?? =>YOUR TURN! Use pandas to get the sum of all the null values in the Cabin column. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.isnull.html http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sum.html End of explanation """ # Use pandas to drop the Ticket column. """ Explanation: =>YOUR TURN! Use pandas to drop the Ticket column. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html End of explanation """ # Use pandas to get the mean Age. # Use pandas to fill in the null Age values with the mean. """ Explanation: =>YOUR TURN! Use pandas to calculate the mean age and fill all the null values in the Age column with that number.. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.mean.html http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html End of explanation """ # Use pandas to save your dataframe to a sqlite database. """ Explanation: Save Your Work ...you will need it in a few weeks! =>YOUR TURN! Use pandas to write your dataframe to our sqlite database. Read the documentation to find out how: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html End of explanation """
slundberg/shap
notebooks/benchmark/text/Machine Translation Benchmark Demo.ipynb
mit
import numpy as np from transformers import AutoTokenizer, AutoModelForSeq2SeqLM import nlp import shap import shap.benchmark as benchmark import torch """ Explanation: Text Data Explanation Benchmarking: Machine Translation This notebook demonstrates how to use the benchmark utility to benchmark the performance of an explainer for text data. In this demo, we showcase explanation performance for partition explainer on a Machine Translation model. The metrics used to evaluate are "keep positive" and "keep negative". The masker used is Text Masker. The new benchmark utility uses the new API with MaskedModel as wrapper around user-imported model and evaluates masked values of inputs. End of explanation """ tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-es") dataset = nlp.load_dataset('xsum',split='train') s = [dataset['summary'][i] for i in range(10)] """ Explanation: Load Data and Model End of explanation """ explainer = shap.Explainer(model,tokenizer) """ Explanation: Create Explainer Object End of explanation """ shap_values = explainer(s) """ Explanation: Run SHAP Explanation End of explanation """ sort_order = 'positive' perturbation = 'keep' """ Explanation: Define Metrics (Sort Order & Perturbation Method) End of explanation """ sp = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation) xs, ys, auc = sp.model_score(shap_values, s) sp.plot(xs, ys, auc) sort_order = 'negative' perturbation = 'keep' sp = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation) xs, ys, auc = sp.model_score(shap_values, s) sp.plot(xs, ys, auc) """ Explanation: Benchmark Explainer End of explanation """
james-prior/cohpy
20170720-dojo-primes-revisited.ipynb
mit
from itertools import islice, count known_good_primes = [2, 3, 5, 7, 11, 13, 17,19, 23, 29] def check(): assert list(islice(gen_primes(), len(known_good_primes))) == known_good_primes def gen_primes(start=2): for p in count(start): for divisor in range(2, p): if p % divisor == 0: break else: yield p check() def is_divisible(p): return any(p % divisor == 0 for divisor in range(2, p)) def gen_primes(start=2): return ( p for p in count(start) if not is_divisible(p) ) check() def is_prime(p): return all(p % divisor != 0 for divisor in range(2, p)) def gen_primes(start=2): return (p for p in count(start) if is_prime(p)) check() """ Explanation: Just playing around with various ways of generating prime numbers. The focus is on readability, not speed. End of explanation """ def is_prime(p): return all(p % divisor != 0 for divisor in range(2, p)) def gen_primes(start=2): return filter(is_prime, count(start)) check() """ Explanation: This is pretty readable. End of explanation """ def gen_primes(start=2): return filter( lambda p: all(p % divisor != 0 for divisor in range(2, p)), count(start) ) check() """ Explanation: Do it all in one expression. It works, but is ugly and hard to read. End of explanation """
statkraft/shyft-doc
notebooks/nea-example/run_nea_nidelva.ipynb
lgpl-3.0
# Pure python modules and jupyter notebook functionality # first you should import the third-party python modules which you'll use later on # the first line enables that figures are shown inline, directly in the notebook %pylab inline import os import datetime as dt import pandas as pd from os import path import sys from matplotlib import pyplot as plt """ Explanation: A first simulation with Shyft Introduction Shyft provides a toolbox for running hydrologic simulations. As it was designed to work in an operational environment, we've provided several different workflows for running a model simulation. The main concept to be aware of is that while we demonstrate and build on the use of a 'configuration', nearly all simulation functionality is also accessible with pure python through access to the API. This is the encouraged approach to simulation. The use of configurations is intended to be a mechanism of running repeated operational simulations when one is interested in archiving and storing (potentially to a database) the specifics of the simulation. Below we start with a high level description using a configuration object, and in Part II of the simulation notebooks we describe the approach using the lower level APIs. It is recommended, if you intend to use Shyft for any kind of hydrologic exploration, to become familiar with the API functionality. This notebook briefly runs through the simulation process for a pre-configured catchment. The following steps are described: Loading required python modules and setting path to Shyft installation Configuration of a Shyft simulation Running a Shyft simulation Post-processing: Fetching simulation results from the simulator-object. 1. Loading required python modules and setting path to SHyFT installation Shyft requires a number of different modules to be loaded as part of the package. 
Below, we describe the required steps for loading the modules, and note that some steps are only required for the use of the jupyter notebook. End of explanation """ # try to auto-configure the path, -will work in all cases where doc and data # are checked out at same level shyft_data_path = path.abspath("../../../shyft-data") if path.exists(shyft_data_path) and 'SHYFT_DATA' not in os.environ: os.environ['SHYFT_DATA']=shyft_data_path # shyft should be available either by it's install in python # or by PYTHONPATH set by user prior to starting notebook. # This is equivalent to the two lines below # shyft_path=path.abspath('../../../shyft') # sys.path.insert(0,shyft_path) # once the shyft_path is set correctly, you should be able to import shyft modules import shyft # if you have problems here, it may be related to having your LD_LIBRARY_PATH # pointing to the appropriate libboost_python libraries (.so files) from shyft import api from shyft.repository.default_state_repository import DefaultStateRepository from shyft.orchestration.configuration.yaml_configs import YAMLSimConfig from shyft.orchestration.simulators.config_simulator import ConfigSimulator # now you can access the api of shyft with tab completion # and help, try this: # help(api.GeoPoint) # remove the hashtag and run the cell to print the documentation of the api.GeoPoint class # remove the hashtag below, set the pointer behind the dot and use # tab completion to see the available attributes of the shyft api #api """ Explanation: The Shyft Environment This next step is highly specific on how and where you have installed Shyft. If you have followed the guidelines at github, and cloned the three shyft repositories: i) shyft, ii) shyft-data, and iii) shyft-doc, then you may need to tell jupyter notebooks where to find shyft. Uncomment the relevant lines below. 
If you have a 'system' shyft, or used conda install -s sigbjorn shyft to install shyft, then you probably will want to make sure you have set the SHYFT_DATA directory correctly, as otherwise, Shyft will assume the above structure and fail. This has to be done before import shyft. In that case, uncomment the relevant lines below. note: it is most likely that you'll need to do one or the other. End of explanation """ # set up configuration using *.yaml configuration files # here is the *.yaml file that configures the simulation: config_file_path = os.path.abspath("./nea-config/neanidelva_simulation.yaml") # and here we pass it to the configurator, together with the name of the region # stated in the simulation.yaml file (here: "neanidelva") which we would like to run cfg = YAMLSimConfig(config_file_path, "neanidelva") print(cfg) # Once we have all the configuration in place (read in from the .yaml files) # we can start to do the simulation. Here we use the ConfigSimulator class # to initialize a simulator-object. Shyft has several ways to achieve this # but the .yaml configs are the most straight forward simulator = ConfigSimulator(cfg) # Now the simulator is ready to run! """ Explanation: 2. Configuration of a SHyFT simulation The following shows how to set up a Shyft simulation using the yaml_configs.YAMLSimConfig class. Note that this is a high level approach, providing a working example for a simple simulation. More advanced users will want to eventually make use of direct API calls, as outlined in Part II. At this point, you may want to have a look to the configuration file used in this example. 
``` neanidelva: region_config_file: neanidelva_region.yaml model_config_file: neanidelva_model_calibrated.yaml datasets_config_file: neanidelva_datasets.yaml interpolation_config_file: neanidelva_interpolation.yaml start_datetime: 2013-09-01T00:00:00 run_time_step: 86400 # 1 hour time step number_of_steps: 365 # 1 year region_model_id: 'neanidelva-ptgsk' #interpolation_id: 2 # this is optional (default 0) initial_state: repository: class: !!python/name:shyft.repository.generated_state_repository.GeneratedStateRepository params: model: !!python/name:shyft.api.pt_gs_k.PTGSKModel tags: [] ... ``` The file is structured as follows: neanidelva is the name of the simulation. Your configuration file may contain multiple "stanzas" or blocks of simulation configurations. You'll see below that we use the name to instantiate a configuration object. region_config_file points to another yaml file that contains basic information about the region of the simulation. You can explore that file here model_config_file contains the model parameters. Note that when you are calibrating the model, this is the file that you would put your optimized parameters into once you have completed a calibrations. datasets_config_file contains details regarding the input datasets and the repositories they are contained in. You can see this file here interpolation_config_file provides details regarding how the observational data in your catchment or region will be interpolated to the domain of the simulation. If you are using a repository with distributed data, the interpolation is still used. See this file for more details. The following: start_datetime: 2013-09-01T00:00:00 run_time_step: 86400 # 1 hour time step number_of_steps: 365 # 1 year region_model_id: 'neanidelva-ptgsk' are considered self-explantory. Note that region_model_id is simply a string name, but it should be unique. We will explain the details regarding initial_state later on in this tutorial. End of explanation """ #simulator. 
#try tab completion n_cells = simulator.region_model.size() print(n_cells) c0 = simulator.region_model.cells[0] type(c0) """ Explanation: The simulator and the region_model It is important to note that the simulator provides a wrapping of underlying API functionality. It is designed to provide a quick and simple interface for conducting runs based on a configuration saved in a .yaml file, or otherwise. Core functionality is contained in the region_model. This is an import concept in Shyft. To understand the framework, one should be familiar with this class. Before we begin the simulation, one should explore the simulator object with tab completion. As an example, you can see here how to get the number of cells in the region that was set up. This is used later for extracting the data. Most importantly, the simulator as an attribute called region_model. Most of the underlying functionality of the simulator methods are actually making calls to the region_model class. To conduct more advanced simulations one would use this object directly. End of explanation """ simulator.run() simulator.initial_state_repo """ Explanation: 3. Running a SHyFT simulation Okay, so thus far we have set up our cfg object which contains most the information required to run the simulation. We can simply run the simulation using the run method. End of explanation """ help(simulator.run) """ Explanation: But this is may be too simple. Let's explore the simulator.run method a bit further: End of explanation """ # Here we are going to extact data from the simulator object. # We start by creating a list to hold discharge for each of the subcatchments. # Then we'll get the data from the simulator object # mapping of internal catch ID to catchment catchment_id_map = simulator.region_model.catchment_id_map print(catchment_id_map) """ Explanation: Note that you can pass two parameters to run. To run a simulation, we need a time_axis (length of the simulation), and an initial state. 
Initially we got both of these from the cfg object (which takes it from the .yaml files). However, in some cases you will likely want to change these and conduct simulations for different periods, or starting from different states. We explore this further in Part II: advanced simulation 4. Post processing and data extraction You have now completed a simple simulation. You probably are interested to explore some of the output from the simulation and to visulize the quality of the results. Let's explore first, how to access the underlying data produced from the simulation. Visualizing the discharge for each [sub-]catchment Recall that we earlier referred to the importance of understanding the region_model. You'll see now that this is where information from the simulation is actually contained, and that the simulator object is more or less a convenience wrapper. End of explanation """ q_1228_ts = simulator.region_model.statistics.discharge([1228]) q_1228_np = simulator.region_model.statistics.discharge([1228]).values.to_numpy() print(type(q_1228_ts)) print(type(q_1228_np)) #sca=simulator.region_model. """ Explanation: We see here that each sub-catchment in our simulation is associated with a unique ID. These are user defined IDs. In the case of the nea-nidelva simulation, they are taken from the GIS database used to create the example configuration files. To get data out of the region_model you need to specify which catchments you are interested in evaluating. In the following example we are going to extract the data for each catchment and make a simple plot. Note that Shyft uses many specialized C++ types. Many of these have methods to convert to the more familiar numpy objects. An example may be the discharge timeseries for a catchment. 
End of explanation """ # First get the time-axis which we'll use as the index for the data frame ta = simulator.region_model.time_axis # and convert it to datetimes index = [dt.datetime.utcfromtimestamp(p.start) for p in ta] # Now we'll add all the discharge series for each catchment data = {} for cid in catchment_id_map: # get the discharge time series for the subcatchment q_ts = simulator.region_model.statistics.discharge([int(cid)]) data[cid] = q_ts.values.to_numpy() df = pd.DataFrame(data, index=index) # we can simply use: ax = df.plot(figsize=(20,15)) ax.legend(title="Catch. ID") ax.set_ylabel("discharge [m3 s-1]") """ Explanation: shyft.time_series.TimeSeries objects have a lot more functionality, but we'll cover that in a separate tutorial. For now we'll work in a 'pythonic' and simple way a convert all our data to python types. To do this, we'll use the Pandas library. But note also that we'll have to do some list magic to get the timeseries and datetime values out. End of explanation """ # api.TsVector() is a list of api.Timeseries type. 
discharge_ts = api.TsVector() # except from the type, it just works as a list() # loop over each catchment, and extract the time-series (we keep them as such for now) for cid in catchment_id_map: # fill in discharge time series for all subcatchments discharge_ts.append(simulator.region_model.statistics.discharge([int(cid)])) # discharge is a TS object,keeping a .time_axis and .values # We can make a quick plot of the data of each sub-catchment fig, ax = plt.subplots(figsize=(20,15)) # plot each catchment discharge in the catchment_id_map for i,ts in enumerate(discharge_ts): # a ts.time_axis can be enumerated to it's UtcPeriod, # that will have a .start and .end of type utctimestamp # to use matplotlib support for datetime-axis, we convert it to datetime (as above) ts_timestamps = [datetime.datetime.utcfromtimestamp(p.start) for p in ts.time_axis] ts_values = ts.values # iterable and convertible, .to_numpy() makes an np array ax.plot(ts_timestamps,ts_values, label = "{}".format(catchment_id_map[i])) fig.autofmt_xdate() ax.legend(title="Catch. ID") ax.set_ylabel("discharge [m3 s-1]") """ Explanation: A preferred approach As mentioned above, Shyft has it's own Timeseries class. This class is quite powerful, and in future tutorials we'll explore more of the functionality. For now, let's look at some key aspects, and how to create the same plot as above without pandas. End of explanation """ # First, we can also plot the statistical distribution of the # discharges over the sub-catchments # get the percentiles we want, note -1 = arithmetic average percentiles= api.IntVector([10,25,50,-1,75,90]) # create a Daily(for the fun of it!) 
time-axis for the percentile calculations # (our simulation could be hourly) ta_statistics = api.TimeAxis(simulator.region_model.time_axis.time(0),api.Calendar.DAY,365) # then simply get out a new set of time-series, corresponding to the percentiles we specified # note that discharge_ts is of the api.TsVector type, not a simple list as in our first example above discharge_percentiles = api.percentiles(discharge_ts,ta_statistics,percentiles) #utilize that we know that all the percentile time-series share a common time-axis common_timestamps = [datetime.datetime.utcfromtimestamp(p.start) for p in ta_statistics] # Then we can make another plot of the percentile data for the sub-catchments fig, ax = plt.subplots(figsize=(20,15)) # plot each discharge percentile in the discharge_percentiles for i,ts_percentile in enumerate(discharge_percentiles): clr='k' if percentiles[i] >= 0.0: clr= str(float(percentiles[i]/100.0)) ax.plot(common_timestamps, ts_percentile.values, label = "{}".format(percentiles[i]),color=clr) # also plot catchment discharge along with the statistics # notice that we use .average(ta_statistics) to properly align true-average values to time-axis ax.plot(common_timestamps,discharge_ts[0].average(ta_statistics).values,label = "CID {}".format(catchment_id_map[0]),linewidth=2.0,alpha=0.7,color='b') fig.autofmt_xdate() ax.legend(title="Percentiles") ax.set_ylabel("discharge [m3 s-1]") """ Explanation: A teaser to the Shyft API The Shyft API, shyft.api contains a lot of functionality worth exploring. As we mentioned, the api.Timeseries class provides some tools for adding timeseries, looking at statistics, etc. Below is a quick exploration of some of the possibilities. Users should explore using the source code, tab completion, and most of all help to get the full story... 
End of explanation """ # a simple percentile plot, from orchestration looks nicer from shyft.orchestration import plotting as splt oslo=api.Calendar('Europe/Oslo') # notice that we use olson tz-id to select time-zone fig,ax=plt.subplots(figsize=(16,8)) splt.set_calendar_formatter(oslo) # notice how easy it is to ensure timestamp labels are tz aware h,ph=splt.plot_np_percentiles(common_timestamps,[ p.values.to_numpy() for p in discharge_percentiles],base_color=(0.03,0.01,0.3)) """ Explanation: In shyft.orchestration there are other functions for exploring the data. We've created a function that creates the plot above. End of explanation """ cells = simulator.region_model.get_cells() # Once we have the cells, we can get their coordinate information # and fetch the x- and y-location of the cells x = np.array([cell.geo.mid_point().x for cell in cells]) y = np.array([cell.geo.mid_point().y for cell in cells]) """ Explanation: Visualizing the distributed catchment data An important, but difficult concept, to remember when working with Shyft, is that internally there is no 'grid' to speak of. The simulation is vectorized, and each 'cell' represents a spatial area with it's own area and geolocation information. Therefore, we cannot just load a datacube of data, as some may be familiar with. Visualization of this data is a bit more complex, because each individual cell is in practice an individual polygon. Depending on how the data has been configured for Shyft (see region_model), the cells may, in fact, be simple squares or more complex shapes. For the visualization below, we simply treat them as uniform size, and plot them with the scatter function in matplotlib. Extract data for individual simulation cells We'll start by looking at values of individual cells, rather than at the catchment level. Since Shyft does not have an underlying 'raster' model, you need to fetch all cells directly from the underlying region_model. 
End of explanation """ # let's create the mapping of catchment_id to an integer: cid_z_map = dict([ (catchment_id_map[i],i) for i in range(len(catchment_id_map))]) # then create an array the same length as our 'x' and 'y', which holds the # integer value that we'll use for the 'z' value catch_ids = np.array([cid_z_map[cell.geo.catchment_id()] for cell in cells]) # and make a quick catchment map... # using a scatter plot of the cells fig, ax = plt.subplots(figsize=(15,5)) cm = plt.cm.get_cmap('rainbow') plot = ax.scatter(x, y, c=catch_ids, marker='.', s=40, lw=0, cmap=cm) plt.colorbar(plot).set_label('zero-based mapping(proper map tbd)') """ Explanation: We also will need to get a 'z' value to make things interesting. Since this is the first time we've visualized our catchment, let's make a map of the sub-catchments. To do this, the first thing we need to do is get the membership of each cell. That is, to which catchment does it below. We do this by extracting the catchment_id of each cell -- and this is what we'll map. The result will be a map of the sub-catchments. Recall from above we extracted the catchment_id_map from the region_model: # mapping of internal catch ID to catchment catchment_id_map = simulator.region_model.catchment_id_map We could just use the catchment_id as the 'z' value, but since this could be a string, we'll take a different approach. We'll assign a unique integer to each catchment_id and plot those (it is also easier for the color bar scaling). End of explanation """ #first, set a date: year, month, day, (hour of day if hourly time step) oslo = api.Calendar('Europe/Oslo') # specifying input calendar in Oslo tz-id time_x = oslo.time(2014,5,15) # the oslo calendar(incl dst) converts calendar coordinates Y,M,D.. 
to its utc-time # we need to get the index of the time_axis for the time try: idx = simulator.region_model.time_axis.index_of(time_x) # index of time x on time-axis except: print("Date out of range, setting index to 0") idx = 0 # fetching SCA (the response variable is named "snow_sca") # You can use tab-completion to explore the `rc`, short for "response collector" # object of the cell, to see further response variables available. # specifying empty list [] indicates all catchments, otherwise pass catchment_id sca = simulator.region_model.gamma_snow_response.sca([],idx) """ Explanation: Visualing the Snow Cover Area of all cells for a certain point in time Here we'll do some more work to look at a snapshot value of data in each of the cells. This example is collecting the response variable (here the Snow Cover Area (SCA)) for each of the cells for a certain point of time. The "response collector" is another concept within Shyft that is important keep in mind. We don't collect and store responses for every variable, in order to keep the simulation memory use lean. Therefore, depending on your application, it may be required to explicitly enable this. The relevant code is found in region_model.h in the C++ core source code. For the ConfigSimulator class, which we used to instantiate the simulator, a standard collector is used that will provide access to the most relevant variables. For a model run during calibration, we are use a collector that just does the required minimum for the calibration. And, it is still configurable: we can turn on/off the snow-collection, so if we don't calibrate for snow, they are not collected. More on calibration is shown in the tutorial: Calibration with Shyft The state collector used for the 'highspeed' calibration models (C++), is a null-collector, so no memory allocated, and no cpu-time used. 
End of explanation """ # for attr in dir(simulator.region_model): # if attr[0] is not '_': #ignore privates # print(attr) # # and don't forget: # help(simulator.region_model.gamma_snow_state) """ Explanation: Let's take a closer look at this... simulator.region_model.time_axis.index_of(time_x) Simply provided an index value that we can use to index the cells for the time we're interested in looking at. Next we use: simulator.region_model.gamma_snow_response What is this? This is a collector from the simulation. In this case, for the gamma_snow routine. It contains a convenient method to access the response variables from the simulation on a per catchment level. Each response variable (outflow, sca, swe) can be called with two arguments. The first a list of the catchments, and the second an index to the time, as shown above. Note, this will return the values for each cell in the sub-catchment. Maybe one is only interested in the total outflow or total swe for the region. In this case you can use: .outflow_value which will return a single value. There is also a response collector for the state variables: .gamma_snow_state. Explore both of these further with tab completion or help. As well as the full region_model to see what other algorithm collectors are available as this example is configured. End of explanation """ # We can make a simple scatter plot again for quick visualization fig, ax = plt.subplots(figsize=(15,5)) cm = plt.cm.get_cmap('winter') plot = ax.scatter(x, y, c=sca, vmin=0, vmax=1, marker='s', s=40, lw=0, cmap=cm) plt.colorbar(plot) plt.title('Snow Covered area of {0} on {1}'.format(cfg.region_model_id, oslo.to_string(time_x))) """ Explanation: We are now ready to explore some of the variables from the simulation. We'll continue on with SCA. 
End of explanation """ # look at the catchment-wide average: nea_avg_sca = np.average(sca) print("Average SCA for Nea Nidelva: {0}".format(nea_avg_sca)) # And let's compute histogram of the snow covered area as well fig, ax = plt.subplots() ax.hist(sca, bins=20, range=(0,1), color='y', alpha=0.5) ax.set_xlabel("SCA of grid cell") ax.set_ylabel("frequency") """ Explanation: A note about the geometry of the region Again, keep in mind that while we have created a variable that contains the values for sca in each cell, this is only an iterable object. The only reason we know where each value is located is because we have corresponding x and y values for each cell. It is not an array. We can calculate some statistics directly out of sca: End of explanation """
saashimi/code_guild
interactive-coding-challenges/sorting_searching/quick_sort/quick_sort_solution.ipynb
mit
from __future__ import division def quick_sort(data): if len(data) < 2: return data left = [] right = [] pivot_index = len(data) // 2 pivot_value = data[pivot_index] # Build the left and right partitions for i in range(0, len(data)): if i == pivot_index: continue if data[i] < pivot_value: left.append(data[i]) else: right.append(data[i]) # Recursively apply quick_sort left = quick_sort(left) right = quick_sort(right) return left + [pivot_value] + right """ Explanation: <small><i>This notebook was prepared by Donne Martin. Source and license info is on GitHub.</i></small> Solution Notebook Problem: Implement quick sort. Constraints Test Cases Algorithm Code Pythonic-Code Unit Test Constraints Is a naiive solution sufficient (ie not in-place)? Yes Test Cases Empty input -> [] One element -> [element] Two or more elements Algorithm Wikipedia's animation: Set pivot to the middle element in the data For each element: If current element is the pivot, continue If the element is less than the pivot, add to left array Else, add to right array Recursively apply quicksort to the left array Recursively apply quicksort to the right array Merge the left array + pivot + right array Complexity: * Time: O(n log(n)) average, best, O(n^2) worst * Space: O(n), n extra space, n recursion depth, generally not stable Code End of explanation """ def quick_sort_alt(arr): if len(arr) <= 1: return arr else: return quick_sort_alt([x for x in arr[1:] if x < arr[0]]) + [arr[0]] + quick_sort_alt([x for x in arr[1:] if x >= arr[0]]) """ Explanation: Pythonic-Code The following code from Stack Overflow is very concise, although it might be a little difficult to read: End of explanation """ %%writefile test_quick_sort.py from nose.tools import assert_equal class TestQuickSort(object): def test_quick_sort(self, func): print('Empty input') data = [] sorted_data = func(data) assert_equal(sorted_data, []) print('One element') data = [5] sorted_data = func(data) assert_equal(sorted_data, [5]) print('Two 
or more elements') data = [5, 1, 7, 2, 6, -3, 5, 7, -1] sorted_data = func(data) assert_equal(sorted_data, sorted(data)) print('Success: test_quick_sort\n') def main(): test = TestQuickSort() test.test_quick_sort(quick_sort) try: test.test_quick_sort(quick_sort_alt) except NameError: # Alternate solutions are only defined # in the solutions file pass if __name__ == '__main__': main() %run -i test_quick_sort.py """ Explanation: Unit Test End of explanation """
rojassergio/Aprendiendo-a-programar-en-Python-con-mi-computador
Instalando_python.ipynb
mit
from IPython.display import HTML HTML('<iframe src=https://www.continuum.io/downloads/?useformat=mobile width=700 height=350></iframe>') """ Explanation: <center><font color=red> Instalando Python: un breve tutorial </font></center> Sergio Rojas<br> Departamento de F&iacute;sica, Universidad Sim&oacute;n Bol&iacute;var, Venezuela Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2015-2016 Sergio Rojas (srojas@usb.ve). <font color=red>Sobre Python</font> Python es un lenguaje de programaci&oacute;n multi-paradigma que por satisfacer las exigencias de un lenguaje de programaci&oacute;n moderno (como programaci&oacute;n en funciones y orientada a objeto) <b>ha ganado mucha popularidad</b> en los &uacute;ltimos a&ntilde;os en el medio de la computaci&oacute;n cient&iacute;fica, gracias a que se han incorporado al mismo m&oacute;dulos que facilitan la tarea de c&oacute;mputo cient&iacute;fico tales como NumPy y SciPy (que incorporan bibliotecas de c&aacute;lculo en pr&aacute;cticamente todas las &aacute;reas que abarca el c&oacute;mputo num&eacute;rico), Matplotlib (para satisfacer las necesidades de visualizaci&oacute;n), SymPy (para cubrir las necesidades de ejecutar c&oacute;mputo algebra&iacute;co o matem&aacute;tica simb&oacute;lica) y mucho m&aacute;s. En ese desarrollo, tambi&eacute;n se han incorporado a Python ambientes de programaci&oacute;n que hacen m&aacute;s amigable y eficiente la tarea de programar. Uno de tales ambientes es la consola IPython, que se ha extendido o ha evolucionado a la forma de una libreta de notas (IPython Notebook) que favorece la integraci&oacute;n en un navegador de la Internet, no solo el ambiente de programaci&oacute;n Python, sino tambi&eacute;n texto o notas asociados a los c&oacute;mputos que se realizan. Cabe mencionar que este tutorial es uno de los tantos ejemplos que se han escrito en el IPython (ahora Jupyter) Notebook. 
<font color=red>Instalando Python</font> Opci&oacute;n 1: Descarga Anaconda siguiendo el enlace que se muestra. <b>Alternativamente, puedes seguir el enlace que se muestra a continuaci&oacute;n. En caso que nada se muestre, entonces sigue el enlace mencionado anteriormente.</b> End of explanation """ HTML('<iframe src=https://store.enthought.com/?useformat=mobile width=700 height=350></iframe>') """ Explanation: Opci&oacute;n 2: Descarga Enthought Canopy siguiendo el enlace que se muestra. <b>Alternativamente, puedes seguir el enlace que se muestra a continuaci&oacute;n. En caso que nada se muestre, entonces sigue el enlace mencionado anteriormente.</b> End of explanation """ from IPython.display import YouTubeVideo """ Explanation: <b>Once descargado, Instala Python usando un terminal Linux v&iacute;a el comando:</b> $ bash FILENAME <b>Tendr&aacute;s que acetar los t&eacute;rminos de la licencia escribiendo <font color=red>yes</font> y presionando ENTER/RETURN. Luego tendr&aacute;s que presionar ENTER/RETURN nuevamente para instalar el programa en el directorio que se muestra por omisi&oacute;n. No obstante, i as&iacute; lo deseas, puedes elegir otro directorio de instalaci&oacute;n</b> LOS SIGUIENTES VIDEOS MUESTRAN EL PROCESO End of explanation """ YouTubeVideo('x4xegDME5C0') """ Explanation: Instalando Anaconda en Linux End of explanation """ YouTubeVideo('XAV-QB-Y6iY') """ Explanation: Instalando Canopy en Linux End of explanation """
fedor1113/LineCodes
Decoder.ipynb
mit
# Makes sure to install PyPNG image handling module import sys !{sys.executable} -m pip install pypng import png r = png.Reader("ex.png") t = r.asRGB() img = list(t[2]) # print(img) """ Explanation: Decode line codes in png graphs Assumptions (format): The clock is given and it is a red line on the top. The signal line is black ... End of explanation """ # Let us first define colour red # We'll work with RGB for colours # So for accepted variants we'll make a list of 3-lists. class colourlist(list): """Just lists of 3-lists with some fancy methods to work with RGB colours """ def add_deviations(self, d=8): # Magical numbers are so magical! """Adds deviations for RGB colours to a given list. Warning! Too huge - it takes forever. Input: list of 3-lists Output: None (side-effects - changes the list) """ #l = l[:] Nah, let's make it a method l = self v = len(l) max_deviation = d for i in range(v): # Iterate through the list of colours for j in range(-max_deviation, max_deviation+1): # Actually it is the deviation. #for k in range(3): # RGB! (no "a"s here) newcolour = self[i][:] # Take one of the original colours newcolour[0] = abs(newcolour[0]+j) # Create a deviation l.append(newcolour) # Append new colour to the end of the list. # <- Here it is changed! for j in range(-max_deviation, max_deviation+1): # Work with all the possibilities with this d newcolour1 = newcolour[:] newcolour1[1] = abs(newcolour1[1]+j) l.append(newcolour1) # Append new colour to the end of the list. Yeah! # <- Here it is changed! for j in range(-max_deviation, max_deviation+1): # Work with all the possibilities with this d newcolour2 = newcolour1[:] newcolour2[2] = abs(newcolour2[2]+j) l.append(newcolour2) # Append new colour to the end of the list. Yeah! # <- Here it is changed! return None def withinDeviation(colour, cl, d=20): """This is much more efficient! 
Input: 3-list (colour), colourlist, int Output: bool """ for el in cl: if (abs(colour[0] - el[0]) <= d and abs(colour[1] - el[1]) <= d and abs(colour[2] - el[2]) <= d): return True return False accepted_colours = colourlist([[118, 58, 57], [97, 71, 36], [132, 56, 46], [132, 46, 47], [141, 51, 53]]) # ... #accepted_colours.add_deviations() # print(accepted_colours) # -check it! - or better don't - it is a biiiig list.... # print(len(accepted_colours)) # That will take a while... Heh.. def find_first_pixel_of_colour(pixellist, accepted_deviations): """Returns the row and column of the pixel in a converted to list with RGB colours PNG Input: ..., colourlist Output: 2-tuple of int (or None) """ accepted_deviations = accepted_deviations[:] rows = len(pixellist) cols = len(pixellist[0]) for j in range(rows): for i in range(0, cols, 3): # if [pixellist[j][i], pixellist[j][i+1], pixellist[j][i+2]] in accepted_deviations: if withinDeviation([pixellist[j][i], pixellist[j][i+1], pixellist[j][i+2]], accepted_deviations): return (j, i) return None fr = find_first_pixel_of_colour(img, accepted_colours) if fr is None: print("Warning a corrupt file or a wrong format!!!") print(fr) print(img[fr[0]][fr[1]], img[fr[0]][fr[1]+1], img[fr[0]][fr[1]+2]) print(img[fr[0]]) # [133, 56, 46] in accepted_colours # Let us now find the length of the red lines that represent the sync signal def find_next_pixel_in_row(pixel, row, accepted_deviations): """Returns the column of the next pixel of a given colour (with deviations) in a row from a converted to list with RGB colours PNG Input: 2-tuple of int, list of int with len%3==0, colourlist Output: int (returns -1 specifically if none are found) """ l = len(row) if pixel[1] >= l-1: return -1 for i in range(pixel[1]+3, l, 3): # if [row[i], row[i+1], row[i+2]] in accepted_deviations: if withinDeviation([row[i], row[i+1], row[i+2]], accepted_deviations): return i return -1 def colour_line_length(pixels, start, colour, deviations=20): line_length = 1 
pr = start[:] r = (pr[0], find_next_pixel_in_row(pr, pixels[pr[0]], colour[:])) # print(pr, r) if not(r[1] == pr[1]+3): print("Ooops! Something went wrong!") else: line_length += 1 while (r[1] == pr[1]+3): pr = r r = (pr[0], find_next_pixel_in_row(pr, pixels[pr[0]], colour[:])) line_length += 1 return line_length line_length = colour_line_length(img, fr, accepted_colours, deviations=20) print(line_length) # !!! """ Explanation: Outline The outline of the idea is: Find the red lines that represent parallel synchronization signal above Calculate their size "Synchromize with rows below" (according to the rules of the code) ... PROFIT! !!! Things to keep in mind: deviations of red deviations of black noise - it might just break everything! beginning and end of image... ... A rather simple PNG we'll work with first: End of explanation """ print("It is", line_length) """ Explanation: We found the sync (clock) line length in our graph! End of explanation """ # Let's do just that black = colourlist([[0, 0, 0], [0, 1, 0], [7, 2, 8]]) # black.add_deviations(60) # experimentally it is somewhere around that # experimentally the max deviation is somewhere around 60 print(black) """ Explanation: Now the information transfer signal itself is ~"black", so we need to find the black colour range as well! 
End of explanation """ fb = find_first_pixel_of_colour(img, black) def signal_height(pxls, fib): signal_height = 1 # if ([img[fb[0]+1][fb[1]], img[fb[0]+1][fb[1]+1], img[fb[0]+1][fb[1]+2]] in black): if withinDeviation([pxls[fib[0]+1][fib[1]], pxls[fib[0]+1][fib[1]+1] , pxls[fib[0]+1][fib[1]+2]], black, 60): signal_height += 1 i = 2 rows = len(pxls) # while([img[fb[0]+i][fb[1]], img[fb[0]+i][fb[1]+1], img[fb[0]+i][fb[1]+2]] in black): while(withinDeviation([pxls[fib[0]+i][fib[1]] , pxls[fib[0]+i][fib[1]+1] , pxls[fib[0]+i][fib[1]+2]], black, 60)): signal_height += 1 i += 1 if (i >= rows): break else: print("") # TO DO return signal_height sheight = signal_height(img, fb)-1 print(sheight) # Let's quickly find the last red line ... def manchester(pixels, start, clock, line_colour, d=60, inv=False): """Decodes Manchester code (as per G. E. Thomas) (or with inv=True Manchester code (as per IEEE 802.4)). Input: array of int with len%3==0 (- PNG pixels), int, int, colourlist, int, bool (optional) Output: str (of '1' and '0') or None """ res = "" cols = len(pixels[0]) fb = find_first_pixel_of_colour(pixels, line_colour) m = 2*clock*3-2*3 # Here be dragons! # Hack: only check it using the upper line # (or lack thereof) if not(inv): for i in range(start, cols-2*3, m): fromUP = withinDeviation([pixels[fb[0]][i-6], pixels[fb[0]][i-5], pixels[fb[0]][i-4]], line_colour, d) if fromUP: res = res + "1" else: res = res + "0" else: for i in range(start, cols-2*3, m): fromUP = withinDeviation([pixels[fb[0]][i-6], pixels[fb[0]][i-5], pixels[fb[0]][i-4]], line_colour, d) if cond: res = res + "0" else: res = res + "1" return res def nrz(pixels, start, clock, line_colour, d=60, inv=False): """Decodes NRZ code (or with inv=True its inversed version). It is assumed that there is indeed a valid NRZ code with a valid message. Input: array of int with len%3==0 (- PNG pixels), int, int, colourlist, int, bool (optional) Output: str (of '1' and '0') or (maybe?) 
None """ res = "" cols = len(pixels[0]) fb = find_first_pixel_of_colour(pixels, line_colour) m = 2*clock*3-2*3 # Here be dragons! # Hack: only check it using the upper line # (or lack thereof) if not(inv): for i in range(start, cols, m): UP = withinDeviation([pixels[fb[0]][i], pixels[fb[0]][i+1], pixels[fb[0]][i+2]], line_colour, d) if UP: res = res + "1" else: res = res + "0" else: for i in range(start, cols-2*3, m): UP = withinDeviation([pixels[fb[0]][i], pixels[fb[0]][i+1], pixels[fb[0]][i+2]], line_colour, d) if cond: res = res + "0" else: res = res + "1" return res def code2B1Q(pixels, start, clock=None, line_colour=[[0, 0, 0]], d=60, inv=False): """Decodes 2B1Q code. The clock is not used - it is for compatibility only - really, so put anything there. Does _NOT_ always work! WARNING! Right now does not work AT ALL (apart from one specific case) Input: array of int with len%3==0 (- PNG pixels), int, *, colourlist, int Output: str (of '1' and '0') or None """ res = "" cols = len(pixels[0]) fb = find_first_pixel_of_colour(pixels, line_colour) # (11, 33) # will only work if the first or second dibit is 0b11 ll = colour_line_length(pixels, fb, line_colour, deviations=20) # 10 sh = signal_height(pixels, fb) - 1 # 17 -1? m = ll*3-2*3 # will only work if there is a transition # (after the first dibit) # We only need to check if the line is # on the upper, middle upper or middle lower rows... for i in range(start, cols, m): UP = withinDeviation([pixels[fb[0]][i], pixels[fb[0]][i+1], pixels[fb[0]][i+2]], line_colour, d) DOWN = withinDeviation([pixels[fb[0]+sh][i], pixels[fb[0]+sh][i+1], pixels[fb[0]+sh][i+2]], line_colour, d) almostUP = UP # if UP: # res = res + "10" if DOWN: # elif DOWN: res = res + "00" # print("00") elif almostUP: res = res + "11" # print("11") else: res = res + "01" # print("01") return res # A-a-and... here is magic! 
res = manchester(img, fr[1]+5*3, line_length, black, d=60, inv=False) ans = [] for i in range(0, len(res), 8): ans.append(int('0b'+res[i:i+8], 2)) # print(ans) for i in range(0, len(ans)): print(ans[i]) """ Explanation: The signal we are currently interested in is Manchester code (as per G.E. Thomas). It is a self-clocking signal, but since we do have a clock with it - we use it) Let us find the height of the Manchester signal in our PNG - just because... End of explanation """ # Here is a helper function to automate all that def parse_code(path_to_file, code, inv=False): """Guess what... Parses a line code PNG Input: str, function (~coinsides with the name of the code) Output: str (of '1' and '0') or (maybe?) None """ r1 = png.Reader(path_to_file) t1 = r1.asRGB() img1 = list(t1[2]) fr1 = find_first_pixel_of_colour(img1, accepted_colours) line_length1 = colour_line_length(img1, fr1, accepted_colours, deviations=20) res1 = code(img1, fr1[1]+5*3, line_length1, black, d=60, inv=inv) return res1 def print_nums(bitesstr): """I hope you get the gist... Input: str Output: list (side effects - prints...) """ ans1 = [] for i in range(0, len(bitesstr), 8): ans1.append(int('0b'+bitesstr[i:i+8], 2)) for i in range(0, len(ans1)): print(ans1[i]) return ans1 """ Explanation: Huzzah! And that is how we decode it. Let us now look at some specific examples. End of explanation """ ans1 = print_nums(parse_code("Line_Code_PNGs/Manchester.png", manchester)) res2d = "" for i in range(0, len(ans1)): res2d += chr(ans1[i]) ans2d = [] for i in range(0, len(res2d), 8): print(int('0b'+res2d[i:i+8], 2)) """ Explanation: Manchester Code (a rather tricky example) Here is a tricky example of Manchester code - where we have ASCII '0's and '1's with which a 3-letter "word" is encoded. 
End of explanation """ ans2 = print_nums(parse_code("Line_Code_PNGs/NRZ.png", nrz)) """ Explanation: NRZ End of explanation """ ans3 = print_nums(parse_code("Line_Code_PNGs/2B1Q.png", code2B1Q)) res2d3 = "" for i in range(0, len(ans3)): res2d3 += chr(ans3[i]) ans2d3 = [] for i in range(0, len(res2d3), 8): print(int('0b'+res2d3[i:i+8], 2)) """ Explanation: 2B1Q Warning! 2B1Q is currently almost completely broken. Pull requests with correct solutions are welcome :) End of explanation """
pgmpy/pgmpy
examples/Learning Parameters in Discrete Bayesian Networks.ipynb
mit
# Use the alarm model to generate data from it. from pgmpy.utils import get_example_model from pgmpy.sampling import BayesianModelSampling alarm_model = get_example_model("alarm") samples = BayesianModelSampling(alarm_model).forward_sample(size=int(1e5)) samples.head() """ Explanation: Parameter Learning in Discrete Bayesian Networks In this notebook, we show an example for learning the parameters (CPDs) of a Discrete Bayesian Network given the data and the model structure. pgmpy has two main methods for learning the parameters: 1. MaximumLikelihood Estimator (pgmpy.estimators.MaximumLikelihoodEstimator) 2. Bayesian Estimator (pgmpy.estimators.BayesianEstimator) 3. Expectation Maximization (pgmpy.estimators.ExpectationMaximization) In the examples, we will try to generate some data from given models and then try to learn the model parameters back from the generated data. Step 1: Generate some data End of explanation """ # Defining the Bayesian Model structure from pgmpy.models import BayesianNetwork model_struct = BayesianNetwork(ebunch=alarm_model.edges()) model_struct.nodes() """ Explanation: Step 2: Define a model structure In this case, since we are trying to learn the model parameters back we will use the model structure that we used to generate the data from. End of explanation """ # Fitting the model using Maximum Likelihood Estimator from pgmpy.estimators import MaximumLikelihoodEstimator mle = MaximumLikelihoodEstimator(model=model_struct, data=samples) # Estimating the CPD for a single node. print(mle.estimate_cpd(node="FIO2")) print(mle.estimate_cpd(node="CVP")) # Estimating CPDs for all the nodes in the model mle.get_parameters()[:10] # Show just the first 10 CPDs in the output # Verifying that the learned parameters are almost equal. 
np.allclose( alarm_model.get_cpds("FIO2").values, mle.estimate_cpd("FIO2").values, atol=0.01 ) # Fitting the using Bayesian Estimator from pgmpy.estimators import BayesianEstimator best = BayesianEstimator(model=model_struct, data=samples) print(best.estimate_cpd(node="FIO2", prior_type="BDeu", equivalent_sample_size=1000)) # Uniform pseudo count for each state. Can also accept an array of the size of CPD. print(best.estimate_cpd(node="CVP", prior_type="dirichlet", pseudo_counts=100)) # Learning CPDs for all the nodes in the model. For learning all parameters with BDeU prior, a dict of # pseudo_counts need to be provided best.get_parameters(prior_type="BDeu", equivalent_sample_size=1000)[:10] # Shortcut for learning all the parameters and adding the CPDs to the model. model_struct = BayesianNetwork(ebunch=alarm_model.edges()) model_struct.fit(data=samples, estimator=MaximumLikelihoodEstimator) print(model_struct.get_cpds("FIO2")) model_struct = BayesianNetwork(ebunch=alarm_model.edges()) model_struct.fit( data=samples, estimator=BayesianEstimator, prior_type="BDeu", equivalent_sample_size=1000, ) print(model_struct.get_cpds("FIO2")) """ Explanation: Step 3: Learning the model parameters End of explanation """ from pgmpy.estimators import ExpectationMaximization as EM # Define a model structure with latent variables model_latent = BayesianNetwork( ebunch=alarm_model.edges(), latents=["HYPOVOLEMIA", "LVEDVOLUME", "STROKEVOLUME"] ) # Dataset for latent model which doesn't have values for the latent variables samples_latent = samples.drop(model_latent.latents, axis=1) model_latent.fit(samples_latent, estimator=EM) """ Explanation: The Expecation Maximization (EM) algorithm can also learn the parameters when we have some latent variables in the model. End of explanation """
usantamaria/iwi131
ipynb/25a-C3_2015_S1/Certamen3_2015_S1_CC.ipynb
cc0-1.0
def empresas(post): emp.append(e) arch_P.close() for li in arch_P: r, p, e = li.strip().split('#') if e not in emp: arch_P = open(post) emp = list() return emp # Solucion Ordenada def empresas(post): arch_P = open(post) emp = list() for li in arch_P: r, p, e = li.strip().split('#') if e not in emp: emp.append(e) arch_P.close() return emp print empresas("data/postulaciones.txt") """ Explanation: <header class="w3-container w3-teal"> <img src="images/utfsm.png" alt="" align="left"/> <img src="images/inf.png" alt="" align="right"/> </header> <br/><br/><br/><br/><br/> IWI131 Programación de Computadores Sebastián Flores http://progra.usm.cl/ https://www.github.com/usantamaria/iwi131 Soluciones a Certamen 3, S1 2015, Casa Central Pregunta 1 [25%] (a) La web Linkedpy analiza los procesos de postulación de recién titulados a empresas. Para ello tiene el archivo titulados.txt, donde cada línea tiene a los titulados en el formato nombre;rut, y el archivo postulaciones.txt, donde cada línea tiene un rut del titulado, el puesto y la empresa a la que cada titulado postula en el formato rut#puesto#empresa. A partir de estos archivos se desea generar un archivo por empresa, los cuales deben tener los titulados que postularon a algún puesto en la empresa con el formato rut;nombre;puesto. A continuación se presentan las líneas de código que resuelven este problema, pero que están desordenadas. Usted debe ordenarlas e indentarlas (dejar los espacios correspondientes de python) para que ambas funciones estén correctas. La primera función retorna una lista con todas las empresas en el archivo con postulaciones (recibido como parámetro). Y la segunda función resuelve el problema antes descrito, recibiendo como parámetro el nombre del archivo con titulados y el nombre del archivo con postulaciones. La primera función retorna una lista con todas las empresas en el archivo con postulaciones (recibido como parámetro). 
End of explanation """ arch_E = open(e + '.txt', 'w') for pos in arch_P: arch_T.close() def registros(tit, post): arch_E.write(li.format(r, n, p)) emp = empresas(post) arch_T = open(tit) n, r2 = titu.strip().split(';') arch_P.close() for e in emp: arch_E.close() if e2 == e: r, p, e2 = pos.strip().split('#') if r2 == r: li = '{0};{1};{2}\n' arch_P = open(post) return None for titu in arch_T: # Solución ordenada def registros(tit, post): li = '{0};{1};{2}\n' emp = empresas(post) for e in emp: arch_E = open(e + '.txt', 'w') arch_P = open(post) for pos in arch_P: r, p, e2 = pos.strip().split('#') if e2 == e: arch_T = open(tit) for titu in arch_T: n, r2 = titu.strip().split(';') if r2 == r: arch_E.write(li.format(r, n, p)) arch_T.close() arch_E.close() arch_P.close() return None # Utilización registros("data/titulados.txt", "data/postulaciones.txt") """ Explanation: La segunda función resuelve el problema antes descrito (generar un archivo por empresa, los cuales deben tener los titulados que postularon a algún puesto en la empresa con el formato rut;nombre;puesto), recibiendo como parámetro el nombre del archivo con titulados y el nombre del archivo con postulaciones. End of explanation """ def buscar_clientes(nombre_archivo, clase_buscada): archivo = open(nombre_archivo) clientes_buscados = {} for linea in archivo: rut,nombre,clase = linea[:-1].split(";") if clase==clase_buscada: clientes_buscados[rut] = nombre archivo.close() return clientes_buscados print buscar_clientes('data/clientes.txt', 'Pendiente') print buscar_clientes('data/clientes.txt', 'VIP') print buscar_clientes('data/clientes.txt', 'RIP') print buscar_clientes('data/clientes.txt', 'Estandar') """ Explanation: Pregunta 2 [35%] Andrónico Bank es un banco muy humilde que hasta hace poco usaban sólo papel y lápiz para manejar toda la información de sus clientes, también humildes. Como una manera de mejorar sus procesos, Andrónico Bank quiere utilizar un sistema computacional basado en Python. 
Por eso se traspasa la información de sus clientes a un archivo de texto, indicando su rut, nombre y clase cliente. El archivo clientes.txt es un ejemplo de lo anterior: 9234539-9;Sebastian Davalos;VIP 11231709-k;Choclo Delano;Pendiente 5555555-6;Sebastian Pinera;VIP 9999999-k;Gladis Maryn;RIP 12312312-1;Michel Bachelet;VIP 8888888-8;Companero Yuri;Estandar 7987655-1;Sergio Estandarte;RIP Pregunta 2.a Escriba una función buscar_clientes(archivo, clase) que reciba como parámetros el nombre del archivo de clientes y una clase, y retorne un diccionario con los rut de los clientes como llaves y los nombres como valor de todos los clientes pertenecientes a la clase entregada como parámetro. ```Python buscar_clientes('clientes.txt', 'Pendiente') {'11231709-k': 'Choclo Delano'} ``` Estrategia de solución: * ¿Qué estructura tienen los datos de entrada? * ¿Qué estructura deben tener los datos de salida? * ¿Cómo proceso los inputs para generar el output deseado? End of explanation """ def dar_credito(nombre_archivo, rut): clientes_VIP = buscar_clientes(nombre_archivo, "VIP") return rut in clientes_VIP print dar_credito('data/clientes.txt', '9999999-k') print dar_credito('data/clientes.txt', '11231709-k') print dar_credito('data/clientes.txt', '9234539-9') """ Explanation: Pregunta 2.b Escriba una función dar_credito(archivo, rut) que reciba como parámetros el nombre del archivo de clientes y el rut de un cliente, y que retorne True si éste es VIP o False si no lo es. Si no encuentra el cliente la función retorna False ```Python dar_credito('clientes.txt', '9999999-k') False ``` Estrategia de solución: * ¿Qué estructura tienen los datos de entrada? * ¿Qué estructura deben tener los datos de salida? * ¿Cómo proceso los inputs para generar el output deseado? 
End of explanation """ def contar_clientes(nombre_archivo): archivo = open(nombre_archivo) cantidad_clases = {} for linea in archivo: rut,nombre,clase = linea.strip().split(";") if clase in cantidad_clases: cantidad_clases[clase] += 1 else: cantidad_clases[clase] = 1 archivo.close() return cantidad_clases print contar_clientes('data/clientes.txt') """ Explanation: Pregunta 2.c Escriba una función contar_clientes(archivo) que reciba como parámetros el nombre del archivo de clientes y que retorne un diccionario con la cantidad de clientes de cada clase en el archivo. ```Python contar_clientes('clientes.txt') {'VIP': 3, 'Pendiente': 1, 'RIP': 2, 'Estandar': 1} ``` Estrategia de solución: * ¿Qué estructura tienen los datos de entrada? * ¿Qué estructura deben tener los datos de salida? * ¿Cómo proceso los inputs para generar el output deseado? End of explanation """ def nuevo_cliente(nombre_archivo, rut, nombre, clase): archivo = open(nombre_archivo,"a") formato_linea = "{0};{1};{2}\n" linea = formato_linea.format(rut, nombre, clase) archivo.write(linea) archivo.close() return None print nuevo_cliente('data/clientes.txt', '2121211-2', 'Sergio Lagos', 'VIP') """ Explanation: Pregunta 3 [40%] Complementando la pregunta 2, se le solicita: Pregunta 3.a Escriba la función nuevo_cliente(archivo, rut, nombre, clase) que reciba como parámetro el nombre del archivo de clientes y el rut, nombre y clase de un nuevo cliente. La función debe agregar el nuevo cliente al final del archivo. Esta función retorna None. ```Python nuevo_cliente('clientes.txt', '2121211-2', 'Sergio Lagos', 'VIP') ``` Estrategia de solución: * ¿Qué estructura tienen los datos de entrada? * ¿Qué estructura deben tener los datos de salida? * ¿Cómo proceso los inputs para generar el output deseado? 
End of explanation """ def actualizar_clase(nombre_archivo, rut_buscado, nueva_clase): archivo = open(nombre_archivo) lista_lineas = [] formato_linea = "{0};{1};{2}\n" rut_hallado = False for linea in archivo: rut,nombre,clase = linea[:-1].split(";") if rut==rut_buscado: nueva_linea = formato_linea.format(rut,nombre,nueva_clase) lista_lineas.append(nueva_linea) rut_hallado = True else: lista_lineas.append(linea) archivo.close() # Ahora escribir todas las lineas, si es necesario if rut_hallado: archivo = open(nombre_archivo, "w") for linea in lista_lineas: archivo.write(linea) archivo.close() return rut_hallado actualizar_clase('data/clientes.txt', '9234539-9', 'Estandar') """ Explanation: Pregunta 3.b Escriba la función actualizar_clase(archivo, rut, clase) que reciba como parámetro el nombre del archivo de clientes, el rut de un cliente y una nueva clase. La función debe modificar la clase del cliente con el rut indicado, cambiándola por clase en el archivo. Esta función retorna True si logra hacer el cambio o False si no encuentra al cliente con el rut indicado. ```Python actualizar_clase('clientes.txt', '9234539-9', 'Estandar') True ``` Estrategia de solución: * ¿Qué estructura tienen los datos de entrada? * ¿Qué estructura deben tener los datos de salida? * ¿Cómo proceso los inputs para generar el output deseado? 
End of explanation """ def filtrar_clientes(nombre_archivo, clase_buscada): archivo_original = open(nombre_archivo) nombre_archivo_clase = "data\clientes_"+clase_buscada+".txt" archivo_clase = open(nombre_archivo_clase,"w") formato_linea = "{0};{1}\n" for linea in archivo_original: rut,nombre,clase = linea[:-1].split(";") if clase==clase_buscada: nueva_linea = formato_linea.format(rut,nombre) archivo_clase.write(nueva_linea) archivo_original.close() archivo_clase.close() return None filtrar_clientes('data/clientes.txt', 'VIP') """ Explanation: Pregunta 3.c Escriba una función filtrar_clientes(archivo, clase) que reciba como parámetros el nombre del archivo de clientes y una clase de cliente. La función debe crear un archivo clientes_[clase].txt con los rut y los nombres de los clientes pertenecientes a esa clase. Note que el archivo debe ser nombrado según la clase solicitada. Esta función retorna None. ```Python filtrar_clientes('clientes.txt', 'VIP') `` genera el archivoclientes_VIP.txt` con el siguiente contenido 5555555-6;Sebastian Pinera 12312312-1;Michel Bachelet 2121211-2;Sergio Lagos Estrategia de solución: * ¿Qué estructura tienen los datos de entrada? * ¿Qué estructura deben tener los datos de salida? * ¿Cómo proceso los inputs para generar el output deseado? End of explanation """
infilect/ml-course1
keras-notebooks/CNN/6.4-sequence-processing-with-convnets.ipynb
mit
from keras.datasets import imdb from keras.preprocessing import sequence max_features = 10000 # number of words to consider as features max_len = 500 # cut texts after this number of words (among top max_features most common words) print('Loading data...') (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features) print(len(x_train), 'train sequences') print(len(x_test), 'test sequences') print('Pad sequences (samples x time)') x_train = sequence.pad_sequences(x_train, maxlen=max_len) x_test = sequence.pad_sequences(x_test, maxlen=max_len) print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) """ Explanation: Sequence processing with convnets This notebook contains the code samples found in Chapter 6, Section 4 of Deep Learning with Python. Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments. Implementing a 1D convnet In Keras, you would use a 1D convnet via the Conv1D layer, which has a very similar interface to Conv2D. It takes as input 3D tensors with shape (samples, time, features) and also returns similarly-shaped 3D tensors. The convolution window is a 1D window on the temporal axis, axis 1 in the input tensor. Let's build a simple 2-layer 1D convnet and apply it to the IMDB sentiment classification task that you are already familiar with. 
As a reminder, this is the code for obtaining and preprocessing the data: End of explanation """ from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.Embedding(max_features, 128, input_length=max_len)) model.add(layers.Conv1D(32, 7, activation='relu')) model.add(layers.MaxPooling1D(5)) model.add(layers.Conv1D(32, 7, activation='relu')) model.add(layers.GlobalMaxPooling1D()) model.add(layers.Dense(1)) model.summary() model.compile(optimizer=RMSprop(lr=1e-4), loss='binary_crossentropy', metrics=['acc']) history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2) """ Explanation: 1D convnets are structured in the same way as their 2D counter-parts that you have used in Chapter 5: they consist of a stack of Conv1D and MaxPooling1D layers, eventually ending in either a global pooling layer or a Flatten layer, turning the 3D outputs into 2D outputs, allowing to add one or more Dense layers to the model, for classification or regression. One difference, though, is the fact that we can afford to use larger convolution windows with 1D convnets. Indeed, with a 2D convolution layer, a 3x3 convolution window contains 3*3 = 9 feature vectors, but with a 1D convolution layer, a convolution window of size 3 would only contain 3 feature vectors. We can thus easily afford 1D convolution windows of size 7 or 9. 
This is our example 1D convnet for the IMDB dataset: End of explanation """ import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() """ Explanation: Here are our training and validation results: validation accuracy is somewhat lower than that of the LSTM we used two sections ago, but runtime is faster, both on CPU and GPU (albeit the exact speedup will vary greatly depending on your exact configuration). At that point, we could re-train this model for the right number of epochs (8), and run it on the test set. This is a convincing demonstration that a 1D convnet can offer a fast, cheap alternative to a recurrent network on a word-level sentiment classification task. 
End of explanation """ # We reuse the following variables defined in the last section: # float_data, train_gen, val_gen, val_steps import os import numpy as np data_dir = '/home/ubuntu/data/' fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv') f = open(fname) data = f.read() f.close() lines = data.split('\n') header = lines[0].split(',') lines = lines[1:] float_data = np.zeros((len(lines), len(header) - 1)) for i, line in enumerate(lines): values = [float(x) for x in line.split(',')[1:]] float_data[i, :] = values mean = float_data[:200000].mean(axis=0) float_data -= mean std = float_data[:200000].std(axis=0) float_data /= std def generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6): if max_index is None: max_index = len(data) - delay - 1 i = min_index + lookback while 1: if shuffle: rows = np.random.randint( min_index + lookback, max_index, size=batch_size) else: if i + batch_size >= max_index: i = min_index + lookback rows = np.arange(i, min(i + batch_size, max_index)) i += len(rows) samples = np.zeros((len(rows), lookback // step, data.shape[-1])) targets = np.zeros((len(rows),)) for j, row in enumerate(rows): indices = range(rows[j] - lookback, rows[j], step) samples[j] = data[indices] targets[j] = data[rows[j] + delay][1] yield samples, targets lookback = 1440 step = 6 delay = 144 batch_size = 128 train_gen = generator(float_data, lookback=lookback, delay=delay, min_index=0, max_index=200000, shuffle=True, step=step, batch_size=batch_size) val_gen = generator(float_data, lookback=lookback, delay=delay, min_index=200001, max_index=300000, step=step, batch_size=batch_size) test_gen = generator(float_data, lookback=lookback, delay=delay, min_index=300001, max_index=None, step=step, batch_size=batch_size) # This is how many steps to draw from `val_gen` # in order to see the whole validation set: val_steps = (300000 - 200001 - lookback) // batch_size # This is how many steps to draw from `test_gen` # in order to see 
the whole test set: test_steps = (len(float_data) - 300001 - lookback) // batch_size from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.Conv1D(32, 5, activation='relu', input_shape=(None, float_data.shape[-1]))) model.add(layers.MaxPooling1D(3)) model.add(layers.Conv1D(32, 5, activation='relu')) model.add(layers.MaxPooling1D(3)) model.add(layers.Conv1D(32, 5, activation='relu')) model.add(layers.GlobalMaxPooling1D()) model.add(layers.Dense(1)) model.compile(optimizer=RMSprop(), loss='mae') history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=20, validation_data=val_gen, validation_steps=val_steps) """ Explanation: Combining CNNs and RNNs to process long sequences Because 1D convnets process input patches independently, they are not sensitive to the order of the timesteps (beyond a local scale, the size of the convolution windows), unlike RNNs. Of course, in order to be able to recognize longer-term patterns, one could stack many convolution layers and pooling layers, resulting in upper layers that would "see" long chunks of the original inputs -- but that's still a fairly weak way to induce order-sensitivity. One way to evidence this weakness is to try 1D convnets on the temperature forecasting problem from the previous section, where order-sensitivity was key to produce good predictions. Let's see: End of explanation """ import matplotlib.pyplot as plt loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(loss)) plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() """ Explanation: Here are our training and validation Mean Absolute Errors: End of explanation """ # This was previously set to 6 (one point per hour). # Now 3 (one point per 30 min). 
step = 3 lookback = 720 # Unchanged delay = 144 # Unchanged train_gen = generator(float_data, lookback=lookback, delay=delay, min_index=0, max_index=200000, shuffle=True, step=step) val_gen = generator(float_data, lookback=lookback, delay=delay, min_index=200001, max_index=300000, step=step) test_gen = generator(float_data, lookback=lookback, delay=delay, min_index=300001, max_index=None, step=step) val_steps = (300000 - 200001 - lookback) // 128 test_steps = (len(float_data) - 300001 - lookback) // 128 """ Explanation: The validation MAE stays in the low 0.40s: we cannot even beat our common-sense baseline using the small convnet. Again, this is because our convnet looks for patterns anywhere in the input timeseries, and has no knowledge of the temporal position of a pattern it sees (e.g. towards the beginning, towards the end, etc.). Since more recent datapoints should be interpreted differently from older datapoints in the case of this specific forecasting problem, the convnet fails at producing meaningful results here. This limitation of convnets was not an issue on IMDB, because patterns of keywords that are associated with a positive or a negative sentiment will be informative independently of where they are found in the input sentences. One strategy to combine the speed and lightness of convnets with the order-sensitivity of RNNs is to use a 1D convnet as a preprocessing step before a RNN. This is especially beneficial when dealing with sequences that are so long that they couldn't realistically be processed with RNNs, e.g. sequences with thousands of steps. The convnet will turn the long input sequence into much shorter (downsampled) sequences of higher-level features. This sequence of extracted features then becomes the input to the RNN part of the network. This technique is not seen very often in research papers and practical applications, possibly because it is not very well known. It is very effective and ought to be more common. 
Let's try this out on the temperature forecasting dataset. Because this strategy allows us to manipulate much longer sequences, we could either look at data from further back (by increasing the lookback parameter of the data generator), or look at high-resolution timeseries (by decreasing the step parameter of the generator). Here, we will chose (somewhat arbitrarily) to use a step twice smaller, resulting in twice longer timeseries, where the weather data is being sampled at a rate of one point per 30 minutes. End of explanation """ model = Sequential() model.add(layers.Conv1D(32, 5, activation='relu', input_shape=(None, float_data.shape[-1]))) model.add(layers.MaxPooling1D(3)) model.add(layers.Conv1D(32, 5, activation='relu')) model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5)) model.add(layers.Dense(1)) model.summary() model.compile(optimizer=RMSprop(), loss='mae') history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=20, validation_data=val_gen, validation_steps=val_steps) loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(loss)) plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() """ Explanation: This is our model, starting with two Conv1D layers and following-up with a GRU layer: End of explanation """
dmolina/es_intro_python
03-Semantics-Variables.ipynb
gpl-3.0
x = 1 # x is an integer x = 'hello' # now x is a string x = [1, 2, 3] # now x is a list """ Explanation: <!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="fig/cover-small.jpg"> This notebook contains an excerpt from the Whirlwind Tour of Python by Jake VanderPlas; the content is available on GitHub. The text and code are released under the CC0 license; see also the companion project, the Python Data Science Handbook. <!--NAVIGATION--> < A Quick Tour of Python Language Syntax | Contents | Basic Python Semantics: Operators > Basic Python Semantics: Variables and Objects This section will begin to cover the basic semantics of the Python language. As opposed to the syntax covered in the previous section, the semantics of a language involve the meaning of the statements. As with our discussion of syntax, here we'll preview a few of the essential semantic constructions in Python to give you a better frame of reference for understanding the code in the following sections. This section will cover the semantics of variables and objects, which are the main ways you store, reference, and operate on data within a Python script. Python Variables Are Pointers Assigning variables in Python is as easy as putting a variable name to the left of the equals (=) sign: ```python assign 4 to the variable x x = 4 ``` This may seem straightforward, but if you have the wrong mental model of what this operation does, the way Python works may seem confusing. We'll briefly dig into that here. In many programming languages, variables are best thought of as containers or buckets into which you put data. So in C, for example, when you write C // C code int x = 4; you are essentially defining a "memory bucket" named x, and putting the value 4 into it. In Python, by contrast, variables are best thought of not as containers but as pointers. 
So in Python, when you write python x = 4 you are essentially defining a pointer named x that points to some other bucket containing the value 4. Note one consequence of this: because Python variables just point to various objects, there is no need to "declare" the variable, or even require the variable to always point to information of the same type! This is the sense in which people say Python is dynamically-typed: variable names can point to objects of any type. So in Python, you can do things like this: End of explanation """ x = [1, 2, 3] y = x """ Explanation: While users of statically-typed languages might miss the type-safety that comes with declarations like those found in C, C int x = 4; this dynamic typing is one of the pieces that makes Python so quick to write and easy to read. There is a consequence of this "variable as pointer" approach that you need to be aware of. If we have two variable names pointing to the same mutable object, then changing one will change the other as well! For example, let's create and modify a list: End of explanation """ print(y) x.append(4) # append 4 to the list pointed to by x print(y) # y's list is modified as well! """ Explanation: We've created two variables x and y which both point to the same object. Because of this, if we modify the list via one of its names, we'll see that the "other" list will be modified as well: End of explanation """ x = 'something else' print(y) # y is unchanged """ Explanation: This behavior might seem confusing if you're wrongly thinking of variables as buckets that contain data. But if you're correctly thinking of variables as pointers to objects, then this behavior makes sense. 
Note also that if we use "=" to assign another value to x, this will not affect the value of y – assignment is simply a change of what object the variable points to: End of explanation """ x = 10 y = x x += 5 # add 5 to x's value, and assign it to x print("x =", x) print("y =", y) """ Explanation: Again, this makes perfect sense if you think of x and y as pointers, and the "=" operator as an operation that changes what the name points to. You might wonder whether this pointer idea makes arithmetic operations in Python difficult to track, but Python is set up so that this is not an issue. Numbers, strings, and other simple types are immutable: you can't change their value – you can only change what values the variables point to. So, for example, it's perfectly safe to do operations like the following: End of explanation """ x = 4 type(x) x = 'hello' type(x) x = 3.14159 type(x) """ Explanation: When we call x += 5, we are not modifying the value of the 10 object pointed to by x; we are rather changing the variable x so that it points to a new integer object with value 15. For this reason, the value of y is not affected by the operation. Everything Is an Object Python is an object-oriented programming language, and in Python everything is an object. Let's flesh-out what this means. Earlier we saw that variables are simply pointers, and the variable names themselves have no attached type information. This leads some to claim erroneously that Python is a type-free language. But this is not the case! Consider the following: End of explanation """ L = [1, 2, 3] L.append(100) print(L) """ Explanation: Python has types; however, the types are linked not to the variable names but to the objects themselves. In object-oriented programming languages like Python, an object is an entity that contains data along with associated metadata and/or functionality. 
In Python everything is an object, which means every entity has some metadata (called attributes) and associated functionality (called methods). These attributes and methods are accessed via the dot syntax. For example, before we saw that lists have an append method, which adds an item to the list, and is accessed via the dot (".") syntax: End of explanation """ x = 4.5 print(x.real, "+", x.imag, 'i') """ Explanation: While it might be expected for compound objects like lists to have attributes and methods, what is sometimes unexpected is that in Python even simple types have attached attributes and methods. For example, numerical types have a real and imag attribute that returns the real and imaginary part of the value, if viewed as a complex number: End of explanation """ x = 4.5 x.is_integer() x = 4.0 x.is_integer() """ Explanation: Methods are like attributes, except they are functions that you can call using opening and closing parentheses. For example, floating point numbers have a method called is_integer that checks whether the value is an integer: End of explanation """ type(x.is_integer) """ Explanation: When we say that everything in Python is an object, we really mean that everything is an object – even the attributes and methods of objects are themselves objects with their own type information: End of explanation """
mne-tools/mne-tools.github.io
0.17/_downloads/f294e4a296e7fedb40bec791d9e234e9/plot_stats_cluster_1samp_test_time_frequency.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)

import numpy as np
import matplotlib.pyplot as plt

import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample

print(__doc__)

""" Explanation: Non-parametric 1 sample cluster statistic on single trial power
This script shows how to estimate significant clusters in time-frequency power
estimates. It uses a non-parametric statistical procedure based on permutations
and cluster level statistics.
The procedure consists in:
extracting epochs
compute single trial power estimates
baseline line correct the power estimates (power ratios)
compute stats to see if ratio deviates from 1.
End of explanation """

data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax, event_id = -0.3, 0.6, 1

# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')

include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more

# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False,
                       include=include, exclude='bads')

# Load condition 1 (event_id already has this value above; re-set here to keep
# this section self-describing)
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True,
                    reject=dict(grad=4000e-13, eog=150e-6))

# Take only one channel
ch_name = 'MEG 1332'
epochs.pick_channels([ch_name])

evoked = epochs.average()

# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
freqs = np.arange(8, 40, 2)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz

tfr_epochs = tfr_morlet(epochs, freqs, n_cycles=4., decim=decim,
                        average=False, return_itc=False, n_jobs=1)

# Baseline power
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))

# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(0., 0.4)
tfr_epochs.crop(0., 0.4)

epochs_power = tfr_epochs.data[:, 0, :, :]  # take the 1 channel

""" Explanation: Set parameters End of explanation """

threshold = 2.5
n_permutations = 100  # Warning: 100 is too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
                                   threshold=threshold, tail=0)

""" Explanation: Compute statistic End of explanation """

evoked_data = evoked.data
times = 1e3 * evoked.times

plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)

# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.05:
        T_obs_plot[c] = T_obs[c]

vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.subplot(2, 1, 1)
plt.imshow(T_obs, cmap=plt.cm.gray,
           extent=[times[0], times[-1], freqs[0], freqs[-1]],
           aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,
           extent=[times[0], times[-1], freqs[0], freqs[-1]],
           aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)

ax2 = plt.subplot(2, 1, 2)
evoked.plot(axes=[ax2], time_unit='s')
plt.show()

""" Explanation: View time-frequency plots End of explanation """
dm-wyncode/zipped-code
content/posts/philosophy/object_classes/interfaces.ipynb
mit
%%HTML <div style="background-color:#d9edf7;color:#31708;border-color:#bce8f1;padding: 15px;margin-bottom: 20px;border: 1px; border-radius:4px;"> <strong>psittacism: </strong> <p>automatic speech without thought of the meaning of the words spoken</p> <p>New Latin psittacismus, from Latin psittacus parrot + -ismus -ism</p> <citation>Psittacism. (n.d.). Retrieved August 24, 2016, from http://www.merriam-webster.com/dictionary/psittacism</citation> </div> %%HTML <a title="By John Moose (originally posted to Flickr as Moluccan Cockatoo) [CC BY 2.0 (http://creativecommons. org/licenses/by/2.0)], via Wikimedia Commons" href="https://commons.wikimedia.org/wiki/File%3ACacatua_moluccensis_-Cincinnati_Zoo-8a.jpg"><img width="256" alt="Cacatua moluccensis -Cincinnati Zoo-8a" src="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b8/Cacatua_moluccensis_-Cincinnati_Zoo-8a.jpg/256px-Cacatua_moluccensis_-Cincinnati_Zoo-8a.jpg"/></a> """ Explanation: Object identity is a frail reed upon which to rely. Whenever I use the word 'is', except in sheer tautology, I deeply misuse it. — George Santayana, philosopher Critical analysis of [design patterns] and design needs is not an optional issue: without it, you cannot spot what patterns are anti-patterns, in terms of your actual design needs — including what programming languages you intend to target with your design. Five Easy Pieces: Simple Python Non-Patterns Learning programming functional style in JavaScript. I have been making an effort to learn more JavaScript by going through basic tutorials for React which is a JavaScript library. As I was reading tutorials and referencing some books about JavaScript, I realized that my mind was mismapped when it came to the word interface. The admonition of the gang of four of Design Patterns kept coming to mind. And then I saw the admonition explicitly displayed in Eric Elliott's book: Program to an interface, not an implementation. 
— The Gang of Four, Design Patterns Interfaces are one of the primary tools of modular software design. Interfaces define a contract that an implementing module will fulfill. For instance, a common problem in JavaScript applications is that the application stops functioning if the Internet connection is lost. In order to solve that problem, you could use local storage and sync changes periodically with the server. Unfortunately, some browsers don’t support local storage, so you may have to fall back to cookies or even Flash (depending on how much data you need to store). Elliott, Eric (2014-06-26). Programming JavaScript Applications: Robust Web Architecture with Node, HTML5, and Modern JS Libraries (Kindle Locations 2752-2759). O'Reilly Media. Kindle Edition. So then I had to ask myself… "Do I really know what this means?" Do I know what an interface is in the context of the gang of four design patterns sense? Sometimes it is easier and much more interesting to know what something is NOT rather than what it is. Knowing what interface is NOT. Caution Do not confuse the concept of the interface with terms like graphical user interface (GUI). Although a GUI is, as its name implies, an interface, the term interfaces, as used here, is more general in nature and is not restricted to a graphical interface. Weisfeld, Matt (2008-08-25). The Object-Oriented Thought Process (Developer's Library) (Kindle Locations 1027-1029). Pearson Education. Kindle Edition. Therefore… knowing what an interface is NOT makes it easier to grasp what interface is meant to be in the context of object oriented programming. Sandi Metz explains: The methods that make up the public interface of your class comprise the face it presents to the world. Metz, Sandi (2012-09-05). Practical Object-Oriented Design in Ruby: An Agile Primer (Addison-Wesley Professional Ruby) (p. 62). Pearson Education. Kindle Edition. 
And then I recalled… from a long time ago when I was learning PHP that PHP actually has a thing called interface and that Java does, too. Java includes a concept called interfaces. A Java interface is a bit like a class, except a Java interface can only contain method signatures and fields. An Java interface cannot contain an implementation of the methods, only the signature (name, parameters and exceptions) of the method. Java Interfaces So when… the gang-of-four admonition Program to an interface, not an implementation. is quoted, the meaning of interface is likely meant in the sense that it is in a Java interface sense. While the concept… of interface still applies in non-staticlly typed languages such as Python and Ruby, its meaning is used in a more abstract sense rather than in a language-enforced sense. ❝…languages that force you to be explicit about this transition do offer a benefit. They make it painfully, inescapably, and explicitly clear that you are defining an abstract interface. It is impossible to create an abstraction unknowingly or by accident; in statically typed languages defining an interface is always intentional.❞ —Sandi Metz Because statically typed languages have compilers that act like unit tests for types, you would not be able to inject just any random object into Gear. Instead you would have to declare an interface, define diameter as part of that interface, include the interface in the Wheel class, and tell Gear that the class you are injecting is a kind of that interface. Rubyists are justifiably grateful to avoid these gyrations, but languages that force you to be explicit about this transition do offer a benefit. They make it painfully, inescapably, and explicitly clear that you are defining an abstract interface. It is impossible to create an abstraction unknowingly or by accident; in statically typed languages defining an interface is always intentional. Metz, Sandi (2012-09-05). 
Practical Object-Oriented Design in Ruby: An Agile Primer (Addison-Wesley Professional Ruby) (p. 54). Pearson Education. Kindle Edition. And then to add to my confusion… Matt Weisfeld points out that there are interfaces to the classes as well as to the methods! Interfaces Versus Interfaces It is important to note that there are interfaces to the classes as well as the methods—don't confuse the two. The interfaces to the classes are the public methods while the interfaces to the methods relate to how you call (invoke) them. Weisfeld, Matt (2008-08-25). The Object-Oriented Thought Process (Developer's Library) (Kindle Locations 705-708). Pearson Education. Kindle Edition. Note: This book The Object-Oriented Thought Process uses Java for its examples so is using "interface" to mean what it means canonically in object oriented programming. In general, reusable classes tend to have interfaces that are more abstract than concrete. Concrete interfaces tend to be very specific, whereas abstract interfaces are more general. However, simply stating that a highly abstract interface is more useful than a highly concrete interface, although often true, is not always the case. It is possible to write a very useful, concrete class that is not at all reusable. This happens all the time, and there is nothing wrong with it in some situations. However, we are now in the design business, and want to take advantage of what OO offers us. So our goal is to design abstract, highly reusable classes—and to do this we will design highly abstract user interfaces. To illustrate… Weisfeld, Matt (2008-08-25). The Object-Oriented Thought Process (Developer's Library) (Kindle Locations 1179-1185). Pearson Education. Kindle Edition. As another example, consider an automobile. The interface between you and the car includes components such as the steering wheel, gas pedal, brake, and ignition switch. 
For most people, aesthetic issues aside, the main concern when driving a car is that the car starts, accelerates, stops, steers, and so on. The implementation, basically the stuff that you don't see, is of little concern to the average driver. In fact, most people would not even be able to identify certain components, such as the catalytic converter and gasket. However, any driver would recognize and know how to use the steering wheel because this is a common interface. By installing a standard steering wheel in the car, manufacturers are assured that the people in their target market will be able to use the system. If, however, a manufacturer decided to install a joystick in place of the steering wheel, most drivers would balk at this, and the automobile might not be a big seller (except for some eclectic people who love bucking the trends). On the other hand, as long as the performance and aesthetics didn't change, the average driver would not notice if the manufacturer changed the engine (part of the implementation) of the automobile. Weisfeld, Matt (2008-08-25). The Object-Oriented Thought Process (Developer's Library) (Kindle Locations 1036-1045). Pearson Education. Kindle Edition. The module concept. Module We can use functions and closure to make modules. A module is a function or object that presents an interface but that hides its state and implementation. By using functions to produce modules, we can almost completely eliminate our use of global variables, thereby mitigating one of JavaScript’s worst features. Crockford, Douglas (2008-05-08). JavaScript: The Good Parts: The Good Parts (Kindle Locations 846-849). O'Reilly Media. Kindle Edition. Douglas Crockford points out… the utility of modules in JavaScript which provides namespacing. This brings to mind the last item of the Zen of Python: Namespaces are one honking great idea -- let's do more of those! And in Python module creation is easy. 
You only have to put Python code into a file and it becomes a module! A module is a file containing Python definitions and statements. https://docs.python.org/2/tutorial/modules.html Because Python doesn't have (and doesn't need) a formal Interface contract, the Java-style distinction between abstraction and interface doesn't exist. If someone goes through the effort to define a formal interface, it will also be an abstract class. The only differences would be in the stated intent in the docstring. http://stackoverflow.com/questions/372042/difference-between-abstract-class-and-interface-in-python End of explanation """ import inspect """ Explanation: So in conclusion… while it is easy to psittacistically repeat Program to an interface, not an implementation. I have to stop and ask myself, "Does that word mean what I think it means?" Here is an example of how one might use the concept of an interface in Python by using classes and inheritance. Some code… Using an introspection tool from included library in Python. End of explanation """ class AbstractShape: """Abstract (base) class for shapes.""" def draw(self, ): """Draw shape.""" error_message = "Method {} not defined. Abstract shapes have no idea what to draw!"\ .format(self.draw) raise NotImplementedError(error_message) """ Explanation: Python does not have interfaces. If it did, it would look something like this. Interfaces can be imitated with a class. End of explanation """ try: shape = AbstractShape() shape.draw() except NotImplementedError as e: print(Exception(e)) """ Explanation: If one tries to make an instance of AbstactShape and calls obj.draw() an ImplementationError is raised because the shape is abstract. It has no idea what to draw. 
End of explanation """ class Circle(AbstractShape): """Circle implements AbstactShape.""" def __init__(self, ): print("My ancestors are {}".format(inspect.getmro(Circle))) def draw(self, ): print("An instance of {} {} is drawing itself via {}."\ .format(super(), self, self.draw)) print("<circle appears>") circle = Circle() circle.draw() class Square(AbstractShape): """Square implements AbstactShape.""" def __init__(self, ): print("My ancestors are {}".format(inspect.getmro(Square))) def draw(self, ): print("An instance of {} {} is drawing itself via {}."\ .format(super(), self, self.draw)) print("<square appears>") square = Square() square.draw() """ Explanation: If one tries to make an instance of Cirlce and calls cirlce.draw() an ImplementationError is not raise because the shape is no longer abstract. End of explanation """ shapes = [class_() for class_ in (Circle, Square)] print(shapes) """ Explanation: Create a list of shapes by instantiating Circle and Square. End of explanation """ for shape in shapes: shape.draw() """ Explanation: Invoke bound draw method on each shape instance. End of explanation """
icoxfog417/gensim_notebook
topic_model_evaluation.ipynb
mit
# enable showing matplotlib image inline %matplotlib inline # autoreload module %load_ext autoreload %autoreload 2 PROJECT_ROOT = "/" def load_local_package(): import os import sys root = os.path.join(os.getcwd(), "./") sys.path.append(root) # load project root return root PROJECT_ROOT = load_local_package() """ Explanation: How to use topic model by gensim This document will show you how to use topic model by gensim. The data for this tutorial is from Recruit HotPepper Beauty API. So you need api key of it. If you get api key, then execute below scripts. scripts/download_data.py scripts/make_corpus.py python download_data.py your_api_key It is for downloading the json data from api (extract hair salons data near the Tokyo). python make_corpus path_to_downloaded_json_file It is for making the corpus from json data. You can set some options to restrict the words in corpus. Please see the help of this script. After executing above scripts, you will have corpus and dictionary in your data folder. Then, execute this notebook. 
Preparation End of explanation """ prefix = "salons" def load_corpus(p): import os import json from gensim import corpora s_path = os.path.join(PROJECT_ROOT, "./data/{0}.json".format(p)) d_path = os.path.join(PROJECT_ROOT, "./data/{0}_dict.dict".format(p)) c_path = os.path.join(PROJECT_ROOT, "./data/{0}_corpus.mm".format(p)) s = [] with open(s_path, "r", encoding="utf-8") as f: s = json.load(f) d = corpora.Dictionary.load(d_path) c = corpora.MmCorpus(c_path) return s, d, c salons, dictionary, corpus = load_corpus(prefix) print(dictionary) print(corpus) """ Explanation: Load Corpus End of explanation """ from gensim import models topic_range = range(2, 5) test_rate = 0.2 def split_corpus(c, rate_or_size): import math size = 0 if isinstance(rate_or_size, float): size = math.floor(len(c) * rate_or_size) else: size = rate_or_size # simple split, not take sample randomly left = c[:-size] right = c[-size:] return left, right def calc_perplexity(m, c): import numpy as np return np.exp(-m.log_perplexity(c)) def search_model(c, rate_or_size): most = [1.0e6, None] training, test = split_corpus(c, rate_or_size) print("dataset: training/test = {0}/{1}".format(len(training), len(test))) for t in topic_range: m = models.LdaModel(corpus=training, id2word=dictionary, num_topics=t, iterations=250, passes=5) p1 = calc_perplexity(m, training) p2 = calc_perplexity(m, test) print("{0}: perplexity is {1}/{2}".format(t, p1, p2)) if p2 < most[0]: most[0] = p2 most[1] = m return most[0], most[1] perplexity, model = search_model(corpus, test_rate) print("Best model: topics={0}, perplexity={1}".format(model.num_topics, perplexity)) """ Explanation: Make Topic Model End of explanation """ def calc_topic_distances(m, topic): import numpy as np def kldiv(p, q): distance = np.sum(p * np.log(p / q)) return distance # get probability of each words # https://github.com/piskvorky/gensim/blob/develop/gensim/models/ldamodel.py#L733 t = m.state.get_lambda() for i, p in enumerate(t): t[i] = t[i] / 
t[i].sum() base = t[topic] distances = [(i_p[0], kldiv(base, i_p[1])) for i_p in enumerate(t) if i_p[0] != topic] return distances def plot_distance_matrix(m): import numpy as np import matplotlib.pylab as plt # make distance matrix mt = [] for i in range(m.num_topics): d = calc_topic_distances(m, i) d.insert(i, (i, 0)) # distance between same topic d = [_d[1] for _d in d] mt.append(d) mt = np.array(mt) # plot matrix fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.set_aspect("equal") plt.imshow(mt, interpolation="nearest", cmap=plt.cm.ocean) plt.yticks(range(mt.shape[0])) plt.xticks(range(mt.shape[1])) plt.colorbar() plt.show() plot_distance_matrix(model) """ Explanation: Evaluate/Visualize Topic Model Check the distance between each topic If we success to categorize the documents well, then the distance of each topic should be far apart. End of explanation """ def show_document_topics(c, m, sample_size=200, width=1): import random import numpy as np import matplotlib.pylab as plt # make document/topics matrix d_topics = [] t_documents = {} samples = random.sample(range(len(c)), sample_size) for s in samples: ts = m.__getitem__(corpus[s], -1) d_topics.append([v[1] for v in ts]) max_topic = max(ts, key=lambda x: x[1]) if max_topic[0] not in t_documents: t_documents[max_topic[0]] = [] t_documents[max_topic[0]] += [(s, max_topic[1])] d_topics = np.array(d_topics) for t in t_documents: t_documents[t] = sorted(t_documents[t], key=lambda x: x[1], reverse=True) # draw cumulative bar chart fig = plt.figure(figsize=(20, 3)) N, K = d_topics.shape indices = np.arange(N) height = np.zeros(N) bar = [] for k in range(K): color = plt.cm.coolwarm(k / K, 1) p = plt.bar(indices, d_topics[:, k], width, bottom=None if k == 0 else height, color=color) height += d_topics[:, k] bar.append(p) plt.ylim((0, 1)) plt.xlim((0, d_topics.shape[0])) topic_labels = ['Topic #{}'.format(k) for k in range(K)] plt.legend([b[0] for b in bar], topic_labels) plt.show(bar) return d_topics, t_documents 
document_topics, topic_documents = show_document_topics(corpus, model)

# Report the highest-ranked salons for each dominant topic.
num_show_ranks = 5
for t in topic_documents:
    print("Topic #{0} salons".format(t) + " " + "*" * 100)
    for i, v in topic_documents[t][:num_show_ranks]:
        print("{0}({1}):{2}".format(salons[i]["name"], v, salons[i]["urls"]["pc"]))

""" Explanation: Check the topics in documents
If we success to categorize the documents well, each document has one mainly
topic. End of explanation """

def visualize_topic(m, word_count=10, fontsize_base=10):
    """Render the top word_count words of each topic in model m, scaling
    every word's font size by its share within the topic."""
    import matplotlib.pylab as plt
    from matplotlib.font_manager import FontProperties
    font = lambda s: FontProperties(fname=r'C:\Windows\Fonts\meiryo.ttc', size=s)

    # get words in topic
    topic_words = []
    for t in range(m.num_topics):
        words = m.show_topic(t, topn=word_count)
        topic_words.append(words)

    # plot words
    fig = plt.figure(figsize=(8, 5))
    for i, ws in enumerate(topic_words):
        fig.add_subplot(1, m.num_topics, i + 1)  # becomes the current axes
        plt.ylim(0, word_count + 0.5)
        plt.xticks([])
        plt.yticks([])
        plt.title("Topic #{}".format(i))
        for j, (share, word) in enumerate(ws):
            size = fontsize_base + (fontsize_base * share * 2)
            w = "%s(%1.3f)" % (word, share)
            plt.text(0.1, word_count - j - 0.5, w, ha="left",
                     fontproperties=font(size))

    plt.tight_layout()
    plt.show()

visualize_topic(model)

""" Explanation: Visualize words in topics
To consider about the name of topic, show the words in topics.
End of explanation """
davofis/computational_seismology
05_pseudospectral/fourier_acoustic_1d.ipynb
gpl-3.0
# This is a configuration step for the exercise. Please run it before calculating the derivative!
import numpy as np
import matplotlib.pyplot as plt
from ricker import ricker

# Show the plots in the Notebook.
plt.switch_backend("nbagg")

""" Explanation: <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
<div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
<div style="position: relative ; top: 50% ; transform: translatey(-50%)">
<div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
<div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">The Fourier Pseudospectral Method - Acoustic Waves in 1D</div>
</div>
</div>
</div>
Seismo-Live: http://seismo-live.org
Authors: David Vargas (@dvargas) Heiner Igel (@heinerigel)
Basic Equations
We use the Fourier method to calculate exact n-th derivatives on a regular
spaced grid (to machine precision). This property combined with classical time
extrapolation schemes result in the so call Fourier pseudospectral method.
The problem of solving the 1D acoustic wave equation in an homogeneous media
\begin{equation}
\partial_t^2 p(x,t) = c(x)^2 \ \partial_x^2 p(x,t) + s(x,t)
\end{equation}
is covered in this notebook.
We explore the benefits of calculating exact spatial derivatives (up to
machine precision), numerical dispersion, comparison with a Finite Difference
scheme
End of explanation """


def fourier_derivative_2nd(f, dx):
    """Return the second spatial derivative of f on a periodic grid.

    Fourier (pseudospectral) method: multiply the spectrum of f by
    (ik)**2 = -k**2 and transform back.  Exact to machine precision for
    band-limited periodic signals.

    Parameters
    ----------
    f : array_like
        Function values sampled on a regularly spaced grid.
    dx : float
        Grid spacing.

    Returns
    -------
    numpy.ndarray
        Second derivative of f, same length as f.
    """
    nx = np.size(f)
    # Angular wavenumbers in FFT order. np.fft.fftfreq handles even and odd
    # nx alike; the previous manual construction of the negative-frequency
    # half assumed an even number of samples and raised a shape error
    # otherwise. For even nx the values are identical to the old ones.
    k = 2.0 * np.pi * np.fft.fftfreq(nx, d=dx)
    # Differentiate in the spectral domain: d^2/dx^2 <-> (ik)**2.
    ff = (1j * k) ** 2 * np.fft.fft(f)
    df_num = np.real(np.fft.ifft(ff))
    return df_num

""" Explanation: 1. Fourier derivative method
The second spatial derivative is computed by multiplying the spatial Fourier
transform of the pressure field $P(k,t)$ with $ (ik)^2 $
\begin{equation}
\partial_x^2 p(x,t) = \mathscr{F}^{-1}[(ik)^{2}P(k,t)] = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} (ik)^{2} P(k,t) e^{ikx} dk
\end{equation}
where $k$ is the wavenumber and $IFT$ the Inverse Fourier Transform. A
function to perform this task is implemented in the next cell.
End of explanation """
Initialization of setup End of explanation """ # source time function # --------------------------------------------------------------- t = np.arange(1, nt+1)*dt # initialize time axis T0 = 1./f0 tmp = ricker(dt, T0) tmp = np.diff(tmp) src = np.zeros(nt) src[0:np.size(tmp)] = tmp lam = c*T0 #spatial source function # --------------------------------------------------------------- sigma = 2*dx x0 = x[isx-1] sg = np.exp(-1/sigma**2 *(x - x0)**2); sg = sg/np.amax(sg) """ Explanation: 3. Source Initialization End of explanation """ # Initialize animated plot # --------------------------------------------------------------- fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True, figsize=(12,7)) line1 = ax1.plot(x[isx:], p[isx:], 'k', lw=1.5, label='FD-3pt') line2 = ax2.plot(x[isx:], ap[isx:], 'r', lw=1.5, label='FD-5pt') line3 = ax3.plot(x[isx:], sp[isx:], 'b', lw=1.5, label='Fourier') ax1.axis([isx*dx, nx*dx, -6E-7, 6E-7]); ax3.set_xlabel('x [m]') ax1.legend(loc=4) ax2.legend(loc=4) ax3.legend(loc=4) plt.ion() # set interective mode plt.show() # --------------------------------------------------------------- # Time extrapolation # --------------------------------------------------------------- for it in range(nt): # ---------------------------------------- # Fourier Pseudospectral Method # ---------------------------------------- sd2p = fourier_derivative_2nd(sp, dx) # 2nd space derivative spnew = 2*sp - spold + c**2 * dt**2 * sd2p # Time Extrapolation spnew = spnew + sg*src[it]*dt**2 # Add sources spold, sp = sp, spnew # Time levels sp[1] = 0; sp[nx-1] = 0 # set boundaries pressure free # ---------------------------------------- # Finite Differences Method 3pt # ---------------------------------------- for i in range(1, nx-1): d2p[i] = (p[i+1] - 2*p[i] + p[i-1])/dx**2 # Space derivative pnew = 2*p - pold + dt**2 * c**2 * d2p # Time Extrapolation pnew = pnew + sg*src[it]*dt**2 # Add source pold, p = p, pnew # Time levels p[0] = 0; p[nx-1] = 0 # set boundaries 
pressure free # ---------------------------------------- # Finite Differences Method 5pt # ---------------------------------------- for i in range(2, nx-2): ad2p[i] = (-1/12*ap[i+2] + 4/3*ap[i+1] - 5/2*ap[i] \ + 4/3*ap[i-1] - 1/12*ap[i-2])/dx**2 # Space derivative apnew = 2*ap - apold + dt**2 * c**2 * ad2p # Time Extrapolation apnew = apnew + sg*src[it]*dt**2 # Add source apold, ap = ap, apnew # Time levels ap[0] = 0; ap[nx-1] = 0 # set boundaries pressure free # -------------------------------------- # Animation plot. Display solution if not it % isnap: for l in line1: l.remove() del l for l in line2: l.remove() del l for l in line3: l.remove() del l # -------------------------------------- # Display lines line1 = ax1.plot(x[isx:], p[isx:], 'k', lw=1.5) line2 = ax2.plot(x[isx:], ap[isx:], 'r', lw=1.5) line3 = ax3.plot(x[isx:], sp[isx:], 'b', lw=1.5) plt.gcf().canvas.draw() """ Explanation: 4. Time Extrapolation The final solution for our 1D acoustic wave problem after introducing a finite differences time extrapolation schem can be written as \begin{equation} p_{j}^{n+1} = dt^2c_{j}^{2} \partial_{x}^{2}p_{j}^{n} + dt^2s_{j}^{n} + 2p_{j}^{n} - p_{j}^{n-1} \end{equation} where the space derivative is computed with the Fourier method. In order to compare the above numerical solution, we implement a 3-point finite difference operator, as well as a 5-point finite difference operator to compute spatial derivatives. They are given as: 1.) 3-point finite difference operator \begin{equation} \partial_x^2 p(x,t) = \frac{p(x+\mathrm{d}x,t) - 2 p(x,t) + p(x-\mathrm{d}x,t)}{\mathrm{d}x^2} \end{equation} 2.) 
5-point finite difference operator \begin{equation} \partial_x^2 p(x,t) = \frac{-p(x+2\mathrm{d}x,t) + 16p(x+\mathrm{d}x,t) - 30p(x,t) + 16p(x-\mathrm{d}x,t) - p(x-2\mathrm{d}x,t)}{12\mathrm{d}x^2} \end{equation} Numerical dispersion One of the most prominent characteristic of the Fourier method is the low numerical dispersion in comparison with the finite difference method. The animation displayed below compare the effects of numerical dispersion on the solution of the 1D acoustic equation using our three different approaches. End of explanation """
taesiri/noteobooks
rl/bc/bc.ipynb
mit
import pickle import tensorflow as tf import numpy as np import tf_util import gym import load_policy expert_policy_file = 'experts/Humanoid-v1.pkl' envname = 'Humanoid-v1' policy_fn = load_policy.load_policy(expert_policy_file) num_rollouts = 1000 """ Explanation: Naïve Behavioral Cloning. First assignment of CS(1+1)(3^2)(2^2)-112 by Sergey Levine. This is just a notebook and You'll need to get rest of files from course github page. Import Some of dependencies! End of explanation """ # set to true if you want to gather data from expert, or you can load data from a pickle file (see cell bellow) gather_data = False # only allocate a fraction of GPU memory to TF, cause it doesn't give it back! gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2) if gather_data: with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)): tf_util.initialize() env = gym.make(envname) max_steps = env.spec.timestep_limit returns = [] observations = [] actions = [] for i in range(num_rollouts): print('iter', i) obs = env.reset() done = False totalr = 0. steps = 0 while not done: action = policy_fn(obs[None,:]) observations.append(obs) actions.append(action) obs, r, done, _ = env.step(action) totalr += r steps += 1 # Don't Render anything! # env.render() if steps % 100 == 0: print("%i/%i"%(steps, max_steps)) if steps >= max_steps: break returns.append(totalr) print('returns', returns) print('mean return', np.mean(returns)) print('std of return', np.std(returns)) expert_data = {'observations': np.array(observations), 'actions': np.array(actions)} print(expert_data['actions'].shape) """ Explanation: Gathering Data from expret - Unfortunately we have to use Tensorflow, Which i don't like! You should read comments, before the cell bellow! End of explanation """ # I Don't Know!!! # Stupid TF doesn't free up the memory!!! """ Explanation: Delete the Tensorflow! Go home Tensorflow! 
You are not needed anymore End of explanation """ import pickle pickle_out = open('expert-' + envname + '.pickle', "wb") pickle.dump(expert_data, pickle_out) pickle_out.close() import pickle pickle_in = open('expert-' + envname + '.pickle', "rb") expert_data = pickle.load(pickle_in) """ Explanation: Save / Load Expert from file! End of explanation """ import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader from torch.utils.data import sampler from torch.utils.data import Dataset import torchvision.datasets as dset import torchvision.transforms as T import timeit gpu_dtype = torch.cuda.FloatTensor # the CPU datatype # Constant to control how frequently we print train loss print_every = 100 # This is a little utility that we'll use to reset the model # if we want to re-initialize all our parameters def reset(m): if hasattr(m, 'reset_parameters'): m.reset_parameters() """ Explanation: Creating a Feed Forward Neural Network for Behavioral cloning YES I'm Using PyTorch End of explanation """ class ObservastionActionDataset(Dataset): """Observations Actions dataset.""" def __init__(self, list_observations, list_actions, transform=None): """ Args: list_observations (observation tensor): . list_actions (action tensor): . """ self.actions = list_actions self.observations = list_actions self.transform = transform def __len__(self): return actions.shape[0] def __getitem__(self, idx): return observations[idx], actions[idx] ## Copy paste from CS231n class ChunkSampler(sampler.Sampler): """Samples elements sequentially from some offset. 
Arguments: num_samples: # of desired datapoints start: offset where we should start selecting from """ def __init__(self, num_samples, start = 0): self.num_samples = num_samples self.start = start def __iter__(self): return iter(range(self.start, self.start + self.num_samples)) def __len__(self): return self.num_samples expert_observations = expert_data['observations'] expert_actions = expert_data['actions'] # Good old way to shuffle # I'm not exaclty sure if I need this or pytorch Dataset will take care of this step! import random idx = np.arange(expert_observations.shape[0]) np.random.shuffle(idx) expert_observations = expert_observations[idx] expert_actions = expert_actions[idx] # Tensorify! NUM_TRAIN = 900000 NUM_VAL = 99094 train_observations = expert_observations[:NUM_TRAIN] train_actions = expert_actions[:NUM_TRAIN, :NUM_TRAIN] train_actions = np.squeeze(train_actions, axis=1) train_observations = torch.from_numpy(train_observations) train_actions = torch.from_numpy(train_actions) test_observations = expert_observations[NUM_TRAIN:] test_actions = expert_actions[NUM_TRAIN:] test_actions = np.squeeze(test_actions, axis=1) test_observations = torch.from_numpy(test_observations) test_actions = torch.from_numpy(test_actions) train_dataset = ObservastionActionDataset(train_observations, train_actions) test_dataset = ObservastionActionDataset(test_observations, test_actions) loader_train = DataLoader(train_dataset, batch_size=64, sampler=ChunkSampler(NUM_TRAIN-NUM_VAL, 0)) loader_val = DataLoader(train_dataset, batch_size=64, sampler=ChunkSampler(NUM_TRAIN, NUM_TRAIN-NUM_VAL)) loader_test = DataLoader(test_dataset, batch_size=64, sampler=ChunkSampler(NUM_TRAIN-NUM_VAL, 0)) """ Explanation: Creating a Train, Test, Validation Dataset End of explanation """ ## Copy paste from CS231n def train(model, loss_fn, optimizer, num_epochs = 1): for epoch in range(num_epochs): print('Starting epoch %d / %d' % (epoch + 1, num_epochs)) model.train() for t, (x, y) in 
enumerate(loader_train): x_var = Variable(x.type(gpu_dtype)) # I'm not sure why we have extra dim here! y = torch.squeeze(y, 1) y_var = Variable(y.type(gpu_dtype)) scores = model(x_var) loss = loss_fn(scores, y_var) if (t + 1) % print_every == 0: print('t = %d, loss = %.4f' % (t + 1, loss.data[0])) optimizer.zero_grad() loss.backward() optimizer.step() def check_accuracy(model, loader): if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') num_correct = 0 num_samples = 0 model.eval() # Put the model in test mode (the opposite of model.train(), essentially) for x, y in loader: x_var = Variable(x.type(gpu_dtype), volatile=True) scores = model(x_var) _, preds = scores.data.cpu().max(1) num_correct += (preds == y).sum() num_samples += preds.size(0) acc = float(num_correct) / num_samples print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc)) """ Explanation: Trainner code (Copied from CS231n Assignment 2) End of explanation """ model_def = nn.Sequential( nn.Linear(376, 2048), nn.ReLU(inplace=True), nn.Linear(2048, 1024), nn.ReLU(inplace=True), nn.Linear(1024, 256), nn.ReLU(inplace=True), nn.Linear(256, 17) ) model = model_def.type(gpu_dtype) loss_fn = nn.MSELoss().type(gpu_dtype) optimizer = optim.Adam(model.parameters(), lr=1e-4) train(model, loss_fn, optimizer, num_epochs=2) # check_accuracy(model, loader_val) """ Explanation: Creating a Simple Feed forward neural network to Fit(Observation, Action) End of explanation """ env = gym.make(envname) max_steps = env.spec.timestep_limit model.eval() # Put the model in test mode rewards = [] for i in range(5): print('iter', i) obs = env.reset() done = False totalr = 0. steps = 0 while not done: x = torch.from_numpy(obs[None,:]) x_var = Variable(x.type(gpu_dtype)) act = model(x_var) action = model(x_var) obs, r, done, _ = env.step(action) totalr += r steps += 1 # lemme see! 
env.render() if steps % 100 == 0: print("%i/%i"%(steps, max_steps)) if steps >= max_steps: break rewards.append(totalr) print('returns', rewards) print('mean return', np.mean(rewards)) print('std of return', np.std(rewards)) """ Explanation: The moment of truth! Let's test what we've done! End of explanation """ env = gym.make(envname) max_steps = env.spec.timestep_limit model.eval() # Put the model in test mode rewards = [] # Importing VideoRecorder from gym.monitoring import VideoRecorder for i in range(5): print('iter', i) obs = env.reset() done = False totalr = 0. steps = 0 rec = VideoRecorder(env, path=envname + '-bc-iter' + str(i) + '.mp4') rec.capture_frame() while not done: x = torch.from_numpy(obs[None,:]) x_var = Variable(x.type(gpu_dtype)) act = model(x_var) action = model(x_var) obs, r, done, _ = env.step(action) totalr += r steps += 1 # lemme see! env.render() rec.capture_frame() if steps % 100 == 0: print("%i/%i"%(steps, max_steps)) if steps >= max_steps: break rewards.append(totalr) rec.close() print('returns', rewards) print('mean return', np.mean(rewards)) print('std of return', np.std(rewards)) """ Explanation: Let's Record a video and share it with the world End of explanation """ # Code to show Video on Jupter - Copy paste from https://stackoverflow.com/a/18026076 import io import base64 from IPython.display import HTML video = io.open('Humanoid-v1-bc-iter3.mp4', 'r+b').read() encoded = base64.b64encode(video) HTML(data='''<video alt="test" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4" /> </video>'''.format(encoded.decode('ascii'))) """ Explanation: Lemme see End of explanation """ # TODO """ Explanation: Share the above video and tell everyone that you'd successfully cloned a humanoid and It'll kill every single one of us! :P Using DAgger End of explanation """
mne-tools/mne-tools.github.io
0.13/_downloads/plot_artifacts_correction_ssp.ipynb
bsd-3-clause
import numpy as np import mne from mne.datasets import sample from mne.preprocessing import compute_proj_ecg, compute_proj_eog # getting some data ready data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' raw = mne.io.read_raw_fif(raw_fname, preload=True, add_eeg_ref=False) raw.set_eeg_reference() raw.pick_types(meg=True, ecg=True, eog=True, stim=True) """ Explanation: Artifact Correction with SSP End of explanation """ projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, average=True) print(projs) ecg_projs = projs[-2:] mne.viz.plot_projs_topomap(ecg_projs) # Now for EOG projs, events = compute_proj_eog(raw, n_grad=1, n_mag=1, average=True) print(projs) eog_projs = projs[-2:] mne.viz.plot_projs_topomap(eog_projs) """ Explanation: Compute SSP projections End of explanation """ raw.info['projs'] += eog_projs + ecg_projs """ Explanation: Apply SSP projections MNE is handling projections at the level of the info, so to register them populate the list that you find in the 'proj' field End of explanation """ events = mne.find_events(raw, stim_channel='STI 014') reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) # this can be highly data dependent event_id = {'auditory/left': 1} epochs_no_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=False, baseline=(None, 0), reject=reject) epochs_no_proj.average().plot(spatial_colors=True) epochs_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=True, baseline=(None, 0), reject=reject) epochs_proj.average().plot(spatial_colors=True) """ Explanation: Yes this was it. 
Now MNE will apply the projs on demand at any later stage, so watch out for proj parmeters in functions or to it explicitly with the .apply_proj method Demonstrate SSP cleaning on some evoked data End of explanation """ evoked = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj='delayed', baseline=(None, 0), reject=reject).average() # set time instants in seconds (from 50 to 150ms in a step of 10ms) times = np.arange(0.05, 0.15, 0.01) evoked.plot_topomap(times, proj='interactive') """ Explanation: Looks cool right? It is however often not clear how many components you should take and unfortunately this can have bad consequences as can be seen interactively using the delayed SSP mode: End of explanation """
scottlittle/solar-sensors
.ipynb_checkpoints/prune-X-checkpoint.ipynb
apache-2.0
import numpy as np import matplotlib.pyplot as plt from data_helper_functions import * from IPython.display import display pd.options.display.max_columns = 999 %matplotlib inline with np.load('data/X.npz') as data: #old X, don't use, start at "Now with all channels..." X = data['X'] with np.load('data/Y.npz') as data: #old Y, don't use Y = data['Y'] print X.shape print Y.shape """ Explanation: Make X not baD! Load X and Y from getting satellite channels 1 and 6 data, see satellite-to-sensor-model.ipynb for more info. End of explanation """ from sklearn.ensemble import RandomForestRegressor from sklearn.cross_validation import train_test_split rfr = RandomForestRegressor(oob_score=True) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 14) rfr.fit(X_train,Y_train) rfr.score(X_test,Y_test) Y_pred = rfr.predict(X_test) from random import randint val = randint(0,508) print Y_pred[val] print Y_test[val] """ Explanation: Random Forest seems to be giving the best results, so we'll stick with that for now End of explanation """ mask = [] for i,row in enumerate(Y): if row[0] == 0: mask.append(False) else: mask.append(True) mask = np.array(mask) X_reduced = X[mask] Y_reduced = Y[mask][:,-7::] print X_reduced.shape print Y_reduced.shape rfr = RandomForestRegressor(oob_score=True) X_train, X_test, Y_train, Y_test = train_test_split(X_reduced, Y_reduced, test_size = 0.3, random_state = 14) rfr.fit(X_train,Y_train) print rfr.score(X_test,Y_test) print rfr.oob_score_ Y_pred = rfr.predict(X_test) from random import randint val = randint(0,Y_pred.shape[0]) print Y_pred[val] print Y_test[val] """ Explanation: Maybe I should only use the AOD values since the sensor values are sporadic and don't add as much to the data anyway. Also, I could remove the rows were the sensor readings are zero. I could look through the Y rows and make a mask to apply to both X and Y rows. 
End of explanation """ from sklearn.linear_model import Ridge ridge = Ridge(solver = 'auto') X_train, X_test, Y_train, Y_test = train_test_split(X_reduced, Y_reduced, test_size = 0.5, random_state = 14) ridge.fit(X_train,Y_train) ridge.score(X_test,Y_test) Y_pred = ridge.predict(X_test) from random import randint val = randint(0,Y_pred.shape[0]-1) print Y_pred[val] print Y_test[val] """ Explanation: Let's try Ridge again. End of explanation """ plt.plot(Y_pred,'go'); """ Explanation: Not too good. Or at least not an improvement over itself even. I may have to do some feature engineering for the images. Averaging, filtering or both. I wonder if I do some ratios of channels? Sort of like the "greenness" parameter that farmers use. Also, maybe +/- 3 hours is too long for searching for a satellite image. (Reduce to +/- 1 hour?) End of explanation """ val = randint(0,2301) _ = plt.hist(X_reduced[:,0:1972][val], alpha = 0.5, normed=True, bins=25, label='visible',range=(0,25000)) #visible light histogram _ = plt.hist(X_reduced[:,1973:2476][val], alpha = 0.5, normed=True, bins=25, label='IR',range=(0,25000)) plt.legend(loc='upper right'); X_hist = [] for i in xrange(X_reduced.shape[0]): hist1, _ = np.histogram(X_reduced[:,0:1972][i], density=True, bins=25, range=(0,25000)) #vis hist2, _ = np.histogram(X_reduced[:,1973:2476][i], density=True, bins=25, range=(0,25000)) #IR X_hist.append(np.hstack((hist1,hist2))) X_hist = np.array(X_hist) """ Explanation: So, from the plot above, there are values above 0, even though my Y_train and Y_test are all below zero. 
End of explanation """ rfr = RandomForestRegressor(oob_score=True) X_train, X_test, Y_train, Y_test = train_test_split(X_hist, Y_reduced, test_size = 0.3, random_state = 14) rfr.fit(X_train,Y_train) print rfr.score(X_test,Y_test) print rfr.oob_score_ from random import randint val = randint(0,Y_test.shape[0]) print Y_pred[val] print Y_test[val] plt.plot(Y_pred,'go'); """ Explanation: Try random forest with new X_hist: End of explanation """ X_hist = [] for i in xrange(X_reduced.shape[0]): hist1, _ = np.histogram(X_reduced[:,0:1972][i], density=True, bins=30, range=(0,25000)) #vis hist2, _ = np.histogram(X_reduced[:,1973:2476][i], density=True, bins=30, range=(0,25000)) #IR X_hist.append(np.hstack((hist1,hist2))) X_hist = np.array(X_hist) from sklearn.ensemble import ExtraTreesRegressor etr = ExtraTreesRegressor(oob_score=True, bootstrap=True, n_jobs=-1, n_estimators=100) #njobs uses all cores! X_train, X_test, Y_train, Y_test = train_test_split(X_hist, Y_reduced, test_size = 0.3, random_state = 14) etr.fit(X_train,Y_train) print etr.score(X_test,Y_test) print etr.oob_score_ from random import randint val = randint(0,Y_test.shape[0]) print Y_pred[val] print Y_test[val] plt.plot(Y_pred,'go'); """ Explanation: The score has improved (0.2) but the most remarkable is that all the predicted values are now below 0, as they should be. Let's try something Extra Trees! 
End of explanation """ with np.load('data/X_all_channels.npz') as data: X = data['X'] with np.load('data/Y_all_channels.npz') as data: Y = data['Y'] print X.shape print Y.shape mask = [] for i,row in enumerate(Y): if row[0] == 0: mask.append(False) else: mask.append(True) mask = np.array(mask) X_reduced = X[mask] Y_reduced = Y[mask][:,-7::] print X_reduced.shape print Y_reduced.shape val = randint(0,2301) _ = plt.hist(X_reduced[:,0:1972][val], alpha = 0.2, normed=True, bins=30, label='CH1',range=(0,25000)) #visible light histogram _ = plt.hist(X_reduced[:,1973:2476][val], alpha = 0.2, normed=True, bins=30, label='CH2',range=(0,25000)) _ = plt.hist(X_reduced[:,2477:2980][val], alpha = 0.2, normed=True, bins=30, label='CH3',range=(0,25000)) _ = plt.hist(X_reduced[:,2981:3484][val], alpha = 0.2, normed=True, bins=30, label='CH4',range=(0,25000)) _ = plt.hist(X_reduced[:,3484:3988][val], alpha = 0.2, normed=True, bins=30, label='CH6',range=(0,25000)) plt.legend(loc='upper right'); X_hist = [] bins = 20 for i in xrange(X_reduced.shape[0]): hist1, _ = np.histogram(X_reduced[:,0:1972][i], density=True, bins=bins, range=(0,25000)) hist2, _ = np.histogram(X_reduced[:,1973:2476][i], density=True, bins=bins, range=(0,25000)) hist3, _ = np.histogram(X_reduced[:,2477:2980][i], density=True, bins=bins, range=(0,25000)) hist4, _ = np.histogram(X_reduced[:,2981:3484][i], density=True, bins=bins, range=(0,25000)) hist5, _ = np.histogram(X_reduced[:,3484:3988][i], density=True, bins=bins, range=(0,25000)) X_hist.append(np.hstack((hist1,hist2,hist3,hist4,hist5))) X_hist = np.array(X_hist) from sklearn.ensemble import ExtraTreesRegressor etr = ExtraTreesRegressor( oob_score=True, bootstrap=True, n_jobs=-1, n_estimators=1000 ) #nj_obs uses all cores! 
X_train, X_test, Y_train, Y_test = train_test_split(X_hist, Y_reduced, test_size = 0.25, random_state = 12) etr.fit(X_train,Y_train) print etr.score(X_test,Y_test) print etr.oob_score_ from random import randint val = randint(0,Y_test.shape[0]) print Y_pred[val] print Y_test[val] plt.plot(Y_pred,'go'); """ Explanation: Not much better than random forest regressor, but does offer multicore support which speeds things up significantly. Edit: actually, this is better when increasing the number of n_estimators. Now with all channels! (Skip to "try it again..." for model used) End of explanation """ from scipy.ndimage import zoom from __future__ import division X_reduced_ratio_1_2 = [] for i in xrange(X_reduced.shape[0]): CH1 = zoom(X_reduced[:,0:1972][i].reshape((29,68)),zoom=(0.48, 0.53), order=5) CH2 = X_reduced[:,1972:2476][i].reshape((14,36)) X_reduced_ratio_1_2.append(25000* CH2 / (CH1 + CH2) ) X_reduced_ratio_1_2 = np.array(X_reduced_ratio_1_2) X_reduced_ratio_1_6 = [] for i in xrange(X_reduced.shape[0]): CH1 = zoom(X_reduced[:,0:1972][i].reshape((29,68)),zoom=(0.48, 0.53), order=5) CH6 = X_reduced[:,3484:3988][i].reshape((14,36)) X_reduced_ratio_1_6.append(25000* CH6 / (CH1 + CH6) ) X_reduced_ratio_1_6 = np.array(X_reduced_ratio_1_6) X_reduced_ratio_2_6 = [] for i in xrange(X_reduced.shape[0]): CH2 = X_reduced[:,1972:2476][i].reshape((14,36)) CH6 = X_reduced[:,3484:3988][i].reshape((14,36)) X_reduced_ratio_2_6.append(25000* CH6 / (CH2 + CH6) ) X_reduced_ratio_2_6 = np.array(X_reduced_ratio_2_6) val = randint(0,2301) _ = plt.hist(X_reduced[:,0:1972][val], alpha = 0.2, normed=True, bins=30, label='CH1',range=(0,25000)) #visible light histogram _ = plt.hist(X_reduced[:,1973:2476][val], alpha = 0.2, normed=True, bins=30, label='CH2',range=(0,25000)) _ = plt.hist(X_reduced[:,2477:2980][val], alpha = 0.2, normed=True, bins=30, label='CH3',range=(0,25000)) _ = plt.hist(X_reduced[:,2981:3484][val], alpha = 0.2, normed=True, bins=30, label='CH4',range=(0,25000)) _ = 
plt.hist(X_reduced[:,3484:3988][val], alpha = 0.2, normed=True, bins=30, label='CH6',range=(0,25000)) _ = plt.hist(np.ravel(X_reduced_ratio_1_2[val]), alpha = 0.2, normed=True, bins=30, label='CH2/CH1',range=(0,25000)) _ = plt.hist(np.ravel(X_reduced_ratio_1_6[val]), alpha = 0.2, normed=True, bins=30, label='CH6/CH1',range=(0,25000)) _ = plt.hist(np.ravel(X_reduced_ratio_2_6[val]), alpha = 0.2, normed=True, bins=30, label='CH6/CH2',range=(0,25000)) plt.legend(loc='upper right'); X_hist = [] bins = 30 for i in xrange(X_reduced.shape[0]): hist1, _ = np.histogram(X_reduced[:,0:1972][i], density=True, bins=bins, range=(0,25000)) hist2, _ = np.histogram(X_reduced[:,1972:2476][i], density=True, bins=bins, range=(0,25000)) hist3, _ = np.histogram(X_reduced[:,2476:2980][i], density=True, bins=bins, range=(0,25000)) hist4, _ = np.histogram(X_reduced[:,2980:3484][i], density=True, bins=bins, range=(0,25000)) hist5, _ = np.histogram(X_reduced[:,3484:3988][i], density=True, bins=bins, range=(0,25000)) hist6, _ = np.histogram(np.ravel(X_reduced_ratio_1_2[i]), density=True, bins=bins, range=(0,25000)) hist7, _ = np.histogram(np.ravel(X_reduced_ratio_1_6[i]), density=True, bins=bins, range=(0,25000)) hist8, _ = np.histogram(np.ravel(X_reduced_ratio_2_6[i]), density=True, bins=bins, range=(0,25000)) X_hist.append(np.hstack((hist1,hist2,hist3,hist4,hist5,hist6,hist7,hist8))) X_hist = np.array(X_hist) from sklearn.ensemble import ExtraTreesRegressor etr = ExtraTreesRegressor(oob_score=True, bootstrap=True, n_jobs=-1, n_estimators=1000) #nj_obs uses all cores! X_train, X_test, Y_train, Y_test = train_test_split(X_hist, Y_reduced, test_size = 0.2, random_state = 12) etr.fit(X_train,Y_train) print etr.score(X_test,Y_test) print etr.oob_score_ from random import randint val = randint(0,Y_test.shape[0]) print Y_pred[val] print Y_test[val] """ Explanation: Wow, even better, but is it good enough? 
If I include ratios between channels, spatial and other information will be implicitly collected. Downsample CH1 and divide by CH6: End of explanation """ from scipy.ndimage import zoom from __future__ import division from random import randint X_ratio_1_2 = [] for i in xrange(X.shape[0]): CH1 = zoom(X[:,0:1972][i].reshape((29,68)),zoom=(0.48, 0.53), order=5) CH2 = X[:,1972:2476][i].reshape((14,36)) X_ratio_1_2.append(25000* (CH2) / (CH1 + CH2+1.0) ) X_ratio_1_2 = np.array(X_ratio_1_2) X_ratio_1_6 = [] for i in xrange(X.shape[0]): CH1 = zoom(X[:,0:1972][i].reshape((29,68)),zoom=(0.48, 0.53), order=5) CH6 = X[:,3484:3988][i].reshape((14,36)) X_ratio_1_6.append(25000* CH6 / (CH1 + CH6 + 0.1) ) X_ratio_1_6 = np.array(X_ratio_1_6) X_ratio_2_6 = [] for i in xrange(X.shape[0]): CH2 = X[:,1972:2476][i].reshape((14,36)) CH6 = X[:,3484:3988][i].reshape((14,36)) X_ratio_2_6.append(25000* CH6 / (CH2 + CH6 + 0.1) ) X_ratio_2_6 = np.array(X_ratio_2_6) val = randint(0,2301) _ = plt.hist(X[:,0:1972][val], alpha = 0.2, normed=True, bins=30, label='CH1',range=(0,25000)) #visible light histogram _ = plt.hist(X[:,1973:2476][val], alpha = 0.2, normed=True, bins=30, label='CH2',range=(0,25000)) _ = plt.hist(X[:,2477:2980][val], alpha = 0.2, normed=True, bins=30, label='CH3',range=(0,25000)) _ = plt.hist(X[:,2981:3484][val], alpha = 0.2, normed=True, bins=30, label='CH4',range=(0,25000)) _ = plt.hist(X[:,3484:3988][val], alpha = 0.2, normed=True, bins=30, label='CH6',range=(0,25000)) _ = plt.hist(np.ravel(X_ratio_1_2[val]), alpha = 0.2, normed=True, bins=30, label='CH2/CH1',range=(0,25000)) _ = plt.hist(np.ravel(X_ratio_1_6[val]), alpha = 0.2, normed=True, bins=30, label='CH6/CH1',range=(0,25000)) _ = plt.hist(np.ravel(X_ratio_2_6[val]), alpha = 0.2, normed=True, bins=30, label='CH6/CH2',range=(0,25000)) plt.legend(loc='upper right'); import pandas as pd X_hist = [] bins = 25 for i in xrange(X.shape[0]): myval1 = pd.DataFrame(np.ravel(X_ratio_1_2[i])).fillna(np.mean).values.flatten(); 
myval2 = pd.DataFrame(np.ravel(X_ratio_1_6[i])).fillna(np.mean).values.flatten(); myval3 = pd.DataFrame(np.ravel(X_ratio_2_6[i])).fillna(np.mean).values.flatten(); hist1, _ = np.histogram(X[:,0:1972][i], density=True, bins=bins, range=(0,25000)) hist2, _ = np.histogram(X[:,1972:2476][i], density=True, bins=bins, range=(0,25000)) hist3, _ = np.histogram(X[:,2476:2980][i], density=True, bins=bins, range=(0,25000)) hist4, _ = np.histogram(X[:,2980:3484][i], density=True, bins=bins, range=(0,25000)) hist5, _ = np.histogram(X[:,3484:3988][i], density=True, bins=bins, range=(0,25000)) hist6, _ = np.histogram( myval1 , density=True, bins=bins, range=(0,25000) ) hist7, _ = np.histogram( myval2 , density=True, bins=bins, range=(0,25000)) hist8, _ = np.histogram( myval3, density=True, bins=bins, range=(0,25000)) X_hist.append(np.hstack((hist1,hist2,hist3,hist4,hist5,hist6,hist7,hist8))) X_hist = np.array(X_hist) from sklearn.ensemble import ExtraTreesRegressor from sklearn.cross_validation import train_test_split etr = ExtraTreesRegressor(oob_score=True, bootstrap=True, n_jobs=-1, n_estimators=500) #nj_obs uses all cores! X_train, X_test, Y_train, Y_test = train_test_split(X_hist, Y, test_size = 0.2, random_state = 12) etr.fit(X_train,Y_train) #pickle it! from sklearn.externals import joblib joblib.dump(etr, 'webapp/solarApp/models/sat-to-sensor-model/sat-to-sensor-model.pkl') sat_to_sensor_model = joblib.load('webapp/solarApp/models/sat-to-sensor-model/sat-to-sensor-model.pkl') print etr.score(X_test,Y_test) print etr.oob_score_ Y_pred = etr.predict(X_test) from random import randint val = randint(0,Y_test.shape[0]) print Y_pred[val] print Y_test[val] """ Explanation: Try it all again with just X, not X_reduced? 
End of explanation """ from sklearn.externals import joblib etr2 = joblib.load('data/sensor-to-power-model/sensor-to-power-model.pkl') etr2.predict(Y).shape #power predictions with np.load('data/y.npz') as data: y = data['y'] y_pred2 = etr2.predict(Y_pred) #the predicted y_pred2.shape y.shape """ Explanation: After pickling the sensor to power model, use it to predict power from satellite data! End of explanation """ with np.load('data/good_times.npz') as data: good_times = data['good_times'] print good_times.shape print etr2.predict(Y).shape """ Explanation: Seems like dimensions are working out. Probably will need to pickle the sat to sensor model too and just feed in data start to finish and see how it does. End of explanation """ from datetime import datetime, timedelta, time pvoutput_filefolder = 'data/pvoutput/pvoutput6months/' datetime_index = 1747 print 'Predicted power for ' + str(good_times[datetime_index]) + \ ' is ' + str(etr2.predict(Y[datetime_index])[0]) + ' W' #pvoutput data desired_datetime = good_times[datetime_index] desired_date = (desired_datetime - timedelta(hours=6)).date() #make sure correct date desired_date = datetime.combine(desired_date, time.min) #get into datetime format pvoutput_filename = find_file_from_date(desired_date, pvoutput_filefolder) df_pvoutput = return_pvoutput_data(pvoutput_filename, pvoutput_filefolder) try: print "True power: " + df_pvoutput[df_pvoutput.index == desired_datetime].values[0][0].astype(str) except: print "0" from datetime import datetime, timedelta, time pvoutput_filefolder = 'data/pvoutput/pvoutput6months/' predicted_powers = [] true_powers = [] for datetime_index in range(len(good_times)): predicted_powers.append(str(etr2.predict(Y[datetime_index])[0]) ) #pvoutput data desired_datetime = good_times[datetime_index] desired_date = (desired_datetime - timedelta(hours=6)).date() #make sure correct date desired_date = datetime.combine(desired_date, time.min) #get into datetime format pvoutput_filename = 
find_file_from_date(desired_date, pvoutput_filefolder) df_pvoutput = return_pvoutput_data(pvoutput_filename, pvoutput_filefolder) try: true_powers.append(df_pvoutput[df_pvoutput.index == desired_datetime].values[0][0].astype(str)) except: true_powers.append(0) true_powers = np.array(true_powers).astype(float) predicted_powers = np.array(predicted_powers).astype(float) error_add = [] for datetime_index in range(len(good_times)): error_add.append( np.abs(true_powers[datetime_index]-predicted_powers[datetime_index]) \ / true_powers[datetime_index] ) error_sum = 0 for error in error_add: if (np.isnan(error) or np.isinf(error)): pass else: error_sum += error error_sum/len(good_times) x = np.linspace(0,13000,100) y = 1.0 * x plt.figure(figsize=(8,8)) plt.plot(true_powers,predicted_powers,'go', alpha=0.25) plt.plot(x,y,'r',linewidth = 5, alpha = 0.5) plt.xlabel('True Power (W)', fontsize = 16) plt.xticks(size=16) plt.ylabel('Predicted Power (W)', fontsize = 16) plt.yticks(size=16) """ Explanation: Nice, now we have predictions for every datetime! Now we have to check this. End of explanation """ np.savez_compressed('data/true_powers.npz',true_powers=true_powers) #save np.savez_compressed('data/predicted_powers.npz',predicted_powers=predicted_powers) #save """ Explanation: Tres beau, call it a day! That's a pretty graph! End of explanation """ import numpy as np with np.load('data/true_powers.npz') as data: true_powers = data['true_powers'] with np.load('data/predicted_powers.npz') as data: predicted_powers = data['predicted_powers'] import matplotlib.pyplot as plt %matplotlib inline import statsmodels.api as sm # Fit and summarize OLS model mod = sm.OLS(predicted_powers,true_powers) res = mod.fit() print res.summary() """ Explanation: I shut down the nb, so restart! End of explanation """
tpin3694/tpin3694.github.io
machine-learning/load_images.ipynb
mit
# Load library import cv2 import numpy as np from matplotlib import pyplot as plt """ Explanation: Title: Load Images Slug: load_images Summary: How to load images using OpenCV in Python. Date: 2017-09-11 12:00 Category: Machine Learning Tags: Preprocessing Images Authors: Chris Albon Preliminaries End of explanation """ # Load image as grayscale image = cv2.imread('images/plane.jpg', cv2.IMREAD_GRAYSCALE) # Show image plt.imshow(image, cmap='gray'), plt.axis("off") plt.show() """ Explanation: Load Image As Greyscale End of explanation """ # Load image in color image_bgr = cv2.imread('images/plane.jpg', cv2.IMREAD_COLOR) # Convert to RGB image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB) # Show image plt.imshow(image_rgb), plt.axis("off") plt.show() """ Explanation: Load Image As RGB End of explanation """ # Show image data image # Show dimensions image.shape """ Explanation: View Image Data End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/thu/cmip6/models/sandbox-1/landice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'thu', 'sandbox-1', 'landice') """ Explanation: ES-DOC CMIP6 Model Properties - Landice MIP Era: CMIP6 Institute: THU Source ID: SANDBOX-1 Topic: Landice Sub-Topics: Glaciers, Ice. Properties: 30 (21 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:40 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Grid 4. Glaciers 5. Ice 6. Ice --&gt; Mass Balance 7. Ice --&gt; Mass Balance --&gt; Basal 8. Ice --&gt; Mass Balance --&gt; Frontal 9. Ice --&gt; Dynamics 1. Key Properties Land ice key properties 1.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of land surface model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. 
Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of land surface model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.ice_albedo') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "prescribed" # "function of ice age" # "function of ice density" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Ice Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Specify how ice albedo is modelled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Atmospheric Coupling Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which variables are passed between the atmosphere and ice (e.g. orography, ice mass) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.5. Oceanic Coupling Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which variables are passed between the ocean and ice End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "ice velocity" # "ice thickness" # "ice temperature" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.6. 
Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which variables are prognostically calculated in the ice model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of land ice code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Grid Land ice grid 3.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the grid in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.grid.adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is an adative grid being used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.base_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Base Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The base resolution (in metres), before any adaption End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.resolution_limit') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Resolution Limit Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If an adaptive grid is being used, what is the limit of the resolution (in metres) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.projection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.5. Projection Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The projection of the land ice grid (e.g. albers_equal_area) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Glaciers Land ice glaciers 4.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of glaciers in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.glaciers.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of glaciers, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 4.3. Dynamic Areal Extent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does the model include a dynamic glacial extent? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Ice Ice sheet and ice shelf 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the ice sheet and ice shelf in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.grounding_line_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "grounding line prescribed" # "flux prescribed (Schoof)" # "fixed grid size" # "moving grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.2. Grounding Line Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.ice_sheet') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.3. 
Ice Sheet Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are ice sheets simulated? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.ice_shelf') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Ice Shelf Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are ice shelves simulated? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Ice --&gt; Mass Balance Description of the surface mass balance treatment 6.1. Surface Mass Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how and where the surface mass balance (SMB) is calulated. Include the temporal coupling frequeny from the atmosphere, whether or not a seperate SMB model is used, and if so details of this model, such as its resolution End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Ice --&gt; Mass Balance --&gt; Basal Description of basal melting 7.1. Bedrock Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of basal melting over bedrock End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. 
Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of basal melting over the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Ice --&gt; Mass Balance --&gt; Frontal Description of claving/melting from the ice shelf front 8.1. Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of calving from the front of the ice shelf End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Melting Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of melting from the front of the ice shelf End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Ice --&gt; Dynamics ** 9.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description if ice sheet and ice shelf dynamics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.approximation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SIA" # "SAA" # "full stokes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Approximation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Approximation type used in modelling ice dynamics End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 9.3. Adaptive Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there an adaptive time scheme for the ice scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep. End of explanation """
eduardojvieira/Curso-Python-MEC-UCV
3-Scipy.ipynb
mit
# ¿qué hace esta línea? La respuesta mas adelante %matplotlib inline import matplotlib.pyplot as plt from IPython.display import Image """ Explanation: <table width="100%" border="0"> <tr> <td><img src="./images/ing.png" alt="" align="left" /></td> <td><img src="./images/ucv.png" alt="" align="center" height="100" width="100" /></td> <td><img src="./images/mec.png" alt="" align="right"/></td> </tr> </table> <br> <h1 style="text-align: center;"> Curso de Python para Ingenieros Mecánicos </h1> <h3 style="text-align: center;"> Por: Eduardo Vieira</h3> <br> <br> <h1 style="text-align: center;"> SciPy - Librería de algorítmos científicos para Python </h1> <br> End of explanation """ import scipy as sp import numpy as np """ Explanation: Introducción El paquete SciPy agrega características a los algorítmos de bajo nivel de NumPy para arreglos multidimensionales, y provee un gran número de algorítmos de alto nivel de uso científico. Algunos de los tópicos que cubre SciPy son: Funciones especiales (scipy.special) Integración (scipy.integrate) Optimización (scipy.optimize) Interpolación (scipy.interpolate) Transformada de Fourier (scipy.fftpack) Procesamiento de señales (scipy.signal) Álgebra lineal (scipy.linalg) Problemas de Eigenvalores de matrices dispersas (scipy.sparse) Estadística (scipy.stats) Procesamiento de imágenes multi-dimensional (scipy.ndimage) Entrada/Salida desde/hacia archivos (scipy.io) Cada uno de estos submódulos provee un muchas funciones y clases que pueden ser usadas para resolver problemas en sus respectivos tópicos. En esta clases veremos cómo usar algunos de estos subpaquetes. Para acceder al paquete SciPy en un programa Python, comenzamos importando todo desde el módulo scipy. End of explanation """ # # El módulo scipy.special incluye muchas funciones de Bessel # Aquí usaremos las funciones jn e yn, que son las funciones de Bessel # de primera y segunda especie, y de orden real. 
Incluimos también las # funciones jn_zeros e yn_zeros que entregan los ceros de las # funciones jn e yn. # from scipy.special import jn, yn, jn_zeros, yn_zeros n = 0 # orden de la función x = 0.0 # Función de Bessel de primera especie print("J_%d(%f) = %f" % (n, x, jn(n, x))) x = 1.0 # Función de Bessel de segunda especie print("Y_%d(%f) = %f" % (n, x, yn(n, x))) x = np.linspace(0, 10, 100) fig, ax = plt.subplots() for n in range(4): ax.plot(x, jn(n, x), label=r"$J_%d(x)$" % n) ax.legend() # ceros de las funciones de Bessel n = 0 # orden m = 4 # número de raices a calcular jn_zeros(n, m) """ Explanation: Funciones Especiales En muchos problemas de física computacional son importantes varias funciones matemáticas especiales. SciPy provee implementaciones de muchas de estas funciones especiales. Para más detalles, ver la lista de funciones en la documentación http://docs.scipy.org/doc/scipy/reference/special.html#module-scipy.special. Para demostrar el uso típico de estas funciones especiales nos concentraremos en las funciones de Bessel: End of explanation """ from scipy.integrate import quad, dblquad, tplquad """ Explanation: Integración Integración numérica: cuadraturas La evaluación numérica de una función, del tipo $\displaystyle \int_a^b f(x) dx$ es llamada cuadratura numérica, o simplemente cuadratura. SciPy suministra funciones para diferentes tipos de cuadraturas, por ejemplo las funciones quad, dblquad y tplquad para calcular integrales simples, dobles o triples, respectivamente. End of explanation """ # define una función simple para ser integrada def f(x): return x x_inf = 0 # el límite inferior de x x_sup = 1 # el límite superior de x val, errabs = quad(f, x_inf, x_sup) print("valor de la integral =", val, ", error absoluto =", errabs) """ Explanation: Las función quad acepta una gran cantidad de argumentos opcionales, que pueden ser usados para ajustar detalles del comportamiento de la función (ingrese help(quad) para más detalles). 
El uso básico es el siguiente: End of explanation """ def integrando(x, n): """ función de Bessel de primera especie y orden n. """ return jn(n, x) x_inf = 0 # el límite inferior de x x_sup = 10 # el límite superior de x val, errabs = quad(integrando, x_inf, x_sup, args=(3,)) # evalua la integral con n=3 print(val, errabs) """ Explanation: Si necesitamos incluir argumento extras en la función integrando podemos usar el argumento args: End of explanation """ val, errabs = quad(lambda x: np.exp(-x ** 2), -np.Inf, np.Inf) # Inf = infinito! print("resultado numérico =", val, errabs) analitico = np.sqrt(np.pi) print("analitico =", analitico) """ Explanation: Para funciones simples podemos usar la función lambda function (función anónima) en lugar de definir explícitamente una función para el integrando: End of explanation """ def integrando(x, y): return np.exp(-x**2-y**2) x_inf = 0 x_sup = 10 y_inf = 0 y_sup = 10 val, errabs = dblquad(integrando, x_inf, x_sup, lambda x : y_inf, lambda x: y_sup) print(val, errabs) """ Explanation: Como se muestra en este ejemplo, podemos usar 'Inf' y '-Inf' como límites de la integral. Integrales de dimensión mayor se evalúan de forma similar: End of explanation """ from scipy.integrate import odeint, ode """ Explanation: Note como requerimos incorporar funciones lambda para los límites de la integración en y, ya que estos límites pueden en general ser funciones de x. Ecuaciones diferenencias ordinarias (EDOs) SciPy provee dos formas diferentes para resolver EDOs: Una API (Interfaz de programación de aplicaciones, del inglés "Application programming interface") basada en la función odeint, y una API orientada al objeto basada en la clases ode. Usualmentey odeint es más simplea de usar, pero la clase ode ofrece niveles de control más finos. Aquí usaremos las funciones odeint. Para mayor información sobre las clases ode, use help(ode). Hace casi todo lo que hace odeint, pero de una forma más orientada al objeto. 
Para usar odeint, primero importelo desde el módulo scipy.integrate: End of explanation """ Image(url='http://upload.wikimedia.org/wikipedia/commons/c/c9/Double-compound-pendulum-dimensioned.svg') """ Explanation: Un sistema de EDOs es usualmente formulado en forma estándar antes de ser resuelto numéricamente. La forma estánder es: $y' = f(y, t)$ donde $y = [y_1(t), y_2(t), ..., y_n(t)]$ y $f$ es una función que determina las derivadas de la función $y_i(t)$. Para resolver la EDO necesitamos conocer la función $f$ y una condición inicial, $y(0)$. Note que EDOs de orden superior siempre pueden ser escritas en esta forma introduciendo nuevas variables para las derivadas intermedias. Una vez definida la función f y el arreglo y_0, podemos usar la función odeint: y_t = odeint(f, y_0, t) donde t es un arreglo con las coordenadas temporales para las que se resolverá el sistema de EDOs. El resultado y_t es un arreglo con una linea para cada punto de tiempo t, y donde cada columna corresponde a una solución y_i(t) para ese tiempo. Veremos cómo implementar f e y_0 en código Python en los siguientes ejemplos. Ejemplo: péndulo doble Consideremos un problema físico: El péndulo doble compuesto, descrito en más detalle aquí (en inglés): http://en.wikipedia.org/wiki/Double_pendulum. 
End of explanation """ g = 9.82 L = 0.5 m = 0.1 def dx(x, t): """ El lado derecho de la EDO del péndulo """ x1, x2, x3, x4 = x[0], x[1], x[2], x[3] dx1 = 6.0/(m*L**2) * (2 * x3 - 3 * np.cos(x1-x2) * x4)/(16 - 9 * np.cos(x1-x2)**2) dx2 = 6.0/(m*L**2) * (8 * x4 - 3 * np.cos(x1-x2) * x3)/(16 - 9 * np.cos(x1-x2)**2) dx3 = -0.5 * m * L**2 * ( dx1 * dx2 * np.sin(x1-x2) + 3 * (g/L) * np.sin(x1)) dx4 = -0.5 * m * L**2 * (-dx1 * dx2 * np.sin(x1-x2) + (g/L) * np.sin(x2)) return [dx1, dx2, dx3, dx4] # define la condición inicial x0 = [np.pi/4, np.pi/2, 0, 0] # tiempos en los que se resolverá la EDO: desde 0 hasta 10 segundos t = np.linspace(0, 10, 250) # resuelve el sistema de EDOs x = odeint(dx, x0, t) # grafica los ángulos como funciones del tiempo fig, axes = plt.subplots(1,2, figsize=(12,4)) axes[0].plot(t, x[:, 0], 'r', label="theta1") axes[0].plot(t, x[:, 1], 'b', label="theta2") x1 = + L * np.sin(x[:, 0]) y1 = - L * np.cos(x[:, 0]) x2 = x1 + L * np.sin(x[:, 1]) y2 = y1 - L * np.cos(x[:, 1]) axes[1].plot(x1, y1, 'r', label="pendulo1") axes[1].plot(x2, y2, 'b', label="pendulo2") axes[1].set_ylim([-1, 0]) axes[1].set_xlim([1, -1]); """ Explanation: Las ecuaciones hamiltonianas de movimiento para el péndulo son dadas (ver página de wikipedia): ${\dot \theta_1} = \frac{6}{m\ell^2} \frac{ 2 p_{\theta_1} - 3 \cos(\theta_1-\theta_2) p_{\theta_2}}{16 - 9 \cos^2(\theta_1-\theta_2)}$ ${\dot \theta_2} = \frac{6}{m\ell^2} \frac{ 8 p_{\theta_2} - 3 \cos(\theta_1-\theta_2) p_{\theta_1}}{16 - 9 \cos^2(\theta_1-\theta_2)}.$ ${\dot p_{\theta_1}} = -\frac{1}{2} m \ell^2 \left [ {\dot \theta_1} {\dot \theta_2} \sin (\theta_1-\theta_2) + 3 \frac{g}{\ell} \sin \theta_1 \right ]$ ${\dot p_{\theta_2}} = -\frac{1}{2} m \ell^2 \left [ -{\dot \theta_1} {\dot \theta_2} \sin (\theta_1-\theta_2) + \frac{g}{\ell} \sin \theta_2 \right]$ Para que el código Python sea simple de leer, introduzcamos nuevos nombres de variables y la notación vectorial: $x = [\theta_1, \theta_2, p_{\theta_1}, 
p_{\theta_2}]$ ${\dot x_1} = \frac{6}{m\ell^2} \frac{ 2 x_3 - 3 \cos(x_1-x_2) x_4}{16 - 9 \cos^2(x_1-x_2)}$ ${\dot x_2} = \frac{6}{m\ell^2} \frac{ 8 x_4 - 3 \cos(x_1-x_2) x_3}{16 - 9 \cos^2(x_1-x_2)}$ ${\dot x_3} = -\frac{1}{2} m \ell^2 \left [ {\dot x_1} {\dot x_2} \sin (x_1-x_2) + 3 \frac{g}{\ell} \sin x_1 \right ]$ ${\dot x_4} = -\frac{1}{2} m \ell^2 \left [ -{\dot x_1} {\dot x_2} \sin (x_1-x_2) + \frac{g}{\ell} \sin x_2 \right]$ End of explanation """ from IPython.display import clear_output import time fig, ax = plt.subplots(figsize=(4,4)) for t_idx, tt in enumerate(t[:200]): x1 = + L * np.sin(x[t_idx, 0]) y1 = - L * np.cos(x[t_idx, 0]) x2 = x1 + L * np.sin(x[t_idx, 1]) y2 = y1 - L * np.cos(x[t_idx, 1]) ax.cla() ax.plot([0, x1], [0, y1], 'r.-') ax.plot([x1, x2], [y1, y2], 'b.-') ax.set_ylim([-1.5, 0.5]) ax.set_xlim([1, -1]) display(fig) clear_output() # comentar si no se observa bien time.sleep(1) """ Explanation: Animación simple del movimiento del péndulo. Veremos cómo crear mejores animaciones en la clase 4. 
End of explanation """ def dy(y, t, zeta, w0): """ El lado derecho de la EDO del oscilador amortiguado """ x, p = y[0], y[1] dx = p dp = -2 * zeta * w0 * p - w0**2 * x return [dx, dp] # condición inicial: y0 = [1.0, 0.0] # tiempos en los que se resolvera la EDO t = np.linspace(0, 10, 1000) w0 = 2*np.pi*1.0 # resuelve el sistema de EDOs para tres valores diferentes del factor de amortiguamiento y1 = odeint(dy, y0, t, args=(0.0, w0)) # no amortiguado y2 = odeint(dy, y0, t, args=(0.2, w0)) # subamortiguado y3 = odeint(dy, y0, t, args=(1.0, w0)) # amortiguado crítico y4 = odeint(dy, y0, t, args=(5.0, w0)) # sobreamortiguado fig, ax = plt.subplots() ax.plot(t, y1[:,0], 'k', label="no amortiguado", linewidth=0.25) ax.plot(t, y2[:,0], 'r', label="subamortiguado") ax.plot(t, y3[:,0], 'b', label=u"amortiguado crítico") ax.plot(t, y4[:,0], 'g', label="sobreamortiguado") ax.legend(); """ Explanation: Ejemplo: Oscilador armónico amortiguado Problemas de EDO son importantes en Física Computacional, de modo que veremos un ejemplo adicional: el oscilador armónico amortiguado. Este problema está bastante bien descrito en wikipedia (en inglés): http://en.wikipedia.org/wiki/Damping. La ecuación de movimiento para el oscilador amortiguado es: $\displaystyle \frac{\mathrm{d}^2x}{\mathrm{d}t^2} + 2\zeta\omega_0\frac{\mathrm{d}x}{\mathrm{d}t} + \omega^2_0 x = 0$ donde $x$ es la posición del oscilador, $\omega_0$ la frecuencia, y $\zeta$ es el factor de amortiguamiento. Para escribir esta EDO de segundo orden en la forma estándar, introducimos $p = \frac{\mathrm{d}x}{\mathrm{d}t}$: $\displaystyle \frac{\mathrm{d}p}{\mathrm{d}t} = - 2\zeta\omega_0 p - \omega^2_0 x$ $\displaystyle \frac{\mathrm{d}x}{\mathrm{d}t} = p$ En la implementación de este ejemplo agregaremos algunos argumentos extras a la función del lado derecho de la EDO, en lugar de usar variables glovales como en el ejemplo anterior. 
Como consecuencia de los argumentos extra, necesitamos pasar un argumento clave args a la función odeint: End of explanation """ from scipy.fftpack import * from numpy.fft import * """ Explanation: Transformada de Fourier Las transformadas de Fourier son unas de las herramientas universales de la Computación Científica, que aparece una y otra vez en distintos contextos. SciPy suministra funciones para acceder ala clásica librería FFTPACK de NetLib, que es una librería eficiente y muy bien testeada para FFT, escrita en FORTRAN. La API de SciPy contiene algunas funciones adicionales, pero en general la API está íntimamente relacionada con la librería original en FORTRAN. Para usar el módulo fftpack en un programa Python, debe incluir End of explanation """ N = len(t) dt = t[1]-t[0] # calcula la transformada rápida de Fourier # y2 es la solución del oscilador subamortiguado del ejemplo anterior F = fft(y2[:,0]) # calcula las frecuencias para las componentes en F w = fftfreq(N, dt) fig, ax = plt.subplots(figsize=(9,3)) ax.plot(w, abs(F)); """ Explanation: Para demostrar cómo calcular una transformada rápida de Fourier con SciPy, consideremos la FFT de la solución del oscilador armónico amortiguado del ejemplo anterior: End of explanation """ indices = np.where(w > 0) # selecciona sólo los índices de elementos que corresponden a frecuencias positivas w_pos = w[indices] F_pos = F[indices] fig, ax = plt.subplots(figsize=(9,3)) ax.plot(w_pos, abs(F_pos)) ax.set_xlim(0, 5); """ Explanation: Como la señal es real, el espectro es simétrico. Por eso, sólo necesitamos graficar la parte que corresponde a las frecuencias positivas. 
Para extraer esa parte de w y F podemos usar algunos de los trucos con índices para arreglos NumPy que vimos en la clase 2: End of explanation """ A = np.array([[8,2,5], [1,5,2], [7,8,9]]) b = np.array([1,2,3]) x = sp.linalg.solve(A, b) x # verificamos la solución (A @ x) - b """ Explanation: Como era de esperar, vemos un peak en el espectro centrado alrededor de 1, que es la frecuencia que usamos para el oscilador. Álgebra lineal El módulo de álgebra lineal contiene muchas funciones relacionadas con matrices, incluyendo resolución de ecuaciones lineales, cálculo de valores propios, funciones de matrices (por ejemplo, para exponenciación matricial), varias decomposiciones diferentes (SVD, LU, cholesky), etc. Una documentación detallada está disponible aquí: http://docs.scipy.org/doc/scipy/reference/linalg.html Veremos cómo usar algunas de estas funciones: Sistemas de ecuaciones lineales Los sistemas de ecuaciones lineales de la forma $A x = b$ donde $A$ es una matriz y $x,b$ son vectores, pueden ser resueltos del modo siguiente: End of explanation """ A = np.random.rand(3,3) B = np.random.rand(3,3) X = sp.linalg.solve(A, B) X # verificamos la solución (A @ X) - B """ Explanation: Podemos también hacer lo mismo con $A X = B$, donde ahora $A, B$ y $X$ son matrices: End of explanation """ evals = sp.linalg.eigvals(A) evals evals, evecs = np.linalg.eig(A) evals evecs """ Explanation: Valores y vectores propios El problema de valores propios para la matriz $A$: $\displaystyle A v_n = \lambda_n v_n$, donde $v_n$ es el $n$-ésimo vector propio y $\lambda_n$ es el $n$-ésimo valor propio. Para calcular los vectores propios de una matriz usamos eigvals y para calcular tanto los valores como los vectores propios, podemos usar la función eig: End of explanation """ n = 1 A @ evecs[:,n] - evals[n] * evecs[:,n] """ Explanation: Los vectores propios correspondientes al $n$-ésimo valor propio (guardado en evals[n]) es la $n$-ésima columna en evecs, es decir, evecs[:,n]. 
Para verificar esto, intentemos multiplicar los vectores propios con la matriz y comparar el resultado con el producto del vector propio y el valor propio: End of explanation """ # matriz inversa sp.linalg.inv(A) # determinante sp.linalg.det(A) # norma de distintos órdenes sp.linalg.norm(A, ord=2), sp.linalg.norm(A, ord=np.Inf) """ Explanation: Existen también formas más especializadas para resolver proplemas de valores propios, como por ejemplo eigh para matrices hermíticas. Operaciones matriciales End of explanation """ from scipy.sparse import * # matriz densa M = np.array([[1,0,0,0], [0,3,0,0], [0,1,1,0], [1,0,0,1]]) M # convierte de densa a dispersa A = csr_matrix(M); A # convierte de dispersa a densa A.todense() """ Explanation: Matrices dispersas Las matrices dispersas (sparse matrices) son a menudo útiles en simulaciones numéricas que involucran sistemas grandes, si es que el problema puede ser descrito en forma matricial donde las matrices o vectores contienen mayoritariamente ceros. Scipy tiene buen soporte para las matrices dispersas, con operaciones básicas de álgebra lineal (tales como resolución de ecuaciones, cálculos de valores propios, etc). Existen muchas estrategias posibles para almacenar matrices dispersas de manera eficiente. Algunas de las más comunes son las así llamadas "formas coordenadas" (CCO), "forma de lista de listas" (LIL), y "compressed-sparse column" CSC (también "compressed-sparse row", CSR). Cada formato tiene sus ventajas y desventajas. La mayoría de los algorítmos computacionales (resolución de ecuaciones, multiplicación de matrices, etc) pueden ser implementados eficientemente usando los formatos CSR o CSC, pero ellos no son tan intuitivos ni fáciles de inicializar. Por esto, a menudo una matriz dispersa es inicialmente creada en formato COO o LIL (donde podemos agregar elementos a la matriz dispersa eficientemente), y luego convertirlos a CSC o CSR antes de ser usadas en cálculos reales. 
Para más información sobre los formatos para matrices dispersas, vea por ejemplo (en inglés): http://en.wikipedia.org/wiki/Sparse_matrix <img src="./images/sparse.png" alt="" align="center"/> Cuando creamos una matriz dispersa debemos elegir en qué formato la almacenaremos. Por ejemplo, End of explanation """ A = lil_matrix((4,4)) # matriz dispersa vacía de 4x4 A[0,0] = 1 A[1,1] = 3 A[2,2] = A[2,1] = 1 A[3,3] = A[3,0] = 1 A A.todense() """ Explanation: Una forma más eficiente de crear matrices dispersas: crear una matriz vacía y llenarla usando indexado de matrices (evita crear una matriz densa potencialmente muy grande) End of explanation """ A A = csr_matrix(A); A A = csc_matrix(A); A """ Explanation: Conviertiendo entre distintos formatos de matriz dispersa: End of explanation """ A.todense() (A * A).todense() (A @ A).todense() v = np.array([1,2,3,4])[:,np.newaxis]; v # Multiplicación de matriz dispersa - vector denso A * v # el mismo resultado con matriz densa y vector denso A.todense() * v """ Explanation: Podemos calcular usando matrices dispersas como lo hacemos con matrices densas: End of explanation """ from scipy import optimize """ Explanation: Optimización La optimización (encontrar el máximo o el mínimo de una funciónn) constituye un campo amplio en matemáticas, y la optimización de funciones complicadas o de muchas variables puede ser complicada. Aquí sólo revisaremos algunos casos muy simples. 
Para una introducción detallada a la optimización con SciPy, ver (en inglés): http://scipy-lectures.github.com/advanced/mathematical_optimization/index.html Para usar el módulo de optimización de Scipy hay que importar el módulo optimize: End of explanation """ def f(x): return 4*x**3 + (x-2)**2 + x**4 fig, ax = plt.subplots() x = np.linspace(-5, 3, 100) ax.plot(x, f(x)); """ Explanation: Encontrando máximos Veamos primero cómo encontrar el mínimo de una función simple de una variable: End of explanation """ x_min = optimize.fmin_bfgs(f, -2) # busca un mínimo local cerca -2 x_min optimize.fmin_bfgs(f, 0.5) # busca un mínimo local cerca 0.5 """ Explanation: Podemos usar la función fmin_bfgs para encontrar el mínimo de la función: End of explanation """ optimize.brent(f) optimize.fminbound(f, -4, 2) # busca el mínimo en el intervalo (-4,2) """ Explanation: Podemos también usar las funciones brent o fminbound. Estas funciones tienen una sintaxis algo distinta y usan algoritmos diferentes. End of explanation """ omega_c = 3.0 def f(omega): return np.tan(2*np.pi*omega) - omega_c/omega fig, ax = plt.subplots(figsize=(10,4)) x = np.linspace(0, 3, 1000) y = f(x) mask = np.where(abs(y) > 50) x[mask] = y[mask] = np.NaN # elimina líneas verticales cuando la función cambia de signo ax.plot(x, y) ax.plot([0, 3], [0, 0], 'k') ax.set_ylim(-5,5); optimize.fsolve(f, 0.1) optimize.fsolve(f, 0.6) optimize.fsolve(f, 1.1) """ Explanation: Encontrando las raíces de una función Para encontrar las soluciones a una ecuación de la forma $f(x) = 0$ podemos usar la función fsolve. 
Ella requiere especificar un punto inicial: End of explanation """ from scipy.interpolate import * def f(x): return np.sin(x) n = np.arange(0, 10) x = np.linspace(0, 9, 100) y_meas = f(n) + 0.1 * np.random.randn(len(n)) # simula medidas con error y_real = f(x) linear_interpolation = interp1d(n, y_meas) y_interp1 = linear_interpolation(x) cubic_interpolation = interp1d(n, y_meas, kind='cubic') y_interp2 = cubic_interpolation(x) fig, ax = plt.subplots(figsize=(10,4)) ax.plot(n, y_meas, 'bs', label='datos con ruido') ax.plot(x, y_real, 'k', lw=2, label=u'función exacta') ax.plot(x, y_interp1, 'r', label=u'interpolación lineal') ax.plot(x, y_interp2, 'g', label=u'interpolación cúbica') ax.legend(loc=3); """ Explanation: Interpolación La interpolación es simple y conveniente en Scipy: La función interp1d, cuando se le suministran arreglos describiendo datos X e Y, retorna un objeto que se comporta como una función que puede ser llamada para un valor de x arbitrary (en el rango cubierto por X), y que retorna el correspondiente valor interpolado de y: End of explanation """ from scipy import stats # crea una variable aleatoria (discreta) con distribución poissoniana X = stats.poisson(3.5) # distribución de fotonoes en un estado coherente n=3.5 fotones n = np.arange(0,15) fig, axes = plt.subplots(3,1, sharex=True) # grafica la "probability mass function" (PMF) axes[0].step(n, X.pmf(n)) # grafica la "commulative distribution function" (CDF) axes[1].step(n, X.cdf(n)) # grafica histograma de 1000 realizaciones de la variable estocástica X axes[2].hist(X.rvs(size=1000)); # crea una variable aleatoria (contínua) con distribución normal Y = stats.norm() x = np.linspace(-5,5,100) fig, axes = plt.subplots(3,1, sharex=True) # grafica la función distribución de probabilidad ("probability distribution function", PDF) axes[0].plot(x, Y.pdf(x)) # grafica función de distribución acumulada ("commulative distributin function", CDF) axes[1].plot(x, Y.cdf(x)); # grafica histograma de 1000 
realizaciones aleatorias de la variable estocástica Y axes[2].hist(Y.rvs(size=1000), bins=50); """ Explanation: Estadística El módulo scipy.stats contiene varias distribuciones estadísticas, funciones estadísticas y testss. Para una documentación completa de estas las características, ver (en inglés) http://docs.scipy.org/doc/scipy/reference/stats.html. También existe un paquete Python muy poderoso para modelamiento estadístoco llamado statsmodels. Ver http://statsmodels.sourceforge.net para más detalles. End of explanation """ X.mean(), X.std(), X.var() # distribución de Poission Y.mean(), Y.std(), Y.var() # distribucuón normal """ Explanation: Estadística: End of explanation """ t_statistic, p_value = stats.ttest_ind(X.rvs(size=1000), X.rvs(size=1000)) print("t-statistic =", t_statistic) print("valor p =", p_value) """ Explanation: Test estadísticos Test si dos conjuntos de datos aleatorios (independientes) vienen de la misma distribución: End of explanation """ stats.ttest_1samp(Y.rvs(size=1000), 0.1) """ Explanation: Como el valor p es muy grande, no podemos descartar la hiopótesis que los dos conjuntos de datos aleatorios tienen medias diferentes. Para testear si la media de una única muestra de datos tiene media 0.1 (la media verdadera es 0.0): End of explanation """ Y.mean() stats.ttest_1samp(Y.rvs(size=1000), Y.mean()) """ Explanation: Un valor de p bajo significa que podemos descartar la hipótesis que la media de Y es 0.1. End of explanation """ # Esta celda da el estilo al notebook from IPython.core.display import HTML css_file = './css/aeropython.css' HTML(open(css_file, "r").read()) """ Explanation: Lectura adicional http://www.scipy.org - La página oficial del proyecto SciPy. http://docs.scipy.org/doc/scipy/reference/tutorial/index.html - Un tutorial sobre cómo comenzar a usar SciPy. https://github.com/scipy/scipy/ - El códifo fuente de SciPy. End of explanation """
Santana9937/language-translation
dlnd_language_translation.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests source_path = 'data/small_vocab_en' target_path = 'data/small_vocab_fr' source_text = helper.load_data(source_path) target_text = helper.load_data(target_path) """ Explanation: Language Translation In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French. Get the Data Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus. End of explanation """ view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()}))) sentences = source_text.split('\n') word_counts = [len(sentence.split()) for sentence in sentences] print('Number of sentences: {}'.format(len(sentences))) print('Average number of words in a sentence: {}'.format(np.average(word_counts))) print() print('English sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) print() print('French sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. End of explanation """ def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): """ Convert source and target text to proper word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. 
:param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists (source_id_text, target_id_text) """ # TODO: Implement Function ###source_sent = [ sent for sent in source_text.split("\n") ] ###target_sent = [ sent + ' <EOS>' for sent in target_text.split("\n") ] ###source_ids = [ [ source_vocab_to_int[word] for word in sent.split() ] for sent in source_sent ] ###target_ids = [ [ target_vocab_to_int[word] for word in sent.split() ] for sent in target_sent ] # Advice from Udacity Reviewer target_ids = [[target_vocab_to_int[w] for w in s.split()] + [target_vocab_to_int['<EOS>']] for s in target_text.split('\n')] source_ids = [[source_vocab_to_int[w] for w in s.split()] for s in source_text.split('\n')] return source_ids, target_ids """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_text_to_ids(text_to_ids) """ Explanation: Implement Preprocessing Function Text to Word Ids As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the &lt;EOS&gt; word id at the end of target_text. This will help the neural network predict when the sentence should end. You can get the &lt;EOS&gt; word id by doing: python target_vocab_to_int['&lt;EOS&gt;'] You can get other word ids using source_vocab_to_int and target_vocab_to_int. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ helper.preprocess_and_save_data(source_path, target_path, text_to_ids) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np import helper (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) """ Explanation: Check the Version of TensorFlow and Access to GPU This will check to make sure you have the correct version of TensorFlow and access to a GPU End of explanation """ def model_inputs(): """ Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences. 
:return: Tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) """ # TODO: Implement Function input_ = tf.placeholder( tf.int32, [None, None], name = "input" ) target_ = tf.placeholder( tf.int32, [None, None], name = "target" ) learn_rate_ = tf.placeholder( tf.float32, None, name = "learn_rate" ) keep_prob_ = tf.placeholder( tf.float32, None, name = "keep_prob" ) target_sequence_length = tf.placeholder( tf.int32, [None], name="target_sequence_length" ) max_target_sequence_length = tf.reduce_max( target_sequence_length ) source_sequence_length = tf.placeholder( tf.int32, [None], name="source_sequence_length" ) return input_, target_, learn_rate_, keep_prob_, target_sequence_length, max_target_sequence_length, source_sequence_length """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_inputs(model_inputs) """ Explanation: Build the Neural Network You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below: - model_inputs - process_decoder_input - encoding_layer - decoding_layer_train - decoding_layer_infer - decoding_layer - seq2seq_model Input Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: Input text placeholder named "input" using the TF Placeholder name parameter with rank 2. Targets placeholder with rank 2. Learning rate placeholder with rank 0. Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0. Target sequence length placeholder named "target_sequence_length" with rank 1 Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0. 
Source sequence length placeholder named "source_sequence_length" with rank 1 Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) End of explanation """ def process_decoder_input(target_data, target_vocab_to_int, batch_size): """ Preprocess target data for encoding :param target_data: Target Placehoder :param target_vocab_to_int: Dictionary to go from the target words to an id :param batch_size: Batch Size :return: Preprocessed target data """ # TODO: Implement Function go_id = source_vocab_to_int[ '<GO>' ] ending_text = tf.strided_slice( target_data, [0, 0], [batch_size, -1], [1, 1] ) decoded_text = tf.concat( [ tf.fill([batch_size, 1], go_id), ending_text ], 1) return decoded_text """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_process_encoding_input(process_decoder_input) """ Explanation: Process Decoder Input Implement process_decoder_input by removing the last word id from each batch in target_data and concat the GO ID to the begining of each batch. 
End of explanation """ from imp import reload reload(tests) def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size): """ Create encoding layer :param rnn_inputs: Inputs for the RNN :param rnn_size: RNN Size :param num_layers: Number of layers :param keep_prob: Dropout keep probability :param source_sequence_length: a list of the lengths of each sequence in the batch :param source_vocab_size: vocabulary size of source data :param encoding_embedding_size: embedding size of source data :return: tuple (RNN output, RNN state) """ # TODO: Implement Function encod_inputs = tf.contrib.layers.embed_sequence( rnn_inputs, source_vocab_size, encoding_embedding_size ) rnn_cell = tf.contrib.rnn.MultiRNNCell( [ tf.contrib.rnn.LSTMCell( rnn_size ) for _ in range(num_layers) ] ) # Adding dropout layer rnn_cell = tf.contrib.rnn.DropoutWrapper( rnn_cell, output_keep_prob = keep_prob ) rnn_output, rnn_state = tf.nn.dynamic_rnn( rnn_cell, encod_inputs, source_sequence_length, dtype = tf.float32 ) return rnn_output, rnn_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_encoding_layer(encoding_layer) """ Explanation: Encoding Implement encoding_layer() to create a Encoder RNN layer: * Embed the encoder input using tf.contrib.layers.embed_sequence * Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper * Pass cell and embedded input to tf.nn.dynamic_rnn() End of explanation """ def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_summary_length, output_layer, keep_prob): """ Create a decoding layer for training :param encoder_state: Encoder State :param dec_cell: Decoder RNN Cell :param dec_embed_input: Decoder embedded input :param target_sequence_length: The lengths of each sequence in the target batch :param max_summary_length: The length of the longest sequence in the batch :param output_layer: Function 
to apply the output layer :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing training logits and sample_id """ # TODO: Implement Function decode_helper = tf.contrib.seq2seq.TrainingHelper( dec_embed_input, target_sequence_length ) decoder = tf.contrib.seq2seq.BasicDecoder( dec_cell, decode_helper, encoder_state, output_layer ) decoder_outputs, decoder_state = tf.contrib.seq2seq.dynamic_decode( decoder, impute_finished=True, maximum_iterations= max_summary_length ) return decoder_outputs """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_train(decoding_layer_train) """ Explanation: Decoding - Training Create a training decoding layer: * Create a tf.contrib.seq2seq.TrainingHelper * Create a tf.contrib.seq2seq.BasicDecoder * Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode End of explanation """ def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob): """ Create a decoding layer for inference :param encoder_state: Encoder state :param dec_cell: Decoder RNN Cell :param dec_embeddings: Decoder embeddings :param start_of_sequence_id: GO ID :param end_of_sequence_id: EOS Id :param max_target_sequence_length: Maximum length of target sequences :param vocab_size: Size of decoder/target vocabulary :param decoding_scope: TenorFlow Variable Scope for decoding :param output_layer: Function to apply the output layer :param batch_size: Batch size :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing inference logits and sample_id """ # TODO: Implement Function start_tokens = tf.tile( tf.constant( [start_of_sequence_id], dtype=tf.int32), [ batch_size ], name = "start_tokens" ) decode_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( dec_embeddings, start_tokens, end_of_sequence_id ) decoder = tf.contrib.seq2seq.BasicDecoder( dec_cell, 
decode_helper, encoder_state, output_layer = output_layer ) decoder_outputs, decoder_state = tf.contrib.seq2seq.dynamic_decode( decoder, impute_finished=True, maximum_iterations = max_target_sequence_length ) return decoder_outputs """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_infer(decoding_layer_infer) """ Explanation: Decoding - Inference Create inference decoder: * Create a tf.contrib.seq2seq.GreedyEmbeddingHelper * Create a tf.contrib.seq2seq.BasicDecoder * Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode End of explanation """ from tensorflow.python.layers import core as layers_core def decoding_layer(dec_input, encoder_state, target_sequence_length, max_target_sequence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, decoding_embedding_size): """ Create decoding layer :param dec_input: Decoder input :param encoder_state: Encoder state :param target_sequence_length: The lengths of each sequence in the target batch :param max_target_sequence_length: Maximum length of target sequences :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :param target_vocab_size: Size of target vocabulary :param batch_size: The size of the batch :param keep_prob: Dropout keep probability :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ # TODO: Implement Function decode_embed = tf.Variable( tf.random_uniform( [ target_vocab_size, decoding_embedding_size ] ) ) decode_embed_input = tf.nn.embedding_lookup( decode_embed, dec_input ) decode_cell = tf.contrib.rnn.MultiRNNCell( [ tf.contrib.rnn.LSTMCell(rnn_size) for _ in range(num_layers) ] ) # Adding dropout layer decode_cell = tf.contrib.rnn.DropoutWrapper( decode_cell, output_keep_prob = keep_prob ) output_layer = layers_core.Dense( target_vocab_size, kernel_initializer = tf.truncated_normal_initializer( mean = 
0.0, stddev=0.1 ) ) with tf.variable_scope( "decoding" ) as decoding_scope: decode_outputs_train = decoding_layer_train( encoder_state, decode_cell, decode_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob ) SOS_id = target_vocab_to_int[ "<GO>" ] EOS_id = target_vocab_to_int[ "<EOS>" ] with tf.variable_scope( "decoding", reuse=True) as decoding_scope: decode_outputs_infer = decoding_layer_infer( encoder_state, decode_cell, decode_embed, SOS_id,EOS_id, max_target_sequence_length,target_vocab_size, output_layer, batch_size, keep_prob ) return decode_outputs_train, decode_outputs_infer """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer(decoding_layer) """ Explanation: Build the Decoding Layer Implement decoding_layer() to create a Decoder RNN layer. Embed the target sequences Construct the decoder LSTM cell (just like you constructed the encoder cell above) Create an output layer to map the outputs of the decoder to the elements of our vocabulary Use the your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits. Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits. Note: You'll need to use tf.variable_scope to share variables between training and inference. 
End of explanation """ def seq2seq_model(input_data, target_data, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sentence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int): """ Build the Sequence-to-Sequence part of the neural network :param input_data: Input placeholder :param target_data: Target placeholder :param keep_prob: Dropout keep probability placeholder :param batch_size: Batch Size :param source_sequence_length: Sequence Lengths of source sequences in the batch :param target_sequence_length: Sequence Lengths of target sequences in the batch :param source_vocab_size: Source vocabulary size :param target_vocab_size: Target vocabulary size :param enc_embedding_size: Decoder embedding size :param dec_embedding_size: Encoder embedding size :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ # TODO: Implement Function encode_output, encode_state = encoding_layer( input_data, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, enc_embedding_size ) decode_input = process_decoder_input( target_data, target_vocab_to_int, batch_size ) decode_outputs_train, decode_outputs_infer = decoding_layer( decode_input, encode_state, target_sequence_length, tf.reduce_max( target_sequence_length ), rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size ) return decode_outputs_train, decode_outputs_infer """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_seq2seq_model(seq2seq_model) """ Explanation: Build the Neural Network Apply the functions you implemented above to: Apply embedding to the input data for the encoder. 
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size). Process target data using your process_decoder_input(target_data, target_vocab_to_int, batch_size) function. Apply embedding to the target data for the decoder. Decode the encoded input using your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function. End of explanation """ # Number of Epochs epochs = 10 # Batch Size batch_size = 256 # RNN Size rnn_size = 256 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 128 decoding_embedding_size = 128 # Learning Rate learning_rate = 0.01 # Dropout Keep Probability keep_probability = 0.8 display_step = 10 """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set num_layers to the number of layers. Set encoding_embedding_size to the size of the embedding for the encoder. Set decoding_embedding_size to the size of the embedding for the decoder. Set learning_rate to the learning rate. 
Set keep_probability to the Dropout keep probability Set display_step to state how many steps between each debug output statement End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ save_path = 'checkpoints/dev' (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() max_target_sentence_length = max([len(sentence) for sentence in source_int_text]) train_graph = tf.Graph() with train_graph.as_default(): input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs() #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length') input_shape = tf.shape(input_data) train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sequence_length, len(source_vocab_to_int), len(target_vocab_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int) training_logits = tf.identity(train_logits.rnn_output, name='logits') inference_logits = tf.identity(inference_logits.sample_id, name='predictions') masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ def pad_sentence_batch(sentence_batch, pad_int): """Pad sentences with <PAD> so that each sentence of a batch has the same length""" max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch] def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int): """Batch targets, sources, and the lengths of their sentences together""" for batch_i in range(0, len(sources)//batch_size): start_i = batch_i * batch_size # Slice the right amount for the batch sources_batch = sources[start_i:start_i + batch_size] targets_batch = targets[start_i:start_i + batch_size] # Pad pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int)) pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int)) # Need the lengths for the _lengths parameters pad_targets_lengths = [] for target in pad_targets_batch: pad_targets_lengths.append(len(target)) pad_source_lengths = [] for source in pad_sources_batch: pad_source_lengths.append(len(source)) yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths """ Explanation: Batch and pad the source and target sequences End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ def get_accuracy(target, logits): """ Calculate accuracy """ max_seq = max(target.shape[1], logits.shape[1]) if max_seq - target.shape[1]: target = np.pad( target, [(0,0),(0,max_seq - target.shape[1])], 'constant') if max_seq - logits.shape[1]: logits = np.pad( logits, [(0,0),(0,max_seq - logits.shape[1])], 'constant') return np.mean(np.equal(target, logits)) # Split data to training and validation sets train_source = source_int_text[batch_size:] train_target = target_int_text[batch_size:] valid_source = source_int_text[:batch_size] valid_target = target_int_text[:batch_size] (valid_sources_batch, valid_targets_batch, 
valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source, valid_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate( get_batches(train_source, train_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])): _, loss = sess.run( [train_op, cost], {input_data: source_batch, targets: target_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths, keep_prob: keep_probability}) if batch_i % display_step == 0 and batch_i > 0: batch_train_logits = sess.run( inference_logits, {input_data: source_batch, source_sequence_length: sources_lengths, target_sequence_length: targets_lengths, keep_prob: 1.0}) batch_valid_logits = sess.run( inference_logits, {input_data: valid_sources_batch, source_sequence_length: valid_sources_lengths, target_sequence_length: valid_targets_lengths, keep_prob: 1.0}) train_acc = get_accuracy(target_batch, batch_train_logits) valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits) print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}' .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_path) print('Model Trained and Saved') """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params(save_path) """ Explanation: Save Parameters Save the batch_size and save_path parameters for inference. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess() load_path = helper.load_params() """ Explanation: Checkpoint End of explanation """ def sentence_to_seq(sentence, vocab_to_int): """ Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids """ # TODO: Implement Function sequence = [ vocab_to_int.get( word, vocab_to_int[ "<UNK>"] ) for word in sentence.lower().split() ] return sequence """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_sentence_to_seq(sentence_to_seq) """ Explanation: Sentence to Sequence To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences. Convert the sentence to lowercase Convert words into ids using vocab_to_int Convert words not in the vocabulary, to the &lt;UNK&gt; word id. End of explanation """ translate_sentence = 'he saw a old yellow truck .' 
""" DON'T MODIFY ANYTHING IN THIS CELL """ translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_path + '.meta') loader.restore(sess, load_path) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size, target_sequence_length: [len(translate_sentence)*2]*batch_size, source_sequence_length: [len(translate_sentence)]*batch_size, keep_prob: 1.0})[0] print('Input') print(' Word Ids: {}'.format([i for i in translate_sentence])) print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in translate_logits])) print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits]))) """ Explanation: Translate This will translate translate_sentence from English to French. End of explanation """
blue-yonder/tsfresh
notebooks/examples/04 Multiclass Selection Example.ipynb
mit
%matplotlib inline
import matplotlib.pylab as plt

from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.utilities.dataframe_functions import impute
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import pandas as pd
import numpy as np
"""
Explanation: Multiclass Example
This example shows how to use tsfresh to extract and select useful features from timeseries in a multiclass classification example.
The underlying control of the false discovery rate (FDR) has been introduced by Tang et al. (2020, Sec. 3.2).
We use an example dataset of human activity recognition for this.
The dataset consists of timeseries for 7352 accelerometer readings. Each reading represents an accelerometer reading for 2.56 sec at 50hz (for a total of 128 samples per reading). Furthermore, each reading corresponds to one of six activities (walking, walking upstairs, walking downstairs, sitting, standing and laying).
For more information go to https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
This notebook follows the example in the first notebook, so we will go quickly over the extraction and focus on the more interesting feature selection in this case.
End of explanation
"""
from tsfresh.examples.har_dataset import download_har_dataset, load_har_dataset, load_har_classes

# fetch dataset from uci
download_har_dataset()

df = load_har_dataset()
df.head()
y = load_har_classes()
"""
Explanation: Load and visualize data
End of explanation
"""
Therefore we bring it to a format where the time series of different persons are identified by an id and are ordered by time vertically. End of explanation """ # only use the first 500 ids to speed up the processing X = extract_features(df[df["id"] < 500], column_id="id", column_sort="time", impute_function=impute) X.head() """ Explanation: Extract Features End of explanation """ X_train, X_test, y_train, y_test = train_test_split(X, y[:500], test_size=.2) classifier_full = DecisionTreeClassifier() classifier_full.fit(X_train, y_train) print(classification_report(y_test, classifier_full.predict(X_test))) """ Explanation: Train and evaluate classifier For later comparison, we train a decision tree on all features (without selection): End of explanation """ relevant_features = set() for label in y.unique(): y_train_binary = y_train == label X_train_filtered = select_features(X_train, y_train_binary) print("Number of relevant features for class {}: {}/{}".format(label, X_train_filtered.shape[1], X_train.shape[1])) relevant_features = relevant_features.union(set(X_train_filtered.columns)) len(relevant_features) """ Explanation: Multiclass feature selection We will now select a subset of relevant features using the tsfresh select features method. However it only works for binary classification or regression tasks. For a 6-label multiclass classification we therefore split the selection problem into 6 binary one-versus-all classification problems. 
For each of them we can do a binary classification feature selection: End of explanation """ X_train_filtered = X_train[list(relevant_features)] X_test_filtered = X_test[list(relevant_features)] """ Explanation: we keep only those features that we selected above, for both the train and test set End of explanation """ classifier_selected = DecisionTreeClassifier() classifier_selected.fit(X_train_filtered, y_train) print(classification_report(y_test, classifier_selected.predict(X_test_filtered))) """ Explanation: and train again: End of explanation """ X_train_filtered_multi = select_features(X_train, y_train, multiclass=True, n_significant=5) X_train_filtered_multi.shape """ Explanation: It worked! The precision improved by removing irrelevant features. Improved Multiclass feature selection We can instead specify the number of classes for which a feature should be a relevant predictor in order to pass through the filtering process. This is as simple as setting the multiclass parameter to True and setting n_significant to the required number of classes. We will try with a requirement of being relevant for 5 classes. End of explanation """ classifier_selected_multi = DecisionTreeClassifier() classifier_selected_multi.fit(X_train_filtered_multi, y_train) X_test_filtered_multi = X_test[X_train_filtered_multi.columns] print(classification_report(y_test, classifier_selected_multi.predict(X_test_filtered_multi))) """ Explanation: We can see that the number of relevant features is lower than the previous implementation. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/cmcc/cmip6/models/cmcc-cm2-sr5/toplevel.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-cm2-sr5', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: CMCC Source ID: CMCC-CM2-SR5 Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:50 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Flux Correction 3. Key Properties --&gt; Genealogy 4. Key Properties --&gt; Software Properties 5. Key Properties --&gt; Coupling 6. Key Properties --&gt; Tuning Applied 7. Key Properties --&gt; Conservation --&gt; Heat 8. Key Properties --&gt; Conservation --&gt; Fresh Water 9. Key Properties --&gt; Conservation --&gt; Salt 10. Key Properties --&gt; Conservation --&gt; Momentum 11. Radiative Forcings 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O 15. 
Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect 24. Radiative Forcings --&gt; Aerosols --&gt; Dust 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt 28. Radiative Forcings --&gt; Other --&gt; Land Use 29. Radiative Forcings --&gt; Other --&gt; Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Flux Correction Flux correction properties of the model 2.1. Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of model 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. Components Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Coupling ** 5.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for model 6.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Conservation --&gt; Heat Global heat convervation properties of the model 7.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. 
Key Properties --&gt; Conservation --&gt; Fresh Water Global fresh water convervation properties of the model 8.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh_water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Endoreic Basins Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --&gt; Conservation --&gt; Salt Global salt convervation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --&gt; Conservation --&gt; Momentum Global momentum convervation properties of the model 10.1. Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 Carbon dioxide forcing 12.1. 
Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 Methane forcing 13.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. 
Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --&gt; Aerosols --&gt; Dust Dust forcing 24.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --&gt; Other --&gt; Land Use Land use forcing 28.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --&gt; Other --&gt; Solar Solar forcing 29.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. 
citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
guyhoffman/hri-statistics
notebooks/DSUR - 04.ipynb
mit
facebookdata = pd.read_table('../../DSUR/04/FacebookNarcissism.dat') facebookdata.head(10) sns.lmplot(data=facebookdata, x="NPQC_R_Total", y="Rating", fit_reg=False) sns.lmplot(data=facebookdata, x="NPQC_R_Total", y="Rating", col="Rating_Type", y_jitter=.25,fit_reg=False) sns.lmplot(data=facebookdata, x="NPQC_R_Total", y="Rating", markers=["x","o","s","v"], hue="Rating_Type", y_jitter=0.25,fit_reg=False) """ Explanation: Facebook Narcissism Earlier in the chapter we mentioned a study that looked at ratings of Facebook profile pictures (rated on coolness, fashion, attractiveness and glamour) and predicting them from how highly the person posting the picture scores on narcissism (Ong et al., 2011). Field, Andy,Miles, Jeremy,Field, Zoe. Discovering Statistics Using R (p. 133). SAGE Publications. Kindle Edition. The data structure is: id: a number indicating from which participant the profile photo came. NPQC_R_Total: the total score on the narcissism questionnaire. Rating_Type: whether the rating was for coolness, glamour, fashion or attractiveness (stored as strings of text). Rating: the rating given (on a scale from 1 to 5). Field, Andy,Miles, Jeremy,Field, Zoe. Discovering Statistics Using R (p. 133). SAGE Publications. Kindle Edition. End of explanation """ examdata = pd.read_table('../../DSUR/04/Exam Anxiety.dat') examdata.head(10) sns.lmplot(data=examdata, x="Anxiety", y="Exam", fit_reg=False) sns.lmplot(data=examdata, x="Anxiety", y="Exam") sns.lmplot(data=examdata, x="Anxiety", y="Exam", order=2) sns.lmplot(data=examdata, x="Anxiety", y="Exam", order=3, ci=None) sns.lmplot(data=examdata, x="Anxiety", y="Exam", hue="Gender") sns.lmplot(data=examdata, x="Anxiety", y="Exam", col="Gender", hue="Gender") """ Explanation: Exam Anxiety For example, a psychologist was interested in the effects of exam stress on exam performance. So, she devised and validated a questionnaire to assess state anxiety relating to exams (called the Exam Anxiety Questionnaire, or EAQ). 
This scale produced a measure of anxiety scored out of 100. Anxiety was measured before an exam, and the percentage mark of each student on the exam was used to assess the exam performance. Field, Andy,Miles, Jeremy,Field, Zoe. Discovering Statistics Using R (p. 136). SAGE Publications. Kindle Edition. End of explanation """ festivaldata = pd.read_table('../../DSUR/04/DownloadFestival.dat') festivaldata.head(10) sns.distplot(festivaldata.day1, kde=False) d1 = festivaldata.day1 print ("Stdev:", d1.std()) print ("3x Stdev:", 3*d1.std()) print ("Distance from mean:", ((d1-d1.mean()).abs()).head(5)) cleand1 = d1[~((d1-d1.mean()).abs()>3*d1.std())] sns.distplot(cleand1, kde=False) """ Explanation: Festival Data Hygiene was measured using a standardized technique (don’t worry, it wasn’t licking the person’s armpit) that results in a score ranging between 0 (you smell like a corpse that’s been left to rot up a skunk’s arse) and 4 (you smell of sweet roses on a fresh spring day). Field, Andy,Miles, Jeremy,Field, Zoe. Discovering Statistics Using R (p. 142). SAGE Publications. Kindle Edition. End of explanation """
MarsUniversity/ece387
website/block_3_vision/lsn15/misc.ipynb
mit
%matplotlib inline from __future__ import print_function from __future__ import division import cv2 # opencv itself import numpy as np # matrix manipulations from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks import pylab # this allows you to control figure size pylab.rcParams['figure.figsize'] = (10.0, 8.0) # this controls figure size in the notebook """ Explanation: Misc Stuff I want to teach, but there really isn't time to do it right and it is not needed for the course really. Image Segmentation End of explanation """ # img = cv2.imread('coins.jpg') img = cv2.imread('dnd.jpg') img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY) ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) # noise removal kernel = np.ones((3,3),np.uint8) opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2) # sure background area sure_bg = cv2.dilate(opening,kernel,iterations=3) # Finding sure foreground area dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5) ret, sure_fg = cv2.threshold(dist_transform,0.5*dist_transform.max(),255,0) # Finding unknown region sure_fg = np.uint8(sure_fg) unknown = cv2.subtract(sure_bg,sure_fg) # Marker labelling ret, markers = cv2.connectedComponents(sure_fg) # Add one to all labels so that sure background is not 0, but 1 markers = markers+1 # Now, mark the region of unknown with zero markers[unknown==255] = 0 # boundries are marked as -1 markers = cv2.watershed(img,markers) # img[markers == -1] = [255,0,0] plt.subplot(1,4,1) plt.imshow(sure_fg) plt.subplot(1,4,2) plt.imshow(sure_fg, cmap='gray') plt.subplot(1,4,3) plt.imshow(thresh, cmap='gray') plt.subplot(1,4,4) plt.imshow(markers) markers = cv2.watershed(img,markers) print('len(markers)=={}'.format(len(markers))) # img[markers == 1] = [255,0,0] cimg = img.copy() cimg[markers == -1] = [255,0,0] plt.subplot(2,2,1) plt.imshow(cimg) plt.title(-1) for i in range(1, 4): cimg = 
img.copy() cimg[markers == i] = [255,0,0] plt.subplot(2, 2, i+1) plt.imshow(cimg) plt.title(i) """ Explanation: Image Segmentation with Watershed Algorithm Any grayscale image can be viewed as a topographic surface where high intensity denotes peaks and hills while low intensity denotes valleys. You start filling every isolated valleys (local minima) with different colored water (labels). As the water rises, depending on the peaks (gradients) nearby, water from different valleys, obviously with different colors will start to merge. To avoid that, you build barriers in the locations where water merges. You continue the work of filling water and building barriers until all the peaks are under water. Then the barriers you created gives you the segmentation result. This is the “philosophy” behind the watershed. You can visit the CMM webpage on watershed to understand it with the help of some animations. But this approach gives you oversegmented result due to noise or any other irregularities in the image. So OpenCV implemented a marker-based watershed algorithm where you specify which are all valley points are to be merged and which are not. It is an interactive image segmentation. What we do is to give different labels for our object we know. Label the region which we are sure of being the foreground or object with one color (or intensity), label the region which we are sure of being background or non-object with another color and finally the region which we are not sure of anything, label it with 0. That is our marker. Then apply watershed algorithm. Then our marker will be updated with the labels we gave, and the boundaries of objects will have a value of -1. 
End of explanation """ def quantize(img, K): Z = img.reshape((-1,3)) # convert to np.float32 Z = np.float32(Z) # define criteria, number of clusters(K) and apply kmeans() criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS) # Now convert back into uint8, and make original image center = np.uint8(center) res = center[label.flatten()] res2 = res.reshape((img.shape)) return res2 img = cv2.imread('dnd.jpg') img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for k in range(2,8): qimg = quantize(img, k) plt.subplot(2, 3, k-1) plt.imshow(qimg) plt.title('{} Colors'.format(k)) from mpl_toolkits.mplot3d import Axes3D img = cv2.imread('dnd.jpg') img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) Z = img.reshape((-1,3)) # convert to np.float32 Z = np.float32(Z) K = 6 # define criteria, number of clusters(K) and apply kmeans() criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS) # print(label) A = Z[label.ravel()==0] # every pt with label zero, put in A B = Z[label.ravel()==1] C = Z[label.ravel()==2] D = Z[label.ravel()==3] E = Z[label.ravel()==4] F = Z[label.ravel()==5] print(A) print(A.shape) print(len(A)) print(center) # Plot the data # plt.subplot(111, projection='3d') # plt.scatter(A[:,1],A[:,2]) # plt.scatter(A[:,0],A[:,1], A[:,2]) # plt.scatter(B[:,0],B[:,1], B[:,2]) # plt.scatter(C[:,0],C[:,1],C[:,2]) # plt.scatter(D[:,0],D[:,1],D[:,2]) # plt.scatter(E[:,0],E[:,1],E[:,2]) # plt.scatter(F[:,0],F[:,1],F[:,2]) # plt.scatter(center[:,0],center[:,1], center[:,2],s = 80,c = 'y', marker = 's') # plt.scatter(center[:,0],center[:,1], center[:,2],c = 'y', marker = 's') # plt.xlabel('Blue') # plt.ylabel('Green') # plt.title('6 Colors') # plt.grid(True); print(A[:,0].min(), A[:,0].max()) print(A[:,1].min(), A[:,1].max()) print(A[:,2].min(), A[:,2].max()) # For HSV, Hue range is [0,179], Saturation range is 
[0,255] and Value range is [0,255] for i, (h, c) in enumerate(zip([A,B,C,D,E,F], center)): plt.subplot(3,2,i+1) # rgb # plt.hist(h[:,0],256,[0,255], normed=True) # plt.hist(h[:,1],256,[0,255], normed=True) # plt.hist(h[:,2],256,[0,255], normed=True) # hsv plt.hist(h[:,0],180,[0,179], normed=True) plt.hist(h[:,1],256,[0,255], normed=True) plt.hist(h[:,2],256,[0,255], normed=True) plt.title('{:.1f} {:.1f} {:.1f}'.format(*c)) # plt.hist(B,256,[0,256], normed=True) # plt.hist(C,256,[0,256], normed=True) # plt.hist(D,256,[0,256], normed=True) # # plt.hist(E,256,[0,256], normed=True) # plt.hist(F,256,[0,256], normed=True) plt.show(); center[0] """ Explanation: Color Quantinization Reducing the number of colors in an image can be useful. Image you are trying to detect and maybe track the color red. Unfortunately there are a lot of different RGB (or HSV or whatever) numerical discriptions for what someone would call red. Sometimes it is nice to reduce all of those possible values down to just a few and then try to detect one of those colors. Unfortunately this is a little slow because of kmeans. :( End of explanation """ def quantize(img, K): # reshape (width, height, colors) to (width*height, colors) # the image is just a long array of pixels now Z = img.reshape((-1,3)) # convert to np.float32 Z = np.float32(Z) # define criteria, number of clusters(K) and apply kmeans() criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS) # Now convert back into uint8, and make original image center = np.uint8(center) res = center[label.flatten()] res2 = res.reshape((img.shape)) return res2 img = cv2.imread('dnd.jpg') # convert to a linear color space img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) for k in range(2,8): qimg = quantize(img, k) plt.subplot(2, 3, k-1) qimg = cv2.cvtColor(qimg, cv2.COLOR_HSV2RGB) plt.imshow(qimg) plt.title('{} Colors'.format(k)) # what does the reshape do? 
a = np.zeros((10,10,3)) print(a.shape) b = a.reshape((-1,3)) print(b.shape) print(a.flatten().shape) # now we have 3 color planes but we put all pixels # in a row that we can sort/operate on # Move some place else!!! img = cv2.imread('balls.jpg',0) img = cv2.medianBlur(img,5) cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR) circles = cv2.HoughCircles( img, cv2.HOUGH_GRADIENT, 1, # dp - accumulator has same resolution as image 20, # min dist between circle centers param1=50, # Upper threshold for the internal Canny edge detector param2=90, # Threshold for center detection, smaller means more false positives minRadius=50, # circle radius maxRadius=300) circles = np.uint16(np.around(circles)) for i in circles[0,:]: # draw the outer circle cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2) # draw the center of the circle cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3) plt.imshow(cimg); """ Explanation: Let's try another way. RBG is not a linear space and colors that are similar in appearance are not always next to each other. By switching to HSV, we are now in a cylindrical colorspace where colors are continous. End of explanation """
timgasser/bcycle-austin
notebooks/bcycle_stations.ipynb
mit
# Analysis stack: pandas/numpy for data wrangling, matplotlib/seaborn for
# plots, folium for interactive maps. The star-import from bcycle_lib.utils
# provides the load_stations() / load_bikes() helpers used throughout.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import folium
import seaborn as sns
from bcycle_lib.utils import *

%matplotlib inline

# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2

# Load the stations table, and show the first STATIONS entries.
# NOTE(review): an earlier comment said "first 10 entries", but STATIONS is 5.
STATIONS = 5
stations_df = load_stations()
num_stations = stations_df.shape[0]   # one row per station
print('Found {} stations, showing first {}'.format(num_stations, STATIONS))
stations_df.head(STATIONS)
"""
Explanation: BCycle Austin stations
This notebook looks at the stations that make up the Austin BCycle network. For each station we have the following information:
station_id: A unique identifier for each of the station. Used to connect the bikes.csv time-varying table to the static stations table.
name: The name of the station. This is the nearest cross street to the station, or if the station is located at a building, the name of that building.
address: The address of the station. Note that if a company sponsors the station, it will include their name, for example 'Presented by Whole Foods Market'. For this reason, its best not to geocode this field to a lat/lon pair, and use those values from the respective fields.
lat: The latitude of the station.
lon: The longitude of the station.
datetime: The date and time that the station was first reported when fetching the BCycle Station webpage.
Imports and data loading
Before getting started, let's import some useful libraries (including the bcycle_lib created for these notebooks), and load the stations CSV file.
End of explanation """ # Calculate where the map should be centred based on station locations min_lat = stations_df['lat'].min() max_lat = stations_df['lat'].max() min_lon = stations_df['lon'].min() max_lon = stations_df['lon'].max() center_lat = min_lat + (max_lat - min_lat) / 2.0 center_lon = min_lon + (max_lon - min_lon) / 2.0 # Plot map using the B&W Stamen Toner tiles centred on BCycle stations map = folium.Map(location=(center_lat, center_lon), zoom_start=14, tiles='Stamen Toner', control_scale=True) # Add markers to the map for each station. Click on them to see their name for station in stations_df.iterrows(): stat=station[1] folium.Marker([stat['lat'], stat['lon']], popup=stat['name'], icon=folium.Icon(icon='info-sign') ).add_to(map) map.save('stations.html') map """ Explanation: Plot the stations on a map of Austin Let's plot all the stations on an Open Street Map of Austin, to see where they're concentrated. We can use the latitude and longitude of the stations to center the map. To find out the name of the station, click on the marker. 
End of explanation """ # Load bikes dataframe, calculate the capacity of each every 5 minutes (bikes + docks) bikes_df = load_bikes() bikes_df['capacity'] = bikes_df['bikes'] + bikes_df['docks'] # Now find the max capacity across all the stations at all 5 minute intervals bikes_df = bikes_df.groupby('station_id').max().reset_index() bikes_df = bikes_df[['station_id', 'capacity']] # Now join with the stations dataframe using station_id stations_cap_df = pd.merge(stations_df, bikes_df, on='station_id') # Print the smallest and largest stations N = 4 sorted_stations = stations_cap_df.sort_values(by='capacity', ascending=True) print('Smallest {} stations: \n{}\n'.format(N, sorted_stations[['name', 'capacity']][:N])) print('Largest {} stations: \n{}\n'.format(N, sorted_stations[['name', 'capacity']][-N:])) # Show a histogram of the capacities # fig = plt.figure() ax1 = stations_cap_df['capacity'].plot.hist(figsize=(10,6)) ax1.set_xlabel('Station Capacity', fontsize=14) ax1.set_ylabel('Number of stations', fontsize=14) ax1.set_title('Histogram of station capacities', fontsize=14) """ Explanation: There are a total of 50 stations, which can be roughly clustered into 4 different groups: Stations around the University, North of 11th Street. UT Austin buildings and student housing is based in this area, so bikes could be used to get around without the expense and hassle of having a car. The downtown stations south of 11th Street, and north of the river. Austin's downtown is a mixture of residential and business buildings, so these stations could used for commute start and end points. There are also many bars on 6th Street, especially towards I-35. The stations east of I-35, including those on East 5th and 11th streets. This area is almost an overspill from the downtown area, with a similar amount of nightlife. There are fewer businesses in this area compared to downtown. 
This area also has a light rail, which connects downtown Austin with North Austin, and up to Cedar Park and Leander. Stations south of Lady Bird Lake. South Congress is good for nightlife, making it a popular destination on weekends and evenings. It also has limited parking, which you don't need to worry about when using a bike. There is also a bike and hike trail that runs along Lady Bird Lake on the North and South banks, which a lot of people enjoy on a bike. Station bike capacity histogram Now we've visualized where each station in the system is, let's show how many combined bikes and docks each of the stations has (their capacity). To do this we need to load in the bikes dataframe, and calculate the maximum of bikes + docks for each of the stations across the data. We can then plot a histogram of station capacity. End of explanation """ # Now plot each station as a circle whose area represents the capacity map = folium.Map(location=(center_lat, center_lon), zoom_start=14, tiles='Stamen Toner', control_scale=True) # Hand-tuned values to make differences between circles larger K = 0.5 P = 2 # Add markers whose radius is proportional to station capacity. # Click on them to pop up their name and capacity for station in stations_cap_df.iterrows(): stat=station[1] folium.CircleMarker([stat['lat'], stat['lon']], radius= K * (stat['capacity'] ** P), # Scale circles to show difference popup='{} - capacity {}'.format(stat['name'], stat['capacity']), fill_color='blue', fill_opacity=0.8 ).add_to(map) map.save('station_capacity.html') map """ Explanation: Looking at the histogram, the most popular station capacity is 13, then 11, and 9. Maybe there's an advantage to having capacity an odd number for stations ! The largest stations have a capacity of 19, and the smallest have a capacity of 9 (approximately half of the largest station). 
Station bike capacity and location Now we have an idea of the bike station capacity, we can visualize this on a map to see if there is any relationship between their capacity and location. The plot below uses their capacity as the radius of each circle marker. For proper quantitative evaluation of the stations, we should take the square root of the radius so the areas of the circles are proportional to the capacity. But not doing this helps distinguish between the narrow range of capacities. To find out the precise capacity of the stations, click on the circle markers. End of explanation """ # Load both the bikes and station dataframes bikes_df = load_bikes() stations_df = load_stations() """ Explanation: The map above shows 4 of the largest stations are along the North edge of Lady Bird Lake. There is also a large station at Congress & 11th Street, at the north of the downtown area. The downtown area is served by a larger number of smaller stations, concentrated relatively close together. East of I-35, the stations tend to be smaller and on major roads running North-to-South. The University area and South-of-the-Lake areas are more dispersed than the downtown and East areas. Station health For more insight into the stations and their characteristics, we can define a metric of station 'health'. When bike stations have no bikes available, customers can't start a journey from that location. If they have no docks available, they can't end a trip at that station. In addition to the station information, we also have station bike and dock availability sampled every 5 minutes. If we count the amount of 5-minute periods a station is full or empty, this can give us a guide to its health. 
End of explanation
"""
# Keep only the 5-minute observations where a station was completely empty
# (no bikes to rent) or completely full (no free docks).
# NOTE(review): a previous comment claimed this covered "cases from 6AM
# onwards", but no time-of-day filter is applied here - TODO confirm intent.
bike_empty_mask = bikes_df['bikes'] == 0
bike_full_mask = bikes_df['docks'] == 0
bike_empty_full_mask = bike_empty_mask | bike_full_mask
bikes_empty_full_df = bikes_df[bike_empty_full_mask].copy()
# Flag why each remaining row qualified, so the flags can be summed later.
bikes_empty_full_df['empty'] = bikes_empty_full_df['bikes'] == 0
bikes_empty_full_df['full'] = bikes_empty_full_df['docks'] == 0
bikes_empty_full_df.head()
"""
Explanation: Empty/Full station health
Now we have a list of all the bike measurements where the station was empty or full, let's aggregate by station_id and count the results. This will tell us for every station, how many 5-minute intervals it was either full or empty. This is a good indicator of which stations are often full or empty, and are unusable. Let's merge the station names so the graph makes sense.
End of explanation
"""
# Now aggregate the remaining rows by station_id, and plot the results.
# Summing the boolean 'empty'/'full' flags counts the 5-minute periods.
bike_health_df = bikes_empty_full_df.copy()
bike_health_df = bike_health_df[['station_id', 'empty', 'full']].groupby('station_id').sum().reset_index()
# Merge in the station metadata so the bars can be labelled by station name.
bike_health_df = pd.merge(bike_health_df, stations_df, on='station_id')
# 'oos' = total out-of-service periods (empty + full); used as the sort key.
bike_health_df['oos'] = bike_health_df['full'] + bike_health_df['empty']
bike_health_df = bike_health_df.sort_values('oos', ascending=False)
ax1 = (bike_health_df[['name', 'empty', 'full']]
       .plot.bar(x='name', y=['empty', 'full'], stacked=True, figsize=(16,8)))
ax1.set_xlabel('Station', fontsize=14)
ax1.set_ylabel('# 5 minute periods empty or full', fontsize=14)
ax1.set_title('Empty/Full station count during April/May 2016', fontdict={'size' : 18, 'weight' : 'bold'})
ax1.tick_params(axis='x', labelsize=13)
ax1.tick_params(axis='y', labelsize=13)
ax1.legend(fontsize=13)
"""
Explanation: Empty/full by station in April and May 2016
Now we have a list of which stations were empty or full in each 5 minute period, we can total these up by station.
If a station is either empty or full, this effectively removes it from the BCycle network temporarily. Let's use a stacked barchart to show the proportion of the time the station was full or empty. Sorting by the amount of 5-minute periods the station was full or empty also helps. End of explanation """ # For this plot, we don't want to mask out the time intervals where stations are neither full nor empty. HEALTHY_RATIO = 0.9 station_ratio_df = bikes_df.copy() station_ratio_df['empty'] = station_ratio_df['bikes'] == 0 station_ratio_df['full'] = station_ratio_df['docks'] == 0 station_ratio_df['neither'] = (station_ratio_df['bikes'] != 0) & (station_ratio_df['docks'] != 0) station_ratio_df = station_ratio_df[['station_id', 'empty', 'full', 'neither']].groupby('station_id').sum().reset_index() station_ratio_df['total'] = station_ratio_df['empty'] + station_ratio_df['full'] + station_ratio_df['neither'] station_ratio_df = pd.merge(station_ratio_df, stations_df, on='station_id') station_ratio_df['full_ratio'] = station_ratio_df['full'] / station_ratio_df['total'] station_ratio_df['empty_ratio'] = station_ratio_df['empty'] / station_ratio_df['total'] station_ratio_df['oos_ratio'] = station_ratio_df['full_ratio'] + station_ratio_df['empty_ratio'] station_ratio_df['in_service_ratio'] = 1 - station_ratio_df['oos_ratio'] station_ratio_df['healthy'] = station_ratio_df['in_service_ratio'] >= HEALTHY_RATIO station_ratio_df['color'] = np.where(station_ratio_df['healthy'], '#348ABD', '#A60628') station_ratio_df = station_ratio_df.sort_values('in_service_ratio', ascending=False) colors = ['b' if ratio >= 0.9 else 'r' for ratio in station_ratio_df['in_service_ratio']] # station_ratio_df.head() ax1 = (station_ratio_df.sort_values('in_service_ratio', ascending=False) .plot.bar(x='name', y='in_service_ratio', figsize=(16,8), legend=None, yticks=np.linspace(0.0, 1.0, 11), color=station_ratio_df['color'])) ax1.set_xlabel('Station', fontsize=14) ax1.set_ylabel('%age of time neither empty 
nor full', fontsize=14) ax1.set_title('In-service percentage by station during April/May 2016', fontdict={'size' : 16, 'weight' : 'bold'}) ax1.axhline(y = HEALTHY_RATIO, color = 'black') ax1.tick_params(axis='x', labelsize=13) ax1.tick_params(axis='y', labelsize=13) """ Explanation: The bar chart shows a large variation between the empty/full durations for each of the stations. The worst offender is the Riverside @ S. Lamar station, which was full or empty for a total of 12 days during the 61-day period of April and May 2016. The proportion of empty vs full 5-minute periods also varies from station to station, shown in the relative height of the green and blue stacked bars. Station empty / full percentage in April and May 2016 The barchart above shows a large variation between the 'Riverside @ S. Lamar' with ~3500 empty or full 5 minute periods, and the 'State Capitol Visitors Garage' with almost no full or empty 5 minute periods. To dig into this further, let's calculate the percentage of the time each station was neither empty nor full. This shows the percentage of the time the station was active in the BCycle system. End of explanation """ mask = station_ratio_df['healthy'] == False unhealthy_stations_df = station_ratio_df[mask].sort_values('oos_ratio', ascending=False) unhealthy_stations_df = pd.merge(unhealthy_stations_df, stations_cap_df[['station_id', 'capacity']], on='station_id') unhealthy_stations_df[['name', 'oos_ratio', 'full_ratio', 'empty_ratio', 'capacity']].reset_index(drop=True).round(2) """ Explanation: The barchart above shows that 12 of the 50 stations are either full or empty 10% of the time. Table of unhealthy stations Let's show the table of stations, with only those available 90% of the time or more included. 
End of explanation """ # Merge in the station capacity also for the popup markers station_ratio_cap_df = pd.merge(station_ratio_df, stations_cap_df[['station_id', 'capacity']], on='station_id') map = folium.Map(location=(center_lat, center_lon), zoom_start=14, tiles='Stamen Toner', control_scale=True) # Hand-tuned parameter to increase circle size K = 1000 C = 5 for station in station_ratio_cap_df.iterrows(): stat = station[1] if stat['healthy']: colour = 'blue' else: colour='red' folium.CircleMarker([stat['lat'], stat['lon']], radius=(stat['oos_ratio'] * K) + C, popup='{}, empty {:.1f}%, full {:.1f}%, capacity {}'.format( stat['name'], stat['empty_ratio']*100, stat['full_ratio']*100, stat['capacity']), fill_color=colour, fill_opacity=0.8 ).add_to(map) map.save('unhealthy_stations.html') map """ Explanation: Stations empty / full based on their location After checking the proportion of time each station has docks and bikes available above, we can visualize these on a map, to see if there is any correlation in their location. In the map below, the circle markers use both colour and size as below: The colour of the circle shows whether the station is available less than 90% of the time. Red stations are in the unhealthy list above, and are empty or full 10% or more of the time. Blue stations are the healthy stations available 90% or more of the time. The size of the circle shows how frequently the station is empty or full. To see details about the stations, you can click on the circle markers. 
End of explanation """ # Plot the empty/full time periods grouped by hour for the top 10 oos_stations_df = bikes_df.copy() oos_stations_df['empty'] = oos_stations_df['bikes'] == 0 oos_stations_df['full'] = oos_stations_df['docks'] == 0 oos_stations_df['neither'] = (oos_stations_df['bikes'] != 0) & (oos_stations_df['docks'] != 0) oos_stations_df['hour'] = oos_stations_df['datetime'].dt.hour oos_stations_df = (oos_stations_df[['station_id', 'hour', 'empty', 'full', 'neither']] .groupby(['station_id', 'hour']).sum().reset_index()) oos_stations_df = oos_stations_df[oos_stations_df['station_id'].isin(unhealthy_stations_df['station_id'])] oos_stations_df['oos'] = oos_stations_df['empty'] + oos_stations_df['full'] oos_stations_df = pd.merge(stations_df, oos_stations_df, on='station_id') oos_stations_df g = sns.factorplot(data=oos_stations_df, x="hour", y="oos", col='name', kind='bar', col_wrap=2, size=3.5, aspect=2.0, color='#348ABD') """ Explanation: The map shows that stations most frequently unavailable can be grouped into 3 clusters: The downtown area around East 6th Street between Congress and I-35 and Red River street. This area has a large concentration of businesses, restaurants and bars. Their capacity is around 11 - 13, and they tend to be full most of the time. South of the river along the Town Lake hiking and cycling trail along with South Congress. The Town Lake trail is a popular cycling route, and there are many restaurants and bars on South Congress. Both Riverside @ S.Lamar and Barton Springs at Riverside have capacities of 11, and are full 15% of the time. Stations along East 5th Street, near the downtown area. This area has a lot of bars and restaurants, people may be using BCycles to get around to other bars. Their capacity is 12 and 9, and they're full 10% or more of the time. These stations would also benefit from extra capacity. The South Congress trio of stations is interesting. 
They are all only a block or so away from each other, but the South Congress and James station has a capacity of 9, is full 12% of the time, and empty 4%. The other two stations on South Congress have a capacity of 13 each, and are full for much less of the time. End of explanation """ bikes_capacity_df = bikes_df.copy() bikes_capacity_df['capacity'] = bikes_capacity_df['bikes'] + bikes_capacity_df['docks'] # Now find the max capacity across all the stations at all 5 minute intervals bikes_capacity_df = bikes_capacity_df.groupby('station_id').max().reset_index() bike_merged_health_df = pd.merge(bike_health_df, bikes_capacity_df[['station_id', 'capacity']], on='station_id', how='inner') plt.rc("legend", fontsize=14) sns.jointplot("capacity", "full", data=bike_merged_health_df, kind="reg", size=8) plt.xlabel('Station capacity', fontsize=14) plt.ylabel('5-minute periods that are full', fontsize=14) plt.tick_params(axis="both", labelsize=14) sns.jointplot("capacity", "empty", data=bike_merged_health_df, kind="reg", size=8) plt.xlabel('Station capacity', fontsize=14) plt.ylabel('5-minute periods that are empty', fontsize=14) plt.tick_params(axis="both", labelsize=14) """ Explanation: Correlation between station empty/full and station capacity Perhaps the reason stations are empty or full a lot is because they have a smaller capacity. Smaller stations would quickly run out of bikes, or become more full. Let's do a hypothesis test, assuming p < 0.05 for statistical significance. Null hypothesis: The capacity of the station is not correlated with the full count. Alternative hypothesis: The capacity of the station is correlated with the full count. The plot below shows a negative correlation between the capacity of a station, and how frequently it becomes full. The probability of a result this extreme is 0.0086 given the null hypothesis, so we reject the null hypothesis. Stations with larger capacities become full less frequently. 
End of explanation """ bikes_df = load_bikes() empty_mask = bikes_df['bikes'] == 0 full_mask = bikes_df['docks'] == 0 empty_full_mask = empty_mask | full_mask bikes_empty_full_df = bikes_df[empty_full_mask].copy() bikes_empty_full_df['day_of_week'] = bikes_empty_full_df['datetime'].dt.dayofweek bikes_empty_full_df['hour'] = bikes_empty_full_df['datetime'].dt.hour fig, axes = plt.subplots(1,2, figsize=(16,8)) bikes_empty_full_df.groupby(['day_of_week']).size().plot.bar(ax=axes[0], legend=None) axes[0].set_xlabel('Day of week (0 = Monday, 1 = Tuesday, .. ,6 = Sunday)') axes[0].set_ylabel('Station empty/full count per 5-minute interval ') axes[0].set_title('Station empty/full by day of week', fontsize=15) axes[0].tick_params(axis='x', labelsize=13) axes[0].tick_params(axis='y', labelsize=13) bikes_empty_full_df.groupby(['hour']).size().plot.bar(ax=axes[1]) axes[1].set_xlabel('Hour of day (24H clock)') axes[1].set_ylabel('Station empty/full count per 5-minute interval ') axes[1].set_title('Station empty/full by hour of day', fontsize=15) axes[1].tick_params(axis='x', labelsize=13) axes[1].tick_params(axis='y', labelsize=13) """ Explanation: Station empty / full by Time To break the station health down further, we can check in which 5 minute periods the station was either full or empty. By grouping the results over various time scales, we can look for periodicity in the data. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/messy-consortium/cmip6/models/emac-2-53-aerchem/toplevel.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'messy-consortium', 'emac-2-53-aerchem', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: MESSY-CONSORTIUM Source ID: EMAC-2-53-AERCHEM Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:10 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Flux Correction 3. Key Properties --&gt; Genealogy 4. Key Properties --&gt; Software Properties 5. Key Properties --&gt; Coupling 6. Key Properties --&gt; Tuning Applied 7. Key Properties --&gt; Conservation --&gt; Heat 8. Key Properties --&gt; Conservation --&gt; Fresh Water 9. Key Properties --&gt; Conservation --&gt; Salt 10. Key Properties --&gt; Conservation --&gt; Momentum 11. Radiative Forcings 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O 15. 
Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect 24. Radiative Forcings --&gt; Aerosols --&gt; Dust 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt 28. Radiative Forcings --&gt; Other --&gt; Land Use 29. Radiative Forcings --&gt; Other --&gt; Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Flux Correction Flux correction properties of the model 2.1. Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of model 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. Components Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Coupling ** 5.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for model 6.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Conservation --&gt; Heat Global heat convervation properties of the model 7.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. 
Key Properties --&gt; Conservation --&gt; Fresh Water Global fresh water convervation properties of the model 8.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh_water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Endoreic Basins Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --&gt; Conservation --&gt; Salt Global salt convervation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --&gt; Conservation --&gt; Momentum Global momentum convervation properties of the model 10.1. Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 Carbon dioxide forcing 12.1. 
Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 Methane forcing 13.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. 
Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --&gt; Aerosols --&gt; Dust Dust forcing 24.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --&gt; Other --&gt; Land Use Land use forcing 28.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --&gt; Other --&gt; Solar Solar forcing 29.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. 
citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
spacy-io/thinc
examples/03_textcat_basic_neural_bow.ipynb
mit
def tokenize_texts(texts):
    """Split each text into a list of token strings using syntok's Tokenizer.

    Parameters
    ----------
    texts : iterable of str
        Raw documents to tokenize.

    Returns
    -------
    list of list of str
        One list of token strings per input text.
    """
    tokenizer = Tokenizer()
    tokenized = []
    for text in texts:
        # token.value is the raw surface form of each syntok token.
        tokenized.append([tok.value for tok in tokenizer.tokenize(text)])
    return tokenized
def load_data():
    """Load the DBPedia Ontology subset and prepare it for the text classifier.

    Returns
    -------
    (train_X, train_y), (dev_X, dev_y), vocab
        train_X / dev_X are lists of 1d integer arrays (token ids, 0 = <unk>),
        train_y / dev_y are float32 one-hot label matrices, and vocab maps
        token string -> integer id (ids start at 1).
    """
    train_data, dev_data = ml_datasets.dbpedia(train_limit=2000, dev_limit=2000)
    train_texts, train_cats = zip(*train_data)
    dev_texts, dev_cats = zip(*dev_data)
    unique_cats = list(numpy.unique(numpy.concatenate((train_cats, dev_cats))))
    nr_class = len(unique_cats)
    print(f"{len(train_data)} training / {len(dev_data)} dev\n{nr_class} classes")

    def one_hot(cats):
        # One row per example, a single 1.0 in the column of its category.
        encoded = numpy.zeros((len(cats), nr_class), dtype="f")
        for row, cat in enumerate(cats):
            encoded[row][unique_cats.index(cat)] = 1
        return encoded

    train_y = one_hot(train_cats)
    dev_y = one_hot(dev_cats)
    train_tokenized = tokenize_texts(train_texts)
    dev_tokenized = tokenize_texts(dev_texts)
    # Generate simple vocab mapping; id 0 is reserved for <unk>,
    # known tokens (from the training set only) are numbered from 1.
    vocab = {}
    next_id = 1
    for tokens in train_tokenized:
        for token in tokens:
            if token not in vocab:
                vocab[token] = next_id
                next_id += 1

    def to_id_arrays(tokenized):
        # Unknown tokens (anything unseen in training) map to id 0.
        return [numpy.array([vocab.get(t, 0) for t in toks]) for toks in tokenized]

    train_X = to_id_arrays(train_tokenized)
    dev_X = to_id_arrays(dev_tokenized)
    return (train_X, train_y), (dev_X, dev_y), vocab
def evaluate_model(model, dev_X, dev_Y, batch_size):
    """Return classification accuracy of `model` on a labelled dev set.

    Parameters
    ----------
    model : thinc Model
        Trained classifier; must support predict() and ops.multibatch().
    dev_X : list of arrays
        Dev inputs (token-id arrays).
    dev_Y : array
        One-hot gold labels aligned with dev_X.
    batch_size : int
        Number of examples per prediction batch.

    Returns
    -------
    float
        Fraction of examples whose argmax prediction matches the gold argmax.
    """
    n_correct = 0.0
    n_total = 0.0
    for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):
        scores = model.predict(X)
        # Compare predicted class (argmax of scores) against gold class.
        for yh, y in zip(scores, Y):
            n_correct += yh.argmax(axis=0) == y.argmax(axis=0)
        n_total += len(Y)
    return float(n_correct / n_total)
brian-rose/ClimateModeling_courseware
Lectures/Lecture21 -- Ice albedo feedback in the EBM.ipynb
mit
# Ensure compatibility with Python 2 and 3 from __future__ import print_function, division """ Explanation: ATM 623: Climate Modeling Brian E. J. Rose, University at Albany Lecture 21: Ice albedo feedback in the EBM Warning: content out of date and not maintained You really should be looking at The Climate Laboratory book by Brian Rose, where all the same content (and more!) is kept up to date. Here you are likely to find broken links and broken code. About these notes: This document uses the interactive Jupyter notebook format. The notes can be accessed in several different ways: The interactive notebooks are hosted on github at https://github.com/brian-rose/ClimateModeling_courseware The latest versions can be viewed as static web pages rendered on nbviewer A complete snapshot of the notes as of May 2017 (end of spring semester) are available on Brian's website. Also here is a legacy version from 2015. Many of these notes make use of the climlab package, available at https://github.com/brian-rose/climlab End of explanation """ %matplotlib inline import numpy as np import matplotlib.pyplot as plt import climlab # for convenience, set up a dictionary with our reference parameters param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.} model1 = climlab.EBM_annual(name='Annual EBM with ice line', num_lat=180, D=0.55, **param ) print( model1) """ Explanation: Contents Interactive snow and ice line in the EBM Polar-amplified warming in the EBM Effects of diffusivity in the annual mean EBM with albedo feedback Diffusive response to a point source of energy <a id='section1'></a> 1. 
def ebm_plot(model, figsize=(8, 12), show=True):
    """Plot the current state of the EBM: temperature, energy budget, heat transport.

    Parameters
    ----------
    model : climlab EBM process
        Must expose lat, Ts, ASR, OLR, net_radiation,
        heat_transport_convergence, lat_bounds and heat_transport.
        # assumes heat_transport is a precomputed diagnostic in PW -- TODO confirm
    figsize : tuple
        (width, height) of the figure in inches.
    show : bool
        If True, call plt.show() before returning.
        NOTE(review): this argument was previously accepted but ignored;
        it is now honored.  Harmless under the inline backend used here.

    Returns
    -------
    matplotlib Figure
        Figure with three stacked panels sharing the latitude axis.
    """
    # Fixed axis limits so successive states are visually comparable.
    templimits = -30, 35
    radlimits = -340, 340
    htlimits = -7, 7
    latlimits = -90, 90
    lat_ticks = np.arange(-90, 90, 30)

    fig = plt.figure(figsize=figsize)

    # Panel 1: surface temperature vs. latitude.
    ax1 = fig.add_subplot(3, 1, 1)
    ax1.plot(model.lat, model.Ts)
    ax1.set_xlim(latlimits)
    ax1.set_ylim(templimits)
    ax1.set_ylabel('Temperature (deg C)')
    ax1.set_xticks(lat_ticks)
    ax1.grid()

    # Panel 2: energy budget terms; "total" is net radiation plus the
    # dynamical (heat-transport convergence) term.
    ax2 = fig.add_subplot(3, 1, 2)
    ax2.plot(model.lat, model.ASR, 'k--', label='SW')
    ax2.plot(model.lat, -model.OLR, 'r--', label='LW')
    ax2.plot(model.lat, model.net_radiation, 'c-', label='net rad')
    ax2.plot(model.lat, model.heat_transport_convergence, 'g--', label='dyn')
    ax2.plot(model.lat,
             model.net_radiation.squeeze() + model.heat_transport_convergence,
             'b-', label='total')
    ax2.set_xlim(latlimits)
    ax2.set_ylim(radlimits)
    ax2.set_ylabel('Energy budget (W m$^{-2}$)')
    ax2.set_xticks(lat_ticks)
    ax2.grid()
    ax2.legend()

    # Panel 3: poleward heat transport, defined on the latitude *bounds*.
    ax3 = fig.add_subplot(3, 1, 3)
    ax3.plot(model.lat_bounds, model.heat_transport)
    ax3.set_xlim(latlimits)
    ax3.set_ylim(htlimits)
    ax3.set_ylabel('Heat transport (PW)')
    ax3.set_xlabel('Latitude')
    ax3.set_xticks(lat_ticks)
    ax3.grid()

    if show:
        plt.show()
    return fig
Because we provided a parameter ai for the icy albedo, our model now contains several sub-processes contained within the process called albedo. Together these implement the step-function formula above. The process called iceline simply looks for grid cells with temperature below $T_f$. End of explanation """ deltaA = 4. model2 = climlab.process_like(model1) model2.subprocess['LW'].A = param['A'] - deltaA model2.integrate_years(5, verbose=False) plt.plot(model1.lat, model1.Ts) plt.plot(model2.lat, model2.Ts) """ Explanation: <a id='section2'></a> 2. Polar-amplified warming in the EBM Add a small radiative forcing The equivalent of doubling CO2 in this model is something like $$ A \rightarrow A - \delta A $$ where $\delta A = 4$ W m$^{-2}$. End of explanation """ model2.icelat """ Explanation: The warming is polar-amplified: more warming at the poles than elsewhere. Why? Also, the current ice line is now: End of explanation """ model3 = climlab.process_like(model1) model3.subprocess['LW'].A = param['A'] - 2*deltaA model3.integrate_years(5, verbose=False) plt.plot(model1.lat, model1.Ts) plt.plot(model2.lat, model2.Ts) plt.plot(model3.lat, model3.Ts) plt.xlim(-90, 90) plt.grid() """ Explanation: There is no ice left! Let's do some more greenhouse warming: End of explanation """ param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.} print( param) """ Explanation: In the ice-free regime, there is no polar-amplified warming. A uniform radiative forcing produces a uniform warming. <a id='section3'></a> 3. Effects of diffusivity in the annual mean EBM with albedo feedback In-class investigation: We will repeat the exercise from Lecture 16, but this time with albedo feedback included in our model. Solve the annual-mean EBM (integrate out to equilibrium) over a range of different diffusivity parameters. 
Make three plots: Global-mean temperature as a function of $D$ Equator-to-pole temperature difference $\Delta T$ as a function of $D$ Poleward heat transport across 35 degrees $\mathcal{H}_{max}$ as a function of $D$ Choose a value of $D$ that gives a reasonable approximation to observations: $\Delta T \approx 45$ ºC Use these parameter values: End of explanation """ Darray = np.arange(0., 2.05, 0.05) model_list = [] Tmean_list = [] deltaT_list = [] Hmax_list = [] for D in Darray: ebm = climlab.EBM_annual(num_lat=360, D=D, **param ) ebm.integrate_years(5., verbose=False) Tmean = ebm.global_mean_temperature() deltaT = np.max(ebm.Ts) - np.min(ebm.Ts) HT = np.squeeze(ebm.heat_transport) ind = np.where(ebm.lat_bounds==35.5)[0] Hmax = HT[ind] model_list.append(ebm) Tmean_list.append(Tmean) deltaT_list.append(deltaT) Hmax_list.append(Hmax) color1 = 'b' color2 = 'r' fig = plt.figure(figsize=(8,6)) ax1 = fig.add_subplot(111) ax1.plot(Darray, deltaT_list, color=color1, label='$\Delta T$') ax1.plot(Darray, Tmean_list, '--', color=color1, label='$\overline{T}$') ax1.set_xlabel('D (W m$^{-2}$ K$^{-1}$)', fontsize=14) ax1.set_xticks(np.arange(Darray[0], Darray[-1], 0.2)) ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=14, color=color1) for tl in ax1.get_yticklabels(): tl.set_color(color1) ax1.legend(loc='center right') ax2 = ax1.twinx() ax2.plot(Darray, Hmax_list, color=color2) ax2.set_ylabel('Poleward heat transport across 35.5$^\circ$ (PW)', fontsize=14, color=color2) for tl in ax2.get_yticklabels(): tl.set_color(color2) ax1.set_title('Effect of diffusivity on EBM with albedo feedback', fontsize=16) ax1.grid() """ Explanation: One possible way to do this: End of explanation """ param_noalb = {'A': 210, 'B': 2, 'D': 0.55, 'Tf': -10.0, 'a0': 0.3, 'a2': 0.078} m1 = climlab.EBM_annual(num_lat=180, **param_noalb) print( m1) m1.integrate_years(5.) 
m2 = climlab.process_like(m1) point_source = climlab.process.energy_budget.ExternalEnergySource(state=m2.state, timestep=m2.timestep) ind = np.where(m2.lat == 45.5) point_source.heating_rate['Ts'][ind] = 100. m2.add_subprocess('point source', point_source) print( m2) m2.integrate_years(5.) plt.plot(m2.lat, m2.Ts - m1.Ts) plt.xlim(-90,90) plt.grid() """ Explanation: <a id='section4'></a> 4. Diffusive response to a point source of energy Let's add a point heat source to the EBM and see what sets the spatial structure of the response. We will add a heat source at about 45º latitude. First, we will calculate the response in a model without albedo feedback. End of explanation """ m3 = climlab.EBM_annual(num_lat=180, **param) m3.integrate_years(5.) m4 = climlab.process_like(m3) point_source = climlab.process.energy_budget.ExternalEnergySource(state=m4.state, timestep=m4.timestep) point_source.heating_rate['Ts'][ind] = 100. m4.add_subprocess('point source', point_source) m4.integrate_years(5.) plt.plot(m4.lat, m4.Ts - m3.Ts) plt.xlim(-90,90) plt.grid() """ Explanation: The warming effects of our point source are felt at all latitudes but the effects decay away from the heat source. Some analysis will show that the length scale of the warming is proportional to $$ \sqrt{\frac{D}{B}} $$ so increases with the diffusivity. Now repeat this calculate with ice albedo feedback End of explanation """ %load_ext version_information %version_information numpy, matplotlib, climlab """ Explanation: Now the maximum warming does not coincide with the heat source at 45º! Our heat source has led to melting of snow and ice, which induces an additional heat source in the high northern latitudes. Heat transport communicates the external warming to the ice cap, and also commuicates the increased shortwave absorption due to ice melt globally! <div class="alert alert-success"> [Back to ATM 623 notebook home](../index.ipynb) </div> Version information End of explanation """
gaoshuming/udacity
tutorials/sentiment-rnn/Sentiment_RNN_Solution.ipynb
mit
import numpy as np import tensorflow as tf with open('../sentiment-network/reviews.txt', 'r') as f: reviews = f.read() with open('../sentiment-network/labels.txt', 'r') as f: labels = f.read() reviews[:2000] """ Explanation: Sentiment Analysis with an RNN In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels. The architecture for this network is shown below. <img src="assets/network_diagram.png" width=400px> Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own. From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function. We don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label. 
End of explanation """ from string import punctuation all_text = ''.join([c for c in reviews if c not in punctuation]) reviews = all_text.split('\n') all_text = ' '.join(reviews) words = all_text.split() all_text[:2000] words[:100] """ Explanation: Data preprocessing The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit. You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combined all the reviews back together into one big string. First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words. End of explanation """ from collections import Counter counts = Counter(words) vocab = sorted(counts, key=counts.get, reverse=True) vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)} reviews_ints = [] for each in reviews: reviews_ints.append([vocab_to_int[word] for word in each.split()]) """ Explanation: Encoding the words The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network. Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0. Also, convert the reviews to integers and store the reviews in a new list called reviews_ints. 
End of explanation """ labels = labels.split('\n') labels = np.array([1 if each == 'positive' else 0 for each in labels]) review_lens = Counter([len(x) for x in reviews_ints]) print("Zero-length reviews: {}".format(review_lens[0])) print("Maximum review length: {}".format(max(review_lens))) """ Explanation: Encoding the labels Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1. Exercise: Convert labels from positive and negative to 1 and 0, respectively. End of explanation """ non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0] len(non_zero_idx) reviews_ints[-1] """ Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters. Exercise: First, remove the review with zero length from the reviews_ints list. End of explanation """ reviews_ints = [reviews_ints[ii] for ii in non_zero_idx] labels = np.array([labels[ii] for ii in non_zero_idx]) """ Explanation: Turns out its the final review that has zero length. But that might not always be the case, so let's make it more general. End of explanation """ seq_len = 200 features = np.zeros((len(reviews_ints), seq_len), dtype=int) for i, row in enumerate(reviews_ints): features[i, -len(row):] = np.array(row)[:seq_len] features[:10,:100] """ Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. 
For reviews longer than 200, use on the first 200 words as the feature vector. This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data. End of explanation """ split_frac = 0.8 split_idx = int(len(features)*0.8) train_x, val_x = features[:split_idx], features[split_idx:] train_y, val_y = labels[:split_idx], labels[split_idx:] test_idx = int(len(val_x)*0.5) val_x, test_x = val_x[:test_idx], val_x[test_idx:] val_y, test_y = val_y[:test_idx], val_y[test_idx:] print("\t\t\tFeature Shapes:") print("Train set: \t\t{}".format(train_x.shape), "\nValidation set: \t{}".format(val_x.shape), "\nTest set: \t\t{}".format(test_x.shape)) """ Explanation: Training, Validation, Test With our data in nice shape, we'll split it into training, validation, and test sets. Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data. End of explanation """ lstm_size = 256 lstm_layers = 1 batch_size = 500 learning_rate = 0.001 """ Explanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like: Feature Shapes: Train set: (20000, 200) Validation set: (2500, 200) Test set: (2500, 200) Build the graph Here, we'll build the graph. First up, defining the hyperparameters. lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc. lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting. batch_size: The number of reviews to feed the network in one training pass. 
Typically this should be set as high as you can go without running out of memory. learning_rate: Learning rate End of explanation """ n_words = len(vocab_to_int) # Create the graph object graph = tf.Graph() # Add nodes to the graph with graph.as_default(): inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs') labels_ = tf.placeholder(tf.int32, [None, None], name='labels') keep_prob = tf.placeholder(tf.float32, name='keep_prob') """ Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability. Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder. End of explanation """ # Size of the embedding vectors (number of units in the embedding layer) embed_size = 300 with graph.as_default(): embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1)) embed = tf.nn.embedding_lookup(embedding, inputs_) """ Explanation: Embedding Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights. Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. 
Then, it'll return another tensor with the embedded vectors. So, if the embedding layer as 200 units, the function will return a tensor with size [batch_size, 200]. End of explanation """ with graph.as_default(): # Your basic LSTM cell lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size) # Add dropout to the cell drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob) # Stack up multiple LSTM layers, for deep learning cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers) # Getting an initial state of all zeros initial_state = cell.zero_state(batch_size, tf.float32) """ Explanation: LSTM cell <img src="assets/network_diagram.png" width=400px> Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph. To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation: tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;) you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like lstm = tf.contrib.rnn.BasicLSTMCell(num_units) to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob) Most of the time, you're network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. 
Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell: cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers) Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list. So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell. Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell. Here is a tutorial on building RNNs that will help you out. End of explanation """ with graph.as_default(): outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state) """ Explanation: RNN forward pass <img src="assets/network_diagram.png" width=400px> Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network. outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state) Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer. Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed. 
End of explanation """ with graph.as_default(): predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid) cost = tf.losses.mean_squared_error(labels_, predictions) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) """ Explanation: Output We only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_. End of explanation """ with graph.as_default(): correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) """ Explanation: Validation accuracy Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass. End of explanation """ def get_batches(x, y, batch_size=100): n_batches = len(x)//batch_size x, y = x[:n_batches*batch_size], y[:n_batches*batch_size] for ii in range(0, len(x), batch_size): yield x[ii:ii+batch_size], y[ii:ii+batch_size] """ Explanation: Batching This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size]. 
def get_batches(x, y, batch_size=100):
    """Yield successive (x, y) batches of exactly `batch_size` items each.

    Trailing items that do not fill a complete batch are discarded, so
    every yielded pair has matching length batch_size.
    """
    n_full = len(x) // batch_size
    limit = n_full * batch_size
    # Trim both arrays so only complete batches remain.
    x, y = x[:limit], y[:limit]
    for start in range(0, limit, batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
kingb12/languagemodelRNN
model_comparisons/noing10_LSTM_v_BOW.ipynb
mit
report_files = ["/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing10_200_512_04drb/encdec_noing10_200_512_04drb.json", "/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing10_bow_200_512_04drb/encdec_noing10_bow_200_512_04drb.json"] log_files = ["/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing10_200_512_04drb/encdec_noing10_200_512_04drb_logs.json", "/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing10_bow_200_512_04drb/encdec_noing10_bow_200_512_04drb_logs.json"] reports = [] logs = [] import json import matplotlib.pyplot as plt import numpy as np for report_file in report_files: with open(report_file) as f: reports.append((report_file.split('/')[-1].split('.json')[0], json.loads(f.read()))) for log_file in log_files: with open(log_file) as f: logs.append((log_file.split('/')[-1].split('.json')[0], json.loads(f.read()))) for report_name, report in reports: print '\n', report_name, '\n' print 'Encoder: \n', report['architecture']['encoder'] print 'Decoder: \n', report['architecture']['decoder'] """ Explanation: Comparing Encoder-Decoders Analysis Model Architecture End of explanation """ %matplotlib inline from IPython.display import HTML, display def display_table(data): display(HTML( u'<table><tr>{}</tr></table>'.format( u'</tr><tr>'.join( u'<td>{}</td>'.format('</td><td>'.join(unicode(_) for _ in row)) for row in data) ) )) def bar_chart(data): n_groups = len(data) train_perps = [d[1] for d in data] valid_perps = [d[2] for d in data] test_perps = [d[3] for d in data] fig, ax = plt.subplots(figsize=(10,8)) index = np.arange(n_groups) bar_width = 0.3 opacity = 0.4 error_config = {'ecolor': '0.3'} train_bars = plt.bar(index, train_perps, bar_width, alpha=opacity, color='b', error_kw=error_config, label='Training Perplexity') valid_bars = plt.bar(index + bar_width, valid_perps, bar_width, alpha=opacity, color='r', error_kw=error_config, label='Valid Perplexity') 
test_bars = plt.bar(index + 2*bar_width, test_perps, bar_width, alpha=opacity, color='g', error_kw=error_config, label='Test Perplexity') plt.xlabel('Model') plt.ylabel('Scores') plt.title('Perplexity by Model and Dataset') plt.xticks(index + bar_width / 3, [d[0] for d in data]) plt.legend() plt.tight_layout() plt.show() data = [['<b>Model</b>', '<b>Train Perplexity</b>', '<b>Valid Perplexity</b>', '<b>Test Perplexity</b>']] for rname, report in reports: data.append([rname, report['train_perplexity'], report['valid_perplexity'], report['test_perplexity']]) display_table(data) bar_chart(data[1:]) """ Explanation: Perplexity on Each Dataset End of explanation """ %matplotlib inline plt.figure(figsize=(10, 8)) for rname, l in logs: for k in l.keys(): plt.plot(l[k][0], l[k][1], label=str(k) + ' ' + rname + ' (train)') plt.plot(l[k][0], l[k][2], label=str(k) + ' ' + rname + ' (valid)') plt.title('Loss v. Epoch') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.show() """ Explanation: Loss vs. Epoch End of explanation """ %matplotlib inline plt.figure(figsize=(10, 8)) for rname, l in logs: for k in l.keys(): plt.plot(l[k][0], l[k][3], label=str(k) + ' ' + rname + ' (train)') plt.plot(l[k][0], l[k][4], label=str(k) + ' ' + rname + ' (valid)') plt.title('Perplexity v. Epoch') plt.xlabel('Epoch') plt.ylabel('Perplexity') plt.legend() plt.show() """ Explanation: Perplexity vs. 
Epoch End of explanation """ def print_sample(sample, best_bleu=None): enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>']) gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>']) print('Input: '+ enc_input + '\n') print('Gend: ' + sample['generated'] + '\n') print('True: ' + gold + '\n') if best_bleu is not None: cbm = ' '.join([w for w in best_bleu['best_match'].split(' ') if w != '<mask>']) print('Closest BLEU Match: ' + cbm + '\n') print('Closest BLEU Score: ' + str(best_bleu['best_score']) + '\n') print('\n') def display_sample(samples, best_bleu=False): for enc_input in samples: data = [] for rname, sample in samples[enc_input]: gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>']) data.append([rname, '<b>Generated: </b>' + sample['generated']]) if best_bleu: cbm = ' '.join([w for w in sample['best_match'].split(' ') if w != '<mask>']) data.append([rname, '<b>Closest BLEU Match: </b>' + cbm + ' (Score: ' + str(sample['best_score']) + ')']) data.insert(0, ['<u><b>' + enc_input + '</b></u>', '<b>True: ' + gold+ '</b>']) display_table(data) def process_samples(samples): # consolidate samples with identical inputs result = {} for rname, t_samples, t_cbms in samples: for i, sample in enumerate(t_samples): enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>']) if t_cbms is not None: sample.update(t_cbms[i]) if enc_input in result: result[enc_input].append((rname, sample)) else: result[enc_input] = [(rname, sample)] return result samples = process_samples([(rname, r['train_samples'], r['best_bleu_matches_train'] if 'best_bleu_matches_train' in r else None) for (rname, r) in reports]) display_sample(samples, best_bleu='best_bleu_matches_train' in reports[1][1]) samples = process_samples([(rname, r['valid_samples'], r['best_bleu_matches_valid'] if 'best_bleu_matches_valid' in r else None) for (rname, r) in reports]) display_sample(samples, 
best_bleu='best_bleu_matches_valid' in reports[1][1]) samples = process_samples([(rname, r['test_samples'], r['best_bleu_matches_test'] if 'best_bleu_matches_test' in r else None) for (rname, r) in reports]) display_sample(samples, best_bleu='best_bleu_matches_test' in reports[1][1]) """ Explanation: Generations End of explanation """ def print_bleu(blue_structs): data= [['<b>Model</b>', '<b>Overall Score</b>','<b>1-gram Score</b>','<b>2-gram Score</b>','<b>3-gram Score</b>','<b>4-gram Score</b>']] for rname, blue_struct in blue_structs: data.append([rname, blue_struct['score'], blue_struct['components']['1'], blue_struct['components']['2'], blue_struct['components']['3'], blue_struct['components']['4']]) display_table(data) # Training Set BLEU Scores print_bleu([(rname, report['train_bleu']) for (rname, report) in reports]) # Validation Set BLEU Scores print_bleu([(rname, report['valid_bleu']) for (rname, report) in reports]) # Test Set BLEU Scores print_bleu([(rname, report['test_bleu']) for (rname, report) in reports]) # All Data BLEU Scores print_bleu([(rname, report['combined_bleu']) for (rname, report) in reports]) """ Explanation: BLEU Analysis End of explanation """ # Training Set BLEU n-pairs Scores print_bleu([(rname, report['n_pairs_bleu_train']) for (rname, report) in reports]) # Validation Set n-pairs BLEU Scores print_bleu([(rname, report['n_pairs_bleu_valid']) for (rname, report) in reports]) # Test Set n-pairs BLEU Scores print_bleu([(rname, report['n_pairs_bleu_test']) for (rname, report) in reports]) # Combined n-pairs BLEU Scores print_bleu([(rname, report['n_pairs_bleu_all']) for (rname, report) in reports]) # Ground Truth n-pairs BLEU Scores print_bleu([(rname, report['n_pairs_bleu_gold']) for (rname, report) in reports]) """ Explanation: N-pairs BLEU Analysis This analysis randomly samples 1000 pairs of generations/ground truths and treats them as translations, giving their BLEU score. 
We can expect very low scores in the ground truth and high scores can expose hyper-common generations End of explanation """ def print_align(reports): data= [['<b>Model</b>', '<b>Average (Train) Generated Score</b>','<b>Average (Valid) Generated Score</b>','<b>Average (Test) Generated Score</b>','<b>Average (All) Generated Score</b>', '<b>Average (Gold) Score</b>']] for rname, report in reports: data.append([rname, report['average_alignment_train'], report['average_alignment_valid'], report['average_alignment_test'], report['average_alignment_all'], report['average_alignment_gold']]) display_table(data) print_align(reports) """ Explanation: Alignment Analysis This analysis computs the average Smith-Waterman alignment score for generations, with the same intuition as N-pairs BLEU, in that we expect low scores in the ground truth and hyper-common generations to raise the scores End of explanation """
publicityreform/findbyimage
notebooks/sketch-rnn/sketch_rnn-updated.ipynb
gpl-3.0
# import the required libraries import numpy as np import time import random import cPickle import codecs import collections import os import math import json import tensorflow as tf from six.moves import xrange # libraries required for visualisation: from IPython.display import SVG, display import svgwrite # conda install -c omnia svgwrite=1.1.6 import PIL from PIL import Image import matplotlib.pyplot as plt # set numpy output to something sensible np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True) # tells which version of tensorflow is being used tf.logging.info("TensorFlow Version: %s", tf.__version__) # import command line tools from magenta.models.sketch_rnn.sketch_rnn_train import * from magenta.models.sketch_rnn.model import * from magenta.models.sketch_rnn.utils import * from magenta.models.sketch_rnn.rnn import * # this function displays vector images, and saves them to .svg # you can invoke the "draw_strokes" function anytime you want to render an image - # specify source, destination filename, and random scale factor (defaults below) def draw_strokes(data, svg_filename = 'sample.svg', factor=0.2): tf.gfile.MakeDirs(os.path.dirname(svg_filename)) min_x, max_x, min_y, max_y = get_bounds(data, factor) dims = (50 + max_x - min_x, 50 + max_y - min_y) dwg = svgwrite.Drawing(svg_filename, size=dims) dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white')) lift_pen = 1 abs_x = 25 - min_x abs_y = 25 - min_y p = "M%s,%s " % (abs_x, abs_y) command = "m" for i in xrange(len(data)): if (lift_pen == 1): command = "m" elif (command != "l"): command = "l" else: command = "" x = float(data[i,0])/factor y = float(data[i,1])/factor lift_pen = data[i, 2] p += command+str(x)+","+str(y)+" " the_color = "black" stroke_width = 1 dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill("none")) dwg.save() display(SVG(dwg.tostring())) # generate a 2D grid of many vector drawings def make_grid_svg(s_list, grid_space=10.0, grid_space_x=16.0): def 
get_start_and_end(x): x = np.array(x) x = x[:, 0:2] x_start = x[0] x_end = x.sum(axis=0) x = x.cumsum(axis=0) x_max = x.max(axis=0) x_min = x.min(axis=0) center_loc = (x_max+x_min)*0.5 return x_start-center_loc, x_end x_pos = 0.0 y_pos = 0.0 result = [[x_pos, y_pos, 1]] for sample in s_list: s = sample[0] grid_loc = sample[1] grid_y = grid_loc[0]*grid_space+grid_space*0.5 grid_x = grid_loc[1]*grid_space_x+grid_space_x*0.5 start_loc, delta_pos = get_start_and_end(s) loc_x = start_loc[0] loc_y = start_loc[1] new_x_pos = grid_x+loc_x new_y_pos = grid_y+loc_y result.append([new_x_pos-x_pos, new_y_pos-y_pos, 0]) result += s.tolist() result[-1][2] = 1 x_pos = new_x_pos+delta_pos[0] y_pos = new_y_pos+delta_pos[1] return np.array(result) # these global variables define the relative path to the pre-trained model # and original dataset data_dir = 'datasets/' # this is where your .npz file lives models_root_dir = 'models/' # this is where trained models live model_dir = 'models/sheep' # change "sheep" to whatever name you like # note! you must create the "datasets/" and "models/" folders # in the sketch_rnn directory before running this. # you will also need to place model files (generated in training process) # into the "models/sheep" (or whatever you've named it) folder - # i.e. a checkpoint file, a model_config.json file, and vector data, index, # and meta files too. # note! model_dir value only handles two levels of recursion (i.e. models/sheep) # subfolders break the next step (i.e. 
you can't do models/sheep/layer_norm) # populates the above global variables throughout the sketch_rnn project files [train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = load_env(data_dir, model_dir) #construct the sketch-rnn model: reset_graph() model = Model(hps_model) eval_model = Model(eval_hps_model, reuse=True) sample_model = Model(sample_hps_model, reuse=True) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # loads the weights from checkpoint into our model load_checkpoint(sess, model_dir) """ Explanation: This notebook uses pre-trained models to explore what can be done with sketch-RNN. it should be placed inside of the "sketch_rnn" directory that is bundled with the magenta git repository, so you'll need to git clone that repository into your working environment if you haven't already. note! for this to work properly, you'll need a dataset formatted as an .npz file, and a pre-trained model (which you can create from .npz using the sketch_rnn_train.ipynb notebook or by running sketch_rnn_train.py on its own) note! you will need to create a datasets/ folder and a models/ folder in the same directory as this notebook. (i.e. sketch_rnn/). place the .npz file into datasets and the model files (checkpoint, model_config.json and various vector files) into a subdirectory of the models/ folder. note that this script can only read two levels into the models/ directory: i.e. models/sheep/model_checkpoint.json will work, but models/sheep/layer_norm/model_checkpoint.json throws an error. go figure! 
End of explanation """ def encode(input_strokes): strokes = to_big_strokes(input_strokes).tolist() strokes.insert(0, [0, 0, 1, 0, 0]) seq_len = [len(input_strokes)] draw_strokes(to_normal_strokes(np.array(strokes))) return sess.run(eval_model.batch_z, feed_dict={eval_model.input_data: [strokes], eval_model.sequence_lengths: seq_len})[0] def decode(z_input=None, temperature=0.1, factor=0.2): z = None if z_input is not None: z = [z_input] sample_strokes, m = sample(sess, sample_model, seq_len=eval_model.hps.max_seq_len, temperature=temperature, z=z) strokes = to_normal_strokes(sample_strokes) return strokes # get a sample drawing from the test set, and render it to .svg example_drawing = test_set.random_sample() draw_strokes(example_drawing) #encode the sample drawing into latent vector z z = encode(example_drawing) # convert z back to drawing, using a "temperature" of 0.1 decoded_drawing = decode(z, temperature=0.1) draw_strokes(decoded_drawing, 'sample3.svg', 0.2) #specify the input source, the filename to save to (in the same directory as this notebook), and the random scale factor (default is 0.2), and """ Explanation: Encode and Decode Sample Drawings First, define two convenience functions to encode a stroke into a latent vector, and decode from latent vector to stroke: End of explanation """ #Create a series of drawings stepping through various "temperatures" from 0.1 to 1.0 stroke_list = [] for i in range(10): stroke_list.append([decode(z, temperature=0.1*i+0.1), [0, i]]) stroke_grid = make_grid_svg(stroke_list) draw_strokes(stroke_grid, 'sample-interp-temp.svg') #if two arguments are given to draw_strokes, they are input vector and output filename """ Explanation: Temperature Interpolation End of explanation """ #z0 is the first sample z0 = z #use the random sample we'd already selected decoded_drawing = decode(z0) #each time it's decoded from the latent vector it's slightly different draw_strokes(decoded_drawing) #uses default file destination of 
'sample.svg' and default random scale factor of 0.2 #z1 is the second sample z1 = encode(test_set.random_sample()) #grab a new random sample and encode it decoded_drawing2 = decode(z1) #then decode it draw_strokes(decoded_drawing2) #the top drawing is the encoded version, the bottom is the decoded version z_list = [] # interpolate spherically between z0 and z1 N = 10 # change this number to add more steps for t in np.linspace(0, 1, N): z_list.append(slerp(z0, z1, t)) # for every latent vector in z_list, sample a vector image reconstructions = [] for i in range(N): reconstructions.append([decode(z_list[i]), [0, i]]) #draw the interpolation steps stroke_grid = make_grid_svg(reconstructions) draw_strokes(stroke_grid, 'sample-interp1.svg') """ Explanation: Latent Space Interpolation Stepping through latent space between two sample images ($z_0$ and $z_1$) End of explanation """ model_dir = '/tmp/sketch_rnn/models/flamingo/lstm_uncond' [hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir) # construct the sketch-rnn model here: reset_graph() model = Model(hps_model) eval_model = Model(eval_hps_model, reuse=True) sample_model = Model(sample_hps_model, reuse=True) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # loads the weights from checkpoint into our model load_checkpoint(sess, model_dir) # randomly unconditionally generate 10 examples N = 10 reconstructions = [] for i in range(N): reconstructions.append([decode(temperature=0.1), [0, i]]) #experiment with different temperature values to get more variety #draw 10 examples stroke_grid = make_grid_svg(reconstructions) draw_strokes(stroke_grid) """ Explanation: Unconditional (Decoder-Only) Generation End of explanation """ #other models available: #model_dir = '/tmp/sketch_rnn/models/owl/lstm' #model_dir = '/tmp/sketch_rnn/models/catbus/lstm' model_dir = '/tmp/sketch_rnn/models/elephantpig/lstm' [hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir) # construct the 
sketch-rnn model here: reset_graph() model = Model(hps_model) eval_model = Model(eval_hps_model, reuse=True) sample_model = Model(sample_hps_model, reuse=True) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # loads the weights from checkpoint into our model load_checkpoint(sess, model_dir) #randomly select a latent vector z_0 z_0 = np.random.randn(eval_model.hps.z_size) _ = decode(z_0) draw_strokes(_) #randomly select a second vector z_1 z_1 = np.random.randn(eval_model.hps.z_size) _ = decode(z_1) draw_strokes(_) z_list = [] # interpolate spherically between z_0 and z_1 N = 10 for t in np.linspace(0, 1, N): z_list.append(slerp(z_0, z_1, t)) # for every latent vector in z_list, sample a vector image reconstructions = [] for i in range(N): reconstructions.append([decode(z_list[i], temperature=0.1), [0, i]]) #draw the interpolation stroke_grid = make_grid_svg(reconstructions) draw_strokes(stroke_grid, 'sample-interp2.svg') """ Explanation: Generate sketches using random IID gaussian latent vectors End of explanation """