code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import nltk
import pandas as pd
from nltk.corpus import stopwords
import string

# BUG FIX: raw string for the Windows path — in a plain literal '\W', '\M',
# '\c', '\p' are invalid escape sequences (DeprecationWarning today, a
# SyntaxError in future Python versions).
data = pd.read_csv(r'D:\Work\ML project\cyberbullying-detection-master\public_data_labeled.csv')
df = pd.DataFrame(data)
df.head()

# Encode the string labels as integers for the classifiers below.
from sklearn.preprocessing import LabelEncoder
lr = LabelEncoder()
df['label'] = lr.fit_transform(df['label'])
df['label'].head()

x = df['full_text'][0]
x

import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# Distribution of tweet lengths and of the two classes.
X = df['full_text'].apply(len)
X.plot.hist(bins=100)
df['label'].plot.hist(bins=100)

# PERF FIX: load the stop-word list once into a set; the original called
# stopwords.words('english') (a corpus read) for every single token.
STOP_WORDS = set(stopwords.words('english'))


def text_preprocessing(mess):
    """Strip punctuation and English stop words; return the token list."""
    nopunc = ''.join(char for char in mess if char not in string.punctuation)
    return [word for word in nopunc.split() if word.lower() not in STOP_WORDS]


df['full_text'].head().apply(text_preprocessing)

# Bag-of-words counts, then TF-IDF weighting.
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(analyzer=text_preprocessing).fit(df['full_text'])
count_vect
tweet_bow = count_vect.transform(df['full_text'])
tweet_bow

from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer = TfidfTransformer().fit(tweet_bow)
tfidf_transformer
tweet_tfidf = tfidf_transformer.transform(tweet_bow)
tweet_tfidf

# Hold out 10% of the data for evaluation.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(tweet_tfidf, df['label'], test_size=0.1)

# Baseline model: multinomial naive Bayes.
from sklearn.naive_bayes import MultinomialNB
mnb = MultinomialNB()
mnb.fit(x_train, y_train)
prediction = mnb.predict(x_test)
prediction[:50]

from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, prediction)
acc

from sklearn.metrics import classification_report
print(classification_report(y_test, prediction))

from sklearn.metrics import precision_score, recall_score, confusion_matrix, classification_report, accuracy_score, f1_score
# Shared evaluation helper so the six models below are scored identically:
# print the classification report, bar-plot the four headline metrics, and
# return the accuracy for the final side-by-side comparison.
def _evaluate(y_true, y_pred):
    print(classification_report(y_true, y_pred))
    scores = [accuracy_score(y_true, y_pred), f1_score(y_true, y_pred),
              recall_score(y_true, y_pred), precision_score(y_true, y_pred)]
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1])
    ax.bar(['accuracy_score', 'f1_score', 'recall_score', 'precision_score'], scores)
    plt.show()
    return scores[0]


# Multinomial naive Bayes (fitted in the previous cell).
acc = _evaluate(y_test, prediction)

from sklearn.linear_model import LogisticRegression
lm = LogisticRegression()
lm.fit(x_train, y_train)
prediction2 = lm.predict(x_test)
acc2 = _evaluate(y_test, prediction2)

from sklearn.svm import SVC
clf = SVC()
clf.fit(x_train, y_train)
prediction3 = clf.predict(x_test)
acc3 = _evaluate(y_test, prediction3)

from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(x_train, y_train)
prediction4 = neigh.predict(x_test)
acc4 = _evaluate(y_test, prediction4)

# BUG FIX: the original used DecisionTreeRegressor for this classification
# task; a regressor can emit non-integer predictions, which are not valid
# class labels for accuracy_score / classification_report.
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(x_train, y_train)
prediction5 = dtree.predict(x_test)
acc5 = _evaluate(y_test, prediction5)

# BUG FIX: the original fitted AdaBoost on the *test* split
# (adb.fit(x_test, y_test)) — textbook data leakage.
from sklearn.ensemble import AdaBoostClassifier
adb = AdaBoostClassifier(n_estimators=100, random_state=0)
adb.fit(x_train, y_train)
prediction6 = adb.predict(x_test)
# (also fixes the original's mislabeled bar chart, where the recall bar was
# titled 'Decision Tree')
acc6 = _evaluate(y_test, prediction6)

# Final accuracy comparison; the LSTM score is quoted from a separate run.
y_mod = ['MNB', 'Logistic Regression', 'SVM', 'KNN', 'Decision Tree', 'Ada Boost', 'LSTM']
x_acc = [acc, acc2, acc3, acc4, acc5, acc6, 0.94]
x_acc
fig = plt.figure()
# BUG FIX: add_axes([5, 0, 1, 1]) placed the axes entirely off the canvas.
ax = fig.add_axes([0, 0, 1, 1])
ax.bar(y_mod, x_acc)
plt.show()
Implementation_Code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# This notebook is aimed to find better parameters for the evaluation model.
# For details on the construction and decision making process take a look at
# the ML-Pipeline notebook.

# # Importing the libraries needed and the dataframes

# +
import numpy as np
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
import sqlite3
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
import statistics


def load_data():
    """Load the messages database and return (X, y).

    X holds the raw message text; y holds every label column except the
    message itself and the genre one-hot columns (this first scenario
    ignores the genre feature).
    """
    conn = sqlite3.connect('Messages.db')
    try:
        df = pd.read_sql('SELECT * FROM Messages', conn)
    finally:
        # close the connection even if the read fails
        conn.close()
    df = df.drop(columns=['index'])
    X = df['message'].values
    y = df[df.columns.difference(['message', 'genre_news', 'genre_social'])]
    return X, y


X, y = load_data()

# +
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()


def tokenize(text):
    """Normalise case, tokenize, drop stop words and lemmatize *text*."""
    # normalize case and keep letters only
    text = re.sub(r"[^a-zA-Z]", " ", text.lower())
    tokens = word_tokenize(text)
    # lemmatize as noun (default) while dropping stop words, then refine
    # the lemma as verb, adjective and adverb
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    for pos in ('v', 'a', 'r'):
        tokens = [lemmatizer.lemmatize(word, pos=pos) for word in tokens]
    return tokens
# -


def model_pipeline():
    '''Pipeline for a model with the default parameters'''
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(estimator=RandomForestClassifier())),
    ])
    # best values found so far (see the notes at the bottom of the notebook)
    parameters = {
        'clf__estimator__n_estimators': [150],
        'clf__estimator__max_depth': [220],
        'clf__estimator__random_state': [42],
    }
    cv = GridSearchCV(pipeline, param_grid=parameters, verbose=1, n_jobs=3)
    return cv


# +
random_state = 42
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=random_state)
model = model_pipeline()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)


# +
def _per_label_scores(metric, metric_name, y_test, y_pred, verbose=True):
    """Apply *metric* to every label column of (y_test, y_pred).

    Prints each label's score (when *verbose*) plus the mean / max / min,
    and returns the list of per-label scores.  This replaces four
    near-identical hand-written loops; note the originals read the global
    `y` for column names — y_test.columns is used instead.
    """
    scores = []
    for i in range(y_test.shape[1]):
        score = metric(y_test.iloc[:, i], y_pred[:, i])
        scores.append(score)
        if verbose:
            print('The', metric_name, 'for', y_test.columns[i], ' was: ', "%.2f" % score, '.')
    print('Mean', metric_name + ':', "%.2f" % statistics.mean(scores),
          'Max', metric_name + ':', "%.2f" % max(scores),
          'Min', metric_name + ':', "%.2f" % min(scores))
    return scores


def AUC_ROC(y_test, y_pred):
    """Return the per-label area under the ROC curve; prints mean/max/min."""
    return _per_label_scores(roc_auc_score, 'AUC', y_test, y_pred, verbose=False)


def f1_score_labels(y_test, y_pred):
    """Return the per-label f1 score; prints each one and the summary.

    BUG FIX: the docstring always promised to return the list, but the
    original returned None.
    """
    return _per_label_scores(f1_score, 'f1 score', y_test, y_pred)


def precision_score_labels(y_test, y_pred):
    """Return the per-label precision score; prints each one and the summary."""
    return _per_label_scores(precision_score, 'precision score', y_test, y_pred)


def accuracy_score_labels(y_test, y_pred):
    """Return the per-label accuracy score; prints each one and the summary."""
    return _per_label_scores(accuracy_score, 'accuracy score', y_test, y_pred)


def recall_score_labels(y_test, y_pred):
    """Return the per-label recall score; prints each one and the summary."""
    return _per_label_scores(recall_score, 'recall score', y_test, y_pred)
# -

AUC_ROC(y_test, y_pred)
f1_score_labels(y_test, y_pred)
# f1_score with 0 values indicates that the labels are imbalanced; a grid
# search will give further insight into this behaviour.
precision_score_labels(y_test, y_pred)
accuracy_score_labels(y_test, y_pred)
recall_score_labels(y_test, y_pred)

# +
# Confusion matrices and classification reports for a sample of labels.
cm_y1 = confusion_matrix(y_test.iloc[:, 0], y_pred[:, 0])
cm_y2 = confusion_matrix(y_test.iloc[:, 1], y_pred[:, 1])
cr_y0 = classification_report(y_test.iloc[:, 0], y_pred[:, 0])
cr_y9 = classification_report(y_test.iloc[:, 9], y_pred[:, 9])
cr_y13 = classification_report(y_test.iloc[:, 13], y_pred[:, 13])
cr_y19 = classification_report(y_test.iloc[:, 19], y_pred[:, 19])
cr_y21 = classification_report(y_test.iloc[:, 21], y_pred[:, 21])
cr_y26 = classification_report(y_test.iloc[:, 26], y_pred[:, 26])
cr_y28 = classification_report(y_test.iloc[:, 28], y_pred[:, 28])
cr_y30 = classification_report(y_test.iloc[:, 30], y_pred[:, 30])
cr_y31 = classification_report(y_test.iloc[:, 31], y_pred[:, 31])
# -

print(cr_y31)
model.best_params_

# So far the parameters tested max_depth: 5, 6, 50, 100, 150, 200, 220, 250:
# 220 was the best one. And the estimators: 50, 100, 150: 150 proved to be
# best. However from the already implemented model there is no significant
# difference so there is no reason to overwrite for now.
Preparation/Data_Preparation-Model-Tuning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Ranking Measures Check
#
# ## Data Format:
# ### Golden results — standardized to 2 columns (query, document):
# ```python
#   query document
# 0 q1    doc2
# 1 q1    doc3
# 2 q2    doc6
# ```
# ### Search results — either flat:
# ```python
#   query document rank
# 0 q1    doc1     1
# ...
# ```
# or nested:
# ```python
# [{'question': 'q1', 'answers': ['doc1', 'doc2', 'doc3']},
#  {'question': 'q2', 'answers': ['doc4', 'doc5', 'doc6']}]
# ```

import pandas as pd
import numpy as np

# +
golden = pd.DataFrame.from_dict([
    {'query': 'q1', 'document': 'doc2'},
    {'query': 'q1', 'document': 'doc3'},
    {'query': 'q2', 'document': 'doc6'},
])

results = pd.DataFrame.from_dict([
    {'query': 'q1', 'document': 'doc1', 'rank': 1},
    {'query': 'q1', 'document': 'doc2', 'rank': 2},
    {'query': 'q1', 'document': 'doc3', 'rank': 3},
    {'query': 'q2', 'document': 'doc4', 'rank': 1},
    {'query': 'q2', 'document': 'doc5', 'rank': 2},
    {'query': 'q2', 'document': 'doc6', 'rank': 3},
])
# -

golden
results


# +
def to_nested(results_df):
    """Convert a flat (query, document, rank) frame to the nested format."""
    res = []
    for q in results_df['query'].unique():
        answers = list(results_df[results_df['query'] == q].sort_values('rank')['document'].values)
        res.append({'question': q, 'answers': answers})
    return res


to_nested(results)


# +
def from_nested(results):
    """Convert the nested format back to a flat (query, document, rank) frame."""
    res = []
    for item in results:
        for i, answer in enumerate(item['answers']):
            res.append([item['question'], answer, i + 1])
    return pd.DataFrame(res, columns=['query', 'document', 'rank'])


(from_nested(to_nested(results)) == results).all().all()
# -

# ## MRR
# https://stackoverflow.com/questions/49733119/calculate-mean-reciprocal-rank

# +
MAX_RANK = 100000


def _hits(golden, res, max_rank):
    """Left-join golden docs onto the results; unranked docs get *max_rank*."""
    return pd.merge(golden, res, on=["query", "document"], how="left").fillna(max_rank)


def mrr(golden, results, max_rank=MAX_RANK):
    """Mean reciprocal rank of the best-ranked relevant document per query.

    *results* may be a flat DataFrame or the nested list-of-dicts format.
    Relevant documents missing from the results are treated as being at
    rank *max_rank*.
    """
    if isinstance(results, pd.DataFrame):
        res = results
    elif isinstance(results, list):
        res = from_nested(results)
    else:
        raise NotImplementedError()
    hits = _hits(golden, res, max_rank)
    return (1 / hits.groupby('query')['rank'].min()).mean()
# -

mrr(golden, results)
mrr(golden, to_nested(results))


# ### Sanity Check
# https://gist.github.com/bwhite/3726239
def mean_reciprocal_rank(rs):
    """Score is reciprocal of the rank of the first relevant item

    First element is 'rank 1'. Relevance is binary (nonzero is relevant).
    Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
    >>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.61111111111111105
    >>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    >>> mean_reciprocal_rank(rs)
    0.5
    >>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.75

    Args:
        rs: Iterator of relevance scores (list or numpy) in rank order
            (first element is the first item)
    Returns:
        Mean reciprocal rank
    """
    rs = (np.asarray(r).nonzero()[0] for r in rs)
    return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])


# +
# BUG FIX: the original referenced `hits`, a variable that only existed
# inside mrr() — a NameError when the notebook runs top to bottom.  Rebuild
# the merged table at notebook scope via the shared helper instead.
hits = _hits(golden, results, MAX_RANK)
grouped = hits.groupby('query')['rank'].min().values
rs = np.zeros((len(grouped), int(grouped.max())))
for i, j in enumerate(grouped):
    rs[i, int(j) - 1] = 1
rs
# -

mean_reciprocal_rank(rs)

rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
mean_reciprocal_rank(rs)
rs = [[0, 0, 0, 1], [1, 1, 0], [1, 1, 0]]
mean_reciprocal_rank(rs)
notebooks/Ranking measures_check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
import joeyPlot
# %matplotlib inline

# +
# Model parameters (dimensionless units): two harmonic wells displaced by
# dx and offset by dE, plus the width s of the ground-state gaussian.
x = np.linspace(-3, 5)

m = 1
hbar = 1
dx = 1
dE = 1
k_g = 1.0
k_e = .9
s = 1.0
# -

# Harmonic potentials for the ground and excited electronic states.
v_g = k_g * x ** 2
v_e = k_e * (x - dx) ** 2 + dE

# Ground-state gaussian wavefunction with a normalised prefactor.
psi_g = np.exp(-x ** 2 / (2 * s ** 2)) / np.pi ** .25

# Overlay both wells with the wavefunction drawn at each state's energy.
plt.figure()
for curve in (v_g, v_e, psi_g, psi_g + dE):
    plt.plot(x, curve)
plt.ylim(0, 5)
NonCondonPaper/code/.ipynb_checkpoints/figure_gen_helper-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# ## Introduction
# This is not the best way to classify digits — it is a commented starting
# point for beginners.  Suggestions for improvement are appreciated!
# -

import pandas as pd
import matplotlib.pyplot as plt, matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn import svm
# %matplotlib inline

# ## Loading the data
# Read train.csv, separate images from labels, and hold out 20% for
# testing.  Only 5000 images are used to keep the runtime down — increase
# or decrease this to see how it affects training.
labeled_images = pd.read_csv('../input/train.csv')
images = labeled_images.iloc[0:5000, 1:]
labels = labeled_images.iloc[0:5000, :1]
train_images, test_images, train_labels, test_labels = train_test_split(
    images, labels, train_size=0.8, random_state=0)

# ## Viewing an Image
# Each row is a flat 784-pixel vector; reshape to 28x28 to display it.
i = 100  # change i to view other images, up to 5,000
# BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0; use to_numpy().
img = train_images.iloc[i].to_numpy()
img = img.reshape((28, 28))
plt.imshow(img, cmap='gray')
plt.title(train_labels.iloc[i, 0])

# ## Examining the Pixel Values
# The images are gray-scale (0-255), not black and white (0/1).
plt.hist(train_images.iloc[i])

# ## Training our model
# Fit an SVC on the training split and score it on the held-out split.
# (The original called help(svm.SVC()) here, which only dumped pages of
# documentation into the output.)
clf = svm.SVC()  # try different parameters to see how the results change
clf.fit(train_images, train_labels.values.ravel())
clf.score(test_images, test_labels)

# ## How did our model do?
# Around 0.10 accuracy — no better than random guessing.  One simple
# improvement: binarize the images to true black and white.
test_images[test_images > 0] = 1
train_images[train_images > 0] = 1

img = train_images.iloc[i].to_numpy().reshape((28, 28))
plt.imshow(img, cmap='binary')
plt.title(train_labels.iloc[i])

plt.hist(train_images.iloc[i])

# ## Retraining our model
# Same procedure on the binarized data — a large improvement.
clf = svm.SVC()
clf.fit(train_images, train_labels.values.ravel())
clf.score(test_images, test_labels)

# Small grid search over C.  NOTE(review): the gamma grid is built but not
# searched — presumably left out to keep the search quick; confirm.
import numpy as np
from sklearn.model_selection import GridSearchCV
C1 = np.arange(0.05, 2.05, 0.05)
gamma = np.arange(0.001, 0.101, 0.001)
grid = GridSearchCV(estimator=clf, param_grid=dict(C=C1))
grid.fit(train_images, train_labels.values.ravel())
print(grid)

# ## Labelling the test data
# Predict the unlabeled test.csv rows and write a submission file.
test_data = pd.read_csv('../input/test.csv')
test_data[test_data > 0] = 1
# BUG FIX: [0:5001] selected 5001 rows; the first 5000 were intended.
results = clf.predict(test_data[0:5000])
results

df = pd.DataFrame(results)
df.index.name = 'ImageId'
df.index += 1
df.columns = ['Label']
df.to_csv('results.csv', header=True)

classifier = [svm.SVC()]
name = ["SVC Result"]

from sklearn.metrics import classification_report

for name1, clf in zip(name, classifier):
    # BUG FIX: ravel the labels here too — the original passed the label
    # DataFrame directly, triggering a DataConversionWarning and differing
    # from every earlier fit.
    clf.fit(train_images, train_labels.values.ravel())
    score = clf.score(test_images, test_labels)
    print("{:12} {}".format(name1, "-" * 15))
    print(classification_report(test_labels, clf.predict(test_images), digits=3))
Digit_Recognizer_Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from my_loc import oracle as oc
import pandas as pd

emp = oc.connect('emp')
dept = oc.connect('dept')
# -

# ### Q171. Add a dname column to the emp DataFrame and fill it with each
# employee's department name.
emp['dname'] = pd.merge(emp, dept, on='DEPTNO', how='left')['DNAME']
emp

# ### Ex75. Getting the character at a given position in a string
# Use indexing; indexes start at 0 and may be negative (count from the end).
txt1 = 'A tale that was not right'
txt1[5]

# ### Q172. Print the letter g near the end of txt1.
txt1[-3]

# ### Q173. Print each name and the first letter of the name.
# +
import csv

# NOTE(review): the original left the file handle open; a context manager
# closes it deterministically.
with open('k:/Itwill/2. Python/자료/emp2.csv', 'r') as file:
    emp_csv = csv.reader(file)
    for i in emp_csv:
        print(i[1], i[1][0])
# -

for i in emp['ENAME']:
    print(i, i[0])

emp['ENAME'].apply(lambda x: (x, x[0]))  # apply(function), lambda in:out

# #### Notes
# ```python
# 1. PandasSeries.apply(func): each Series (column) value is passed to func.
# 2. A lambda expression packs a function into a single line — an anonymous
#    function (cf. Oracle).  Example:
#    def hap(x, y):
#        return x + y
#    print(hap(10, 20))
#    # the same as a lambda:
#    print((lambda x, y: x + y)(10, 20))
# ```
print((lambda x, y: x + y)(10, 20))

# ### Q174. Print the names of the employees whose name starts with S
# #### 1. without pandas / 2. with pandas
with open('k:/Itwill/2. Python/자료/emp2.csv', 'r') as file:
    emp_csv = csv.reader(file)
    for i in emp_csv:
        if i[1][0] == 'S':
            print(i[1])

for i in emp['ENAME']:
    if i[0] == 'S':
        print(i)

# ### Ex76. Getting a range of characters from a string (slicing)
print(txt1[2:6])
print(txt1[2:])
print(txt1[:6])

# ### Q175. Implement the following SQL in Python:
# ```sql
# select ename, substr(ename, 1, 3)
# from emp;
# ```
print(emp['ENAME'] + ' ' + emp['ENAME'].apply(lambda x: x[:3]))

# ### Ex77. Odd-position characters only — use the slicing step.
txt = 'aAbBcCdDeEfFgGhHiIjJkK'
txt[::2]

# ### Ex78. Reversing a string — slice the whole string with step -1.
txt[::-1]

# ### Q176. Reverse txt, keeping only the odd-position characters.
txt[::-2]

# ### Q177. Reverse txt, keeping only the even-position characters.
txt[-2::-2]

# ### Ex79. Concatenating two strings (+)
# ### Ex80. Repeating a string to build a new one (*)
print('여러분 ~ ' * 3)

# ### Ex81. Checking whether a character occurs in a string (in)
msg = 'abcdefghijklmnop'
if 'b' in msg:
    print('exist')
else:
    print('not exist')

# ### Ex82. Checking whether a substring occurs in a string (in)
msg = 'I am a boy'
if 'boy' in msg:
    print('exist')
else:
    print('not exist')

# ### Ex83. Python comprehensions
# list / set / dictionary comprehensions make Python code shorter and
# simpler.

# Example: collect only the numbers from list a.
a = [1, 2, 'A', False, 3]  # expected: [1, 2, 3]
ls = []
for i in a:
    # type(i) == int (not isinstance) deliberately excludes the bool False
    if type(i) == int:
        ls.append(i)
ls

a = [1, 2, 'A', False, 3]
[i for i in a if type(i) == int]

# ```python
# syntax: expression for item in iterable if condition
# ```

# ### Q178. Use a list comprehension to produce
# [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
[i for i in range(20) if i % 2 == 1]

# ### Q179. Remove the duplicates: a=[1,1,2,2,3,3,3,4] -> [1,2,3,4]
a = [1, 1, 2, 2, 3, 3, 3, 4]
list(set(a))  # without a comprehension

# ### Q180. Same result with a set comprehension.
a = [1, 1, 2, 2, 3, 3, 3, 4]
list({i for i in a})

# #### 3. dict comprehension:
# ```python
# {key: value for item in iterable if condition}
# ```
id_name = {1: 'Kim', 2: 'Seojun', 3: 'Adrian', 4: 'Jeans'}
a = {key: val for key, val in id_name.items()}
a

# #### Adding derived columns with pandas (multi-character values)
# ### Q181. Add an INCOME column: 'h' (high earner) when SAL >= 3000,
# 'p' when SAL < 3000.
# +
import numpy as np

emp = oc.connect('emp')
# BUG FIX: the original wrote 'g' for the low-income branch although the
# exercise asks for 'p'.
emp['INCOME'] = np.where(emp['SAL'] >= 3000, 'h', 'p')
emp
# -

# np.select example: several conditions, first match wins.
# +
df = pd.DataFrame({'Type': list('ABBC'), 'Set': list('ZZXY')})
conditions = [
    (df['Set'] == 'Z') & (df['Type'] == 'A'),
    (df['Set'] == 'Z') & (df['Type'] == 'B'),
    (df['Type'] == 'B')]
choices = ['yellow', 'blue', 'purple']
df['color'] = np.select(conditions, choices, default='black')
print(df)
# -

# ### Q182. Add INCOME2:
# ```
# SAL >= 3000 -> A
# SAL >= 2000 -> B
# SAL >= 1000 -> C
# otherwise   -> D
# ```
condition = [
    (emp['SAL'] >= 3000),
    (emp['SAL'] >= 2000) & (emp['SAL'] < 3000),
    (emp['SAL'] >= 1000) & (emp['SAL'] < 2000),
]
grade = ['A', 'B', 'C']
emp['INCOME2'] = np.select(condition, grade, default='D')
emp

# ### Ex84. Checking whether a string is alphabetic (isalpha)
# isalpha() is True only when every character is a natural-language letter
# (no digits or symbols).
txt1 = 'Warcraft three'
txt2 = '안녕'
txt3 = '3PO'
print(txt1.isalpha())
print(txt2.isalpha())
print(txt3.isalpha())

# ### Q183. How many alphabetic characters are there in the Frozen script?
with open('k:/Itwill/2. Python/자료/winter.txt') as txt_file:
    lines = txt_file.readlines()
sum([k.isalpha() for i in lines for k in i])

# Normal-distribution visualisation example.
# +
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt

# draw the normal density
x = np.arange(120, 180, 0.001)
y = norm.pdf(x, 148.5, 7.8)
plt.plot(x, y, color="red")

# shade the upper tail (p-value)
x1 = np.arange(160, 180, 0.001)
y1 = norm.pdf(x1, 148.5, 7.8)
plt.fill_between(x1, y1, interpolate=True, color='Orange', alpha=0.5)

# mark a test statistic that lands in the rejection region
plt.scatter(165, 0, c='red', alpha=0.9)
plt.show()
# -

# ### (Last problem of the day — algorithm problem 31) Extend today's lunch
# problem so that it also shows the visualisation above.
# +
def child_tall(n):
    """Plot where height *n* falls on N(148.5, 7.8**2) and report (in
    Korean, as the exercise requires) whether it lies inside the 95%
    confidence interval."""
    import numpy as np
    from scipy.stats import norm
    import matplotlib.pyplot as plt
    x = np.arange(120, 180, 0.001)
    y = norm.pdf(x, 148.5, 7.8)  # mean 148.5, standard deviation 7.8
    a1 = 148.5 + 1.96 * 7.8
    a2 = 148.5 - 1.96 * 7.8
    plt.plot(x, y, color='red')
    # shade the 95% acceptance region ...
    plt.fill_between(x, y, where=(x > a2) & (x < a1),
                     interpolate=True, color='green', alpha=0.5)
    # ... and the upper rejection tail
    x1 = np.arange(148.5 + 7.8 * 1.96, 180, 0.001)
    y1 = norm.pdf(x1, 148.5, 7.8)
    plt.fill_between(x1, y1, where=(x1 < a2) | (x1 > a1),
                     color='orange', alpha=0.5)
    plt.scatter(n, 0, c='r', alpha=0.8)
    if abs(148.5 - n) < 1.96 * 7.8:
        return '신뢰구간 95% 안에 존재합니다.'
    else:
        return '신뢰구간 95% 안에 존재하지 않습니다.'


print(child_tall(178))
# -
ex/08-2. Python ch.75 ~ Q. ~183 (200521).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Data Preparation for question 2
# 2. What are the factors most strongly associated with restaurants being closed?
#    How accurately can you predict when a restaurant in the dataset will be closed?

# +
# imports
import numpy as np
import pandas as pd
import ast
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from datetime import datetime
# -

# ### Get initial dataframes
business = pd.read_csv("data/business.csv")
print(len(business))
print(business.columns)
business.head(2)

# Sanity checks: missing review counts, empty category lists.
business[business['review_count'].isnull()]
for b in business['categories']:
    if b == "[]":
        print(b)

# Sparsity of the optional attribute columns (missing counts vs total).
print(len(business['Smoking']))
sum(business['Smoking'].isnull())
print(len(business['AgesAllowed']))
sum(business['AgesAllowed'].isnull())
print(len(business['Alcohol']))
sum(business['Alcohol'].isnull())

reviews = pd.read_csv("data/reviews.csv")
print(len(reviews))
reviews.head(2)

# ### Business dataset
# <br>
# Used the following variables from the initial business dataset:
# <ul>
# <li> latitude
# <li> longitude
# <li> review_count
# <li> stars
# <ul>
# These variables can be taken directly from the large dataset without any transformation.
# <br><br>
# The categories variable could also provide some insight into whether a restaurant is
# closed or not, e.g. some restaurant categories may be less popular, leading to closure.
# Each restaurant has a list of categories associated with it, for example
# ['Italian', 'French', 'Restaurants']. It is therefore necessary to transform this
# categories variable, e.g. with indicator variables (x_cat = 1 if the category is in the
# restaurant's category list and 0 otherwise). The problem is that there are 323 unique
# categories, which would result in 323 variables.
# To reduce the complexity of the model it is necessary to select only the most predictive
# variables. This was done by finding the top 10 most common categories for both open and
# closed restaurants, giving 2 lists, and then keeping the categories that are not in both
# lists. These categories are popular for open restaurants but not for closed ones, or
# vice versa — in other words, they help distinguish between the two types of restaurants.
# <br>
# Italian and Japanese are in the top-10 list for closed but not open; Pizza and
# Coffee & Tea are in open but not closed.
# <br><br>
# These variables had no missing values. The rest of the variables from the business
# dataset were quite sparse: 1614/7148 of Alcohol was missing, 6477/7148 of the Smoking
# variable was missing, and only 7 restaurants gave information on AgesAllowed. Hence the
# rest of the business variables were ignored. More potential explanatory variables were
# obtained from the review dataset.

question2_1 = business[['business_id','name','latitude','longitude','review_count','stars', 'is_open']]
question2_1.head()

# ### Categories
list(business['categories'])[0]

# Flatten all per-restaurant category lists (stored as string reprs, hence
# ast.literal_eval) into one list and count the unique categories.
categories = list(business['categories'])
categories_list = [item for sublist in categories for item in ast.literal_eval(sublist)]
print(len(set(categories_list)))
# [1:21] deliberately skips index 0 — presumably the ubiquitous 'Restaurants'
# label; TODO confirm.
popular_categories = pd.Series(categories_list).value_counts()[1:21]
popular_categories

# Top-10 categories among CLOSED restaurants (is_open == 0).
categories = list(business[business['is_open'] == 0]['categories'])
categories_list = [item for sublist in categories for item in ast.literal_eval(sublist)]
popular_categories_1 = pd.Series(categories_list).value_counts()[1:11].index
popular_categories_1

# Top-10 categories among OPEN restaurants (is_open == 1).
categories = list(business[business['is_open'] == 1]['categories'])
categories_list = [item for sublist in categories for item in ast.literal_eval(sublist)]
popular_categories_2 = pd.Series(categories_list).value_counts()[1:11].index
popular_categories_2

# Symmetric difference of the two top-10 lists: categories predictive of
# one class but not the other.
uncommon = []
for p in popular_categories_1:
    if p not in popular_categories_2:
        print(p)
        uncommon.append(p)
print()
for p in popular_categories_2:
    if p not in popular_categories_1:
        print(p)
        uncommon.append(p)
uncommon

def getIndicator(name):
    """Create an indicator variable for whether each restaurant has a
    specific category.

    name: category name to look for.
    Returns a list of 0/1 flags aligned with the rows of `business`.

    NOTE(review): `name in c` is a substring test on the raw string repr of
    the category list, so e.g. 'Pizza' would also match 'Pizzas' — confirm
    this is acceptable for the categories used here.
    """
    indicator = []
    for c in list(business['categories']):
        i = 0
        if name in c:
            i = 1
        indicator.append(i)
    return(indicator)

for c in uncommon:
    question2_1[c] = getIndicator(c)
question2_1.head()

# ### Hours
hours = business[['business_id','hours.Friday', 'hours.Monday', 'hours.Saturday', 'hours.Sunday', 'hours.Thursday', 'hours.Tuesday', 'hours.Wednesday']]
hours.head()
# Too sparse — not used further.

# ## review dataset
# <ul>
# <li> Number of 5, 4, 3, 2 and 1 star ratings
# <li> Average length of text reviews
# <li> Year of last review
# </ul>

# ### Stars
stars = reviews[["business_id","stars"]]
print(len(stars))
stars.head()

# One indicator column per star value, then summed per business to get
# per-business counts of each rating.
for star_value in range(1,6):
    star_index = [1 if x==star_value else 0 for x in stars['stars']]
    stars['stars_{}'.format(star_value)] = star_index
stars_count = stars.groupby("business_id",as_index=False).sum()
stars_count.drop(labels=['stars'],axis=1,inplace=True)
stars_count.head()

# Cross-check: total 1-star reviews equals the sum of the derived column.
print(len(stars[stars['stars']==1]),sum(stars_count['stars_1']))

question2_1 = pd.merge(question2_1,stars_count)
question2_1.head()

# # Text
# +
# Average review-text length per business.
text = reviews[["business_id","text"]]
text['length'] = [len(x) for x in text['text']]
text= text.groupby(["business_id"],as_index=False).mean()
question2_1 = pd.merge(question2_1,text)
question2_1.head()
# -

# ### End date
# +
# Parse each review date into (year, epoch seconds) so that the per-business
# max gives the date of the most recent review.
# NOTE(review): strftime('%s') is a platform-specific extension (not in the
# C standard); this works on Linux/macOS but not on Windows — confirm the
# target platform, or compute the epoch with datetime.timestamp() instead.
date = reviews[["business_id","date"]]
last_date = []
year = []
for d in date['date']:
    date_split = d.split("-")
    epoch = int(datetime(int(date_split[0]), int(date_split[1]), int(date_split[2]), 0, 0, 0).strftime('%s'))
    year.append(int(date_split[0]))
    last_date.append(epoch)
date['last_year'] = year
date['last_date'] = last_date
date.head()
# -

date = date.groupby(["business_id"],as_index=False).max()
date.drop(labels=['date'],axis=1,inplace=True)
question2_1 = pd.merge(question2_1,date)
question2_1.head()

# Persist the assembled feature table for the modelling notebook.
question2_1.to_csv("data/question2_1.csv",index=False)

# ### NLP approach
src/data_preparation_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.1 64-bit (windows store)
#     language: python
#     name: python3
# ---

# Bridge Defense game client: talks UDP/JSON to four "river" servers,
# authenticates, fetches cannon placements, then plays 273 turns, shooting
# the weakest reachable ship each turn. State is kept in module globals
# (SAG, ESTADO, VALID_CANNONS, BOATS_*, turno, p, ...), so the functions
# below are tightly coupled to the main script at the bottom.
# +
import socket
import json
import traceback
import os

# Course-issued authentication token (student ids + HMACs), sent with every
# request as the "auth" field.
VALIDADOR = '2019057195:12142021:713956ac462e3cc9736660c44697d3b6d91ffbe60ee2911114890582c2435f72+2019056890:12142021:d4ae8849f0d2f8ccf163b12a3fcf45908b61c8f2239f3806fe6292f3428a37ce+3933183216bb827a7cdca38687047dd1a191952b1afb1a01bcfd92ade29ae224'

def entrada():
    """Prompt the user for "SERVER PORT SAG", re-prompting until the server
    name and port are valid, and return [server, port, VALIDADOR].

    NOTE(review): relies on the module-level VALID_SERVER / VALID_PORTS,
    which are defined further down but exist by the time this is called.
    """
    while True:
        entrada = input('Digite sua entrada: ')
        dados = entrada.split(' ')
        try:
            s = dados[0]
            p = int(dados[1])
            n = VALIDADOR
            break
        except:
            print('Entrada invรกlida! Tente no seguinte formato: "SERVER PORTA SAG"')
    # Keep asking until the server matches the expected hostname.
    while s != VALID_SERVER:
        entrada = input('Server invรกlidado! Digite novamente: ')
        dados = entrada.split(' ')
        s = dados[0]
        p = int(dados[1])
        n = VALIDADOR
    # Keep asking until the port is one of the four game ports.
    while p not in VALID_PORTS:
        entrada = input('Porta invรกlidada! Digite novamente: ')
        dados = entrada.split(' ')
        s = dados[0]
        p = int(dados[1])
        n = VALIDADOR
    info =[s,p,n]
    return info

def authreq(rio, adress):
    """Send an "authreq" message on socket `rio` and wait for the reply.

    Returns 0 on success (appending the reply type to ESTADO, and quitting
    the whole game if a 'gameover' was ever seen), or 1 on any error so the
    caller can retry.
    """
    # send
    entrada = json.dumps({"type": "authreq", "auth": SAG}).encode('utf-8')
    rio.sendto(entrada, adress)
    # receive
    try:
        saida = rio.recv(bufferSize, 0)
        resposta = json.loads(saida.decode('utf-8'))
        ESTADO.append(resposta['type'])
        if 'gameover' in ESTADO:
            quit()
        return 0
    except:
        print("ERRO NA AUTENTICACAO")
        return 1

def getcannons():
    """Request the cannon layout from river 1 and store it in VALID_CANNONS.

    NOTE(review): retries recursively on timeout — unbounded recursion if
    the server never answers.
    """
    # send
    entrada = json.dumps({"type": "getcannons", "auth": SAG}).encode('utf-8')
    rio1.sendto(entrada, RIVER[0])
    # receive
    try:
        saida = rio1.recv(bufferSize, 0)
        resposta = json.loads(saida.decode('utf-8'))
        print(resposta)
        VALID_CANNONS.append(resposta['cannons'])
        ESTADO.append(resposta['type'])
    except:
        getcannons()

def getturn(turn, rio, adress):
    """Ask server `adress` (via socket `rio`) for the state of turn `turn`.
    Fire-and-forget: the reply is collected later by state()."""
    entrada = json.dumps({"type": "getturn", "auth": SAG, "turn": turn}).encode('utf-8')
    rio.sendto(entrada, adress)

def state(rio, boat, lista_resposta):
    """Receive the per-bridge replies for the current turn on socket `rio`.

    boat           : list of 8 per-bridge ship lists to fill in place.
    lista_resposta : list of 8 raw reply dicts to fill in place.
    Uses the global `turno`; on the final turn (272) only one message (the
    gameover) is expected. Returns 0 on success, 1 on a receive error.
    """
    try:
        alcance = 8  # one message per bridge
        if(turno == 272):
            alcance = 1  # last turn: single gameover message
        for t in range(0,alcance):
            saida = rio.recv(bufferSize, 0)
            resposta = json.loads(saida.decode('utf-8'))
            if(turno != 272):
                # Record every ship of this bridge, both per-bridge and globally.
                tam = len(resposta['ships'])
                for c in range(0,tam):
                    ponte = resposta['bridge']
                    boat[ponte-1].append(resposta['ships'][c])
                    ALL_BOATS.append(resposta['ships'][c])
                ESTADO.append(resposta['type'])
            else:
                print('TERMINOOOOOOOOOOOOOOOOOOOOOOU')
            if(turno == 272):
                # Replicate the single gameover reply into all 8 slots.
                for II in range(0,8):
                    lista_resposta[II] = resposta
            else:
                lista_resposta[t] = resposta
        return 0
    except:
        traceback.print_exc()
        print('erro de transmissรฃo no state')
        return 1

def shot(rio, adress, cannon, id):
    """Fire cannon `cannon` at ship `id` via server `adress`.

    NOTE(review): `id` shadows the builtin; retries recursively on timeout
    (unbounded if the server never answers).
    """
    entrada = json.dumps({"type": "shot", "auth": SAG, "cannon": cannon, "id": id}).encode('utf-8')
    rio.sendto(entrada, adress)
    # receive acknowledgement
    try:
        saida = rio.recv(bufferSize, 0)
        resposta = json.loads(saida.decode('utf-8'))
        print(resposta)
        #ESTADO.append(resposta['type'])
    except:
        #print('Erro de transmissรฃo')
        shot(rio, adress, cannon, id)

def quit():
    """Notify river 1 that we are quitting, close all four sockets and exit
    the process. NOTE(review): shadows the builtin quit()."""
    # send
    entrada = json.dumps({"type": "quit", "auth": SAG}).encode('utf-8')
    rio1.sendto(entrada, RIVER[0])
    rio1.close()
    rio2.close()
    rio3.close()
    rio4.close()
    print('Jogo finalizado com sucesso!')
    exit()

# example invocation: bd20212.dcc023.2advanced.dev 52221

def weakest(_listaBarcos):
    """Pick the target from a list of ship dicts: a frigate immediately if
    present, otherwise prefer a destroyer over anything else."""
    betterBoat = _listaBarcos[0]
    for i in _listaBarcos:
        if i['hull'] == 'frigate':
            return i
        elif i['hull'] == 'destroyer':
            betterBoat = i
        elif betterBoat['hull'] != 'destroyer':
            betterBoat = i
    return betterBoat

def refresh(id, lista):
    """Record locally that ship `id` in `lista` took one hit, removing it
    from both `lista` and ALL_BOATS once its hull's hit limit is reached
    (frigate 1, destroyer 2, battleship 3).

    NOTE(review): indentation reconstructed from a collapsed source — the
    hit bookkeeping is assumed to run after the lookup loop. Raises
    NameError if `id` is not found in `lista`.
    """
    for c in lista:
        if c['id'] == id:
            barco = c
    barco['hits'] += 1
    if barco['hull'] == 'frigate':
        if barco['hits'] == 1:
            lista.remove(barco)
            ALL_BOATS.remove(barco)
    elif barco['hull'] == 'destroyer':
        if barco['hits'] == 2:
            lista.remove(barco)
            ALL_BOATS.remove(barco)
    elif barco['hull'] == 'battleship':
        if barco['hits'] == 3:
            lista.remove(barco)
            ALL_BOATS.remove(barco)

def analisaRio(lista1, lista2):
    """Compare the remaining hit points of bridge `p` (global!) in two
    rivers. Returns 0 if both are empty, 1 if river 1 has more remaining
    life, else 2."""
    vidas1 = vidas2 = 0
    if len(lista1[p-1]) == len(lista2[p-1]) == 0:
        return 0
    else:
        for barco in lista1[p-1]:
            if barco['hull'] == 'frigate':
                vidas1 += 1
            if barco['hull'] == 'destroyer':
                vidas1 += (2 - barco['hits'])
            if barco['hull'] == 'battleship':
                vidas1 += (3 - barco['hits'])
        for barco in lista2[p-1]:
            if barco['hull'] == 'frigate':
                vidas2 += 1
            if barco['hull'] == 'destroyer':
                vidas2 += (2 - barco['hits'])
            if barco['hull'] == 'battleship':
                vidas2 += (3 - barco['hits'])
        if vidas1 > vidas2:
            return 1
        else:
            return 2

def display_canhoes():
    """Mark each cannon position with 'C' in the CANHOES board and print it.
    Cannon coords are (bridge, row); the board is CANHOES[row][bridge-1]."""
    # cannons:
    print('CANHOES:')
    for c in VALID_CANNONS[0]:
        ponte = c[0]
        fila = c[1]
        CANHOES[fila][ponte-1] = 'C'
    for fila in CANHOES:
        for c in fila:
            print(c.rjust(4), end='')
        print('\n')

def display_navio():
    """Render each river's per-bridge ship list as a string of remaining hit
    points (one digit per ship) into BOATS_1D..BOATS_4D. The four loops are
    identical except for the river they read/write."""
    P = 0
    for ponte in BOATS_1:
        vida = []
        for barco in ponte:
            if barco['hull'] == 'frigate':
                vida.append(str(1 - barco['hits']))
            if barco['hull'] == 'destroyer':
                vida.append(str(2 - barco['hits']))
            if barco['hull'] == 'battleship':
                vida.append(str(3 - barco['hits']))
        status = ''.join(vida)
        BOATS_1D[P] = status
        P+=1
    P = 0
    for ponte in BOATS_2:
        vida = []
        for barco in ponte:
            if barco['hull'] == 'frigate':
                vida.append(str(1 - barco['hits']))
            if barco['hull'] == 'destroyer':
                vida.append(str(2 - barco['hits']))
            if barco['hull'] == 'battleship':
                vida.append(str(3 - barco['hits']))
        status = ''.join(vida)
        BOATS_2D[P] = status
        P+=1
    P = 0
    for ponte in BOATS_3:
        vida = []
        for barco in ponte:
            if barco['hull'] == 'frigate':
                vida.append(str(1 - barco['hits']))
            if barco['hull'] == 'destroyer':
                vida.append(str(2 - barco['hits']))
            if barco['hull'] == 'battleship':
                vida.append(str(3 - barco['hits']))
        status = ''.join(vida)
        BOATS_3D[P] = status
        P+=1
    P = 0
    for ponte in BOATS_4:
        vida = []
        for barco in ponte:
            if barco['hull'] == 'frigate':
                vida.append(str(1 - barco['hits']))
            if barco['hull'] == 'destroyer':
                vida.append(str(2 - barco['hits']))
            if barco['hull'] == 'battleship':
                vida.append(str(3 - barco['hits']))
        status = ''.join(vida)
        BOATS_4D[P] = status
        P+=1

def display():
    """Print the board: cannon rows interleaved with the four rivers' ship
    status strings, then clear the console (Windows 'cls')."""
    print(f'TURNO: {turno}')
    for c in CANHOES[0]:
        print(c.rjust(4), end='')
    print('\n')
    for c in BOATS_1D:
        print(c.ljust(4), end='')
    print('\n')
    for c in CANHOES[1]:
        print(c.rjust(4), end='')
    print('\n')
    for c in BOATS_2D:
        print(c.ljust(4), end='')
    print('\n')
    for c in CANHOES[2]:
        print(c.rjust(4), end='')
    print('\n')
    for c in BOATS_3D:
        print(c.ljust(4), end='')
    print('\n')
    for c in CANHOES[3]:
        print(c.rjust(4), end='')
    print('\n')
    for c in BOATS_4D:
        print(c.ljust(4), end='')
    print('\n')
    for c in CANHOES[4]:
        print(c.rjust(4), end='')
    print('\n')
    os.system('cls')

# Server specification and keyboard input
bufferSize = 4096
RIVER = [0,0,0,0]                                 # (host, port) per river, filled below
VALID_PORTS = [52221,52222,52223,52224]           # one UDP port per river
VALID_SERVER = 'bd20212.dcc023.2advanced.dev'
VALID_CANNONS = []                                # cannon list from getcannons()
ESTADO = []                                       # log of reply types seen
RIOS = [1,2,3,4]
timeout = 0.5                                     # seconds per recv
rio1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rio2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rio3 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rio4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Ask the user for each of the four server/port pairs and connect the
# matching socket.
auth = list()
for c in range(0,4):
    dados = entrada()
    SERVER = dados[0]
    PORT = int(dados[1])
    SAG = dados[2]
    if PORT == VALID_PORTS[0]:
        RIVER[0]=(SERVER, PORT)
        rio1.connect(RIVER[0])
        rio1.settimeout(timeout)
    elif PORT == VALID_PORTS[1]:
        RIVER[1]=(SERVER, PORT)
        rio2.connect(RIVER[1])
        rio2.settimeout(timeout)
    elif PORT == VALID_PORTS[2]:
        RIVER[2]=(SERVER, PORT)
        rio3.connect(RIVER[2])
        rio3.settimeout(timeout)
    elif PORT == VALID_PORTS[3]:
        RIVER[3]=(SERVER, PORT)
        rio4.connect(RIVER[3])
        rio4.settimeout(timeout)

# Authenticate against each river, retrying each until it succeeds.
auth.append(authreq(rio1, RIVER[0]))
while auth[0] == 1:
    auth[0] = (authreq(rio1, RIVER[0]))
auth.append(authreq(rio2, RIVER[1]))
while auth[1] == 1:
    auth[1] = (authreq(rio2, RIVER[1]))
auth.append(authreq(rio3, RIVER[2]))
while auth[2] == 1:
    auth[2] = (authreq(rio3, RIVER[2]))
auth.append(authreq(rio4, RIVER[3]))
while auth[3] == 1:
    auth[3] = (authreq(rio4, RIVER[3]))

if auth == [0,0,0,0]:
    getcannons()
    # 5 rows x 8 bridges of the cannon board, '-' = empty.
    CANHOES = [['-','-','-','-','-','-','-','-'],
               ['-','-','-','-','-','-','-','-'],
               ['-','-','-','-','-','-','-','-'],
               ['-','-','-','-','-','-','-','-'],
               ['-','-','-','-','-','-','-','-']]
    display_canhoes()
    turno = 0
    # Main game loop: one iteration per turn (turn 272 is the gameover turn).
    while turno < 273:
        # Per-turn state, rebuilt from scratch each turn.
        RESPOSTA_RIO1 = ['', '', '', '', '', '', '', '']
        RESPOSTA_RIO2 = ['', '', '', '', '', '', '', '']
        RESPOSTA_RIO3 = ['', '', '', '', '', '', '', '']
        RESPOSTA_RIO4 = ['', '', '', '', '', '', '', '']
        ALL_BOATS = []
        BOATS_1 = [[],[],[],[],[],[],[],[]]
        BOATS_2 = [[],[],[],[],[],[],[],[]]
        BOATS_3 = [[],[],[],[],[],[],[],[]]
        BOATS_4 = [[],[],[],[],[],[],[],[]]
        BOATS_1D = ['', '', '', '', '', '', '', '']
        BOATS_2D = ['', '', '', '', '', '', '', '']
        BOATS_3D = ['', '', '', '', '', '', '', '']
        BOATS_4D = ['', '', '', '', '', '', '', '']
        error = [0,0,0,0]
        print(f'=============== TURNO {turno} ===============')
        # Request this turn's state from all four rivers...
        getturn(turno, rio1, RIVER[0])
        getturn(turno, rio2, RIVER[1])
        getturn(turno, rio3, RIVER[2])
        getturn(turno, rio4, RIVER[3])
        # ...and collect the replies, re-requesting on receive errors.
        error[0] = state(rio1, BOATS_1, RESPOSTA_RIO1)
        while error[0] == 1:
            getturn(turno, rio1, RIVER[0])
            error[0] = state(rio1, BOATS_1, RESPOSTA_RIO1)
        print('\nRIO 1:')
        for c in RESPOSTA_RIO1:
            print(c)
        error[1] = state(rio2, BOATS_2, RESPOSTA_RIO2)
        while error[1] == 1:
            getturn(turno, rio2, RIVER[1])
            error[1] = state(rio2, BOATS_2, RESPOSTA_RIO2)
        print('\nRIO 2:')
        for c in RESPOSTA_RIO2:
            print(c)
        error[2] = state(rio3, BOATS_3, RESPOSTA_RIO3)
        while error[2] == 1:
            getturn(turno, rio3, RIVER[2])
            error[2] = state(rio3, BOATS_3, RESPOSTA_RIO3)
        print('\nRIO 3:')
        for c in RESPOSTA_RIO3:
            print(c)
        error[3] = state(rio4, BOATS_4, RESPOSTA_RIO4)
        while error[3] == 1:
            getturn(turno, rio4, RIVER[3])
            error[3] = state(rio4, BOATS_4, RESPOSTA_RIO4)
        print('\nRIO 4:')
        for c in RESPOSTA_RIO4:
            print(c)
        # Shooting phase: for each cannon x = (bridge, row), pick a target
        # in the river(s) the row can reach and fire. `p` is read as a
        # global by analisaRio().
        # NOTE(review): when an inner branch picks river 1/... without
        # setting `r` (escolha == 1 cases for rows 1-3), `r` keeps its value
        # from the previous cannon — confirm this is the intended aiming.
        try:
            for x in VALID_CANNONS[0]:
                tiro = True
                p = x[0]
                if x[1] == 0:
                    # Row 0 only reaches river 1.
                    r = 1
                    if len(BOATS_1[p-1]) > 0:
                        identificador = weakest(BOATS_1[p-1])['id']
                        refresh(identificador, BOATS_1[p-1])
                    else:
                        tiro = False
                if x[1] == 1:
                    # Row 1 sits between rivers 1 and 2.
                    escolha = analisaRio(BOATS_1, BOATS_2)
                    if escolha == 1:
                        identificador = weakest(BOATS_1[p-1])['id']
                        refresh(identificador, BOATS_1[p-1])
                    elif escolha == 2:
                        r = 2
                        identificador = weakest(BOATS_2[p-1])['id']
                        refresh(identificador, BOATS_2[p-1])
                    else:
                        tiro = False
                if x[1] == 2:
                    # Row 2 sits between rivers 2 and 3.
                    escolha = analisaRio(BOATS_2, BOATS_3)
                    if escolha == 1:
                        identificador = weakest(BOATS_2[p-1])['id']
                        refresh(identificador, BOATS_2[p-1])
                    elif escolha == 2:
                        r = 2
                        identificador = weakest(BOATS_3[p-1])['id']
                        refresh(identificador, BOATS_3[p-1])
                    else:
                        tiro = False
                if x[1] == 3:
                    # Row 3 sits between rivers 3 and 4.
                    escolha = analisaRio(BOATS_3, BOATS_4)
                    if escolha == 1:
                        identificador = weakest(BOATS_3[p-1])['id']
                        refresh(identificador, BOATS_3[p-1])
                    elif escolha == 2:
                        r = 2
                        identificador = weakest(BOATS_4[p-1])['id']
                        refresh(identificador, BOATS_4[p-1])
                    else:
                        tiro = False
                if x[1] == 4:
                    # Row 4 only reaches river 4.
                    r = 4
                    if len(BOATS_4[p-1]) > 0:
                        identificador = weakest(BOATS_4[p-1])['id']
                        refresh(identificador, BOATS_4[p-1])
                    else:
                        tiro = False
                if tiro == True:
                    print('\nRESPOSTA TIRO:')
                    if r == 1:
                        shot(rio1, RIVER[0], x, identificador)
                    if r == 2:
                        shot(rio2, RIVER[1], x, identificador)
                    if r == 3:
                        shot(rio3, RIVER[2], x, identificador)
                    if r == 4:
                        shot(rio4, RIVER[3], x, identificador)
        except:
            print('erro ao atirar')
        display_navio()
        display()
        input('proximo round')  # wait for the user before the next turn
        turno += 1
else:
    print('FALHA NA AUTENTICAร‡รƒO')
#print(f'ESTADO {ESTADO}')
print('Gameover!')
quit()
TP1 - Bridge Defense/teste.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import igl
import meshplot as mp

# +
# Utility function to generate a tet grid
# n is a 3-tuple with the number of cells in every direction
# mmin/mmax are the grid bounding box corners
def tet_grid(n, mmin, mmax):
    """Build a regular grid of nx*ny*nz vertices over the box [mmin, mmax],
    split every hex cell into 6 tets, and return (V, T) where V is the
    (nx*ny*nz, 3) vertex array and T the (num_cells*6, 4) tet index array."""
    nx, ny, nz = n[0], n[1], n[2]

    # Per-axis vertex spacing.
    extent = mmax - mmin
    dx = extent[0] / (nx - 1)
    dy = extent[1] / (ny - 1)
    dz = extent[2] / (nz - 1)

    T = np.zeros(((nx - 1) * (ny - 1) * (nz - 1) * 6, 4), dtype=np.int64)
    V = np.zeros((nx * ny * nz, 3))

    # (i, j, k) -> flat vertex index, filled in lexicographic order.
    vert_id = -np.ones((nx, ny, nz), dtype=np.int64)
    count = 0
    for i in range(nx):
        for j in range(ny):
            for k in range(nz):
                vert_id[i, j, k] = count
                V[count] = (i * dx, j * dy, k * dz)
                count += 1
    assert count == V.shape[0]

    # The 6 tets of a hex cell, expressed as indices into its 8 corners.
    cell_tets = np.array([
        [0, 1, 3, 4],
        [5, 2, 6, 7],
        [4, 1, 5, 3],
        [4, 3, 7, 5],
        [3, 1, 5, 2],
        [2, 3, 7, 5]])

    count = 0
    for i in range(nx - 1):
        for j in range(ny - 1):
            for k in range(nz - 1):
                # The 8 grid coordinates of this cell's corners.
                corners = [
                    (i,     j,     k),
                    (i + 1, j,     k),
                    (i + 1, j + 1, k),
                    (i,     j + 1, k),
                    (i,     j,     k + 1),
                    (i + 1, j,     k + 1),
                    (i + 1, j + 1, k + 1),
                    (i,     j + 1, k + 1),
                ]
                for tet in cell_tets:
                    T[count] = [vert_id[corners[c]] for c in tet]
                    count += 1
    assert count == T.shape[0]

    # Shift the grid from the origin to the requested bounding box.
    V += mmin
    return V, T
# -

# # Reading point cloud
pi, v = igl.read_triangle_mesh("data/cat.off")
pi /= 10
ni = igl.per_vertex_normals(pi, v)
mp.plot(pi, shading={"point_size": 8})

# # MLS function

# +
# Parameters
bbox_min = np.array([-1., -1., -1.])
bbox_max = np.array([1., 1., 1.])
bbox_diag = np.linalg.norm(bbox_max - bbox_min)
n = 10
# -

# +
# Generate an n x n x n tet grid, slightly enlarged beyond the bbox.
x, T = tet_grid((n, n, n), bbox_min - 0.05 * bbox_diag, bbox_max + 0.05 * bbox_diag)

# Implicit function of a unit sphere (signed distance to its surface).
center = np.array([0., 0., 0.])
radius = 1
fx = np.linalg.norm(x - center, axis=1) - radius
# -

# +
# Threshold fx to visualize inside (-1) vs outside (+1).
ind = np.zeros_like(fx)
ind[fx >= 0] = 1
ind[fx < 0] = -1
mp.plot(x, c=ind, shading={"point_size": 0.1,"width": 800, "height": 800})
# -

# # Marching to extract surface

# +
# Marching tets to extract the zero level set as a triangle mesh.
sv, sf, _, _ = igl.marching_tets(x, T, fx, 0)
mp.plot(sv, sf, shading={"wireframe": True})
# -
Assignment_2/Assigment2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import libraries import pandas as pd import numpy as np filename = 'data/car_financing.xlsx' df = pd.read_excel(filename) # ## Basic Operations # # 1. Assure that you have correctly loaded the data. # 2. See what kind of data you have. # 3. Check the validity of your data. # ### Viewing the first and last 5 rows # Select top N number of records (default = 5) df.head() # Select bottom N number of records (default = 5) df.tail() # ### Check the column data types # Check the column data types using the dtypes attribute # For example, you can wrongly assume the values in one of your columns is # a int64 instead of a string. df.dtypes # Use the shape attribute to get the number of rows and columns in your dataframe df.shape # The info method gives the column datatypes + number of non-null values # Notice that we seem to have 408 non-null values for all but the Interest Paid column. df.info()
Introduction/hewei-material/5.Handling_Spreadsheets/2.BasicOperations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scratch/dev notebook for the torchwisdom library: trains SimpleCNN on an
# ImageFolder dataset via ConvTrainer, then pokes at parameter grouping and
# fastai-style model flattening helpers.

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2

# +
from torchwisdom.metrics.callback import *
from torchwisdom.optim.callback import *
from torchwisdom.metrics import *
from torchwisdom.callback import *
from torchwisdom.statemgr.callback import StateManagerCallback
from torchwisdom.utils.data.collector import *
from torch.optim.optimizer import Optimizer
from torchwisdom.trainer import *
from torchwisdom.vision.trainer.trainer import *
from torchwisdom.vision.models import mobilenetv2
from torchwisdom.vision.trainer.trainer import *
from torchvision.datasets.mnist import MNIST
import torchvision.transforms as transforms
from torchwisdom.utils.data.collector import DatasetCollector
import torch.optim as optim
import torch.nn as nn
from torchwisdom.vision.trainer.trainer import ConvTrainer
from torchwisdom.vision.models.simplecnn import SimpleCNN
from torchvision import datasets
from torchwisdom.metrics.callback import *

# 32x32 grayscale tensors for SimpleCNN(in_chan=1).
tmft = transforms.Compose([
    transforms.Resize((32,32)),
    transforms.Grayscale(),
    transforms.ToTensor()
])

# Earlier MNIST experiment, kept for reference:
# train_path = '/data/MNIST/train'
# valid_path = '/data/MNIST/valid'
# trainset = MNIST(train_path, train=True, transform=tmft, download=False)
# validset = MNIST(valid_path, train=False, transform=tmft, download=False)

# AT&T faces: 40 classes, hence num_classes=40 below.
train_path = '/data/att_faces_new/train'
valid_path = '/data/att_faces_new/valid'
trainset = datasets.ImageFolder(train_path, transform=tmft)
validset = datasets.ImageFolder(valid_path, transform=tmft)

data = DatasetCollector(trainset, validset)
model = SimpleCNN(in_chan=1, num_classes=40)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam              # optimizer class, instantiated by the trainer
scheduler = StepLRCallback(step_size=30)
metric_acc = AccuracyCallback()
trainer = ConvTrainer(data=data, model=model, optimizer=optimizer,
                      criterion=criterion, metrics=[metric_acc], callbacks=[scheduler])
trainer.fit(20, lr=0.01)
# -

# Same setup again, this time exercising per-group learning rates.
data = DatasetCollector(trainset, validset)
model = SimpleCNN(in_chan=1, num_classes=40)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam
scheduler = StepLRCallback(step_size=30)
metric_acc = AccuracyCallback()
trainer = ConvTrainer(data=data, model=model, optimizer=optimizer,
                      criterion=criterion, metrics=[metric_acc], callbacks=[scheduler])
trainer.fit(5, lr=[0.001, 0.005, 0.01])

trainer.fit(5, lr=[0.0001, 0.005, 0.01])

# +
# Quick check of elapsed-time formatting with timedelta.
import time
a = time.time()
time.sleep(10)
b = time.time()
c = b - a
from datetime import timedelta
str(timedelta(seconds=c))
# -

# Inspect recorded metrics and a sample argmax shape.
trainer.state_manager.get_state('metric').data

torch.Tensor(64,40).argmax(dim=1).shape

# Wrap a flat parameter list into optimizer-style param groups.
params = model.parameters()
param_groups = list(params)
if not isinstance(param_groups[0], dict):
    param_groups = [{'params': param_groups}]

model

# +
# Helpers (fastai-style) for flattening a model into leaf modules while
# keeping parameters that are not registered inside any child module.
import torch.nn as nn

class ParameterModule(nn.Module):
    "Register a lone parameter `p` in a module."
    def __init__(self, p:nn.Parameter):
        super().__init__()
        self.val = p
    def forward(self, x):
        # Identity: this module only exists to hold `val`.
        return x

def children(m:nn.Module)->nn.ModuleList:
    "Get children of `m`."
    return list(m.children())

def num_children(m:nn.Module)->int:
    "Get number of children modules in `m`."
    return len(children(m))

def children_and_parameters(m:nn.Module):
    "Return the children of `m` and its direct parameters not registered in modules."
    children = list(m.children())
    # ids of every parameter already owned by some child module
    children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
    for p in m.parameters():
        if id(p) not in children_p:
            children.append(ParameterModule(p))
    return children

# Recursively flatten a model into its list of leaf modules.
flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if num_children(m) else [m]
# -

# +
# flatten_model(mobilenetv2())
# -

# Parameter inspection on a tiny conv and on mobilenetv2.
conv = nn.Conv2d(1,1, kernel_size=3)
params = list(conv.parameters())
params

list(conv.named_parameters())

named_params = list(mobilenetv2().named_parameters())
ln = len(named_params)
print(ln)
name = named_params[0][0]
param = named_params[0][1]
notebook/devboard2-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Sheet 5
# ## Exercise 13
# Monte-Carlo generation of a toy neutrino dataset: power-law energies,
# detector acceptance, hit counts and reconstructed positions, written to
# NeutrinoMC.hdf5 (signal + background).

import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
from math import inf

gamma = 2.7          # spectral index of the power-law energy spectrum
np.random.seed(42)   # reproducibility

# __a)__ Sample energies from the power law via inverse-transform sampling.
# +
def PDF(E):
    """Normalized power-law density (gamma-1) * E^-gamma for E >= 1."""
    return (gamma - 1) * E**(- gamma)

def CDF(E):
    """Cumulative distribution of the power law: 1 - E^(1-gamma)."""
    return 1 - E**(1 - gamma)

def INV_CDF(y):
    """Inverse CDF: maps uniform y in [0,1) to a power-law energy."""
    return (1 - y)**(1 / (1 - gamma))
# -

y = np.random.uniform(0, 1, int(1e5))
Energy = INV_CDF(y)

# __b)__ Detector acceptance via rejection sampling.
def P(E):
    """Detection probability as a function of energy (saturates at 1)."""
    return (1 - np.exp(-E / 2))**3

uniform = np.random.uniform(size = len(y))
# Accept event i when uniform_i < P(E_i). (A vectorized `uniform < P(Energy)`
# would give the same mask without the Python loop.)
AcceptanceMask = np.array([uniform < P(Energy) for uniform, Energy in zip(uniform, Energy)])

plot_energy = np.logspace(0, 5, 1000)

plt.plot(plot_energy, PDF(plot_energy) * P(plot_energy), 'r-', label = 'PDF')
plt.hist(Energy[AcceptanceMask], bins = np.logspace(0, 5, 50), density = True, histtype = 'step', label = 'Mit Akzeptanz')
plt.hist(Energy, bins = np.logspace(0, 5, 50), density = True, histtype = 'step', label = 'Ohne Akzeptanz')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.show()

# Comment: above roughly 1000 TeV no bins are filled any more. This is due
# to the finite length of the uniform sample used for the inversion: the CDF
# below shows that large E values come from uniform values very close to 1.

plt.plot(plot_energy, CDF(plot_energy))
plt.xscale('log')
plt.xlabel('$E$')
plt.title('CDF')
plt.show()

data = DataFrame()
data['Energy'] = Series(Energy)
data['AcceptanceMask'] = Series(AcceptanceMask)

# __Polar method__: generates a standard normal distribution.
def polar_method(size):
    """Draw `size` pairs of independent standard-normal samples using the
    Marsaglia polar method; resamples pairs that land outside the unit disc.
    Returns two arrays (x1, x2)."""
    v1 = 2 * np.random.uniform(0, 1, size) - 1
    v2 = 2 * np.random.uniform(0, 1, size) - 1
    s = v1**2 + v2**2
    # `True in (s >= 1)` is an element-membership test on the bool array,
    # i.e. "any pair still outside the unit disc".
    while (True in (s >= 1)):
        v1[s >= 1] = 2 * np.random.uniform(0, 1, len(s[s >= 1])) - 1
        v2[s >= 1] = 2 * np.random.uniform(0, 1, len(s[s >= 1])) - 1
        s[s >= 1] = v1[s >= 1]**2 + v2[s >= 1]**2
    x1 = v1 * np.sqrt(- 2 / s * np.log(s))
    x2 = v2 * np.sqrt(- 2 / s * np.log(s))
    return x1, x2

# 'random_gaus' builds a 1- or 2-dim Gaussian by transforming the standard
# normal from the polar method. It also allows drawing values only from a
# given interval (e.g. the detector area), redrawing anything outside.
def random_gaus(mu, sig, size, rho = 0, two_dim = False, lim = (0, inf)):
    """Gaussian samples with mean `mu`, std `sig`, optional correlation
    `rho` between the two dimensions, truncated to the interval `lim`.
    Returns x (or (x, y) when two_dim=True)."""
    x_std, y_std = polar_method(size)
    x = np.sqrt(1 - rho**2) * sig * x_std + rho * sig * y_std + mu #formula for x transformation
    mask = ((x < lim[0]) | (x > lim[1])) #generate new numbers, when out of limit
    while (True in mask):
        x_std[mask], y_std[mask] = polar_method(len(x[mask]))
        x[mask] = np.sqrt(1 - rho**2) * sig * x_std[mask] + rho * sig * y_std[mask] + mu
        mask = ((x < lim[0]) | (x > lim[1]))
    if two_dim:
        y = sig * y_std + mu #formula for y transformation
        mask = ((y < lim[0]) | (y > lim[1]))
        while (True in mask):
            x_std[mask], y_std[mask] = polar_method(len(y[mask]))
            y[mask] = sig * y_std[mask] + mu
            mask = ((y < lim[0]) | (y > lim[1]))
        return x, y
    else:
        return x

# __c)__ Number of detector hits ~ N(10E, 2E), truncated at 0 and rounded.
def hits(E):
    """Draw the (rounded, non-negative) number of hits for energy E."""
    NumberOfHits = round(random_gaus(mu = 10*E, sig = 2*E, size = 1, lim = (0, inf))[0], 0)
    return NumberOfHits

NumberOfHits = [hits(E) for E in Energy]
data['NumberOfHits'] = Series(NumberOfHits)

# __d)__ Reconstructed position: Gaussian around `center` whose resolution
# improves with the number of hits.
def location(N, center):
    """Draw one coordinate around `center` with sigma = 1/log10(N+1),
    truncated to the detector range [0, 10].

    NOTE(review): N == 0 gives log10(1) = 0 and hence a division by zero —
    confirm N is always >= 1 here (the truncation at 0 still allows N == 0).
    """
    x = random_gaus(mu = center, sig = 1 / np.log10(N + 1), rho = 0, size = 1, lim = (0, 10))[0]
    return x

x = [location(N, 7) for N in NumberOfHits]
y = [location(N, 3) for N in NumberOfHits]
data['x'] = Series(x)
data['y'] = Series(y)

plt.hist2d(x, y, bins = [40, 40], range = [[0, 10], [0, 10]])
plt.xlabel('x')
plt.ylabel('y')
plt.show()

data.to_hdf('NeutrinoMC.hdf5', key = 'Signal')

# __e)__ Background: correlated 2d Gaussian positions and log-normal hits.
noise = DataFrame()

# +
rho = 0.5
sig = 3
mu = 5
x, y = random_gaus(mu = 5, sig = 3, two_dim = True, size = int(1e7), rho = 0.5, lim = (0, 10))
noise['x'] = Series(x)
noise['y'] = Series(y)
# -

plt.hist2d(x, y, bins = [30, 30], range = [[-3, 13], [-3, 13]])
plt.show()

# log10(hits) ~ N(2, 1)  ->  hits = 10**draw, rounded.
log_NumberOfHits = random_gaus(mu = 2, sig = 1, size = int(1e7))
NumberOfHits_noise = np.round(10**log_NumberOfHits, 0)

plt.hist(log_NumberOfHits, bins = 20, histtype='step')
plt.show()

noise['NumberOfHits_noise'] = Series(NumberOfHits_noise)
noise.to_hdf('NeutrinoMC.hdf5', key = 'Background')

# ## Exercise 14
# a) Generate a blob dataset and look at its first two features.
from sklearn.datasets import make_blobs
from matplotlib.colors import ListedColormap
discrete_cmap = ListedColormap([(0.8, 0.2, 0.3), (0.1, 0.8, 0.3), (0, 0.4, 0.8)])

X, y = make_blobs(n_samples=1000, centers=2, n_features = 4, random_state=0)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap=discrete_cmap)

# b) Principal component analysis finds a basis whose axes maximize the
# variance of the data, so that the dataset can be approximated by a small
# number of expressive principal components, reducing the dimension from d
# to k < d. The steps: center the data around the mean (x' = x - mu),
# compute the covariance matrix, and obtain its d eigenvalues and
# eigenvectors. The eigenvectors are sorted by eigenvalue size; for a
# reduction to k dimensions only the k largest eigenvalues/eigenvectors are
# kept and the rest discarded. The kept eigenvectors form the columns of the
# transformation matrix W, and the data are projected via X' = W X.

# c) Eigen-decomposition of the covariance matrix.
# +
# print(X.shape)
c = np.cov(X, rowvar=False)
l, W = np.linalg.eigh(c)

# Reverse the order: largest eigenvalues first.
l = l[::-1]
W = W[:, ::-1]
print(f'Die Eigenwerte lauten: {l}')
# -

# The eigenvalues define the eigenspaces onto which the dataset can be
# projected; each eigenvalue gives the variance of its principal component.
# Since we want to maximize variance, we keep the axes of the largest
# eigenvalues and discard the rest — here it is reasonable to keep only the
# axis with eigenvalue ~18.

# +
from sklearn.decomposition import PCA

pca = PCA(n_components = 4)
transformed = pca.fit_transform(X)

plt.scatter(transformed[:, 0], transformed[:, 1], c=y, cmap = discrete_cmap)
plt.show()
# -

# d) Histogram of each principal component, labeled with its eigenvalue.
# +
fig = plt.figure(figsize = (14, 10))
for i in range(4):
    ax = plt.subplot(221 + i)
    ax.hist(transformed[:, i], bins = 15, histtype = 'step', label = f'$x_{i + 1}$,\n $EW = {round(l[i], 2)}$')
    ax.legend()
plt.show()
# -

# Comment: the principal axis with the largest eigenvalue is indeed the
# best one for separating the two classes.
Blatt05/blatt05_nitschke_grisard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # word2vec IMDB data
#
# Training word2vec embeddings on the IMDB database and experimenting.
#
# Reference: Kaggle tutorial ["Bag of Words meets Bags of Popcorn"](https://www.kaggle.com/c/word2vec-nlp-tutorial#part-2-word-vectors)

from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import nltk.data
import pandas as pd
import gensim

# Labeled / unlabeled IMDB review sets; quoting=3 (QUOTE_NONE) because the
# reviews contain unescaped quote characters.
train = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
test = pd.read_csv("testData.tsv", header=0, delimiter="\t", quoting=3)
unlabeled_train = pd.read_csv("unlabeledTrainData.tsv", header=0, delimiter="\t", quoting=3)


def review_to_wordlist( review, remove_stopwords=False ):
    """Convert one raw review to a list of lower-case words.

    Strips HTML and non-letter characters; optionally removes English
    stop words (off by default).
    """
    review_text = BeautifulSoup(review).get_text()        # 1. remove HTML
    review_text = re.sub("[^a-zA-Z]"," ", review_text)    # 2. keep letters only
    words = review_text.lower().split()                   # 3. lower-case tokens
    if remove_stopwords:                                  # 4. optional stop-word removal
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    return words


# +
# Load the punkt tokenizer used to split reviews into sentences.
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')


def review_to_sentences( review, tokenizer, remove_stopwords=False ):
    """Split a review into parsed sentences.

    Returns a list of sentences, where each sentence is a list of words.
    """
    raw_sentences = tokenizer.tokenize(review.decode('utf-8').strip())
    sentences = []
    for raw_sentence in raw_sentences:
        # Skip empty sentences; otherwise tokenize to a word list.
        if len(raw_sentence) > 0:
            sentences.append(review_to_wordlist(raw_sentence, remove_stopwords))
    return sentences
# -

# +
sentences = []  # accumulated list of word lists, one per sentence

print("Parsing sentences from training set")
for review in train["review"]:
    sentences += review_to_sentences(review, tokenizer)

print("Parsing sentences from unlabeled set")
for review in unlabeled_train["review"]:
    sentences += review_to_sentences(review, tokenizer)
# -

# min_count=1 keeps every word that occurs at least once in the corpus.
model = gensim.models.Word2Vec(sentences, min_count=1)

print(model.wv.most_similar(positive=['bad', 'best'], negative=['good']))

# Evaluate the embeddings on the standard word-analogy test set.
acc = model.accuracy('questions-words.txt')

[(d.keys()[1], d[d.keys()[1]]) for d in acc]

for i in range(0, len(acc)):
    print(acc[i][acc[i].keys()[1]], len(acc[i]['correct']), len(acc[i]['incorrect']))
models/.ipynb_checkpoints/word2vec-imdb-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: bonsai-env
#     language: python
#     name: bonsai-env
# ---

import requests
import json

# +
# General variables
url = "http://localhost:5000"
predictionPath = "/v1/prediction"
headers = {
    "Content-Type": "application/json"
}

METERS_PER_INCH = 0.0254

# Build the endpoint reference
endpoint = url + predictionPath

# Set the request variables for the prediction call.
requestBody = {
    "screw_angular_speed": 4e-6,
    "cutter_frequency": 4e-6,
    "product_length": 4 * 12 * METERS_PER_INCH,  # 4 ft converted to metres
    "flow_rate": 2.3e-5,
    "temperature": 190 + 273.15,  # 190 deg C expressed in kelvin
}
# -

requestBody

# Send the POST request and inspect the JSON reply.
response = requests.post(
    endpoint,
    data=json.dumps(requestBody),
    headers=headers
)

response.json()
Python/samples/plastic-extrusion/deployment/BrainInteraction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # # Introduction # # This tutorial will introduce you to the TextRank algorithm that is commonly used to summarize a document of text. In the modern day, data is being collected and generated at an unprecedented rate. To make use of this data, we need to be able to extract features and important information quickly and accurately. For text data, it is very important to summarize large collections or documents of text to understand what the collection or document is about. Summarization is useful everywhere, since almost every professional at some point will read large amounts of text. There are many approaches for this problem, such as supervised machine learning or maximum entropy. In this tutorial, however, we will go over an unsupervised algorithm called TextRank(or LexRank but they're the same concept). TextRank is an algorithm that retrieves the important parts of the document via a method similar to PageRank, but using different vertices and edges. # # # Tutorial Content # # We will go over how the TextRank algorithm works by starting with a bag of words approach. We will be using data copied from a Wikipedia page. After covering the basics of TextRank and its use on document summarization, we will go over how TextRank can be applied to keyword extraction. # # # # # # # Installation # Please just use the anaconda package. # #!/usr/bin/env python # -*- coding: utf-8 -*- import networkx as nx import numpy as np import nltk import string import operator from nltk.tokenize.punkt import PunktSentenceTokenizer from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer # # Text Rank # # Text Rank is an algorithm introduced in 2004 from researchers at University of Michigan. 
It uses the same concept as PageRank: do a random walk on a graph where the sentences are vertices and the similarity between each sentence are the edges. This means that the algorithm is extracting the sentences that are most similar to other sentences, which indicate that this sentence is important and covers the information in a lot of other sentences. To do that, we start by tokenizing all the sentences. # # # Sentence splitting # # Since we are doing summarization of the document by finding the most important sentences, we need to split the document by sentences. # We do this through NLTK. def sentence_tokens(document): tokenizer = PunktSentenceTokenizer() sentences = tokenizer.tokenize(document) return sentences document = """ Another keyphrase extraction algorithm is TextRank. While supervised methods have some nice properties, like being able to produce interpretable rules for what features characterize a keyphrase, they also require a large amount of training data. Many documents with known keyphrases are needed. Furthermore, training on a specific domain tends to customize the extraction process to that domain, so the resulting classifier is not necessarily portable, as some of Turney's results demonstrate. Unsupervised keyphrase extraction removes the need for training data. It approaches the problem from a different angle. Instead of trying to learn explicit features that characterize keyphrases, the TextRank algorithm[3] exploits the structure of the text itself to determine keyphrases that appear "central" to the text in the same way that PageRank selects important Web pages. Recall this is based on the notion of "prestige" or "recommendation" from social networks. In this way, TextRank does not rely on any previous training data at all, but rather can be run on any arbitrary piece of text, and it can produce output simply based on the text's intrinsic properties. Thus the algorithm is easily portable to new domains and languages. 
TextRank is a general purpose graph-based ranking algorithm for NLP. Essentially, it runs PageRank on a graph specially designed for a particular NLP task. For keyphrase extraction, it builds a graph using some set of text units as vertices. Edges are based on some measure of semantic or lexical similarity between the text unit vertices. Unlike PageRank, the edges are typically undirected and can be weighted to reflect a degree of similarity. Once the graph is constructed, it is used to form a stochastic matrix, combined with a damping factor (as in the "random surfer model"), and the ranking over vertices is obtained by finding the eigenvector corresponding to eigenvalue 1 (i.e., the stationary distribution of the random walk on the graph). The vertices should correspond to what we want to rank. Potentially, we could do something similar to the supervised methods and create a vertex for each unigram, bigram, trigram, etc. However, to keep the graph small, the authors decide to rank individual unigrams in a first step, and then include a second step that merges highly ranked adjacent unigrams to form multi-word phrases. This has a nice side effect of allowing us to produce keyphrases of arbitrary length. For example, if we rank unigrams and find that "advanced", "natural", "language", and "processing" all get high ranks, then we would look at the original text and see that these words appear consecutively and create a final keyphrase using all four together. Note that the unigrams placed in the graph can be filtered by part of speech. The authors found that adjectives and nouns were the best to include. Thus, some linguistic knowledge comes into play in this step. Edges are created based on word co-occurrence in this application of TextRank. Two vertices are connected by an edge if the unigrams appear within a window of size N in the original text. N is typically around 2โ€“10. Thus, "natural" and "language" might be linked in a text about NLP. 
"Natural" and "processing" would also be linked because they would both appear in the same string of N words. These edges build on the notion of "text cohesion" and the idea that words that appear near each other are likely related in a meaningful way and "recommend" each other to the reader. Since this method simply ranks the individual vertices, we need a way to threshold or produce a limited number of keyphrases. The technique chosen is to set a count T to be a user-specified fraction of the total number of vertices in the graph. Then the top T vertices/unigrams are selected based on their stationary probabilities. A post- processing step is then applied to merge adjacent instances of these T unigrams. As a result, potentially more or less than T final keyphrases will be produced, but the number should be roughly proportional to the length of the original text. It is not initially clear why applying PageRank to a co-occurrence graph would produce useful keyphrases. One way to think about it is the following. A word that appears multiple times throughout a text may have many different co-occurring neighbors. For example, in a text about machine learning, the unigram "learning" might co-occur with "machine", "supervised", "un-supervised", and "semi-supervised" in four different sentences. Thus, the "learning" vertex would be a central "hub" that connects to these other modifying words. Running PageRank/TextRank on the graph is likely to rank "learning" highly. Similarly, if the text contains the phrase "supervised classification", then there would be an edge between "supervised" and "classification". If "classification" appears several other places and thus has many neighbors, its importance would contribute to the importance of "supervised". If it ends up with a high rank, it will be selected as one of the top T unigrams, along with "learning" and probably "classification". 
In the final post-processing step, we would then end up with keyphrases "supervised learning" and "supervised classification". In short, the co-occurrence graph will contain densely connected regions for terms that appear often and in different contexts. A random walk on this graph will have a stationary distribution that assigns large probabilities to the terms in the centers of the clusters. This is similar to densely connected Web pages getting ranked highly by PageRank. This approach has also been used in document summarization, considered below. """ document = unicode(document, 'ascii', 'ignore') document1 = ' '.join(document.strip().split('\n')) sentences = sentence_tokens(document1) print(sentences[0:3]) # # Creating Bag of Words # # To use page rank, we need to create a similarity graph of some kind to do a random walk on. To do that, we create a bag of words for each individual sentence. We could use Python's default Counter library, but that returns a dictionary of counts while we want a sparse matrix of the word occurrences in each matrix; basically a matrix of unique words as columns and sentences as rows and each entry is whether a word occurs in a sentence. Luckily, CountVectorizer from sklearn does exactly that. # input: array of sentences # output: sparse matrix of word occurences def counts(tokens): counter = CountVectorizer() matrix = counter.fit_transform(tokens) return matrix word_matrix = counts(sentences) word_matrix # # Create a Graph # # Now, we have a sparse matrix of sentences by words, but we want a mirror matrix of sentences by sentences because that represents the graph we want. First, we should normalize our graph so zeroes don't ruin the calculations. We do this using TfidfTransformer, which normalizes a count matrix into a tf-idf matrix which better represent the importance of a word in a set of documents. 
# We now multiply the normalized matrix by its transpose, which creates a
# mirror matrix where each entry is the result of multiplying every tfidf of
# each word in a sentence by another sentence's and adding them together. The
# result is a number from 0 to 1, where 1 means the sentences are exactly the
# same, and 0 means the sentences are completely different. This is an
# adjacency matrix that represents the graph of sentences and edges that
# represent similarities. This specific approach is done by the LexRank
# algorithm. The TextRank algorithm simply uses a different similarity measure
# that isn't tfidf.
#

# +
def graph(word_matrix):
    """Return a sparse sentence-by-sentence similarity (adjacency) matrix."""
    # Normalize raw counts to tf-idf, then multiply by the transpose: entry
    # (i, j) is the tf-idf similarity between sentences i and j.
    tfidf = TfidfTransformer().fit_transform(word_matrix)
    return tfidf * tfidf.T

similarity_graph = graph(word_matrix)
similarity_graph
# -

# # The Algorithm
#
# Now, we use networkx's page rank algorithm on this sparse matrix. The
# PageRank algorithm does a random walk on this graph, our sentences, and
# terminates after a fixed number and produces the rank of each sentence,
# which is how similar this sentence is to every other sentence. The higher
# the rank, the more sentences in this article it is similar to, implying it
# must be important in some way.

# +
def summary(similarity_graph, n):
    """Return the n top-ranked sentences (by PageRank) joined into one string."""
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    scores = nx.pagerank(nx_graph)
    # Pair each sentence with its score, best first. NOTE: reads the
    # module-level `sentences` list built earlier in the notebook.
    ranked = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    return "".join(ranked[i][1] + " " for i in range(n))

summ = summary(similarity_graph, 4)
print(summ)
# -

# # TextRank for keyword extraction
#
# Imagine a scenario where you're given a large document and you need to
# figure out what are the key things this document talks about. You want to
# use an algorithm that extracts the most important words and phrases from the
# text. But what determines importance of a word or a phrase? For this
# problem, we can also use the TextRank approach, but varied slightly.
# # Process text
#
# For a keyword extraction algorithm, a natural intuition is to find the words
# that occur the most. To effectively do that, removing the punctuation and
# then lemmatizing the words will let the algorithm better count the words
# that occur in different contexts. The same reasoning applies to the TextRank
# algorithm.

# taken from homework3
def process(text, lemmatizer=nltk.stem.wordnet.WordNetLemmatizer()):
    """
    Normalizes case and handles punctuation
    Inputs:
      text: str: raw text
      lemmatizer: an instance of a class implementing the lemmatize() method
                  (the default argument is of type nltk.stem.wordnet.WordNetLemmatizer)
    Outputs:
      list(str): tokenized text
    """
    b = text.lower()
    b = b.replace("'s", "")

    def applyFunc(s):
        # FIX: the original used `if s is "'"` -- an identity comparison that
        # only worked because CPython happens to intern one-character string
        # literals. `==` is the correct, portable value comparison.
        if s == "'":
            return ""         # drop remaining apostrophes entirely
        elif s in string.punctuation:
            return " "        # other punctuation becomes a word boundary
        else:
            return s

    newB = ''.join(map(applyFunc, b))
    tokens = nltk.word_tokenize(newB)
    newTokens = []
    for tok in tokens:
        try:
            newTokens.append(lemmatizer.lemmatize(tok))
        except Exception:
            # Best effort: skip any token the lemmatizer rejects.
            # (Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.)
            pass
    return newTokens

text = "This is a sample test input for processing."
print(process(text))

process("Education is the ability to listen to almost anything without losing your temper or your self-confidence.")
print(process("I don't know how this works"))
print(process("I'm doing well! How about you?"))
print(process("Are #those John-Bahsd's dishes?"))

# # Stop words
#
# Extremely common words, or stop words, can also ruin the algorithm. We get
# rid of all the stopwords that come with nltk.

def remove_stopwords(tokens):
    """Drop English stop words from a token list, preserving order."""
    # FIX (performance): membership tests against a list are O(n) per token;
    # a set makes each lookup O(1) without changing the result.
    stops = set(nltk.corpus.stopwords.words('english'))
    return [tok for tok in tokens if tok not in stops]

tokens = remove_stopwords(process(document))
print(tokens[:100])

# # Co-occurence
#
# Now that we've finished processing the text, how does TextRank actually work
# on keywords?
# The key concept behind TextRank is creating a graph with unigrams as
# vertices and the co-occurence between 2 words as edges. A co-occurence is
# when a word is within a window n of another word. For example, if a
# co-occurence window is 2, that means only words that are next to each other
# are counted by the algorithm. A co-occurence window of 3 means that words 2
# ahead of the word will be added as edges to the graph.

# # Making a Graph
#
# TextRank uses the PageRank algorithm to rank the nodes. To use PageRank, we
# first need to make a graph. Conveniently, networkx provides a great graph
# data structure. To simplify the algorithm, we will use a co-occurence window
# of 2, and use unweighted edges instead of weighted edges. Usually, the edge
# is weighted by the amount of times the co-occurence happened.

# +
def make_graph(tokens):
    """Build an undirected graph of unique tokens with an (unweighted) edge
    between each pair of adjacent tokens (co-occurrence window of 2)."""
    g = nx.Graph()
    # set(tokens) generates the unique unigrams from tokens
    g.add_nodes_from(set(tokens))
    # Add an edge for every adjacent pair; sorting the endpoints makes the
    # edge independent of the order the words appeared in.
    for left, right in zip(tokens, tokens[1:]):
        g.add_edge(*sorted([left, right]))
    return g

graph = make_graph(tokens)
print(type(graph))
# -

# # Keyword Extraction
#
# Now, we will rank the nodes in the graph via the PageRank algorithm in
# networkx. The function will take in a parameter n for the number of keywords
# that need to be extracted from the text.

def extract_n_keywords(graph, n=10):
    """Return (set of the top-n keywords, full PageRank score dict)."""
    ranks = nx.pagerank(graph)
    best = sorted(ranks.items(), key=operator.itemgetter(1), reverse=True)[:n]
    keywords = {word: score for word, score in best}
    return set(keywords.keys()), ranks

keywords, word_ranks = extract_n_keywords(graph)
print(keywords)

# # Keyword Phrases
#
# So far, we've only extracted key unigrams from the text, but what we want is
# not just unigrams, we want phrases along with unigrams. Fortunately, we
# don't need to go back and create a new graph based on ngrams to find
# important phrases.
After getting the top n keywords, all we need to do is check all the times the keywords occur in the document, and see if other keywords are adjacent to it. Then we average the pagerank scores so we don't overweight longer phrases and rerank the keywords with key phrases. # # + def key_phrases(keywords, tokens): from itertools import takewhile, tee, izip keyphrases = {} j = 0 for i, word in enumerate(tokens): if i < j: continue if word in keywords: temp = [] # if its adjacent to the keyword, add it as a phrase for x in tokens[i:i+10] : if x in keywords: temp.append(x) else: break kp_words = temp sum_ranks = 0 for w in kp_words: sum_ranks += word_ranks[w] avg_pagerank = sum_ranks / float(len(kp_words)) # insert it back into the keyphrases, and rerank later keyphrases[' '.join(kp_words)] = avg_pagerank j = i + len(kp_words) ranked_phrases = sorted(keyphrases.items(), key=operator.itemgetter(1),reverse=True) phrases = map((lambda x: x[0]), ranked_phrases) return phrases, ranked_phrases # - keywords_and_phrases, ranks = key_phrases(keywords, tokens) for i in ranks: print i[0], i[1] # print ranks # # Evaluation # # Now that we know how TextRank works, how do we know how well it works? This is actually a very difficult question to answer. Due to the difficulty in determining what a good summary is, there isn't an absolute measure that determines how good a summarization algorithm is. However, typical benchmark to use is the ROUGE(Recall-Oriented Understudy for Gisting Evaluation) measure. It is a recall-based measure, which encourages an algorithm to cover as many topics as it can. The measure compares the generated summary against a reference summary and computes the recall based on any ngram. For the purposes of this tutorial, we will not go over the evaluation because the ROUGE system requires a registration application. 
# # # Further Reading # # The original TextRank paper: # http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf # # Other, more advanced and better versions of TextRank: # # DivRank: # http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf # # CollabRank: # http://www.aclweb.org/anthology/C/C08/C08-1122.pdf # # ExpandRank: # http://www.aaai.org/Papers/AAAI/2008/AAAI08-136.pdf # # Wikipedia: # https://en.wikipedia.org/wiki/Automatic_summarization # # More on Evaluation: # ROUGE for python # https://pypi.python.org/pypi/pyrouge/0.1.0 #
2016/tutorial_final/201/TextRank Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Importance Reweighting # ## Setup # First, let's set up some environmental dependencies. These just make the numerics easier and adjust some of the plotting defaults to make things more legible. # + # Python 3 compatability from __future__ import division, print_function from six.moves import range # system functions that are always useful to have import time, sys, os # basic numeric setup import numpy as np from numpy import linalg # inline plotting # %matplotlib inline # plotting import matplotlib from matplotlib import pyplot as plt # seed the random number generator np.random.seed(510) # - # re-defining plotting defaults from matplotlib import rcParams rcParams.update({'xtick.major.pad': '7.0'}) rcParams.update({'xtick.major.size': '7.5'}) rcParams.update({'xtick.major.width': '1.5'}) rcParams.update({'xtick.minor.pad': '7.0'}) rcParams.update({'xtick.minor.size': '3.5'}) rcParams.update({'xtick.minor.width': '1.0'}) rcParams.update({'ytick.major.pad': '7.0'}) rcParams.update({'ytick.major.size': '7.5'}) rcParams.update({'ytick.major.width': '1.5'}) rcParams.update({'ytick.minor.pad': '7.0'}) rcParams.update({'ytick.minor.size': '3.5'}) rcParams.update({'ytick.minor.width': '1.0'}) rcParams.update({'font.size': 30}) import dynesty # # Importance Sampling # Nested Sampling provides both a set of samples and their associated **importance weights**. 
These are exactly analogous
# If samples are sparse, then reweighting can lead to much noisier estimates.
# As a result, importance reweighting is most useful when "tweaking" a
# distribution and least useful when trying to make big changes.

# # 3-D Multivariate Normal

# We will demonstrate importance reweighting using 3-D **multivariate Normal**
# distributions. First, we will define the correlated version used in previous
# examples.

# +
ndim = 3  # number of dimensions

# Covariance: ones on the diagonal, 0.95 everywhere else (strongly correlated).
C = np.identity(ndim)
C[C == 0] = 0.95
Cinv = linalg.inv(C)  # precision matrix

# ln of the Gaussian normalization constant for this covariance.
lnorm = -0.5 * (np.log(2 * np.pi) * ndim + np.log(linalg.det(C)))


# 3-D correlated multivariate normal log-likelihood
def loglikelihood(x):
    """Multivariate normal log-likelihood."""
    return -0.5 * np.dot(x, np.dot(Cinv, x)) + lnorm
# -

# We'll again define our prior (via `prior_transform`) to be uniform in each
# dimension from -10 to 10 and 0 everywhere else.

# prior transform
def prior_transform(u):
    """Transforms our unit cube samples `u` to a flat prior between -10. and 10. in each variable."""
    return 10. * (2. * u - 1.)

# Let's first generate samples from this target distribution.

# initialize our nested sampler
dsampler = dynesty.DynamicNestedSampler(loglikelihood, prior_transform,
                                        ndim=3, bound='single', sample='unif')
dsampler.run_nested(maxiter=50000, use_stop=False)
dres = dsampler.results

# Now let's generate samples from the uncorrelated version with the same priors.
# + C2 = np.identity(ndim) # set covariance to identity matrix Cinv2 = linalg.inv(C2) # precision matrix lnorm2 = -0.5 * (np.log(2 * np.pi) * ndim + np.log(linalg.det(C2))) # ln(normalization) # 3-D correlated multivariate normal log-likelihood def loglikelihood2(x): """Multivariate normal log-likelihood.""" return -0.5 * np.dot(x, np.dot(Cinv2, x)) + lnorm2 # - dsampler2 = dynesty.DynamicNestedSampler(loglikelihood2, prior_transform, ndim=3, bound='single', sample='unif') dsampler2.run_nested(maxiter=50000, use_stop=False) dres2 = dsampler2.results # Comparing our results shows these distributions are somewhat different. # + # plot results from dynesty import plotting as dyplot lnz_truth = ndim * -np.log(2 * 10.) # analytic evidence solution fig, axes = dyplot.runplot(dres, color='blue') fig, axes = dyplot.runplot(dres2, color='red', lnz_truth=lnz_truth, truth_color='black', fig=(fig, axes)) fig.tight_layout() # + # initialize figure fig, axes = plt.subplots(3, 7, figsize=(35, 15)) axes = axes.reshape((3, 7)) [a.set_frame_on(False) for a in axes[:, 3]] [a.set_xticks([]) for a in axes[:, 3]] [a.set_yticks([]) for a in axes[:, 3]] # plot initial run (left) fg, ax = dyplot.cornerplot(dres, color='blue', truths=[0., 0., 0.], truth_color='black', show_titles=True, max_n_ticks=3, title_kwargs={'y': 1.05}, quantiles=None, fig=(fig, axes[:, :3])) # plot extended run (right) fg, ax = dyplot.cornerplot(dres2, color='red', truths=[0., 0., 0.], truth_color='black', show_titles=True, title_kwargs={'y': 1.05}, quantiles=None, max_n_ticks=3, fig=(fig, axes[:, 4:])) # - # Let's using importance reweighting to adjust each of our samples to try and approximate the other distribution. 
# + # compute new log-likelihoods logl = np.array([loglikelihood(s) for s in dres2.samples]) logl2 = np.array([loglikelihood2(s) for s in dres.samples]) # reweight results dres_rwt = dynesty.utils.reweight_run(dres, logp_new=logl2) dres2_rwt = dynesty.utils.reweight_run(dres2, logp_new=logl) # + # initialize figure fig, axes = plt.subplots(3, 7, figsize=(35, 15)) axes = axes.reshape((3, 7)) [a.set_frame_on(False) for a in axes[:, 3]] [a.set_xticks([]) for a in axes[:, 3]] [a.set_yticks([]) for a in axes[:, 3]] # plot initial run (left) fg, ax = dyplot.cornerplot(dres_rwt, color='blue', truths=[0., 0., 0.], truth_color='black', show_titles=True, max_n_ticks=3, title_kwargs={'y': 1.05}, quantiles=None, fig=(fig, axes[:, :3])) # plot extended run (right) fg, ax = dyplot.cornerplot(dres2_rwt, color='red', truths=[0., 0., 0.], truth_color='black', show_titles=True, title_kwargs={'y': 1.05}, quantiles=None, max_n_ticks=3, fig=(fig, axes[:, 4:])) # - # plot results fig, axes = dyplot.runplot(dres_rwt, color='blue') fig, axes = dyplot.runplot(dres2_rwt, color='red', lnz_truth=lnz_truth, truth_color='black', fig=(fig, axes)) fig.tight_layout() # We see that while we are able to reproduce the broad features of each distribution, the reweighted estimates are quite noisy.
demos/Examples -- Importance Reweighting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: drlnd # language: python # name: drlnd # --- # # Navigation # # --- # # In this notebook, you will learn how to use the Unity ML-Agents environment for the first project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893). # # ### 1. Start the Environment # # We begin by importing some necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). # %load_ext autoreload # %autoreload 2 # + import os import sys repo_path = os.path.dirname(os.path.dirname(os.path.abspath("__file__"))) sys.path.append(repo_path) # - from unityagents import UnityEnvironment import numpy as np # Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded. # # - **Mac**: `"path/to/Banana.app"` # - **Windows** (x86): `"path/to/Banana_Windows_x86/Banana.exe"` # - **Windows** (x86_64): `"path/to/Banana_Windows_x86_64/Banana.exe"` # - **Linux** (x86): `"path/to/Banana_Linux/Banana.x86"` # - **Linux** (x86_64): `"path/to/Banana_Linux/Banana.x86_64"` # - **Linux** (x86, headless): `"path/to/Banana_Linux_NoVis/Banana.x86"` # - **Linux** (x86_64, headless): `"path/to/Banana_Linux_NoVis/Banana.x86_64"` # # For instance, if you are using a Mac, then you downloaded `Banana.app`. 
If this file is in the same folder as the notebook, then the line below should appear as follows: # ``` # env = UnityEnvironment(file_name="Banana.app") # ``` env = UnityEnvironment(file_name="Banana_Windows_x86_64/Banana.exe") # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # ### 2. Examine the State and Action Spaces # # The simulation contains a single agent that navigates a large environment. At each time step, it has four actions at its disposal: # - `0` - walk forward # - `1` - walk backward # - `2` - turn left # - `3` - turn right # # The state space has `37` dimensions and contains the agent's velocity, along with ray-based perception of objects around agent's forward direction. A reward of `+1` is provided for collecting a yellow banana, and a reward of `-1` is provided for collecting a blue banana. # # Run the code cell below to print some information about the environment. # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size) # - action = np.random.randint(action_size) env_info = env.step(action)[brain_name] next_state = env_info.vector_observations[0] reward = env_info.rewards[0] done = env_info.local_done[0] # ### 3. Take Random Actions in the Environment # # In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment. 
#
# Once this cell is executed, you will watch the agent's performance, if it selects an action (uniformly) at random with each time step.  A window should pop up that allows you to observe the agent, as it moves through the environment.
#
# Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment!

# +
env_info = env.reset(train_mode=False)[brain_name]  # reset the environment
state = env_info.vector_observations[0]             # get the current state
score = 0                                           # initialize the score
acc_steps = 0                                       # count time steps in this episode
while True:
    action = np.random.randint(action_size)         # select an action
    env_info = env.step(action)[brain_name]         # send the action to the environment
    next_state = env_info.vector_observations[0]    # get the next state
    reward = env_info.rewards[0]                    # get the reward
    done = env_info.local_done[0]                   # see if episode has finished
    score += reward                                 # update the score
    state = next_state                              # roll over the state to next time step
    acc_steps += 1
    if done:                                        # exit loop if episode finished
        break

# BUG FIX: the original format string had three "{}" placeholders but only two
# arguments, raising an IndexError at the end of every episode.
print("Score: {} in {} steps".format(score, acc_steps))
# -

# When finished, you can close the environment.

env.close()

# ### 4. It's Your Turn!
#
# Now it's your turn to train your own agent to solve the environment!
# When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```

# +
from collections import deque

import pandas as pd


def train(env, brain_name, agent, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01,
          eps_decay=0.995, model_save_path='checkpoint.pth', score_solved=13., score_win=100):
    """Train a DQ-Learning agent on a given environment, based on an epsilon-greedy
    policy and GLIE evolution of the epsilon parameter.

    When the game is considered solved, save the DQ-Net underlying the agent
    in a given path.

    Params
    ======
        env: Environment to solve an episodic game. Should behave like:
             state = env.reset()
             next_state, reward, done, _ = env.step(action)
        brain_name (str): Name of the Unity brain controlled through `env`
        agent: DQ-Learning agent; should estimate an optimal policy by
               estimating the Q function using a DQN
        n_episodes (int): Number of episodes to simulate
        max_t (int): Max number of time steps (transitions) on each episode
        eps_start (float): Epsilon parameter starting value (at first episode)
        eps_end (float): Epsilon min value
        eps_decay (float): Epsilon decay rate
        model_save_path (str): Path to persist the model
        score_solved (float): Average score over the 100-episode window at
                              which the game is considered solved
        score_win (int): Progress-reporting interval in episodes; it is also
                         subtracted from the episode count in the "solved"
                         message -- NOTE(review): confirm this offset is the
                         intended convention (usually the window size, 100)

    Returns
    ======
        scores (pd.Series): Score obtained on each episode, indexed by episode
    """
    scores = []                        # list containing scores from each episode
    scores_window = deque(maxlen=100)  # last 100 scores
    eps = eps_start                    # initialize epsilon
    for i_episode in range(1, n_episodes + 1):
        env_info = env.reset(train_mode=True)[brain_name]  # reset the environment
        state = env_info.vector_observations[0]            # get the current state
        score = 0                                          # initialize the score
        for t in range(max_t):
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]        # send the action to the environment
            next_state = env_info.vector_observations[0]   # get the next state
            reward = env_info.rewards[0]                   # get the reward
            done = env_info.local_done[0]                  # see if episode has finished
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)                    # save most recent score
        scores.append(score)                           # save most recent score
        eps = update_epsilon(eps_end, eps_decay, eps)  # decrease epsilon
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % score_win == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        if np.mean(scores_window) >= score_solved:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(
                i_episode - score_win, np.mean(scores_window)))
            agent.save_network(model_save_path)
            break
    env.close()
    return pd.Series(index=range(1, len(scores) + 1), data=scores, dtype=np.float32, name='score')


def update_epsilon(eps_end, eps_decay, eps_curr):
    """Return the next epsilon: decay `eps_curr` geometrically, floored at `eps_end`."""
    return max(eps_end, eps_decay * eps_curr)
# -

from src.dqn_agent import AgentDQ

agent_dq = AgentDQ(state_size=37, action_size=4, gamma=0.99, hidden_layers=[64, 32], drop_p=None,
                   batch_size=64, learning_rate=5e-4, soft_upd_param=1e-3, update_every=4,
                   buffer_size=int(1e5), seed=123)

# +
import matplotlib.pyplot as plt
import numpy as np

# Visualize how fast epsilon decays for a few candidate decay rates.
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(16, 5))
eps1, eps2, eps3 = 0.995, 0.99, 0.95

ax1 = axs[0]
neps1 = np.arange(2000)
ax1.plot(1 * eps1 ** neps1, label=rf'$\epsilon$={eps1}')
ax1.plot(1 * eps2 ** neps1, label=rf'$\epsilon$={eps2}')
ax1.plot(1 * eps3 ** neps1, label=rf'$\epsilon$={eps3}')

ax2 = axs[1]
neps2 = np.arange(1000)
ax2.plot(1 * eps1 ** neps2, label=rf'$\epsilon$={eps1}')
ax2.plot(1 * eps2 ** neps2, label=rf'$\epsilon$={eps2}')
ax2.plot(1 * eps3 ** neps2, label=rf'$\epsilon$={eps3}')

for ax in axs:
    ax.grid()
    ax.axhline(0.1, color='black')
    ax.legend()
plt.show()
# -

scores_dq = train(env, brain_name=brain_name, agent=agent_dq, n_episodes=2000, max_t=1000,
                  eps_start=1.0, eps_end=0.01, eps_decay=0.995,
                  model_save_path='models/dq_checkpoint_v01.pth')

scores_dq.rolling(100).mean().plot()
plt.show()

# Persist per-episode scores together with the experiment hyperparameters.
scores_df = scores_dq.to_frame('score')
scores_df['score_mave100'] = scores_df['score'].rolling(100).mean()
scores_df['experiment'] = 'dqn:v01'
scores_df.index.name = 'idx_episode'

checkpoint_metadata = pd.Series(
    index=['N_episodes', 'gamma', 'hidden_layers', 'drop_p', 'batch_size', 'learning_rate',
           'soft_upd_param', 'update_every', 'buffer_size', 'solved', 'checkpoint'],
    data=[len(scores_dq), 0.99, [64, 32], None, 64, 5e-4, 1e-3, 4, int(1e5), True,
          'dq_checkpoint_v01.pth'],
    name='experiment:dqn:v01')
checkpoint_metadata

import datetime as dt
experiment_dt = dt.datetime.strftime(dt.datetime.now(), "%Y%m%d%H%M%S")
checkpoint_metadata.to_json(f'models/experiments/hparams_{experiment_dt}.json')
scores_df.to_csv(f'models/experiments/scores_{experiment_dt}.csv')
p1_navigation/Navigation-dq-v01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature Maps
#
# [link](https://machinelearningmastery.com/how-to-visualize-filters-and-feature-maps-in-convolutional-neural-networks/)

# load vgg model
from keras.applications.resnet import ResNet50
# load the model
model = ResNet50()
# summarize the model
model.summary()

# summarize feature map shapes
for i in range(len(model.layers)):
    layer = model.layers[i]
    # check for convolutional layer
    if '_conv' not in layer.name:
        continue
    # summarize output shape
    print(i, layer.name, layer.output.shape)

# +
# visualize feature maps output from each block in the vgg model
from keras.applications.resnet import preprocess_input
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import Model
from matplotlib import pyplot
from numpy import expand_dims

# redefine model to output right after the first hidden layer
# NOTE(review): these indices pick specific ResNet50 layers -- confirm they are
# the intended block outputs for this architecture.
ixs = [2, 7, 10, 13, 14, 171, 139]
outputs = [model.layers[i].output for i in ixs]
model = Model(inputs=model.inputs, outputs=outputs)
print(type(model))
# load the image with the required shape
img = load_img('bird.jpg', target_size=(224, 224))
pyplot.imshow(img)
pyplot.show()
# convert the image to an array
img = img_to_array(img)
# expand dimensions so that it represents a single 'sample'
img = expand_dims(img, axis=0)
# prepare the image (e.g. scale pixel values for the vgg)
img = preprocess_input(img)
# get feature map for first hidden layer
feature_maps = model.predict(img)
# plot the output from each block
square = 8
for fmap in feature_maps:
    # plot all 64 maps in an 8x8 squares
    ix = 1
    for _ in range(square):
        for _ in range(square):
            # specify subplot and turn off axis
            ax = pyplot.subplot(square, square, ix)
            ax.set_xticks([])
            ax.set_yticks([])
            # plot filter channel in grayscale
            pyplot.imshow(fmap[0, :, :, ix - 1], cmap='gray')
            ix += 1
    # show the figure
    pyplot.show()
# -

# For the last two layers there are 256 feature maps, but we focused on 64
Feature Maps/FeatureMaps.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Camera calibration
# In this notebook we will perform camera calibration in order to compute the camera matrix

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import glob
import pickle
# %matplotlib inline

# Find images
images = glob.glob("./camera_cal/calibration*.jpg")
plt.imshow(mpimg.imread(images[-1]))

# +
# Prepare arrays to store points
objpoints = []
imgpoints = []
shapes = []

# Prepare chessboard object points: (x, y, 0) grid for a 9x6 inner-corner board
objp = np.zeros((6 * 9, 3), dtype=np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

for idx, fname in enumerate(images):
    # Read each image
    img = mpimg.imread(fname)
    # Convert image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Find chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    if ret:
        print(str(idx) + ". " + fname + ": Corners found")
        imgpoints.append(corners)
        objpoints.append(objp)
        shapes.append(img.shape)
        plt.imshow(cv2.drawChessboardCorners(img, (9, 6), corners, ret))
    else:
        print(str(idx) + ". " + fname + ": Corners NOT found")
# -

# Print in how many images we found corners
print("Number of images with corners: {} / {}".format(len(imgpoints), len(images)))

# Check they all have the same shape
shapes

# Three images have 1281x721 size, but we deem the difference as negligible

# Calibrate camera
# NOTE(review): `gray` leaks out of the loop above, so the image size passed to
# calibrateCamera comes from the *last* image processed -- confirm all images
# share (or are close enough to) that size.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

# +
# Try to undistort
dst = cv2.undistort(mpimg.imread(images[-1]), mtx, dist, None, mtx)
fig, ax = plt.subplots(1, 2, figsize=(20, 5))
ax[0].imshow(mpimg.imread(images[-1]))
ax[0].set_title("Distorted")
ax[1].imshow(dst)
ax[1].set_title("Corrected (undistorted)")
fig.savefig("./output_images/distortion_comparison.jpg")
plt.show()
# -

# Pickle calibration data and save it
calibration_data = {"mtx": mtx, "dist": dist}
with open('./camera_cal/calibration_data.p', 'wb') as cal_f:
    pickle.dump(calibration_data, cal_f)
camera_calibration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # List
#
#

# List
lst = ["<NAME>", 12, 34, 56, 566.6, 54, 67, 98]
lst
lst.append("Ramana")
lst
lst.index(12)
lst.remove("Ramana")
lst
lst.pop(2)
lst
# extend() iterates its argument, so a string is added character by character
lst.extend("Ramana")
lst

# # DICT Dictonaries
#

dit = {"name": "jayaprakash", "age": "20", "number": 12345678, "email": "<EMAIL>"}
dit
dit.get("name")
dit.items()
dit.pop("name")
dit
# popitem() removes the most recently inserted pair
dit.popitem()
dit.keys()

# # SET functions

st = {"<NAME> ", "letsupgrade", 1, 2, 3, 4, 5, 6, 7, 8}
st
st = {"<NAME>", 1}
st.issubset(st)
st.difference()
st.intersection()
st
st.isdisjoint("<NAME>")
st.union("<NAME>")
st.add("Ramana")
st

# # Tuple

tup = ("<NAME>", "@", "gmail.com", "yahoo.com")
tup
tup.count("@")
tup.index("gmail.com")
tup.count("yahoo.com")
tup.index("yahoo.com")
tup.count("<NAME>")

# # String

# +
name = "<NAME>"
name1 = "RAMANA"
name
# -

name1
name + " " + name1
name == name1
name != name1
Assignment 1 DAY 2 B7.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Format DataFrame
#
# Be advised, this dataset (SKLearn's Forest Cover Types) can take a little while to download...
#
# This is a multi-class classification task, in which the target is label-encoded.

# +
import pandas as pd
from sklearn.datasets import fetch_covtype

data = fetch_covtype(shuffle=True, random_state=32)
train_df = pd.DataFrame(data.data, columns=["x_{}".format(i) for i in range(data.data.shape[1])])
train_df["y"] = data.target

print(train_df.shape)
train_df.head()
# -

# # Set Up Environment

# +
from hyperparameter_hunter import Environment, CVExperiment
from sklearn.metrics import f1_score

env = Environment(
    train_dataset=train_df,
    root_results_path="HyperparameterHunterAssets",
    target_column="y",
    metrics_map=dict(f1=lambda y_true, y_pred: f1_score(y_true, y_pred, average="micro")),
    cross_validation_type="StratifiedKFold",
    cross_validation_params=dict(n_splits=5, random_state=32),
)
# -

# Now that HyperparameterHunter has an active `Environment`, we can do two things:
#
#
# # 1. Perform Experiments

# +
from lightgbm import LGBMClassifier

experiment = CVExperiment(
    model_initializer=LGBMClassifier,
    model_init_params=dict(boosting_type="gbdt", num_leaves=31, max_depth=-1, subsample=0.5),
)
# -

# # 2. Hyperparameter Optimization

# +
from hyperparameter_hunter import RandomForestOptimization, Real, Integer, Categorical

optimizer = RandomForestOptimization(iterations=10, random_state=32)
optimizer.set_experiment_guidelines(
    model_initializer=LGBMClassifier,
    model_init_params=dict(
        boosting_type=Categorical(["gbdt", "dart"]),
        num_leaves=Integer(10, 40),
        max_depth=-1,
        subsample=Real(0.3, 0.7),
    ),
)
optimizer.go()
# -

# Notice, `optimizer` recognizes our earlier `experiment`'s hyperparameters fit inside the search space/guidelines set for `optimizer`.
#
# Then, when optimization is started, it automatically learns from `experiment`'s results - without any extra work for us!
examples/lightgbm_examples/classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "skip"}
#
#
# # %%HTML
# <style>
#
# .CodeMirror {
#     width: 100vw;
# }
#
# .container {
#     width: 99% !important;
# }
#
# .rendered_html {
#     font-size:0.8em;
# }
# .rendered_html table, .rendered_html th, .rendered_html tr, .rendered_html td {
#     font-size: 100%;
# }
#
# </style>

# + [markdown] slideshow={"slide_type": "-"}
# ## Hypothesis Testing
# <br>
# <center>
# <img src="../images/ibm-logo-bw.png" alt="ibm-logo" align="center" style="width: 200px;"/>
# </center>

# + [markdown] slideshow={"slide_type": "notes"}
# Data scientists employ a broad range of statistical tools to analyze data and reach conclusions from sometimes messy and incomplete data.  Many of these tools come from classical statistics and are used before the formal modeling part of the workflow.  This unit focuses on the foundational techniques of estimation with probability distributions and simple hypothesis tests in the context of EDA.

# + slideshow={"slide_type": "skip"}
import re
import os
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from termcolor import cprint
from IPython.display import Image
import matplotlib.pyplot as plt

plt.style.use('seaborn')
# %matplotlib inline

SMALL_SIZE = 10
MEDIUM_SIZE = 11
LARGE_SIZE = 12

plt.rc('font', size=SMALL_SIZE)         # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)    # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)   # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)   # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)   # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)   # legend fontsize
plt.rc('figure', titlesize=LARGE_SIZE)  # fontsize of the figure title


def slide_print(text, color='white'):
    """Print `text` highlighted for slide display (colored text on a grey background)."""
    cprint(text, color, 'on_grey')


# + [markdown] slideshow={"slide_type": "slide"}
# > <span style="color:orange">Statistical Inference</span>: the theory, methods, and practice of forming judgments about the parameters of a population and the reliability of statistical relationships, typically on the basis of random sampling.
#
# <br></br>
# A <span style="color:orange">population</span> is a set of similar items or events which is of interest for some question or experiment.  It is generally difficult or impossible to sample from the entire population so we rely on
# <span style="color:orange">random sampling</span> techniques to ensure that our sample is a good representation of the population.

# + [markdown] slideshow={"slide_type": "notes"}
# Statistical inference is a very complex discipline, but fortunately there are tools that make its application routine.  Thinking about your data with in terms of statistical populations and random sampling is foundational to the methods described in this section.  We will discuss how statistical inference is used to answer the following types of questions.

# + [markdown] slideshow={"slide_type": "slide"}
# ### Applications of Statistical Inference
#
# <span style="color:orange">Hypothesis Testing</span>: How well does the data match some assumed (null) distribution?
#
# <span style="color:orange">Point Estimation</span>: What instance of some distributional class does it match well?
#
# <span style="color:orange">Uncertainty Estimation</span>: How can we quantify our degree of uncertainty about our estimates?
#
# <span style="color:orange">Sensitivity Analysis</span>: Do our results rely heavily on our distributional assumptions?

# + [markdown] slideshow={"slide_type": "notes"}
# All of these types are questions are typical for an investigative analysis.  We may be looking to uncover the connection between the business opportunity and the data or we may be looking to understand a trend or pattern in the data.  Hypothesis testing, point estimation, uncertainty estimation, and sensitivity analysis are all examples of where we rely on statistical inference to do the heavy lifting.  Before we jump into the investigation example let's think for a moment about a simple application of statistical inference.  Lets imagine that there is a devops unit within the AAVAIL company that allocates computational resources for other units in the company.  **END OF PART 1**.
# + slideshow={"slide_type": "skip"}
def plot_beta_distributions():
    """
    Draw 1000 samples from each of three Beta parameterizations
    ((5,1), (5,2), (5,5)) and plot each histogram with the true pdf overlaid.

    (Docstring fixed: the original described parameters this function
    does not take.)
    """
    fig = plt.figure(figsize=(10, 4), dpi=150, facecolor='white')
    splot = 0

    ## loop through parameterizations of the beta
    for a, b in [(5, 1), (5, 2), (5, 5)]:
        splot += 1
        ax = fig.add_subplot(1, 3, splot)

        beta_dist = stats.beta(a, b)
        beta_rvs = beta_dist.rvs(size=1000)
        pdf_range = np.linspace(beta_dist.ppf(0.0001), beta_dist.ppf(0.9999), 100)

        ax.hist(beta_rvs, bins=60, facecolor="royalblue", alpha=0.7, density=1, histtype='stepfilled')
        ax.plot(pdf_range, beta_dist.pdf(pdf_range), 'darkorange', lw=4)

        ax.set_xlim((0, 1))
        ax.set_title(r"$\alpha$=%s, $\beta$=%s" % (a, b))
        ax.set_aspect(1. / ax.get_data_ratio())
# + slideshow={"slide_type": "slide"}
def infer_gaussian_from_beta(a, b):
    """
    Takes the alpha (a) and beta (b) parameters of a Beta distribution,
    samples from it, fits a Gaussian to the samples by maximum likelihood,
    and produces a plot with both the original and the inferred pdfs.

    Returns the (inferred Gaussian, original Beta) frozen distributions.
    """
    ## plot the beta
    fig = plt.figure(figsize=(10, 4), dpi=160, facecolor='white')
    ax = fig.add_subplot(111)

    beta_dist = stats.beta(a, b)
    beta_rvs = beta_dist.rvs(size=2000)
    pdf_range = np.linspace(beta_dist.ppf(0.0001), beta_dist.ppf(0.9999), 100)
    ax.hist(beta_rvs, bins=60, facecolor="royalblue", alpha=0.7, density=1, histtype='stepfilled')
    ax.plot(pdf_range, beta_dist.pdf(pdf_range), 'darkorange', lw=3, label="Beta")

    ## inference (maximum-likelihood fit of a Gaussian to the Beta samples)
    mu_hat, sigma_hat = stats.norm.fit(beta_rvs)

    ## plot inferred pdf (an unused norm_dist.rvs() draw was removed)
    norm_dist = stats.norm(loc=mu_hat, scale=sigma_hat)
    pdf_range = np.linspace(norm_dist.ppf(0.0001), norm_dist.ppf(0.9999), 100)
    ax.plot(pdf_range, norm_dist.pdf(pdf_range), color='black', linestyle='dashed', lw=3, label="Gaussian")
    ax.set_aspect(1. / ax.get_data_ratio())
    ax.set_xlim((-.1, 1.1))
    ax.legend(loc='upper left')

    ## annotate the axis
    text = r"$\alpha$=%s, $\beta$=%s" % (a, b) + "\n"
    text += r"$\hat{\mu}$=%s, $\hat{\sigma}$=%s" % (round(mu_hat, 2), round(sigma_hat, 2))
    ax.annotate(text, xy=(0.01, 0.7), xycoords='axes fraction',
                xytext=(0.02, 0.68), textcoords='axes fraction',
                bbox=dict(boxstyle="round", fc="0.8"))

    return (norm_dist, beta_dist)


# + [markdown] slideshow={"slide_type": "notes"}
# This function demonstrates the process of statistical inference on a dataset.  We first instantiate a beta distribution given the input parameters.  We create a histogram of 2000 samples drawn from the distribution and then evaluate the pdf for most possible values.  The plotting code takes up most of the function and is less important here than the single line needed for inference.  To summarize the function we use a beta distribution to represent our given data and then we infer a Gaussian using the dot fit method.  The estimated parameters are denoted with conventional hat notation.

# + slideshow={"slide_type": "slide"}
norm_dist, beta_dist = infer_gaussian_from_beta(5, 5)  # (5,1),(5,2),(5,5)

## what is the probability that more than 90% of processors are being used at any one time?
slide_print("Estimated Probability: {}".format(round(1 - norm_dist.cdf(0.90), 3)))
slide_print("True Probability: {}".format(round(1 - beta_dist.cdf(0.90), 3)))

# + [markdown] slideshow={"slide_type": "notes"}
# The histogram represents the random samples from the specified beta distribution and the lines are the corresponding pdfs.  The goal here is to make probability statements that are meaningful even to non-technical stakeholders.  For example... We can answer this using the cdf as shown.  We see that the probabilities from the assumed and actual distributions are close.  Given a reasonable fit we can make statements like on average there is a 12% probability that more than 90% of processors being allocated.
#
# Lets first see what happens when our assumed distribution is no longer appropriate for the given data
#
# There is a noticeable difference between the two probabilities.
#
# Next lets align the assumed and actual distributions
#
# We see that the probabilities tend to converge.  Despite the ease with which these statements can be made it is important to remember that visualization provides credibility and context that is important when using statistical inference to make probability statements.  **END OF PART 2**

# + [markdown] slideshow={"slide_type": "slide"}
# <span style="color:orange">Numerical Optimization</span>
#
# * Maximum Likelihood
# * Expectation Maximization (EM)
#
# <span style="color:orange">Simulation</span>
#
# * Bootstrapping
# * Permutation Testing
# * Monte Carlo Methods
#
# <span style="color:orange">Estimation of Posterior Distributions</span>
#
# * Markov Chain Monte Carlo (MCMC)
# * Variational Methods
#
# <span style="color:orange">Nonparametric Estimation</span>
#
# * Bayesian Non-parametrics

# + [markdown] slideshow={"slide_type": "notes"}
# The fit we just used in the previous slide was computed by maximizing a log-likelihood function.  There are many ways to carry out inference.  Depending on the choice of method there are inherent advantages and disadvantages... like computational complexity, bias and flexibility.  Let's dive into an example that showcases several of these inference methods in the context of a EDA investigation.

# + [markdown] slideshow={"slide_type": "slide"}
# > Visitors to the AAVAIL website are randomly sent to version A or version B of the website.  Let's assume that version B has a new marketing scheme for getting a user to click 'subscribe' and version A is the default version.  In order to investigate whether version B has a greater impact on purchase decisions we will track the number of visitors to each version and keep track of the proportion that convert to becoming subscribers.

# + [markdown] cell_style="split" slideshow={"slide_type": "notes"}
# In data science, hypothesis tests often take the form of A/B tests where there are control and treatment groups of samples.  We are going to work with the following example for the remainder of this lesson.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Hypothesis testing
#
# 1. **Pose your question**
#
#     * Do visitors to sites A and B convert (i.e. become subscribers) at different rates?
#
# 2.
# **Specify a null $H_{0}$ and alternative $H_{1}$ hypotheses**
#
#     * $H_{0}$ The conversion rate is the same between sites A and B
#     * $H_{1}$ The conversion rate is different between sites A and B
#
# 3. **Choose a specific hypothesis test and level of significance**
#
#     * Binomial Test, $\alpha=0.05$
#
# 4. **Collect data**
#
#     * Track visitors to site a specified period of time, randomly sending each to either A or B
#
# 5. **Run the test**
#
#     * Calculate the test statistic and determine a $p$-value
#
# 6. **Evaluate the results**
#
#     * You will fail to reject the null hypothesis or you will reject it in favor of the alternative

# + [markdown] slideshow={"slide_type": "notes"}
# Recall the basic process behind hypothesis testing.  If we decided to use a binomial test then the procedure would look like the steps enumerated here.  From a scientific thinking perspective we are trying to disprove all other possible explanations before accepting that website B is more or less effective than website A.  It is important to remember that we decide on a test and the level of significance before collecting the data.  In the context of modern data science 'collecting data' could refer to the process of loading it into pandas, because data is often being accumulated in some form for most organizations.

# + slideshow={"slide_type": "slide"}
def simulate_data(n, p):
    """
    Simulate `n` visitors to the site by running repeated Bernoulli trials
    with success probability `p`; return the array of 0/1 outcomes.

    The probability of success in a single trial follows a Bernoulli
    distribution.
    """
    results = stats.bernoulli(p).rvs(n)
    converts = np.sum(results)
    slide_print("We observed {} conversions out of {} visitors".format(converts, n))
    return (results)


p_a = 0.12  # the long term conversion rate for website 'a'
p_b = 0.20  # the 'unknown' conversion rate for website 'b'

np.random.seed(42)  # use a random seed to ensure the repeatability
results = simulate_data(100, p_b)

# + [markdown] slideshow={"slide_type": "notes"}
# Since we are simulating the data we can specify the 'unknown' conversion rates for both versions of the website.  In reality these are values that we estimate.  In a typical A/B test we would be comparing two versions of the site running concurrently, because we want to account for as many unmeasured effects as possible like seasonality, time of day effects and more.  This would be a two-sample hypothesis test.  Because many organizations are not always willing to run experiments in this way let's start with a one-sample test and ask the question if there is a difference between site B and the historical baseline.

# + slideshow={"slide_type": "slide"}
## run a binomial test
# NOTE(review): scipy.stats.binom_test is deprecated (removed in SciPy 1.12);
# the replacement is stats.binomtest(...).pvalue -- migrate when upgrading SciPy.
baseline_probability = p_a
p_value = stats.binom_test(np.sum(results), n=results.size, p=baseline_probability)
slide_print("binomial test p-value: {}".format(round(p_value, 4)))

# + [markdown] slideshow={"slide_type": "notes"}
# If the p-value is less than 0.05 we reject the null hypothesis the conversion rate is the same as the historical conversion rate, in favor of the alternative.  It is important that you do not stop your investigation here... it is also important that you do not make critical business decisions based on a single p-value.  We will discuss some limitations of p-values in later sections.  This p-value should be considered alongside other forms of evidence before making decisions.
# **END OF PART 3**

# + [markdown] slideshow={"slide_type": "slide"}
# #### The expected distribution

# + slideshow={"slide_type": "-"}
expected_dist = stats.binom(n=results.size, p=p_a)
mu = expected_dist.mean()
sd = expected_dist.std()
slide_print("The expected distribution the site is mu={}, sd={}".format(mu, round(sd, 3)))

# + [markdown] slideshow={"slide_type": "notes"}
# We can also think of the A/B test from a generative perspective.  That is samples are generated by repeated Bernoulli trials, and these follow a Binomial distribution.  So we can specify the baseline as follows.  Let p be the long term conversion rate, in this case it is the rate observed from site A.  And let the parameter 'n' be the number of samples in our experiment.  We will use this distribution to give us and idea of what is expected given the null or baseline distribution.

# + [markdown] slideshow={"slide_type": "slide"}
# #### One-sample Z-test for difference in proportions
#
# $$
# z = \frac{\hat{p}-\frac{1}{2} - p_{0}}{\sigma}
# $$

# + slideshow={"slide_type": "-"}
expected_dist = stats.binom(n=results.size, p=p_a)
p0 = expected_dist.mean()
sd = expected_dist.std()
z = (np.sum(results) - 0.5 - p0) / sd
# BUG FIX: the z-test p-value was assigned to `pvalue` while the *stale*
# binomial-test `p_value` from the earlier cell was printed.  Use one name
# consistently so the normal-approximation p-value is the one reported.
p_value = 2 * (1 - stats.norm.cdf(z))
slide_print("normal approximation p-value: {}".format(round(p_value, 3)))

# + [markdown] slideshow={"slide_type": "notes"}
# The binomial test is an example of an exact solution.  If the number of visitors increases beyond a few thousand it becomes reasonable to use an normal distribution to approximate the estimated proportion.  The test statistic in this case is a z-score shown by the formula above.  The numerator is the difference between our estimated conversion rate and the baseline.  The one half is additionally subtracted as a continuity correction.  This is necessary when we approximate discrete distributions with continuous ones.  The denominator is the estimate for the standard deviation.  We see that the p-value is similar to the exact test in this case.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Permutation test
#
# The distribution of the test statistic under the null hypothesis is obtained by calculating a very large number of possible values.

# + slideshow={"slide_type": "-"}
nsamples = 100000
n = results.size
p = p_a
slide_print("n={}, p={}".format(n, p))

# Simulate `nsamples` success counts under the null (site B converts at p_a)
# and measure how often they reach the observed count (two-sided).
xs = np.random.binomial(n, p, nsamples)
p_value = 2 * np.sum(xs >= np.sum(results)) / xs.size
slide_print("simulation p-value: {}".format(round(p_value, 3)))

# + [markdown] slideshow={"slide_type": "notes"}
# It is also possible to take a numerical approach to calculating these probabilities.  In this example we repeatedly generate success counts from a binomial distribution with specified n and p.  We then track how many of those success counts were greater than or equal to the observed number of conversions from site B.  The proportion, after a large number of simulations converges towards the p-value that tests the hypothesis of equality between the two site conversion rates.

# + [markdown] slideshow={"slide_type": "slide"}
# #### Maximum likelihood estimation
#
# > When business decisions are made on the basis of a particular estimate then the context provided by the bootstrap method provides an additional source of information to help make those decisions.

# + slideshow={"slide_type": "-"}
# Bootstrap: resample the observed results with replacement and take the mean
# of each resample to get a sampling distribution for the conversion rate.
bs_samples = np.random.choice(results, (nsamples, len(results)), replace=True)
bs_ps = np.mean(bs_samples, axis=1)
bs_ps.sort()
print("Maximum Likelihood Estimate:%s" % (np.sum(results) / float(len(results))))
print("Bootstrap CI: (%.4f, %.4f)" % (bs_ps[int(0.025 * nsamples)], bs_ps[int(0.975 * nsamples)]))

# + [markdown] slideshow={"slide_type": "notes"}
# Maximum Likelihood Estimation (MLE)
#
# We have seen an example of maximum likelihood estimation in the example about probabilities and CPU usage.  One significant caveat to this kind of estimation is that we are left with a point estimate that has little context.  Here we take the point estimation a step further and quantify the distribution of that estimate using the bootstrap.  **END OF PART 4**

# + [markdown] slideshow={"slide_type": "slide"}
# #### Bayesian estimation
#
# Recall that we are trying to update our degree of belief by combining 'prior' information with our likelihood.
#
# $$
# p(\theta|x) = \frac{p(x|\theta)p(\theta)}{p(x)}
# $$
#
# * <span style="color:orange">Prior</span> - $p(\theta)$ - belief about a quantity before presented with evidence
# * <span style="color:orange">Posterior</span> - $p(\theta|x)$ - probability of the parameters given the evidence
# * <span style="color:orange">Likelihood</span> - $p(x|\theta)$ - probability of the evidence given the parameters
# * Normalizing Constant - $p(x)$ - helps ensure a valid probability

# + [markdown] slideshow={"slide_type": "notes"}
# The Bayesian treatment for comparing conversion rates for sites A and B is very similar to the MLE approach when combined with a bootstrap confidence interval.  Point estimates are not obtained directly, instead there is a posterior distribution that corresponds to, in this case $\hat{p}$.  Bayes formula and the relevant terms are shown on this slide as a reminder.
# + slideshow={"slide_type": "slide"}
def bayes_one_sample_proportaions(p_a,p_b,n=100):
    """
    Use the conjugate Beta prior to estimate the posterior distribution of a
    conversion rate, then plot the prior, the posterior, its 95% credible
    interval, and the known rates p_a / p_b on a shared axis.

    NOTE(review): the function name contains a typo ('proportaions'); it is
    kept as-is because the call sites below use this spelling.

    Parameters
    ----------
    p_a : float
        Baseline (long-term) conversion rate used as the null hypothesis.
    p_b : float
        'True' conversion rate used to simulate the observed data.
    n : int, default 100
        Number of simulated visitors.
    """
    fig = plt.figure(figsize=(10,4),dpi=160,facecolor='white')
    ax = fig.add_subplot(111)
    # Fixed seed so the simulated conversions are reproducible between runs.
    np.random.seed(42)
    results = simulate_data(n,p_b)
    # Expected conversions if the new site behaved like the baseline.
    expected_dist = stats.binom(n=results.size,p=p_a)
    mu = expected_dist.mean()
    sd = expected_dist.std()
    slide_print(r"Given the baseline we expected on average {} conversions".format(int(mu)))
    # NOTE(review): stats.binom_test is deprecated (removed in SciPy 1.12);
    # stats.binomtest is the modern replacement -- confirm the installed
    # SciPy version before upgrading.
    p_value = stats.binom_test(np.sum(results), n=results.size, p=p_a)
    slide_print("binomial test p-value: {}".format(round(p_value,4)))
    converts = np.sum(results)
    # Beta(1, 1) prior, i.e. uniform / uninformative (see the notes below).
    a, b = 1, 1
    prior = stats.beta(a, b)
    # Conjugate update: posterior is Beta(successes + a, failures + b).
    post = stats.beta(converts+a, n-converts+b)
    ci = post.interval(0.95)
    # MAP estimate: mode of the Beta posterior, (alpha-1)/(alpha+beta-2).
    map_ =(converts+a-1.0)/(n+a+b-2.0)
    xs = np.linspace(0, 1, n)
    # The densities are plotted against the sample index (0..n-1), so the
    # probability quantities below are scaled by n to share that axis.
    ax.plot(prior.pdf(xs), label='Prior')
    ax.plot(post.pdf(xs), label='Posterior')
    # Leave 5% headroom above the posterior's peak for the overlay lines.
    maxval = (0.05 * post.pdf(xs).max()) + post.pdf(xs).max()
    linerange = np.linspace(0,maxval,100)
    ci_range = np.linspace(ci[0],ci[1],100)
    ax.plot(ci_range*n,[0.05*maxval]*100, c='black', linewidth=2, label='95% CI');
    ax.plot([n*p_a]*100,linerange,c='darkred',linestyle='solid',linewidth=5,alpha=0.5,label=r'$p_{a}$')
    ax.plot([n*p_b]*100,linerange,c='gold', linestyle='solid',linewidth=5,alpha=0.5,label=r'$p_{b}$')
    ax.plot([n*map_]*100,linerange,c='royalblue', linestyle='dashed', alpha=0.9,label=r'$\hat{p_{b}}$')
    ax.set_ylim((0,maxval))
    ax.legend()

# + [markdown] slideshow={"slide_type": "notes"}
# For this example we demonstrate here an analytical solution that makes use of a conjugate prior over the binomial distribution. For most real life problems the necessary statistical models are more complex and estimation makes use of numerical methods like Markov Chain Monte Carlo. The conjugate prior of the Binomial is the Beta distribution. The prior distribution, in this case a beta, with both parameters equal to 1 results in a uniform distribution, which happens to be ideal when we want our prior to be uninformative. We encourage you to come back to this function later on, but try not to get caught up in too many of the details your first time through.

# + slideshow={"slide_type": "slide"}
p_a = 0.12 # the long term conversion rate for website 'a'
p_b = 0.20 # the 'unknown' conversion rate for website 'b'

bayes_one_sample_proportaions(p_a,p_b,n=1000)

# + [markdown] slideshow={"slide_type": "notes"}
# We are interested in the question whether the conversion rate from B is different from that of A. Normally we do not know the actual conversion rate for site B, but we have plotted it here in yellow to see how well our dashed blue estimate aligns. With more data these two lines will converge. The historical expected number of conversions is shown in red and as a rule of thumb if our confidence interval overlaps it then we cannot be confident that the two conversion rates are different. It is an intuitive way of essentially running a hypothesis test where there is no need to set a level of $\alpha$.
#
# 1. First lets increase n (change n to 500)
#
# We see that as the sample size increases the known and empirically estimated conversion rates converge
#
# 2. Then lets increas n some more (change n to 1000)
#
# At even higher sample sizes we see the confidence intervals shrink to reflect an increased degree of belief
#
# Note that this setup naturally accepts new data by setting your posteriors as your prior and repeating this process. We encourage you to explore this solution and dig deeper into related readings on solutions to the multi-armed bandit problem.
data-vis-mpl/slides/hypothesis-testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Load Packages

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

pd.options.display.max_columns = 999
pd.options.display.max_rows = 999

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

import joblib
# -

# Ensemble Methods
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor

from sklearn.metrics import mean_squared_error as mse

# # Load Data

data = pd.read_csv('data/nyc-rolling-sales.csv')

# # Clean Data

# +
### Clean Data

# Convert price/area columns to numeric; unparseable entries become NaN.
data['SALE PRICE'] = pd.to_numeric(data['SALE PRICE'], errors='coerce')
data['SALE PRICE'] = data['SALE PRICE'].fillna(0)

data['GROSS SQUARE FEET'] = pd.to_numeric(data['GROSS SQUARE FEET'], errors='coerce')
data['LAND SQUARE FEET'] = pd.to_numeric(data['LAND SQUARE FEET'], errors='coerce')

# Convert to date
data['SALE DATE'] = pd.to_datetime(data['SALE DATE'], errors='coerce')

# Remove 5th and 95th percentile tails
zero = 0
# BUG FIX: this variable is named `fifth` (and the comment above says 5th
# percentile) but it previously looked up the '15%' row; use '5%' to match.
fifth = data['SALE PRICE'].describe(np.arange(0.05, 1, 0.05)).T['5%']
ninetyfifth = data['SALE PRICE'].describe(np.arange(0.05, 1, 0.05)).T['95%']
# NOTE(review): only zero-priced rows and the top 5% are actually dropped; the
# lower 5% cut-off (`fifth`) is computed but never applied -- confirm intent
# before wiring it into the filter, as that would change downstream results.
data = data[(data['SALE PRICE'] > zero) & (data['SALE PRICE'] <= ninetyfifth)].copy()

# Handle Missing Values by Dropping (for now)
data.dropna(inplace=True)
# -

data.shape

dep_var = 'SALE PRICE'
cat_names = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING CLASS CATEGORY', 'TAX CLASS AT PRESENT', 'BUILDING CLASS AT PRESENT', 'BUILDING CLASS AT TIME OF SALE']
cont_names = ['LAND SQUARE FEET', 'GROSS SQUARE FEET', 'RESIDENTIAL UNITS', 'COMMERCIAL UNITS']

# # Feature Engineering

# +
# Multihot encode categorical variables
df_cat = pd.get_dummies(data[cat_names].astype(str))

# Reassign numerical to diff df
df_cont = data[cont_names] # Normalize numerical features df_cont_norm = (df_cont-df_cont.min())/(df_cont.max()-df_cont.min()) # Concatenate features X = pd.concat([df_cat, df_cont_norm], axis=1) # Get dependent variable and store as different df series # y = np.log(data[dep_var]) y = data[dep_var] # - df_cont.corr().style.background_gradient('coolwarm', axis=None) X.shape, y.shape # # Modelling # + # Train-test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 42) # model = RandomForestRegressor(n_estimators=300, random_state=42) model = GradientBoostingRegressor(n_estimators=300, random_state=42, ) # Fit Model # %time model.fit(X_train, y_train) # - y_pred = model.predict(X_test) y_pred_train = model.predict(X_train) rmse_score = np.sqrt(mse(y_pred, y_test)) rmse_score rmse_score_train = np.sqrt(mse(y_pred_train, y_train)) rmse_score_train # # AutoML # # !pip install tpot; from tpot import TPOTRegressor gen_num = 5 max_tot_time = 30 max_eval_time = 3 pop_size = 10 verbose = 2 # + import os import shutil datadir = './pipelines' if not os.path.exists(datadir): os.makedirs(datadir) tpot = TPOTRegressor(generations=gen_num, max_time_mins=max_tot_time, max_eval_time_mins=max_eval_time, population_size=pop_size, verbosity = verbose, memory = 'auto', periodic_checkpoint_folder='pipelines/', warm_start = True) tpot.fit(X_train, y_train) print(tpot.score(X_test, y_test)) # - pd.DataFrame(dict(list(tpot.evaluated_individuals_.items()))).T\ .replace([np.inf, -np.inf], np.nan)\ .dropna()\ .drop('generation', axis = 1)\ .sort_values('internal_cv_score', ascending = False)\ .head() 1.7
nyc_house_modelling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Pandas I # # Today we will be using a real-life, messy dataset to practice some of what we've learned about the Pandas library. The dataset itself + a short description of it is available at https://www.kaggle.com/paultimothymooney/phd-stipends. import pandas as pd import numpy as np from matplotlib import pyplot as plt # Let's download the dataset and explore what's inside: stipends = pd.read_csv('phd-stipends.csv') print(stipends.shape) stipends.head() # ## Task 1: Select relevant columns # Many columns! We'll really just use University, Department, and Overall Pay today. # Let's create a new dataframe with only these columns. stipends_essential = stipends.loc[:,['University', 'Department', 'Overall Pay']] # Pandas provides several selection methods, see [here](https://pandas.pydata.org/docs/getting_started/10min.html#selection) and # `.loc` is the prefrered method to select multi-axis data by label. You can also read [here](https://pandas.pydata.org/docs/user_guide/indexing.html#different-choices-for-indexing) for a larger documentation. stipends_essential.head() # We saw that for some variables, there were a lot of NaNs. Let's check if this is there are any NaNs in *Overall Pay* and if so, let's remove them. stipends_essential['Overall Pay'].isna().sum() # Let's remove these 21 NaNs. You can call the `.dropna()` method of a DataFrame, but be wary of the index modification. The following example also uses the `.tail(n)` method that displays the last n items in a DataFrame (5 by default). # Compare the effect of reset_index() function on the index (left). print(stipends_essential.tail()) print(stipends_essential.dropna().tail()) # see that having removed 21 values do not change the index. 
print(stipends_essential.dropna().reset_index(drop=True).tail()) #The drop=True in reset_index() method is to prevent pandas from keeping the previous index in a separated column # We will keep the last one : stipends_essential = stipends_essential.dropna().reset_index(drop=True) # safety check: all the NaNs have been removed stipends_essential['Overall Pay'].isna().sum() # ## Task 2: Rename columns # All good but as we've learned, it's useful to not have spaces within column names: that way we can often refer to them as `df.column` instead of `df['column']`. Let's rename the columns avoiding spaces. stipends_essential.columns stipends_essential.columns = ['University', 'Department', 'Stipend'] # ## Task 3: Calculate average stipend # Our goal is to be able to see what the overall distribution of stipends is. First, let's calculate the average stipends. But this is real data entered by the users of a website, so we might need to clean the dataframe some. # We know that a useful function to getting descriptive stats for your dataframe is `.describe()`. Let's try to use it. stipends_essential.describe() # It seems like *Stipend* is not numeric! Let's investigate: We can check which types of data are in each of our columns using .info() stipends_essential.info() # `object` can contain strings or data of different types. Let's see how else we can check what sort of data we have in *Stipend*. type(stipends_essential.Stipend) # Using *type* on a whole column is not super-helpful, because every column is just a pandas series object. type(stipends_essential.Stipend[0]) # + data_types = [] for i in range(len(stipends_essential.Stipend)): data_types.append(str(type(stipends_essential.Stipend[i]))) print(np.unique(data_types)) # - # String! To get the average stipend, we need to convert all these strings to floats. 
Let's try `.astype(float)` on the *Stipend* column of our dataframe to do this stipends_essential.Stipend.astype(float) # This gives an error: we first need to remove the *$s* and the commas. You can do this in different ways: (1) you can apply a function to every row in the dataframe; (2) you can use some built-in Pandas functions that work on whole columns (i.e., no need to apply a function to each row). Let's use the latter option applying `.replace()` to the whole *Stipend* column. If we want the search-and-replace `.replace` function to look within strings, we will use an additional argument: `regex=True`. A good alternative is to use .str.replace. .str functions are specific to working on strings within a dataframe. stipends_essential['Stipend'] = stipends_essential['Stipend'].replace(',', '', regex=True) # note that this is the replace method of the DataFrame stipends_essential['Stipend'] = stipends_essential['Stipend'].str.replace(',', '') # If you want to go row-by-row, here are two options: # - defining a function and use it with the `.apply` method; # - using a lambda function. # + # Option 1: "real function" (can add other operations right away, too) def remove_non_numbers(row): row.Stipend = row.Stipend.replace(',', '') # note that this is the replace method of the string return row.Stipend stipends_essential.Stipend = stipends_essential.apply(remove_non_numbers, axis = 1) # - # Alternative option: "lambda function" stipends_essential.Stipend = stipends_essential.apply(lambda row: row.Stipend.replace(',',''), axis = 1) # Did it work? stipends_essential.Stipend.head() # Yes, we got rid of all the commas. Now, let's try to do the same with the dollar signs. # If we want the search-and-replace to look within strings, we will use, like above, `regex=True`. But it recognizes *the dollar sign* as a special character, so we'll have to use *\$* for regex to know what we mean is the *literal $*. 
# FIX: use a raw string for the regex pattern. '\$' inside a normal string
# literal is an invalid escape sequence (SyntaxWarning on Python 3.12+, and
# slated to become an error); r'\$' produces the identical two characters
# while keeping the source future-proof. The backslash is needed because $
# is a regex metacharacter (end-of-string anchor).
stipends_essential.Stipend = stipends_essential.Stipend.replace(r'\$', '', regex=True)

stipends_essential.Stipend.head()

# Now we're good to go: let's convert *Stipend* values to floats.

stipends_essential['Stipend'] = stipends_essential['Stipend'].astype(float)

type(stipends_essential['Stipend'][0])

stipends_essential.head()

# .describe() should work now, too.

stipends_essential.describe()

# Something looks wrong:
# - why are there negative values? Let's remove them: who knows what the users meant.
# - why are the positive values so huge? Let's investigate.

stipends_essential.Stipend.median()

stipends_essential.Stipend.mean()

# The mean is far away from the median. There are some potential explanations for the outliers to find their way into the dataset. For now, let's remove these outliers altogether while tracking how many values we would lose by doing so.
#
# The standard deviation is huge, so removing values that are larger than 2 SDs than the median stipend doesn't make much sense.
# An alternative is to cap them at a value that sounds plausible. Let's remove (1) all the negative values and (2) anything above $100000.

stipends_essential.shape # original number of values

# Let's deal with negative values first.

stipends_essential_clean = stipends_essential[(stipends_essential.Stipend < 100000) & (stipends_essential.Stipend > 0)]

# Did we remove too many values?

stipends_essential_clean.shape

# ## Task 4: Plot data distribution

# We can use df.hist() function to plot the histogram of the data

hist = stipends_essential_clean.hist(bins=50)

# If you are still concerned about the outliers, you can also practice calculating descriptive statistics for columns in Pandas DataFrames.
# Trim the low tail relative to the median: the median is used as the centre
# here because the mean is skewed by the outliers observed above.
median_stipend = stipends_essential_clean.Stipend.median()
std_stipend = stipends_essential_clean.Stipend.std()
stipends_essential_clean = stipends_essential_clean[stipends_essential_clean.Stipend > (median_stipend - 2*std_stipend)]

stipends_essential_clean.describe()

stipends_essential_clean.Stipend.hist(bins=40)
plt.show()

# ## Bonus task: plot the stipend distribution only for neuroscience students

# Hint: Not all department names are just "Neuroscience"

# case=False makes the substring match case-insensitive, so e.g.
# "Neuroscience" and "Cognitive neuroscience" both qualify.
neuro_stipends = stipends_essential_clean[stipends_essential_clean['Department'].str.contains('neuroscience', case = False)].reset_index(drop=True)

neuro_stipends.describe()

# True if neuroscience stipends sit above the overall median.
neuro_stipends.Stipend.median() > stipends_essential_clean.Stipend.median()

neuro_stipends.hist(bins=20)

# ## Woop woop, what a nice outcome! Thanks, everyone + see you next week!
Class5_PandasPractice_PhDStipends.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### A rank 1 ndarray # + import numpy as np rank_1 = np.array([2 ,22,123]) # Creating a rank 1 array print(type(rank_1)) # The type of an ndarray is: "<class 'numpy.ndarray'>" # - # #### shape of the array, it should have just one dimension (Rank 1) # print(rank_1.shape) # #### because this is a 1-rank array, we need only one index to accesss each element # print(rank_1[0], rank_1[1], rank_1[2]) # #### ndarrays are mutable, here change an element of the array # + rank_1[0] = 888 print(rank_1) # - # ### A rank 2 ndarray # + rank_2 = np.array([[11,12,13], [21,22,23]]) # Create a rank 2 array print(rank_2) # print the array print() print(f"The shape is {rank_2.shape[0]} rows and {rank_2.shape[1]} columns ".format(rank_2.shape)) # rows x columns # - # #### many ways to create numpy arrays # #### create a 3x3 array of zeros # # + import numpy as np example_1 = np.zeros((3, 3)) example_1 # - # #### create a 3x3 array filled with 6.0 # example_2 = np.ones((3,3)) example_2 example_3 = np.full((3,3), 6) example_3 # #### create a 3x3 matrix with the diagonal 1s and the others 0 # example_4 = np.eye(3,3) example_4 np.eye(3,3) # #### create an array of random numbers in between 0 to 1 example_5 = np.random.random((4,4)) example_5 example_6 = np.random.randint(0, 10) print("Random number between 0 and 10 is % s" % (example_6)) # + import numpy as np # Rank 2 array of shape (3, 4) array_1 = np.array([[11,12,13,14], [21,22,23,24], [31,32,33,34]]) array_1 # - array_1[::,1:3] # #### slicing of last column array_1[::,-1] array_1[::,1:2] array_1[::,1:2].shape # + # Using both integer indexing & slicing generates an array of lower rank row_rank1 = array_1[0, :] # Rank 1 view print(row_rank1, row_rank1.shape) # notice only a single [] # + # Slicing alone: generates 
an array of the same rank as an_array row_rank1 = array_1[1:2, :] # Rank 1 view print(row_rank1, row_rank1.shape) # notice [[]] # + #We can do the same thing for columns of an array: print() col_rank1 = array_1[:, 1] col_rank2 = array_1[:, 1:2] col_rank3 = array_1[::, 3:4] col_rank4 = array_1[:, 3] print(col_rank1, col_rank1.shape) # Rank 1 print() print(col_rank2, col_rank2.shape) # Rank 2 print() print(col_rank3,col_rank3.shape) print() print(col_rank4,col_rank4.shape) # + # Create a new array array_2 = np.array([[11,12,13], [21,22,23], [31,32,33], [41,42,43]]) print('Original Array:') print(array_2) # + ## Create an array of indices col_indices = np.array([0, 2, 1, 2]) print('\nCol indices picked : ', col_indices) row_indices = np.arange(4) print('\nRows indices picked : ', row_indices) # - # pairings of row and column indices for row, col in zip(row_indices, col_indices): print(row,col) # Select one element from each row print('Values in the array at those indices: ',array_2[row_indices, col_indices]) # create a filter which will be boolean values for whether each element meets this condition filter = (array_2 > 25) filter # we can now select just those elements which meet that criteria print(array_2[filter]) # + # For short, we could have just used the approach below without the need for the separate filter array. 
array_2[(array_2 % 2 == 0)] # - # ### Data types example1 = np.array([11, 22, 33], dtype=np.int64) #You can also tell Python the data type print(example1.dtype) # #### you can use this to force floats into integers (using floor function) # example2 = np.array([121.6,142.3], dtype=np.int64) print(example2.dtype) print() print(example2) example3 = np.array([11, 22, 33], dtype=np.float64) #You can also tell Python the data type print(example3.dtype) print(example3) # ### Arithmatic array operations x = np.array([[11,12],[13,14]], dtype=np.int) y = np.array([[20.4, 22.5],[32.6,64.7]], dtype=np.float64) x y x+y np.add(x,y) np.subtract(x,y) np.multiply(x,y) np.divide(x, y) np.sqrt(x) np.exp(x) # #### Basic Statistical operations # setup a random 2 x 4 matrix arr = 10 * np.random.randn(2,5) print(arr) arr.mean() arr.mean(axis =1) arr.mean(axis =0) arr.sum() np.median(arr) np.median(arr, axis = 1) sorting = np.median(arr, axis=0) sorting sorting.sort() sorting # #### Set Operations with np.array data type: train = np.array(['mobile','price','clip']) test = np.array(['lamp','mobile','clip']) print(train, test) # #### intersection np.intersect1d(train,test) np.union1d(train, test) print( np.setdiff1d(train, test) )# elements in train that are not in test print( np.in1d(train, test) )# element of train is also prsent in test # ### conversion of files # #### npy file convertTo_npy = np.array([1,2,3,4]) convertTo_npy np.save('npy_file',convertTo_npy) np.load('npy_file.npy') # #### txt file np.savetxt('test_file.txt',X = convertTo_npy, delimiter=',') np.loadtxt('test_file.txt',delimiter=',') # + # transpose ex1 = np.array([[11,12],[21,22]]) ex1.T # - mat = np.random.rand(5,5) mat np.where( mat > 0.5 , 1000, 1.5) Y = np.random.normal(size = (1,5)) print(Y) #print(Y.dtype) print(Y.shape) print() z = np.random.normal(size = (1,5))[0] print(z) #print(z.dtype) print(z.shape) Z = np.random.randint(low = 2, high = 50, size = 4) print(Z) np.random.permutation(Z) #return a new ordering 
of elements in Z # #### uniform distribution np.random.uniform(size=4) #uniform distribution # #### Normal Distribution np.random.normal(size=4) #normal distribution # #### Merging Dataset # + K = np.random.randint(low=2,high=50,size=(2,2)) print(K) print() M = np.random.randint(low=2,high=50,size=(2,2)) print(M) # - # #### vertically merging data np.vstack((K,M)) np.concatenate([K, M], axis = 0) # #### Horizontally merging datset np.hstack((K,M)) np.concatenate([K, M], axis =1)
1.numpy_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.4 64-bit (''.venv'': venv)' # name: python3 # --- # To open this notebook in Google Colab and start coding, click on the Colab icon below. # # <table style="border:2px solid orange" align="left"> # <td style="border:2px solid orange"> # <a target="_blank" href="https://colab.research.google.com/github/neuefische/ds-meetups/blob/main/02_Web_Scraping_With_Beautiful_Soup/02_webscraping_bs4.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # </table> # # Web Scraping # # Web scraping is the process of extracting and storing data from websites for analytical or other purposes. Therefore, it is useful to know the basics of html and css, because you have to identifiy the elements of a webpage you want to scrape. If you want to refresh your knowledge about these elements, check out the [HTML basics notebook](./01_HTML_Basics.ipynb). # # We will go through all the important steps performed during web scraping with python and BeautifulSoup in this Notebook. # # ### Learning objectives for this Notebook # # At the end of this notebook you should: # - be able to look at the structure of a real website # - be able to figure out what information is relevant to you and how to find it (Locating Elements) # - know how to download the HTML content with BeautifulSoup # - know how to loop over an entire website structure and extract information # - know how to save the data afterwards # # # For web scraping it is useful to know the basics of html and css, because you have to identifiy the elements of a webpage you want to scrape. The easiest way to locate an element is to open your Chrome dev tools and inspect the element that you need. 
A cool shortcut for this is to highlight the element you want with your mouse and then press Ctrl + Shift + C or on macOS Cmd + Shift + C instead of having to right click + inspect each time (same in mozilla). # # ## Locating Elements # # For locating an element on a website you can use: # # - Tag name # - Class name # - IDs # - XPath # - CSS selectors # # ![alt text](./images/html_elements.png) # # XPath is a technology that uses path expressions to select nodes or node- sets in an XML document (or in our case an HTML document). [Read here for more information](https://www.scrapingbee.com/blog/practical-xpath-for-web-scraping/) # # ## Is Web Scraping Legal? # # Unfortunately, thereโ€™s not a cut-and-dry answer here. Some websites explicitly allow web scraping. Others explicitly forbid it. Many websites donโ€™t offer any clear guidance one way or the other. # # Before scraping any website, we should look for a terms and conditions page to see if there are explicit rules about scraping. If there are, we should follow them. If there are not, then it becomes more of a judgement call. # # Remember, though, that web scraping consumes server resources for the host website. If weโ€™re just scraping one page once, that isnโ€™t going to cause a problem. But if our code is scraping 1,000 pages once every ten minutes, that could quickly get expensive for the website owner. # # Thus, in addition to following any and all explicit rules about web scraping posted on the site, itโ€™s also a good idea to follow these best practices: # # ### Web Scraping Best Practices: # # - Never scrape more frequently than you need to. # - Consider caching the content you scrape so that itโ€™s only downloaded once. # - Build pauses into your code using functions like time.sleep() to keep from overwhelming servers with too many requests too quickly. # # The Problem we want to solve # # ![](images/sad_larissa.png) # # Larissa's sister broke her aquarium. 
And we decided to get her a new one because christmas is near and we want to cheer Larissa up! And because we know how to code and can't decide what fish we want to get, we will solve this problem with web scraping! # ## BeautifulSoup # # The library we will use today to find fishes we can gift Larissa for christmas is [BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/). It is a library to extract data out of HTML and XML files. # # The first thing weโ€™ll need to do to scrape a web page is to download the page. We can download pages using the Python requests. # # The requests library will make a GET request to a web server, which will download the HTML contents of a given web page for us. There are several different types of requests we can make using requests, of which GET is just one. import time import requests from bs4 import BeautifulSoup import re import pandas as pd # get the content of the website page = requests.get("https://www.interaquaristik.de/tiere/zierfische") html = page.content # We can use the BeautifulSoup library to parse this document, and extract the information from it. # # We first have to import the library, and create an instance of the BeautifulSoup class to parse our document: # parse the html and save it into a BeautifulSoup instance bs = BeautifulSoup(html, 'html.parser') # We can now print out the HTML content of the page, formatted nicely, using the prettify method on the BeautifulSoup object. print(bs.prettify()) # This step isn't strictly necessary, and we won't always bother with it, but it can be helpful to look at prettified HTML to make the structure of the page clearer and nested tags are easier to read. # # As all the tags are nested, we can move through the structure one level at a time. We can first select all the elements at the top level of the page using the children property of ``bs``. 
# # Note that children returns a list generator, so we need to call the list function on it: list(bs.findChildren()) # And then we can have a closer look on the children. For example the ```head```. bs.find('head') # Here you can try out different tags like ```body```, headers like ```h1``` or ```title```: bs.find('insert your tag here') # But what if we have more than one element with the same tag? Then we can just use the ```.find_all()``` method of BeautifulSoup: bs.find_all('article') # Also you can search for more than one tag at once for example if you want to look for all headers on the page: titles = bs.find_all(['h1', 'h2','h3','h4','h5','h6']) print([title for title in titles]) # Often we are not interested in the tags themselves, but in the content they contain. With the ```.get_text()``` method we can easily extract the text from between the tags. So let's find out if we really scrape the right page to buy the fishes: bs.find('title').get_text() # + [markdown] tags=[] # ### Searching for tags by class and id # We introduced ```classes``` and ```ids``` earlier, but it probably wasnโ€™t clear why they were useful. # # Classes and ```ids``` are used by ```CSS``` to determine which ```HTML``` elements to apply certain styles to. For web scraping they are also pretty useful as we can use them to specify the elements we want to scrape. In our case the ```รฌds``` are not that useful there are only a few of them but one example would be: # - bs.find_all('div', id='page-body') # But it seems like that the ```classes``` could be useful for finding the fishes and their prices, can you spot the necessary tags in the DevTool of your browser? 
# tag of the description of the fishes bs.find_all(class_="insert your tag here for the name") # tag of the price of the fishes bs.find_all(class_="insert your tag here for the price") # ## Extracting all the important information from the page # Now that we know how to extract each individual piece of information, we can save these informations to a list. Let's start with the price: # + # We will search for the price prices = bs.find_all(class_= "price") prices_lst = [price.get_text() for price in prices] prices_lst # - # We seem to be on the right track but like you can see it doesn't handle the special characters, spaces and paragraphs. So web scraping is coming hand in hand with cleaning your data: prices_lst = [price.strip() for price in prices_lst] prices_lst[:5] # That looks a little bit better but we want only the number to work with the prices later. We have to remove the letters and convert the string to a float: # We are removing the letters from the end of the string and keeping only the first part prices_lst = [price.replace('\xa0โ‚ฌ *', '') for price in prices_lst] prices_lst[:5] # Now we have to replace the comma with a dot to convert the string to a float prices_lst = [price.replace(',', '.') for price in prices_lst] prices_lst[:5] # So lets convert the string into a float prices_lst = [float(price) for price in prices_lst] # But if we want to convert the string to a flaot we get an error message there seems to be prices which start with ```ab```. # So let me intodruce you to a very handy thing called ```Regular expressions``` or short ```regex```. It is a sequence of characters that specifies a search pattern. In python you can use regex with the ```re``` library. So lets have a look how many of the prices still contain any kind of letters. 
# with the regex sequence we are looking for strings that contain any # kind of letters for price in prices_lst: if re.match("^[A-Za-z]", price): print(price) # So there are some prices with an "ab" in front of them, so lets remove the letters: # Now we have to replace the comma with a dot to convert the string to a float prices_lst = [float(price.replace('ab ', '')) for price in prices_lst] prices_lst[:5] # Now it worked! so let's do the same with the description of the fishes: # + # Find all the descriptions of the fish and save them in a variable descriptions = bs.find_all(class_='thumb-title small') # Get only the text of the descriptions descriptions_lst = [description.get_text() for description in descriptions] descriptions_lst # - # Clean the text by removing spaces and paragraphs descriptions_lst = [description.strip() for description in descriptions_lst] descriptions_lst[:5] # Let's have a look if we can get the links to the images of the fish, so that we later can look up how the fish are looking, we can use the ```img``` tag for that in most cases: # find all images of the fish image_lst = bs.find('ul', {'class': 'product-list row grid'}) images = image_lst.find_all('img') images # There are only two results for the image tag so let's have a look what the tag of the other images are. # # So they have the tag: ```picture``` so lets extract those: # Extract all the pictures for the fish by using first the tag ul and than the tag picture picture_lst = bs.find('ul', {'class': 'product-list row grid'}) pictures = picture_lst.find_all('picture') pictures[:5] # That looks more like all pictures! # Although, it seems some of the fish have specials like 'Sonderangebot' or 'Neuheit'. Wouldn't it be nice if we would have this information as well? 
Here it gets a little bit tricky because the 'Sonderangebot' and 'Neuheit' do not have the same ```classes``` in the ```span``` but if we go one tag higher we can get all of them: # Extracting all the special offers by using the div tag and the class 'special-tags p-2' specials = bs.find_all('div', {'class' : 'special-tags p-2'}) specials # If we want only the text from the ```span``` we now can iterate over the specials list and extract the text: # to get only the text from the specials we are iterating over all specials for special in specials: # and than get the text of all spans from the special objects special_text = special.find("span").get_text().strip() print(special_text) # Nice that will help us for making a decision what fish to buy! # # But so far we only scraped the first page there are more fish on the next pages. There are 29 pages of fish. So how can we automate this? <br> # So this is the link of the first page: https://www.interaquaristik.de/tiere/zierfische <br> # The second link of the second page looks like this: https://www.interaquaristik.de/tiere/zierfische?page=2 <br> # The third: https://www.interaquaristik.de/tiere/zierfische?page=3 <br> # # So the only thing that changes is the ending... Let's use this! But don't forget each request is causing traffic for the server, so we will set a sleep timer between requests! # ``` # link = 'https://www.interaquaristik.de/tiere/zierfische' # for _ in range(30): # time.sleep(3) # if _ == 0: # page = requests.get(link) # html = page.content # else: # print(link + f'?page={_}') # page = requests.get(link + f'?page={_}') # html = page.content # ``` # # This will be our starting point! # We will save our results in a pandas data frame so that we can work with the data later. Therefore we will create a empty data frame and append our data to it. # Creating an empty Dataframe for later use df = pd.DataFrame() # But first lets create some functions for the scraping part: # 1. for the description # 2. 
# for the price
# 3. for the images
# 4. for specials

# Creating a function to get all the description
def get_description(lst_name):
    '''
    Get all the description texts of the fish (class_ = 'thumb-title small')
    and append them to the input list.
    Input: list
    Output: the same list, extended in place
    '''
    # find all the descriptions and save them to a list
    fish = bs.find_all(class_='thumb-title small')
    # iterate over the list fish to get the text and strip the strings
    for names in fish:
        lst_name.append(
            names.get_text()
                 .strip()
        )
    return lst_name


# Creating a function to get all the prices
def get_price(lst_name):
    '''
    Get all the prices of the fish (class_ = 'prices') as floats
    and append them to the input list.
    Input: list
    Output: the same list, extended in place
    '''
    # find all the prices and save them to a list
    prices = bs.find_all(class_='prices')
    # iterate over the prices
    for price in prices:
        # try to clean the strings from spaces, letters and paragraphs
        # and convert the result into a float
        try:
            price = float(price.get_text()
                               .strip()
                               .replace('\xa0โ‚ฌ *', '')
                               .replace(',', '.')
                               .replace('ab ', ''))
        except ValueError:
            # in some cases there is no '*' in the string (so the
            # '\xa0โ‚ฌ *' replacement above did not match); fall back to
            # the text before the first line break
            price = price.get_text()\
                .split('\n')[0]\
                .replace('\xa0โ‚ฌ', '')
            # BUG FIX: the original condition was inverted -- it replaced
            # every non-empty price with 0.0 and called float('') on empty
            # strings (which raises).  Empty -> 0.0, otherwise clean up and
            # parse the number.
            if price == '':
                price = 0.0
            else:
                price = float(price.replace(',', '.').replace('ab ', ''))
        # BUG FIX: append to the list the caller passed in (as the
        # docstring promises) instead of the global `fish_prices`
        lst_name.append(price)
    return lst_name


# Creating a function to get all the images
def get_image(lst_name_1, lst_name_2):
    '''
    Get all the images of the fish (tag = 'ul', class_ = 'product-list row grid'),
    appending the image link to lst_name_1 and the fish name to lst_name_2.
    Input: list_1, list_2
    Output: list_1, list_2 (extended in place)
    '''
    # find all images
    images_listings = bs.find('ul', {'class': 'product-list row grid'})
    images = images_listings.find_all('img')
    # find all pictures
    pictures_listings = bs.find('ul', {'class': 'product-list row grid'})
    pictures = pictures_listings.find_all('picture')
    # iterate over the images and save the links of the images in one list
    # and the names of the fish in the other
    for image in images:
        lst_name_1.append(image['src'])
        lst_name_2.append(image['alt'].strip())
    # iterate over the pictures and save the links of the images in one list
    # and the names of the fish in the other
    for picture in pictures:
        lst_name_1.append(picture['data-iesrc'])
        lst_name_2.append(picture['data-alt'].strip())
    return lst_name_1, lst_name_2


def get_special(lst_name_1, lst_name_2):
    '''
    Get all the special tags of the fish (tag = 'div', class_ = 'thumb-inner'),
    appending the special text to lst_name_1 and the article index to lst_name_2.
    (Docstring fixed: this collects specials, not images.)
    Input: list_1, list_2
    Output: list_1, list_2 (extended in place)
    '''
    # use the article as tag to get the index of all articles
    article_lst = bs.find_all('div', {'class' : 'thumb-inner'})
    # iterate over all articles with enumerate to get the single articles and the index
    for idx, article in enumerate(article_lst):
        # get all specials
        spans = article.find('div', {'class' : 'special-tags p-2'})
        # and if there is a special save the special and the index each to a list
        if spans is not None:
            special = spans.find("span").get_text().strip()
            lst_name_1.append(special)
            lst_name_2.append(idx)
    return lst_name_1, lst_name_2

# Now we will combine it all and could scrape all pages:
#
# **NOTE:** We have commented out the code, because we don't want to overwhelm the server with the requests of participants in the meetup. Feel free to run the code after the meetup. We ran the code once and uploaded the result in a csv file to github so the following code will still work!
# + #link = 'https://www.interaquaristik.de/tiere/zierfische' # ## for loop to get the page numbers #for _ in range(30): # # sleep timer to reduce the traffic for the server # time.sleep(3) # # create the lists for the functions # fish_names = [] # fish_prices = [] # picture_lst = [] # picture_name = [] # index_lst =[] # special_lst = [] # # first iteration is the main page # if _ == 0: # # get the content # page = requests.get(link) # html = page.content # bs = BeautifulSoup(html, 'html.parser') # # call the functions to get the information # get_description(fish_names) # get_price(fish_prices) # get_image(picture_lst, picture_name) # get_special(special_lst, index_lst) # # create a pandas dataframe for the names and prices # fish_dict = { # 'fish_names': fish_names, # 'fish_prices in EUR': fish_prices # } # df_fish_info = pd.DataFrame(data=fish_dict) # # create a pandas dataframe for the pictures # picture_dict = { # 'fish_names': picture_name, # 'pictures': picture_lst # } # df_picture = pd.DataFrame(data=picture_dict) # # # merge those two dataframes on the fishnames # df_ = pd.merge(df_fish_info, df_picture, on='fish_names', how='outer') # # # create a pandas dataframe for the specials # specials_dict = { # 'special': special_lst # } # real_index = pd.Series(index_lst) # df_specials = pd.DataFrame(data=specials_dict) # df_specials.set_index(real_index, inplace=True) # # # merge the dataframes on the index # df_ = pd.merge(df_, df_specials, left_index=True,right_index=True, how='outer') # # append the temporary dataframe to the dataframe we created earlier outside the for loop # df = df.append(df_) # # else-statment for the next pages # else: # # get the content from the links we create with a f-string an the number we get from the for-loop # page = requests.get(link+f'?page={_}') # html = page.content # bs = BeautifulSoup(html, 'html.parser') # # call the functions to get the information # get_description(fish_names) # get_price(fish_prices) # 
get_image(picture_lst, picture_name) # get_special(special_lst, index_lst) # # create a pandas dataframe for the names and prices # fish_dict = { # 'fish_names': fish_names, # 'fish_prices in EUR': fish_prices # } # df_fish_info = pd.DataFrame(data=fish_dict) # # create a pandas dataframe for the pictures # picture_dict = { # 'fish_names': picture_name, # 'pictures': picture_lst # } # df_picture = pd.DataFrame(data=picture_dict) # # # merge those two dataframes on the fishnames # df_ = pd.merge(df_fish_info, df_picture, on='fish_names', how='outer') # # # create a pandas dataframe for the specials # specials_dict = { # 'special': special_lst # } # real_index = pd.Series(index_lst) # df_specials = pd.DataFrame(data=specials_dict) # df_specials.set_index(real_index, inplace=True) # # # merge the dataframes on the index # df_ = pd.merge(df_, df_specials, left_index=True,right_index=True, how='outer') # # append the temporary dataframe to the dataframe we created earlier outside the for loop # df = df.append(df_) # # + #checking if everything worked #df.head() # - # The web scraping part is over and the following part is only looking at the data. # We will save the dataframe to a csv file so that we don't have to scrape the info again! # ### Checking for duplicates something that can happen quickly while scraping # df.pivot_table(columns=['fish_names'], aggfunc='size') # It seems like we have some duplicates. Let's drop them! 
# + #df.drop_duplicates(inplace=True) # + # save the dataframe to a csv file without index #df.to_csv('fish_data.csv', index=False) # - # Because we haven't run the code for scraping all pages, we uploaded the data we scraped before to github and we now can load it into pandas: # reading the csv file from github df = pd.read_csv('https://raw.githubusercontent.com/neuefische/ds-meetups/main/02_Web_Scraping_With_Beautiful_Soup/fish_data.csv') #checking if everything worked df.head() # We want fish for Larissa that she has never had before, that is why we are looking for new items (Neuheiten). # Query over the dataframe and keeping only the fish with the special Neuheit df_special_offer = df.query('special == "Neuheit"') df_special_offer.head() # We have a budget of around 250 โ‚ฌ and we want to buy at least 10 fish so we will filter out fishes that are more expensive than 25 โ‚ฌ! # Filtering only for the fish that are cheaper than 25 EUR df_final = df_special_offer[df_special_offer['fish_prices in EUR'] <= 25] df_final.head() # So let's write some code that chooses the fish for us: # our budget BUDGET = 250 # a list for the fish we will buy shopping_bag = [] # a variable here we save the updating price in price = 0 # we are looking for fish until our budget is reached while price <= BUDGET: # samples the dataframe randomly df_temp = df_final.sample(1) # getting the name from the sample name = df_temp['fish_names'].values # getting the price from the sample fish_price = df_temp['fish_prices in EUR'].values # updating our price price += fish_price # adding the fish name to the shopping bag shopping_bag.append((name[0],fish_price[0])) # + pd.set_option('display.max_colwidth', None) print(f"We are at a price point of {price[0].round(2)} Euro and this are the fish we chose:") res=pd.DataFrame(shopping_bag,columns=["Name","Price [โ‚ฌ]"]) display(res) # - # # Christmas can come! # # ![](images/happy_larissa.png) #
02_Web_Scraping_With_Beautiful_Soup/02_webscraping_bs4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # ็บฟๆ€งๅ›žๅฝ’็š„็ฎ€ๆดๅฎž็Žฐ # :label:`sec_linear_concise` # # ๅœจ่ฟ‡ๅŽป็š„ๅ‡ ๅนด้‡Œ๏ผŒๅ‡บไบŽๅฏนๆทฑๅบฆๅญฆไน ๅผบ็ƒˆ็š„ๅ…ด่ถฃ๏ผŒ # ่ฎธๅคšๅ…ฌๅธใ€ๅญฆ่€…ๅ’Œไธšไฝ™็ˆฑๅฅฝ่€…ๅผ€ๅ‘ไบ†ๅ„็งๆˆ็†Ÿ็š„ๅผ€ๆบๆก†ๆžถใ€‚ # ่ฟ™ไบ›ๆก†ๆžถๅฏไปฅ่‡ชๅŠจๅŒ–ๅŸบไบŽๆขฏๅบฆ็š„ๅญฆไน ็ฎ—ๆณ•ไธญ้‡ๅคๆ€ง็š„ๅทฅไฝœใ€‚ # ๅœจ :numref:`sec_linear_scratch`ไธญ๏ผŒๆˆ‘ไปฌๅช่ฟ็”จไบ†๏ผš # ๏ผˆ1๏ผ‰้€š่ฟ‡ๅผ ้‡ๆฅ่ฟ›่กŒๆ•ฐๆฎๅญ˜ๅ‚จๅ’Œ็บฟๆ€งไปฃๆ•ฐ๏ผ› # ๏ผˆ2๏ผ‰้€š่ฟ‡่‡ชๅŠจๅพฎๅˆ†ๆฅ่ฎก็ฎ—ๆขฏๅบฆใ€‚ # ๅฎž้™…ไธŠ๏ผŒ็”ฑไบŽๆ•ฐๆฎ่ฟญไปฃๅ™จใ€ๆŸๅคฑๅ‡ฝๆ•ฐใ€ไผ˜ๅŒ–ๅ™จๅ’Œ็ฅž็ป็ฝ‘็ปœๅฑ‚ๅพˆๅธธ็”จ๏ผŒ # ็Žฐไปฃๆทฑๅบฆๅญฆไน ๅบ“ไนŸไธบๆˆ‘ไปฌๅฎž็Žฐไบ†่ฟ™ไบ›็ป„ไปถใ€‚ # # ๅœจๆœฌ่Š‚ไธญ๏ผŒๆˆ‘ไปฌๅฐ†ไป‹็ปๅฆ‚ไฝ•(**้€š่ฟ‡ไฝฟ็”จๆทฑๅบฆๅญฆไน ๆก†ๆžถๆฅ็ฎ€ๆดๅœฐๅฎž็Žฐ**) # :numref:`sec_linear_scratch`ไธญ็š„(**็บฟๆ€งๅ›žๅฝ’ๆจกๅž‹**)ใ€‚ # # ## ็”Ÿๆˆๆ•ฐๆฎ้›† # # ไธŽ :numref:`sec_linear_scratch`ไธญ็ฑปไผผ๏ผŒๆˆ‘ไปฌ้ฆ–ๅ…ˆ[**็”Ÿๆˆๆ•ฐๆฎ้›†**]ใ€‚ # # + origin_pos=2 tab=["pytorch"] import numpy as np import torch from torch.utils import data from d2l import torch as d2l # + origin_pos=4 tab=["pytorch"] true_w = torch.tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) # + [markdown] origin_pos=5 # ## ่ฏปๅ–ๆ•ฐๆฎ้›† # # ๆˆ‘ไปฌๅฏไปฅ[**่ฐƒ็”จๆก†ๆžถไธญ็Žฐๆœ‰็š„APIๆฅ่ฏปๅ–ๆ•ฐๆฎ**]ใ€‚ # ๆˆ‘ไปฌๅฐ†`features`ๅ’Œ`labels`ไฝœไธบAPI็š„ๅ‚ๆ•ฐไผ ้€’๏ผŒๅนถ้€š่ฟ‡ๆ•ฐๆฎ่ฟญไปฃๅ™จๆŒ‡ๅฎš`batch_size`ใ€‚ # ๆญคๅค–๏ผŒๅธƒๅฐ”ๅ€ผ`is_train`่กจ็คบๆ˜ฏๅฆๅธŒๆœ›ๆ•ฐๆฎ่ฟญไปฃๅ™จๅฏน่ฑกๅœจๆฏไธช่ฟญไปฃๅ‘จๆœŸๅ†…ๆ‰“ไนฑๆ•ฐๆฎใ€‚ # # + origin_pos=7 tab=["pytorch"] def load_array(data_arrays, batch_size, is_train=True): #@save """ๆž„้€ ไธ€ไธชPyTorchๆ•ฐๆฎ่ฟญไปฃๅ™จ""" dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train) # + origin_pos=9 tab=["pytorch"] batch_size = 10 data_iter = load_array((features, labels), batch_size) 
# + [markdown] origin_pos=10 # ไฝฟ็”จ`data_iter`็š„ๆ–นๅผไธŽๆˆ‘ไปฌๅœจ :numref:`sec_linear_scratch`ไธญไฝฟ็”จ`data_iter`ๅ‡ฝๆ•ฐ็š„ๆ–นๅผ็›ธๅŒใ€‚ไธบไบ†้ชŒ่ฏๆ˜ฏๅฆๆญฃๅธธๅทฅไฝœ๏ผŒ่ฎฉๆˆ‘ไปฌ่ฏปๅ–ๅนถๆ‰“ๅฐ็ฌฌไธ€ไธชๅฐๆ‰น้‡ๆ ทๆœฌใ€‚ # ไธŽ :numref:`sec_linear_scratch`ไธๅŒ๏ผŒ่ฟ™้‡Œๆˆ‘ไปฌไฝฟ็”จ`iter`ๆž„้€ Python่ฟญไปฃๅ™จ๏ผŒๅนถไฝฟ็”จ`next`ไปŽ่ฟญไปฃๅ™จไธญ่Žทๅ–็ฌฌไธ€้กนใ€‚ # # + origin_pos=11 tab=["pytorch"] next(iter(data_iter)) # + [markdown] origin_pos=12 # ## ๅฎšไน‰ๆจกๅž‹ # # ๅฝ“ๆˆ‘ไปฌๅœจ :numref:`sec_linear_scratch`ไธญๅฎž็Žฐ็บฟๆ€งๅ›žๅฝ’ๆ—ถ๏ผŒ # ๆˆ‘ไปฌๆ˜Ž็กฎๅฎšไน‰ไบ†ๆจกๅž‹ๅ‚ๆ•ฐๅ˜้‡๏ผŒๅนถ็ผ–ๅ†™ไบ†่ฎก็ฎ—็š„ไปฃ็ ๏ผŒ่ฟ™ๆ ท้€š่ฟ‡ๅŸบๆœฌ็š„็บฟๆ€งไปฃๆ•ฐ่ฟ็ฎ—ๅพ—ๅˆฐ่พ“ๅ‡บใ€‚ # ไฝ†ๆ˜ฏ๏ผŒๅฆ‚ๆžœๆจกๅž‹ๅ˜ๅพ—ๆ›ดๅŠ ๅคๆ‚๏ผŒไธ”ๅฝ“ไฝ ๅ‡ ไนŽๆฏๅคฉ้ƒฝ้œ€่ฆๅฎž็Žฐๆจกๅž‹ๆ—ถ๏ผŒไฝ ไผšๆƒณ็ฎ€ๅŒ–่ฟ™ไธช่ฟ‡็จ‹ใ€‚ # ่ฟ™็งๆƒ…ๅ†ต็ฑปไผผไบŽไธบ่‡ชๅทฑ็š„ๅšๅฎขไปŽ้›ถๅผ€ๅง‹็ผ–ๅ†™็ฝ‘้กตใ€‚ # ๅšไธ€ไธคๆฌกๆ˜ฏๆœ‰็›Š็š„๏ผŒไฝ†ๅฆ‚ๆžœๆฏไธชๆ–ฐๅšๅฎขไฝ ๅฐฑ่Šฑไธ€ไธชๆœˆ็š„ๆ—ถ้—ด้‡ๆ–ฐๅผ€ๅง‹็ผ–ๅ†™็ฝ‘้กต๏ผŒ้‚ฃๅนถไธ้ซ˜ๆ•ˆใ€‚ # # ๅฏนไบŽๆ ‡ๅ‡†ๆทฑๅบฆๅญฆไน ๆจกๅž‹๏ผŒๆˆ‘ไปฌๅฏไปฅ[**ไฝฟ็”จๆก†ๆžถ็š„้ข„ๅฎšไน‰ๅฅฝ็š„ๅฑ‚**]ใ€‚่ฟ™ไฝฟๆˆ‘ไปฌๅช้œ€ๅ…ณๆณจไฝฟ็”จๅ“ชไบ›ๅฑ‚ๆฅๆž„้€ ๆจกๅž‹๏ผŒ่€Œไธๅฟ…ๅ…ณๆณจๅฑ‚็š„ๅฎž็Žฐ็ป†่Š‚ใ€‚ # ๆˆ‘ไปฌ้ฆ–ๅ…ˆๅฎšไน‰ไธ€ไธชๆจกๅž‹ๅ˜้‡`net`๏ผŒๅฎƒๆ˜ฏไธ€ไธช`Sequential`็ฑป็š„ๅฎžไพ‹ใ€‚ # `Sequential`็ฑปๅฐ†ๅคšไธชๅฑ‚ไธฒ่”ๅœจไธ€่ตทใ€‚ # ๅฝ“็ป™ๅฎš่พ“ๅ…ฅๆ•ฐๆฎๆ—ถ๏ผŒ`Sequential`ๅฎžไพ‹ๅฐ†ๆ•ฐๆฎไผ ๅ…ฅๅˆฐ็ฌฌไธ€ๅฑ‚๏ผŒ # ็„ถๅŽๅฐ†็ฌฌไธ€ๅฑ‚็š„่พ“ๅ‡บไฝœไธบ็ฌฌไบŒๅฑ‚็š„่พ“ๅ…ฅ๏ผŒไปฅๆญค็ฑปๆŽจใ€‚ # ๅœจไธ‹้ข็š„ไพ‹ๅญไธญ๏ผŒๆˆ‘ไปฌ็š„ๆจกๅž‹ๅชๅŒ…ๅซไธ€ไธชๅฑ‚๏ผŒๅ› ๆญคๅฎž้™…ไธŠไธ้œ€่ฆ`Sequential`ใ€‚ # ไฝ†ๆ˜ฏ็”ฑไบŽไปฅๅŽๅ‡ ไนŽๆ‰€ๆœ‰็š„ๆจกๅž‹้ƒฝๆ˜ฏๅคšๅฑ‚็š„๏ผŒๅœจ่ฟ™้‡Œไฝฟ็”จ`Sequential`ไผš่ฎฉไฝ ็†Ÿๆ‚‰โ€œๆ ‡ๅ‡†็š„ๆตๆฐด็บฟโ€ใ€‚ # # ๅ›ž้กพ :numref:`fig_single_neuron`ไธญ็š„ๅ•ๅฑ‚็ฝ‘็ปœๆžถๆž„๏ผŒ # ่ฟ™ไธ€ๅ•ๅฑ‚่ขซ็งฐไธบ*ๅ…จ่ฟžๆŽฅๅฑ‚*๏ผˆfully-connected layer๏ผ‰๏ผŒ # ๅ› ไธบๅฎƒ็š„ๆฏไธ€ไธช่พ“ๅ…ฅ้ƒฝ้€š่ฟ‡็Ÿฉ้˜ต-ๅ‘้‡ไน˜ๆณ•ๅพ—ๅˆฐๅฎƒ็š„ๆฏไธช่พ“ๅ‡บใ€‚ # # + [markdown] origin_pos=14 tab=["pytorch"] # 
ๅœจPyTorchไธญ๏ผŒๅ…จ่ฟžๆŽฅๅฑ‚ๅœจ`Linear`็ฑปไธญๅฎšไน‰ใ€‚ # ๅ€ผๅพ—ๆณจๆ„็š„ๆ˜ฏ๏ผŒๆˆ‘ไปฌๅฐ†ไธคไธชๅ‚ๆ•ฐไผ ้€’ๅˆฐ`nn.Linear`ไธญใ€‚ # ็ฌฌไธ€ไธชๆŒ‡ๅฎš่พ“ๅ…ฅ็‰นๅพๅฝข็Šถ๏ผŒๅณ2๏ผŒ็ฌฌไบŒไธชๆŒ‡ๅฎš่พ“ๅ‡บ็‰นๅพๅฝข็Šถ๏ผŒ่พ“ๅ‡บ็‰นๅพๅฝข็Šถไธบๅ•ไธชๆ ‡้‡๏ผŒๅ› ๆญคไธบ1ใ€‚ # # + origin_pos=17 tab=["pytorch"] # nnๆ˜ฏ็ฅž็ป็ฝ‘็ปœ็š„็ผฉๅ†™ from torch import nn net = nn.Sequential(nn.Linear(2, 1)) # + [markdown] origin_pos=19 # ## (**ๅˆๅง‹ๅŒ–ๆจกๅž‹ๅ‚ๆ•ฐ**) # # ๅœจไฝฟ็”จ`net`ไน‹ๅ‰๏ผŒๆˆ‘ไปฌ้œ€่ฆๅˆๅง‹ๅŒ–ๆจกๅž‹ๅ‚ๆ•ฐใ€‚ # ๅฆ‚ๅœจ็บฟๆ€งๅ›žๅฝ’ๆจกๅž‹ไธญ็š„ๆƒ้‡ๅ’Œๅ็ฝฎใ€‚ # ๆทฑๅบฆๅญฆไน ๆก†ๆžถ้€šๅธธๆœ‰้ข„ๅฎšไน‰็š„ๆ–นๆณ•ๆฅๅˆๅง‹ๅŒ–ๅ‚ๆ•ฐใ€‚ # ๅœจ่ฟ™้‡Œ๏ผŒๆˆ‘ไปฌๆŒ‡ๅฎšๆฏไธชๆƒ้‡ๅ‚ๆ•ฐๅบ”่ฏฅไปŽๅ‡ๅ€ผไธบ0ใ€ๆ ‡ๅ‡†ๅทฎไธบ0.01็š„ๆญฃๆ€ๅˆ†ๅธƒไธญ้šๆœบ้‡‡ๆ ท๏ผŒ # ๅ็ฝฎๅ‚ๆ•ฐๅฐ†ๅˆๅง‹ๅŒ–ไธบ้›ถใ€‚ # # + [markdown] origin_pos=21 tab=["pytorch"] # ๆญฃๅฆ‚ๆˆ‘ไปฌๅœจๆž„้€ `nn.Linear`ๆ—ถๆŒ‡ๅฎš่พ“ๅ…ฅๅ’Œ่พ“ๅ‡บๅฐบๅฏธไธ€ๆ ท๏ผŒ # ็Žฐๅœจๆˆ‘ไปฌ่ƒฝ็›ดๆŽฅ่ฎฟ้—ฎๅ‚ๆ•ฐไปฅ่ฎพๅฎšๅฎƒไปฌ็š„ๅˆๅง‹ๅ€ผใ€‚ # ๆˆ‘ไปฌ้€š่ฟ‡`net[0]`้€‰ๆ‹ฉ็ฝ‘็ปœไธญ็š„็ฌฌไธ€ไธชๅ›พๅฑ‚๏ผŒ # ็„ถๅŽไฝฟ็”จ`weight.data`ๅ’Œ`bias.data`ๆ–นๆณ•่ฎฟ้—ฎๅ‚ๆ•ฐใ€‚ # ๆˆ‘ไปฌ่ฟ˜ๅฏไปฅไฝฟ็”จๆ›ฟๆขๆ–นๆณ•`normal_`ๅ’Œ`fill_`ๆฅ้‡ๅ†™ๅ‚ๆ•ฐๅ€ผใ€‚ # # + origin_pos=24 tab=["pytorch"] net[0].weight.data.normal_(0, 0.01) net[0].bias.data.fill_(0) # + [markdown] origin_pos=27 tab=["pytorch"] # # # + [markdown] origin_pos=29 # ## ๅฎšไน‰ๆŸๅคฑๅ‡ฝๆ•ฐ # # + [markdown] origin_pos=31 tab=["pytorch"] # [**่ฎก็ฎ—ๅ‡ๆ–น่ฏฏๅทฎไฝฟ็”จ็š„ๆ˜ฏ`MSELoss`็ฑป๏ผŒไนŸ็งฐไธบๅนณๆ–น$L_2$่Œƒๆ•ฐ**]ใ€‚ # ้ป˜่ฎคๆƒ…ๅ†ตไธ‹๏ผŒๅฎƒ่ฟ”ๅ›žๆ‰€ๆœ‰ๆ ทๆœฌๆŸๅคฑ็š„ๅนณๅ‡ๅ€ผใ€‚ # # + origin_pos=34 tab=["pytorch"] loss = nn.MSELoss() # + [markdown] origin_pos=36 # ## ๅฎšไน‰ไผ˜ๅŒ–็ฎ—ๆณ• # # + [markdown] origin_pos=38 tab=["pytorch"] # ๅฐๆ‰น้‡้šๆœบๆขฏๅบฆไธ‹้™็ฎ—ๆณ•ๆ˜ฏไธ€็งไผ˜ๅŒ–็ฅž็ป็ฝ‘็ปœ็š„ๆ ‡ๅ‡†ๅทฅๅ…ท๏ผŒ # PyTorchๅœจ`optim`ๆจกๅ—ไธญๅฎž็Žฐไบ†่ฏฅ็ฎ—ๆณ•็š„่ฎธๅคšๅ˜็งใ€‚ # ๅฝ“ๆˆ‘ไปฌ(**ๅฎžไพ‹ๅŒ–ไธ€ไธช`SGD`ๅฎžไพ‹**)ๆ—ถ๏ผŒๆˆ‘ไปฌ่ฆๆŒ‡ๅฎšไผ˜ๅŒ–็š„ๅ‚ๆ•ฐ # 
๏ผˆๅฏ้€š่ฟ‡`net.parameters()`ไปŽๆˆ‘ไปฌ็š„ๆจกๅž‹ไธญ่Žทๅพ—๏ผ‰ไปฅๅŠไผ˜ๅŒ–็ฎ—ๆณ•ๆ‰€้œ€็š„่ถ…ๅ‚ๆ•ฐๅญ—ๅ…ธใ€‚ # ๅฐๆ‰น้‡้šๆœบๆขฏๅบฆไธ‹้™ๅช้œ€่ฆ่ฎพ็ฝฎ`lr`ๅ€ผ๏ผŒ่ฟ™้‡Œ่ฎพ็ฝฎไธบ0.03ใ€‚ # # + origin_pos=41 tab=["pytorch"] trainer = torch.optim.SGD(net.parameters(), lr=0.03) # + [markdown] origin_pos=43 # ## ่ฎญ็ปƒ # # ้€š่ฟ‡ๆทฑๅบฆๅญฆไน ๆก†ๆžถ็š„้ซ˜็บงAPIๆฅๅฎž็Žฐๆˆ‘ไปฌ็š„ๆจกๅž‹ๅช้œ€่ฆ็›ธๅฏน่พƒๅฐ‘็š„ไปฃ็ ใ€‚ # ๆˆ‘ไปฌไธๅฟ…ๅ•็‹ฌๅˆ†้…ๅ‚ๆ•ฐใ€ไธๅฟ…ๅฎšไน‰ๆˆ‘ไปฌ็š„ๆŸๅคฑๅ‡ฝๆ•ฐ๏ผŒไนŸไธๅฟ…ๆ‰‹ๅŠจๅฎž็Žฐๅฐๆ‰น้‡้šๆœบๆขฏๅบฆไธ‹้™ใ€‚ # ๅฝ“ๆˆ‘ไปฌ้œ€่ฆๆ›ดๅคๆ‚็š„ๆจกๅž‹ๆ—ถ๏ผŒ้ซ˜็บงAPI็š„ไผ˜ๅŠฟๅฐ†ๅคงๅคงๅขžๅŠ ใ€‚ # ๅฝ“ๆˆ‘ไปฌๆœ‰ไบ†ๆ‰€ๆœ‰็š„ๅŸบๆœฌ็ป„ไปถ๏ผŒ[**่ฎญ็ปƒ่ฟ‡็จ‹ไปฃ็ ไธŽๆˆ‘ไปฌไปŽ้›ถๅผ€ๅง‹ๅฎž็Žฐๆ—ถๆ‰€ๅš็š„้žๅธธ็›ธไผผ**]ใ€‚ # # ๅ›ž้กพไธ€ไธ‹๏ผšๅœจๆฏไธช่ฟญไปฃๅ‘จๆœŸ้‡Œ๏ผŒๆˆ‘ไปฌๅฐ†ๅฎŒๆ•ด้ๅކไธ€ๆฌกๆ•ฐๆฎ้›†๏ผˆ`train_data`๏ผ‰๏ผŒ # ไธๅœๅœฐไปŽไธญ่Žทๅ–ไธ€ไธชๅฐๆ‰น้‡็š„่พ“ๅ…ฅๅ’Œ็›ธๅบ”็š„ๆ ‡็ญพใ€‚ # ๅฏนไบŽๆฏไธ€ไธชๅฐๆ‰น้‡๏ผŒๆˆ‘ไปฌไผš่ฟ›่กŒไปฅไธ‹ๆญฅ้ชค: # # * ้€š่ฟ‡่ฐƒ็”จ`net(X)`็”Ÿๆˆ้ข„ๆต‹ๅนถ่ฎก็ฎ—ๆŸๅคฑ`l`๏ผˆๅ‰ๅ‘ไผ ๆ’ญ๏ผ‰ใ€‚ # * ้€š่ฟ‡่ฟ›่กŒๅๅ‘ไผ ๆ’ญๆฅ่ฎก็ฎ—ๆขฏๅบฆใ€‚ # * ้€š่ฟ‡่ฐƒ็”จไผ˜ๅŒ–ๅ™จๆฅๆ›ดๆ–ฐๆจกๅž‹ๅ‚ๆ•ฐใ€‚ # # ไธบไบ†ๆ›ดๅฅฝ็š„่กก้‡่ฎญ็ปƒๆ•ˆๆžœ๏ผŒๆˆ‘ไปฌ่ฎก็ฎ—ๆฏไธช่ฟญไปฃๅ‘จๆœŸๅŽ็š„ๆŸๅคฑ๏ผŒๅนถๆ‰“ๅฐๅฎƒๆฅ็›‘ๆŽง่ฎญ็ปƒ่ฟ‡็จ‹ใ€‚ # # + origin_pos=45 tab=["pytorch"] num_epochs = 3 for epoch in range(num_epochs): for X, y in data_iter: l = loss(net(X) ,y) trainer.zero_grad() l.backward() trainer.step() l = loss(net(features), labels) print(f'epoch {epoch + 1}, loss {l:f}') # + [markdown] origin_pos=47 # ไธ‹้ขๆˆ‘ไปฌ[**ๆฏ”่พƒ็”Ÿๆˆๆ•ฐๆฎ้›†็š„็œŸๅฎžๅ‚ๆ•ฐๅ’Œ้€š่ฟ‡ๆœ‰้™ๆ•ฐๆฎ่ฎญ็ปƒ่Žทๅพ—็š„ๆจกๅž‹ๅ‚ๆ•ฐ**]ใ€‚ # ่ฆ่ฎฟ้—ฎๅ‚ๆ•ฐ๏ผŒๆˆ‘ไปฌ้ฆ–ๅ…ˆไปŽ`net`่ฎฟ้—ฎๆ‰€้œ€็š„ๅฑ‚๏ผŒ็„ถๅŽ่ฏปๅ–่ฏฅๅฑ‚็š„ๆƒ้‡ๅ’Œๅ็ฝฎใ€‚ # ๆญฃๅฆ‚ๅœจไปŽ้›ถๅผ€ๅง‹ๅฎž็Žฐไธญไธ€ๆ ท๏ผŒๆˆ‘ไปฌไผฐ่ฎกๅพ—ๅˆฐ็š„ๅ‚ๆ•ฐไธŽ็”Ÿๆˆๆ•ฐๆฎ็š„็œŸๅฎžๅ‚ๆ•ฐ้žๅธธๆŽฅ่ฟ‘ใ€‚ # # + origin_pos=49 tab=["pytorch"] w = net[0].weight.data print('w็š„ไผฐ่ฎก่ฏฏๅทฎ๏ผš', true_w - w.reshape(true_w.shape)) b = net[0].bias.data 
print('b็š„ไผฐ่ฎก่ฏฏๅทฎ๏ผš', true_b - b) # + [markdown] origin_pos=51 # ## ๅฐ็ป“ # # + [markdown] origin_pos=53 tab=["pytorch"] # * ๆˆ‘ไปฌๅฏไปฅไฝฟ็”จPyTorch็š„้ซ˜็บงAPIๆ›ด็ฎ€ๆดๅœฐๅฎž็Žฐๆจกๅž‹ใ€‚ # * ๅœจPyTorchไธญ๏ผŒ`data`ๆจกๅ—ๆไพ›ไบ†ๆ•ฐๆฎๅค„็†ๅทฅๅ…ท๏ผŒ`nn`ๆจกๅ—ๅฎšไน‰ไบ†ๅคง้‡็š„็ฅž็ป็ฝ‘็ปœๅฑ‚ๅ’Œๅธธ่งๆŸๅคฑๅ‡ฝๆ•ฐใ€‚ # * ๆˆ‘ไปฌๅฏไปฅ้€š่ฟ‡`_`็ป“ๅฐพ็š„ๆ–นๆณ•ๅฐ†ๅ‚ๆ•ฐๆ›ฟๆข๏ผŒไปŽ่€Œๅˆๅง‹ๅŒ–ๅ‚ๆ•ฐใ€‚ # # + [markdown] origin_pos=55 # ## ็ปƒไน  # # 1. ๅฆ‚ๆžœๅฐ†ๅฐๆ‰น้‡็š„ๆ€ปๆŸๅคฑๆ›ฟๆขไธบๅฐๆ‰น้‡ๆŸๅคฑ็š„ๅนณๅ‡ๅ€ผ๏ผŒไฝ ้œ€่ฆๅฆ‚ไฝ•ๆ›ดๆ”นๅญฆไน ็އ๏ผŸ # 1. ๆŸฅ็œ‹ๆทฑๅบฆๅญฆไน ๆก†ๆžถๆ–‡ๆกฃ๏ผŒๅฎƒไปฌๆไพ›ไบ†ๅ“ชไบ›ๆŸๅคฑๅ‡ฝๆ•ฐๅ’Œๅˆๅง‹ๅŒ–ๆ–นๆณ•๏ผŸ็”จHuberๆŸๅคฑไปฃๆ›ฟๅŽŸๆŸๅคฑ๏ผŒๅณ # $$l(y,y') = \begin{cases}|y-y'| -\frac{\sigma}{2} & \text{ if } |y-y'| > \sigma \\ \frac{1}{2 \sigma} (y-y')^2 & \text{ ๅ…ถๅฎƒๆƒ…ๅ†ต}\end{cases}$$ # 1. ไฝ ๅฆ‚ไฝ•่ฎฟ้—ฎ็บฟๆ€งๅ›žๅฝ’็š„ๆขฏๅบฆ๏ผŸ # # + [markdown] origin_pos=57 tab=["pytorch"] # [Discussions](https://discuss.d2l.ai/t/1781) #
pytorch/chapter_linear-networks/linear-regression-concise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Hadamard Multitask GP Regression
#
# ## Introduction
#
# This notebook demonstrates how to perform "Hadamard" multitask regression with kernels.IndexKernel.
#
# This differs from the [multitask gp regression example notebook](./Multitask_GP_Regression.ipynb) in one key way:
# - Here, we assume that we want to learn **one task per input**. For each input, we specify the task of the input that we care about. (The kernel that we learn is expressed as a Hadamard product of an input kernel and a task kernel)
# - In the other notebook, we assume that we want to learn all tasks per input. (The kernel in that notebook is the Kronecker product of an input kernel and a task kernel).
#
# Multitask regression, first introduced in [this paper](https://papers.nips.cc/paper/3189-multi-task-gaussian-process-prediction.pdf), learns similarities in the outputs simultaneously. It's useful when you are performing regression on multiple functions that share the same inputs, especially if they have similarities (such as being sinusoidal).
#
# Given inputs $x$ and $x'$, and tasks $i$ and $j$, the covariance between two datapoints and two tasks is given by
#
# \begin{equation*}
# k([x, i], [x', j]) = k_\text{inputs}(x, x') * k_\text{tasks}(i, j)
# \end{equation*}
#
# where $k_\text{inputs}$ is a standard kernel (e.g. RBF) that operates on the inputs.
# $k_\text{tasks}$ is a special kernel - the `IndexKernel` - which is a lookup table containing inter-task covariance.

# +
import math
import torch
import gpytorch
from matplotlib import pyplot as plt

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -

# ### Set up training data
#
# In the next cell, we set up the training data for this example.
# We'll be using 100 regularly spaced points on [0,1] which we evaluate the function on and add Gaussian noise to get the training labels.
# (Note: an earlier version of this text said 15 points; the code below draws 100.)
#
# We'll have two functions - a sine function (y1) and a cosine function (y2)

# +
# Training inputs: 100 evenly spaced points in [0, 1].
train_x = torch.linspace(0, 1, 100)

# Task 1 targets: one full sine period plus N(0, 0.2^2) observation noise.
train_y1 = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
# Task 2 targets: one full cosine period plus the same noise scale.
train_y2 = torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
# -

# ## Set up the model
#
# The model should be somewhat similar to the `ExactGP` model in the [simple regression example](../01_Simple_GP_Regression/Simple_GP_Regression.ipynb).
#
# The differences:
#
# 1. The model takes two input: the inputs (x) and indices. The indices indicate which task we want an output for,
# 2. Rather than just using a RBFKernel, we're using that in conjunction with a IndexKernel
# 3. We don't use a ScaleKernel, since the IndexKernel will do some scaling for us. (This way we're not overparameterizing the kernel.)

# +
class MultitaskGPModel(gpytorch.models.ExactGP):
    """Exact GP whose covariance is the Hadamard (elementwise) product of an
    RBF kernel over the inputs and an IndexKernel over the task indices."""

    def __init__(self, train_x, train_y, likelihood):
        """Set up mean and covariance modules.

        train_x: training inputs (here a tuple of (inputs, task indices)).
        train_y: training targets.
        likelihood: likelihood used for exact GP inference.
        """
        super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.RBFKernel()

        # We learn an IndexKernel for 2 tasks
        # (so we'll actually learn 2x2=4 tasks with correlations)
        self.task_covar_module = gpytorch.kernels.IndexKernel(num_tasks=2, rank=1)

    def forward(self,x,i):
        """Return the multivariate-normal prior at inputs `x` for task indices `i`."""
        mean_x = self.mean_module(x)

        # Get input-input covariance
        covar_x = self.covar_module(x)
        # Get task-task covariance
        covar_i = self.task_covar_module(i)
        # Multiply the two together to get the covariance we want
        # (elementwise product of the two covariance matrices)
        covar = covar_x.mul(covar_i)

        return gpytorch.distributions.MultivariateNormal(mean_x, covar)


likelihood = gpytorch.likelihoods.GaussianLikelihood()

# Here we want outputs for every input and task
# This is not the most efficient model for this: it's better to use the model in the ./Multitask_GP_Regression.ipynb notebook
# Since we are learning two tasks we
feed in the x_data twice, along with the # y_data along with its indices train_i_task1 = torch.full_like(train_x, dtype=torch.long, fill_value=0) train_i_task2 = torch.full_like(train_x, dtype=torch.long, fill_value=1) full_train_x = torch.cat([train_x, train_x]) full_train_i = torch.cat([train_i_task1, train_i_task2]) full_train_y = torch.cat([train_y1, train_y2]) # Here we have two iterms that we're passing in as train_inputs model = MultitaskGPModel((full_train_x, full_train_i), full_train_y, likelihood) # - # ## Training the model # # In the next cell, we handle using Type-II MLE to train the hyperparameters of the Gaussian process. # The spectral mixture kernel's hyperparameters start from what was specified in `initialize_from_data`. # # See the [simple regression example](../01_Simple_GP_Regression/Simple_GP_Regression.ipynb) for more info on this step. # + # Find optimal model hyperparameters model.train() likelihood.train() # Use the adam optimizer optimizer = torch.optim.Adam([ {'params': model.parameters()}, # Includes GaussianLikelihood parameters ], lr=0.1) # "Loss" for GPs - the marginal log likelihood mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model) for i in range(50): optimizer.zero_grad() output = model(full_train_x, full_train_i) loss = -mll(output, full_train_y) loss.backward() print('Iter %d/50 - Loss: %.3f' % (i + 1, loss.item())) optimizer.step() # - # ## Make predictions with the model # + # Set into eval mode model.eval() likelihood.eval() # Initialize plots f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3)) # Test points every 0.02 in [0,1] test_x = torch.linspace(0, 1, 51) tast_i_task1 = torch.full_like(test_x, dtype=torch.long, fill_value=0) test_i_task2 = torch.full_like(test_x, dtype=torch.long, fill_value=1) # Make predictions - one task at a time # We control the task we cae about using the indices # The gpytorch.settings.fast_pred_var flag activates LOVE (for fast variances) # See https://arxiv.org/abs/1803.06058 
with torch.no_grad(), gpytorch.settings.fast_pred_var(): observed_pred_y1 = likelihood(model(test_x, tast_i_task1)) observed_pred_y2 = likelihood(model(test_x, test_i_task2)) # Define plotting function def ax_plot(ax, train_y, rand_var, title): # Get lower and upper confidence bounds lower, upper = rand_var.confidence_region() # Plot training data as black stars ax.plot(train_x.detach().numpy(), train_y.detach().numpy(), 'k*') # Predictive mean as blue line ax.plot(test_x.detach().numpy(), rand_var.mean.detach().numpy(), 'b') # Shade in confidence ax.fill_between(test_x.detach().numpy(), lower.detach().numpy(), upper.detach().numpy(), alpha=0.5) ax.set_ylim([-3, 3]) ax.legend(['Observed Data', 'Mean', 'Confidence']) ax.set_title(title) # Plot both tasks ax_plot(y1_ax, train_y1, observed_pred_y1, 'Observed Values (Likelihood)') ax_plot(y2_ax, train_y2, observed_pred_y2, 'Observed Values (Likelihood)') # -
examples/03_Multitask_GP_Regression/Hadamard_Multitask_GP_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. # - # # Specifying Thumbnails in `conf.py` # # This notebook doesn't contain any thumbnail metadata. # # But in the file [conf.py](../conf.py), # a thumbnail is specified (via the # [nbsphinx_thumbnails](../usage.ipynb#nbsphinx_thumbnails) # option), # which will be used in the [gallery](../subdir/gallery.ipynb). # # The keys in the `nbsphinx_thumbnails` dictionary can contain wildcards, # which behave very similarly to the # [html_sidebars](https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_sidebars) # option. # # The thumbnail files can be local image files somewhere in the source directory, # but you'll need to create at least one # [link](../markdown-cells.ipynb#Links-to-Local-Files) # to them in order to copy them to the HTML output directory. # # You can also use files from the `_static` directory # (which contains all files in your [html_static_path](https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_static_path)). # # If you want, you can also use files from the `_images` directory, # which contains all notebook outputs. # To demonstrate this feature, # we are creating an image file here: # %matplotlib agg import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.plot([4, 8, 15, 16, 23, 42]) fig.savefig('a-local-file.png') # Please note that the previous cell doesn't have any outputs, # but it has generated a file named `a-local-file.png` in the notebook's directory. # # We have to create a link to this file (which is a good idea anyway): # [a-local-file.png](a-local-file.png). 
# # Now we can use this file in our [conf.py](../conf.py) like this: # # ```python # nbsphinx_thumbnails = { # 'gallery/thumbnail-from-conf-py': 'gallery/a-local-file.png', # } # ``` # # Please note that the notebook name does *not* contain the `.ipynb` suffix.
doc/gallery/thumbnail-from-conf-py.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf import numpy as np raw_data = np.random.normal(10, 1, 100) alpha = tf.constant(0.05) curr_value = tf.placeholder(tf.float32) prev_avg = tf.Variable(0.0) update_avg = alpha * curr_value + (1 - alpha) * prev_avg init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for i in range(len(raw_data)): curr_avg = sess.run(update_avg, feed_dict={curr_value: raw_data[i]}) sess.run(tf.assign(prev_avg, curr_avg)) print(raw_data[i], curr_avg)
ch02/Listing2.14.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # language: python # name: python3 # --- # What will be the output: sample = 'hello', print(type(sample)) # What will be the output: t2 = ([1,2,3,4], [5,6,7,8]) t2[0].append(10) print(t2) # What will be the output: a = set('abracadabra') print(a) # + # Given a list, use list slicing to get the expected output: lst = [1,2,3,4,5] # Expected Output # [5,3,1] # - # What will be the output: bool(0.0)
interviewer/00_questions_python_core_concepts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Create csv data for Reuters-21578 dataset # # run the following cell to generate the csv # + import json import codecs import os import unicodecsv from collections import OrderedDict import os #data_size = "justTen" data_size = "full" outfile = os.path.abspath("reuters-21578.csv") row_count = 0 ordered_fieldnames = OrderedDict([('id',None),('title',None)]) with open(outfile,'wt') as f: dw = unicodecsv.DictWriter(f, delimiter=',', fieldnames=ordered_fieldnames, encoding='utf-8') dw.writeheader() for filename in os.listdir("reuters-21578-json/data/"+data_size): f = open("reuters-21578-json/data/"+data_size+"/"+filename) js = json.load(f) for j in js: if 'topics' in j and 'body' in j: r = {} r["id"] = j['id'] r["title"] = j['title'] dw.writerow(r) row_count += 1 print "finished writing csv data".format(**locals()) print "rows: {row_count}".format(**locals()) print "file: {outfile}".format(**locals())
python/examples/doc_similarity_reuters_csv.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Fourier Transforms

import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler


def plot_fourier_approximations(series, title):
    """Plot a price series together with low-pass Fourier reconstructions.

    For each component count k in (3, 6, 9, 100), the FFT of `series` is
    truncated to its k lowest-frequency coefficients at each end of the
    spectrum and inverted back to the time domain, giving progressively
    finer approximations of the original curve.

    Parameters
    ----------
    series : pandas.Series
        Daily closing prices.
    title : str
        Title for the resulting matplotlib figure.
    """
    # The original notebook repeated this cell verbatim for each dataset and
    # also built an `fft_df` with 'absolute'/'angle' columns that were never
    # used afterwards; both the duplication and the dead computation are
    # removed here.
    fft_values = np.fft.fft(np.asarray(series.tolist()))

    plt.figure(figsize=(14, 7), dpi=100)
    for num_ in [3, 6, 9, 100]:
        truncated = np.copy(fft_values)
        # zero out everything except the num_ lowest-frequency components
        truncated[num_:-num_] = 0
        plt.plot(np.fft.ifft(truncated),
                 label='Fourier transform with {} components'.format(num_))
    plt.plot(series, label='Real')
    plt.xlabel('Days')
    plt.ylabel('USD')
    plt.title(title)
    plt.legend()
    plt.show()


data = pd.read_csv('data/GOOG_040819 - 200416.csv', date_parser=True)
data.head()

close = data['Close'].copy()
close.head()

# Code to create the Fourier transform of the Google close prices
plot_fourier_approximations(close, 'Figure 3: Google (close) stock prices & Fourier transforms')

df = pd.read_csv(os.path.join('./mli/data','aa.us.txt'),delimiter=',',usecols=['Date','Open','High','Low','Close'])
df.head()

close_aa = df['Close'].copy()
close_aa.head()

# Code to create the Fourier transform of the AA close prices
plot_fourier_approximations(close_aa, 'Figure 3: AA (close) stock prices & Fourier transforms')
notebooks/Fourier Transforms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## 1st variant

# ### Dynamic Bayesian Network Structure Learning with hybrid network = (classic) encoder + (quantum) circuit.
# This is an example with real test data and dummy generated training data of 24 variables, that means 48 vertices (24 for t and 24 for t+1).
# input=(1,48)
# n_qubits=2 * ceil(log2(2 * nnodes)) -> for nnodes = 24, n_qubits = 12
# where first six digits correspond to vertice where the edge begins and six last digits correspond to vertice where edge ends i.e. 000010000101 corresponds to: vertice_2 ---> vertice_5
# It was tested with two sets of test data at the end of the notebook. The last test is with real data

# # ! pip3 install torch==1.10.2+cpu torchvision==0.11.3+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html

import pandas as pd
from torch.utils.data import Dataset
import torch
import torchvision
from torch import nn
import numpy as np
import pennylane as qml
import random
import networkx as nx
from matplotlib import pyplot as plt

# Load the real measurements; each time step T contributes `nnodes` rows with
# the value of each variable at t (column t0) and at t+1 (column t1).
path = "small_elu.csv"
ds = pd.read_csv(path)
ds

nnodes = 24
ds = ds.sort_values(by=['T', 'NAME'])
# One (2, nnodes) array per time step: row 0 = values at t, row 1 = values at t+1.
t01_list = [ds[['t0','t1']].iloc[f*nnodes:(f+1)*nnodes].values.T for f in range(len(ds)//nnodes)]
dst = pd.DataFrame({'T':range(len(ds)//nnodes), 't01':t01_list})
dst

# Index -> display-name / gene-label maps, taken from the first nnodes rows
# (assumes every T-block lists variables in the same sorted order — TODO confirm).
nodes_names = {f:ds[['NAME']].iloc[0:nnodes].values[f][0] for f in range(nnodes)}
nodes_genes = {f:ds[['GENE']].iloc[0:nnodes].values[f][0] for f in range(nnodes)}

# Placeholder targets for the real data (no ground-truth edges are known here;
# hard-coded to 9 time steps — presumably matching this csv, verify on new data).
dst['y'] = [range(4) for i in range(9)]
dst

# +
# Elementwise min-max scaler. Note the lambda parameters shadow the builtins
# `min`/`max`; they are plain floats here, not the builtin functions.
scale = np.frompyfunc(lambda x, min, max: (x-min)/(max - min), 3, 1)


def get_edges(n=4):
    # Draw a random set of directed edges for a 2-slice graph with 2*n vertices:
    # vertices 0..n-1 are the variables at t, vertices n..2n-1 at t+1.
    # e1 starts in the t slice (target anywhere); e2 ends in the t+1 slice.
    num_edges = random.randint(n, n+3)
    e1 = [(random.randint(0, n-1),random.randint(0, (n*2)-1)) for f in range(num_edges//2)]
    e2 = [(random.randint(0, (n*2)-1),random.randint(n, (n*2)-1)) for f in range(num_edges//2)]
    return e1 + e2


def get_t0(edges, weights, n=4):
    # Synthesize values for the t slice: start near zero, seed the first
    # intra-slice source vertex randomly, then propagate along edges whose
    # both endpoints lie in the t slice (edge[1] < n).
    t0 = np.zeros(n) + 0.01
    edges0 = [edge for i in range(n) for edge in edges if edge[0] == i and edge[1] < n]
    if len(edges0) > 0:
        t0[edges0[0][0]] = random.random()
    for edge in edges0:
        t0[edge[1]] += weights[edge[0]] + weights[edge[1]] * t0[edge[0]]
    return t0


def get_t1(edges, weights, t0, n=4):
    # Synthesize values for the t+1 slice by propagating along edges that end
    # in the t+1 slice (edge[1] >= n); the source may lie in either slice.
    t1 = np.zeros(n) + 0.01
    edges1 = [edge for edge in edges if edge[1] >= n]
    for edge in edges1:
        if edge[0] < n:
            t1[edge[1]-n] += weights[edge[0]] + weights[edge[1]-n] * t0[edge[0]]
        else:
            t1[edge[1]-n] += weights[edge[0]-n] + weights[edge[1]-n] * t1[edge[0]-n]
    return t1
# -

# generate training dataset: `exper` random graphs with matching synthetic data
exper = 1000
n_qubits = 12
arr_list = []
edges_list = []
for f in range(exper):
    weights = [random.randint(1, 10)/10 for f in range(nnodes)]
    edges = get_edges(n = nnodes)
    t0 = get_t0(edges, weights, n = nnodes)
    t1 = get_t1(edges, weights, t0, n = nnodes)
    # Min-max scale each (2, nnodes) sample to [0, 1].
    arr_list.append(scale(np.stack([t0,t1]),np.min(np.stack([t0,t1])), np.max(np.stack([t0,t1]))).astype(float))
    edges_list.append(edges)
arr = np.concatenate(arr_list, axis=1)
dsa = pd.DataFrame({'t01':arr_list})
dsa

# Encode each edge (src, dst) as a 12-bit string (6 bits per endpoint) and then
# as the corresponding integer, e.g.:
#int("110100010",2) = 418
edges_bin_list = [[np.binary_repr(ed[0], width=n_qubits//2) + np.binary_repr(ed[1], width=n_qubits//2) for ed in edges] for edges in edges_list]
ya_list = [[int(edge,2) for edge in edges] for edges in edges_bin_list]
dsa['y'] = ya_list
dsa

# +
dev = qml.device("default.qubit", wires=n_qubits)


@qml.qnode(dev)
def qnode(inputs, weights):
    # Quantum layer: angle-encode the 12-dim encoder output, apply RX/RY/RZ
    # entangling layers, and return the 2**n_qubits basis-state probabilities.
    qml.AngleEmbedding(inputs, wires=range(n_qubits))
    qml.BasicEntanglerLayers(weights[0], wires=range(n_qubits), rotation=qml.RX)
    qml.BasicEntanglerLayers(weights[1], wires=range(n_qubits), rotation=qml.RY)
    qml.BasicEntanglerLayers(weights[2], wires=range(n_qubits), rotation=qml.RZ)
    return qml.probs(wires=range(n_qubits))
# -

n_layers = 1
# Three stacks of entangler weights (one per rotation gate), each (n_layers, n_qubits).
weight_shapes = {"weights": (3, n_layers, n_qubits)}
qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)

# Classical encoder: 48 -> 24 -> 12 features feeding the quantum layer.
input_size = nnodes * 2
hidden_size = input_size // 2
code_size = n_qubits
encoder_hidden_layer = nn.Linear( in_features=input_size, out_features=hidden_size )
encoder_output_layer = nn.Linear( in_features=hidden_size, out_features=code_size )
layers = [encoder_hidden_layer, encoder_output_layer, qlayer]
model = torch.nn.Sequential(*layers)

#optimizer = torch.optim.SGD(model.parameters(), lr=0.2)
#criterion = torch.nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.MSELoss()


def error(predictions, y):
    # Mean absolute difference between predicted and target probability vectors,
    # averaged over the 2**n_qubits output entries.
    error = np.sum(abs(y.detach().numpy() - predictions.detach().numpy()))/len(y[0].detach().numpy())
    return error


# +
def get_ranks(outputs, y, weighted = False):
    # Rank position of each true edge inside the model output sorted in
    # descending probability; optionally weighted by the output probability
    # and the number of true edges.
    rp = np.flip(np.argsort(outputs.detach().numpy()))
    if weighted:
        a = [np.argwhere(rp == x)[0][1]*outputs.detach().numpy()[0][x]*len(np.nonzero(y.detach().numpy())[1]) for x in np.nonzero(y.detach().numpy())[1]]
    else:
        a = [np.argwhere(rp == x)[0][1] for x in np.nonzero(y.detach().numpy())[1]]
    return a


def score(outputs, y, weighted = False):
    # Normalized ranking score: 1.0 when every true edge is ranked at the very
    # top of the output, 0.0 when all true edges sit at the very bottom.
    ly = len(np.nonzero(y.detach().numpy())[1])
    lo = len(y[0].detach().numpy())
    ranks = get_ranks(outputs, y, weighted)
    sr = sum(ranks)
    sy = sum(range(ly))
    sw = sum(range(lo-ly,lo))
    return 1 - (sr - sy)/(sw - sy)
# -


class CustomDataset(Dataset):
    # Wraps a DataFrame with a 't01' column (2 x n value arrays) and a 'y'
    # column (lists of integer-encoded edges) into
    # (flattened input, target distribution) training pairs.
    def __init__(self, ds, n, q, transform=None):
        self.ds_full = ds
        self.n = n  # number of variables per slice
        self.q = q  # number of qubits; target vector has 2**q entries
        self.x_csv = self.ds_full[["t01"]]
        self.y_csv = self.ds_full[["y"]]
        self.transform = transform

    def __len__(self):
        return len(self.x_csv)

    def __getitem__(self, idx):
        x = np.array(self.x_csv.iloc[idx].tolist()[0])
        # Target: uniform probability mass spread over the encoded true edges,
        # expressed over all 2**q basis states.
        y = np.zeros(2**self.q)
        for i in self.y_csv.iloc[idx].tolist()[0]:
            #011000 24
            y[i] = 1/len(self.y_csv.iloc[idx].tolist()[0])
        if self.transform:
            x = self.transform(x)
            y = self.transform(y)
        return x, y


# +
batch_size = 1
transform = torchvision.transforms.Lambda(lambda y: torch.from_numpy(y).float())
train_dataset = CustomDataset(dsa, nnodes, n_qubits, transform)
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True,
    pin_memory=True
)
test_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=False
)
# -

# %%time
# Training loop: MSE between predicted basis-state probabilities and the target
# edge distribution, plus the error/score diagnostics defined above.
epochs = 100
for epoch in range(epochs):
    loss = 0
    err = 0
    metr = 0
    wmetr = 0
    for batch_features, y_batch in train_loader:
        batch_features = batch_features.view(-1, input_size)
        optimizer.zero_grad()
        outputs = model(batch_features)
        train_loss = criterion(outputs, y_batch)
        train_loss.backward()
        optimizer.step()
        loss += train_loss.item()
        err += error(outputs, y_batch)
        metr += score(outputs, y_batch, False)
        wmetr += score(outputs, y_batch, True)
    # Per-epoch averages over all batches.
    loss = loss / len(train_loader)
    err = err / len(train_loader)
    metr = metr / len(train_loader)
    wmetr = wmetr / len(train_loader)
    print("epoch : {}/{}, loss = {:.6f}, error = {:.6f}, score = {:.6f}, weighted_score = {:.6f}".format(epoch + 1, epochs, loss, err, metr, wmetr))

# ## testing with generated data

# generate Testing dataset: `exper` samples drawn from ONE fixed random graph
exper = 12
num_res = 12
arr_list = []
edges_list = []
edges = get_edges(n = nnodes)
for f in range(exper):
    weights = [random.randint(1, 10)/10 for f in range(nnodes)]
    t0 = get_t0(edges, weights, n = nnodes)
    t1 = get_t1(edges, weights, t0, n = nnodes)
    arr_list.append(scale(np.stack([t0,t1]),np.min(np.stack([t0,t1])), np.max(np.stack([t0,t1]))).astype(float))
    edges_list.append(edges)
arr = np.concatenate(arr_list, axis=1)
dstest = pd.DataFrame({'t01':arr_list})
dstest

#int("110100010",2) = 418
edges_bin_list = [[np.binary_repr(ed[0], width=n_qubits//2) + np.binary_repr(ed[1], width=n_qubits//2) for ed in edges] for edges in edges_list]
ya_list = [[int(edge,2) for edge in edges] for edges in edges_bin_list]
dstest['y'] = ya_list
dstest

# +
batch_size = 1
transform = torchvision.transforms.Lambda(lambda y: torch.from_numpy(y).float())
test_dataset = CustomDataset(dstest, nnodes, n_qubits, transform)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False,
    pin_memory=True
)
# -

# Run the model on every test sample, collecting raw outputs and per-sample
# descending-probability rankings.
experiments = []
outputs_list = []
for batch_features, _ in test_loader:
    batch_features = batch_features.view(-1, input_size)
    batch_features
    outputs = model(batch_features)
    outputs_list.append(outputs)
    experiments.append(np.flip(np.argsort(outputs.detach().numpy())))

# Average outputs over all experiments and rank basis states by mean probability.
ol = [o.detach().numpy() for o in outputs_list]
results_list = np.mean(np.array(ol), axis=0)
norm_results_list = scale(results_list, np.min(results_list), np.max(results_list)).astype(float)
results = np.flip(np.argsort(results_list))

np.max(sum(outputs_list).detach().numpy()),np.min(sum(outputs_list).detach().numpy())

results_bin = [np.binary_repr(f, width=n_qubits) for f in results.tolist()[0]]
results_weights = [norm_results_list[0][results[0][i]] for i in range(len(results[0]))]

# Compare the top-ranked predictions with the ground-truth edge encoding.
results.tolist()[0][:num_res], ya_list[0]

results_bin[:num_res]

#number of parameters
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
sum([np.prod(p.size()) for p in model_parameters])


def get_edges_array(n_qubits,y):
    # Decode integer-encoded edges back into (source, target) vertex pairs by
    # splitting the n_qubits-bit representation in half.
    arr = [np.binary_repr(f, width=n_qubits) for f in y]
    return [(int(f[:n_qubits//2],2), int(f[n_qubits//2:],2)) for f in arr]


y_edges = get_edges_array(n_qubits,ya_list[0])
p_edges = get_edges_array(n_qubits,results.tolist()[0][:num_res])
p_weights = results_weights[:num_res]

# +
# Draw the ground-truth graph (left) next to the predicted graph (right).
graph_y = None
graph_p = None
graph_y = nx.DiGraph()
graph_p = nx.DiGraph()
graph_y.add_nodes_from(range(nnodes*2))
graph_p.add_nodes_from(range(nnodes*2))
graph_y.add_edges_from(y_edges)
graph_p.add_edges_from(p_edges)
# Relabel vertices: n<i>_t0 for the t slice, n<i>_t1 for the t+1 slice.
nodes_names = {f:'n' + str(f) for f in range(nnodes)}
rnodes = [v + '_t0' for _, v in nodes_names.items()]
nodes_names.update({k:v + '_t0' for k, v in nodes_names.items()})
nodes_names.update({k + len(nodes_names):v[:-1] + '1' for k, v in nodes_names.items()})
graph_y = nx.relabel_nodes(graph_y, nodes_names, copy=False)
graph_p = nx.relabel_nodes(graph_p, nodes_names, copy=False)
#pos = nx.shell_layout(graph_y, nlist=[range(nnodes),range(nnodes,nnodes*2)], rotate=0.1, center=(1,5))
pos = nx.bipartite_layout(graph_y, nodes=rnodes)
subax1 = plt.subplot(121)
nx.draw(graph_y, pos, node_color='c', edge_color='k', width=5.0, edge_cmap=plt.cm.Blues, with_labels=True)
subax2 = plt.subplot(122)
nx.draw(graph_p, pos, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True)
plt.show()
# -

# ## testing with real data

# +
batch_size = 1
transform = torchvision.transforms.Lambda(lambda y: torch.from_numpy(y).float())
test_dataset = CustomDataset(dst, nnodes, n_qubits, transform)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False,
    pin_memory=True
)
# -

# Same evaluation as above, now on the real measurements in `dst`.
experiments = []
outputs_list = []
for batch_features, _ in test_loader:
    batch_features = batch_features.view(-1, input_size)
    batch_features
    outputs = model(batch_features)
    outputs_list.append(outputs)
    experiments.append(np.flip(np.argsort(outputs.detach().numpy())))

ol = [o.detach().numpy() for o in outputs_list]
results_list = np.mean(np.array(ol), axis=0)
norm_results_list = scale(results_list, np.min(results_list), np.max(results_list)).astype(float)
results = np.flip(np.argsort(results_list))

np.max(sum(outputs_list).detach().numpy()),np.min(sum(outputs_list).detach().numpy())

results_bin = [np.binary_repr(f, width=n_qubits) for f in results.tolist()[0]]
results_weights = [norm_results_list[0][results[0][i]] for i in range(len(results[0]))]

results.tolist()[0][:num_res]

results_bin[:num_res]

p_edges = get_edges_array(n_qubits,results.tolist()[0][:num_res])
p_weights = results_weights[:num_res]

# +
# Predicted graph on the real data, labeled once by variable name (graph_p)
# and once by gene label (graph_p2).
graph_p = None
graph_p2 = None
graph_p = nx.DiGraph()
graph_p2 = nx.DiGraph()
graph_p.add_nodes_from(range(nnodes*2))
graph_p2.add_nodes_from(range(nnodes*2))
graph_p.add_edges_from(p_edges)
graph_p2.add_edges_from(p_edges)
rnodes = [v + '_t0' for _, v in nodes_names.items()]
rgenes = [v + '_t0' for _, v in nodes_genes.items()]
nodes_names.update({k:v + '_t0' for k, v in nodes_names.items()})
nodes_names.update({k + len(nodes_names):v[:-1] + '1' for k, v in nodes_names.items()})
nodes_genes.update({k:v + '_t0' for k, v in nodes_genes.items()})
nodes_genes.update({k + len(nodes_genes):v[:-1] + '1' for k, v in nodes_genes.items()})
graph_p = nx.relabel_nodes(graph_p, nodes_names, copy=False)
graph_p2 = nx.relabel_nodes(graph_p2, nodes_genes, copy=False)
#pos = nx.shell_layout(graph_y, nlist=[range(nnodes),range(nnodes,nnodes*2)], rotate=0.1, center=(1,5))
pos = nx.bipartite_layout(graph_p, nodes=rnodes)
pos2 = nx.bipartite_layout(graph_p2, nodes=rgenes)
#subax1 = plt.subplot(121)
nx.draw(graph_p, pos, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True)
#subax2 = plt.subplot(122)
#nx.draw(graph_p2, pos2, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True)
plt.figure(figsize=(3,2))
plt.show()
# -

nx.draw(graph_p2, pos2, node_color='c', edge_color=p_weights, width=5.0, edge_cmap=plt.cm.Blues, with_labels=True)
plt.figure(figsize=(3,2))
plt.show()
notebooks/idea_DBNSL-small.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.9 64-bit (''learn-env'': conda)'
#     language: python
#     name: python36964bitlearnenvcondae7e6328cec2744cc9785efcdf88db667
# ---

# +
import sys
import os
import cv2
# NOTE(review): the resnet50 import below shadows mobilenet_v2's
# preprocess_input/decode_predictions — the ResNet50 versions are the ones used.
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input, decode_predictions
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
import tensorflow.keras as keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, UpSampling2D, Flatten, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D
from tensorflow.keras import optimizers
from keras.datasets import cifar100
import tensorflow as tf
from keras.utils import np_utils
import numpy as np
import matplotlib.pyplot as plt
import time
from skimage.transform import resize
# from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.preprocessing.image import ImageDataGenerator
# -


def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
    """Build a "random erasing" augmentation function.

    The returned callable erases one random rectangle of the input image with
    either a single random value or per-pixel random values.

    Args:
        p: Probability of applying the erasure at all.
        s_l, s_h: Lower/upper bound on erased area as a fraction of the image.
        r_1, r_2: Lower/upper bound on the rectangle's aspect ratio.
        v_l, v_h: Lower/upper bound of the random fill values.
        pixel_level: If True, fill with independent random values per pixel.

    Returns:
        A function mapping an (H, W, C) image array to the (mutated in place)
        augmented image.
    """
    def eraser(input_img):
        img_h, img_w, img_c = input_img.shape
        p_1 = np.random.rand()
        if p_1 > p:
            # Skip augmentation with probability 1 - p.
            return input_img
        # Rejection-sample a rectangle that fits inside the image.
        while True:
            s = np.random.uniform(s_l, s_h) * img_h * img_w
            r = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(s / r))
            h = int(np.sqrt(s * r))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break
        if pixel_level:
            c = np.random.uniform(v_l, v_h, (h, w, img_c))
        else:
            c = np.random.uniform(v_l, v_h)
        # NOTE: mutates the input array in place before returning it.
        input_img[top:top + h, left:left + w, :] = c
        return input_img
    return eraser


def load_images_from_folder(folder):
    """Read every readable image file in `folder` with OpenCV (BGR arrays)."""
    images = []
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, filename))
        if img is not None:  # cv2.imread returns None for unreadable files
            images.append(img)
    return images


# +
num_classes = 29
nb_epochs = 10  # NOTE(review): unused — model.fit below hard-codes epochs=5
img_sz = (100, 100)

# data paths
train_path = '../../data/asl_alphabet_train/'
validation_path = '../../data/asl_alphabet_validation/'

# Training generator with augmentation and a 70/30 train/validation split of
# the training directory; the hold-out directory uses no augmentation.
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.3)
valid_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_path, target_size=img_sz, color_mode='rgb', batch_size=32,
    class_mode='categorical', subset='training')
test_generator = train_datagen.flow_from_directory(
    train_path, target_size=img_sz, color_mode='rgb', batch_size=32,
    class_mode='categorical', subset='validation')
validation_generator = valid_datagen.flow_from_directory(
    validation_path, target_size=img_sz, color_mode='rgb', batch_size=32,
    class_mode='categorical')
# -

next(train_generator)[0].shape

# +
# Transfer learning: frozen ImageNet ResNet50 backbone (BatchNorm layers kept
# trainable so their statistics adapt to the new data) + small trainable head.
new_in = keras.Input(shape=next(train_generator)[0].shape[1:])
resnet_model = ResNet50(weights='imagenet', include_top=False, input_tensor=new_in)
for layer in resnet_model.layers:
    if isinstance(layer, BatchNormalization):
        layer.trainable = True
    else:
        layer.trainable = False

model = Sequential()
# model.add(UpSampling2D())
# model.add(UpSampling2D())
# model.add(UpSampling2D())
model.add(resnet_model)
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation='relu'))
model.add(Dropout(.25))
model.add(BatchNormalization())
model.add(Dense(num_classes, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# -

# +
early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                           min_delta=0,
                                           patience=1,
                                           verbose=0,
                                           mode="auto",
                                           baseline=None,
                                           restore_best_weights=False)
callbacks = [early_stop]

t = time.time()
historytemp = model.fit(train_generator,
                        steps_per_epoch=len(train_generator),
                        epochs=5,
                        validation_data=validation_generator,
                        callbacks=callbacks)
# BUGFIX: the original printed (t - time.time()), i.e. a *negative* duration;
# elapsed wall-clock time is end - start.
print('Training time: %s' % (time.time() - t))
# -

model.save('models/model.h5')
notebooks/post_fi/resnet50.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] graffitiCellId="id_pr3r54a"
# ## Problem statement
#
# Given a sorted array that may have duplicate values, use *binary search* to find the **first** and **last** indexes of a given value.
#
# For example, if you have the array `[0, 1, 2, 2, 3, 3, 3, 4, 5, 6]` and the given value is `3`, the answer will be `[4, 6]` (because the value `3` occurs first at index `4` and last at index `6` in the array).
#
# The expected complexity of the problem is $O(log(n))$.

# + graffitiCellId="id_stslkm6"
def first_and_last_index(arr, number):
    """
    Given a sorted array that may have duplicate values, use binary
    search to find the first and last indexes of a given value.

    Args:
        arr(list): Sorted array (or Python list) that may have duplicate values
        number(int): Value to search for in the array
    Returns:
        a list containing the first and last indexes of the given value,
        or [-1, -1] if the value does not occur in the array
    """
    first = _first_index(arr, number)
    if first == -1:
        # Value absent: no need to search for a last occurrence.
        return [-1, -1]
    return [first, _last_index(arr, number)]


def _first_index(arr, number):
    """Return the index of the first occurrence of number in sorted arr, or -1."""
    low, high = 0, len(arr) - 1
    found = -1
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == number:
            found = mid       # record the hit, then keep searching left
            high = mid - 1
        elif arr[mid] < number:
            low = mid + 1
        else:
            high = mid - 1
    return found


def _last_index(arr, number):
    """Return the index of the last occurrence of number in sorted arr, or -1."""
    low, high = 0, len(arr) - 1
    found = -1
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == number:
            found = mid       # record the hit, then keep searching right
            low = mid + 1
        elif arr[mid] < number:
            low = mid + 1
        else:
            high = mid - 1
    return found


# + [markdown] graffitiCellId="id_y3lxp1x"
# <span class="graffiti-highlight graffiti-id_y3lxp1x-id_fkngaks"><i></i><button>Show Solution</button></span>

# + [markdown] graffitiCellId="id_ii2o0tq"
# Below are several different test cases you can use to check your solution.
# + graffitiCellId="id_a8bxvyg"
def test_function(test_case):
    """Check first_and_last_index against one [input_list, number, solution] case.

    Prints "Pass" when the returned indexes equal the expected solution,
    "Fail" otherwise.
    """
    input_list, number, solution = test_case
    output = first_and_last_index(input_list, number)
    print("Pass" if output == solution else "Fail")


# + graffitiCellId="id_20phd4q"
# Single-element array: the only index is both first and last.
test_case_1 = [[1], 1, [0, 0]]
test_function(test_case_1)

# + graffitiCellId="id_9p3166p"
# Duplicates of 3 span indexes 3 through 6.
test_case_2 = [[0, 1, 2, 3, 3, 3, 3, 4, 5, 6], 3, [3, 6]]
test_function(test_case_2)

# + graffitiCellId="id_pcvcepp"
# Target sits at the very end of the array.
test_case_3 = [[0, 1, 2, 3, 4, 5], 5, [5, 5]]
test_function(test_case_3)

# + graffitiCellId="id_cz075rp"
# Missing value: the expected result is [-1, -1].
test_case_4 = [[0, 1, 2, 3, 4, 5], 6, [-1, -1]]
test_function(test_case_4)
Basic Algorithms/Basic Algorithms/First and last index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Computing the 4-Velocity Time-Component $u^0$, the Magnetic Field Measured by a Comoving Observer $b^{\mu}$, and the Poynting Vector $S^i$ # # ## Authors: <NAME> & <NAME> # # [comment]: <> (Abstract: TODO) # # **Notebook Status:** <font color='green'><b> Validated </b></font> # # **Validation Notes:** This module has been validated against a trusted code (the hand-written smallbPoynET in WVUThorns_diagnostics, which itself is based on expressions in IllinoisGRMHD... which was validated against the original GRMHD code of the Illinois NR group) # # ### NRPy+ Source Code for this module: [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py) # # [comment]: <> (Introduction: TODO) # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows # # 1. [Step 1](#u0bu): Computing $u^0$ and $b^{\mu}$ # 1. [Step 1.a](#4metric): Compute the 4-metric $g_{\mu\nu}$ and its inverse $g^{\mu\nu}$ from the ADM 3+1 variables, using the [`BSSN.ADMBSSN_tofrom_4metric`](../edit/BSSN/ADMBSSN_tofrom_4metric.py) ([**tutorial**](Tutorial-ADMBSSN_tofrom_4metric.ipynb)) NRPy+ module # 1. [Step 1.b](#u0): Compute $u^0$ from the Valencia 3-velocity # 1. [Step 1.c](#uj): Compute $u_j$ from $u^0$, the Valencia 3-velocity, and $g_{\mu\nu}$ # 1. [Step 1.d](#gamma): Compute $\gamma=$ `gammaDET` from the ADM 3+1 variables # 1. [Step 1.e](#beta): Compute $b^\mu$ # 1. 
[Step 2](#poynting_flux): Defining the Poynting Flux Vector $S^{i}$ # 1. [Step 2.a](#g): Computing $g^{i\nu}$ # 1. [Step 2.b](#s): Computing $S^{i}$ # 1. [Step 3](#code_validation): Code Validation against `u0_smallb_Poynting__Cartesian` NRPy+ module # 1. [Step 4](#appendix): Appendix: Proving Eqs. 53 and 56 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf) # 1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='u0bu'></a> # # # Step 1: Computing $u^0$ and $b^{\mu}$ \[Back to [top](#toc)\] # $$\label{u0bu}$$ # # First some definitions. The spatial components of $b^{\mu}$ are simply the magnetic field as measured by an observer comoving with the plasma $B^{\mu}_{\rm (u)}$, divided by $\sqrt{4\pi}$. In addition, in the ideal MHD limit, $B^{\mu}_{\rm (u)}$ is orthogonal to the plasma 4-velocity $u^\mu$, which sets the $\mu=0$ component. # # Note also that $B^{\mu}_{\rm (u)}$ is related to the magnetic field as measured by a *normal* observer $B^i$ via a simple projection (Eq 21 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf)), which results in the expressions (Eqs 23 and 24 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf)): # # \begin{align} # \sqrt{4\pi} b^0 = B^0_{\rm (u)} &= \frac{u_j B^j}{\alpha} \\ # \sqrt{4\pi} b^i = B^i_{\rm (u)} &= \frac{B^i + (u_j B^j) u^i}{\alpha u^0}\\ # \end{align} # # $B^i$ is related to the actual magnetic field evaluated in IllinoisGRMHD, $\tilde{B}^i$ via # # $$B^i = \frac{\tilde{B}^i}{\gamma},$$ # # where $\gamma$ is the determinant of the spatial 3-metric. # # The above expressions will require that we compute # 1. the 4-metric $g_{\mu\nu}$ from the ADM 3+1 variables # 1. $u^0$ from the Valencia 3-velocity # 1. $u_j$ from $u^0$, the Valencia 3-velocity, and $g_{\mu\nu}$ # 1. 
$\gamma$ from the ADM 3+1 variables # <a id='4metric'></a> # # ## Step 1.a: Compute the 4-metric $g_{\mu\nu}$ and its inverse $g^{\mu\nu}$ from the ADM 3+1 variables, using the [`BSSN.ADMBSSN_tofrom_4metric`](../edit/BSSN/ADMBSSN_tofrom_4metric.py) ([**tutorial**](Tutorial-ADMBSSN_tofrom_4metric.ipynb)) NRPy+ module \[Back to [top](#toc)\] # $$\label{4metric}$$ # # We are given $\gamma_{ij}$, $\alpha$, and $\beta^i$ from ADMBase, so let's first compute # # $$ # g_{\mu\nu} = \begin{pmatrix} # -\alpha^2 + \beta^k \beta_k & \beta_i \\ # \beta_j & \gamma_{ij} # \end{pmatrix}. # $$ # + # Step 1: Initialize needed Python/NRPy+ modules import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import NRPy_param_funcs as par # NRPy+: Parameter interface import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import reference_metric as rfm # NRPy+: Reference metric support from outputC import * # NRPy+: Basic C code output functionality import BSSN.ADMBSSN_tofrom_4metric as AB4m # NRPy+: ADM/BSSN <-> 4-metric conversions # Set spatial dimension = 3 DIM=3 thismodule = "smallbPoynET" # Step 1.a: Compute the 4-metric $g_{\mu\nu}$ and its inverse # $g^{\mu\nu}$ from the ADM 3+1 variables, using the # BSSN.ADMBSSN_tofrom_4metric NRPy+ module import BSSN.ADMBSSN_tofrom_4metric as AB4m gammaDD,betaU,alpha = AB4m.setup_ADM_quantities("ADM") AB4m.g4DD_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha) g4DD = AB4m.g4DD AB4m.g4UU_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha) g4UU = AB4m.g4UU # - # <a id='u0'></a> # # ## Step 1.b: Compute $u^0$ from the Valencia 3-velocity \[Back to [top](#toc)\] # $$\label{u0}$$ # # According to Eqs. 
9-11 of [the IllinoisGRMHD paper](https://arxiv.org/pdf/1501.07276.pdf), the Valencia 3-velocity $v^i_{(n)}$ is related to the 4-velocity $u^\mu$ via # # \begin{align} # \alpha v^i_{(n)} &= \frac{u^i}{u^0} + \beta^i \\ # \implies u^i &= u^0 \left(\alpha v^i_{(n)} - \beta^i\right) # \end{align} # # Defining $v^i = \frac{u^i}{u^0}$, we get # # $$v^i = \alpha v^i_{(n)} - \beta^i,$$ # # and in terms of this variable we get # # \begin{align} # g_{00} \left(u^0\right)^2 + 2 g_{0i} u^0 u^i + g_{ij} u^i u^j &= \left(u^0\right)^2 \left(g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j\right)\\ # \implies u^0 &= \pm \sqrt{\frac{-1}{g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j}} \\ # &= \pm \sqrt{\frac{-1}{(-\alpha^2 + \beta^2) + 2 \beta_i v^i + \gamma_{ij} v^i v^j}} \\ # &= \pm \sqrt{\frac{1}{\alpha^2 - \gamma_{ij}\left(\beta^i + v^i\right)\left(\beta^j + v^j\right)}}\\ # &= \pm \sqrt{\frac{1}{\alpha^2 - \alpha^2 \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\ # &= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}} # \end{align} # # Generally speaking, numerical errors will occasionally drive expressions under the radical to either negative values or potentially enormous values (corresponding to enormous Lorentz factors). Thus a reliable approach for computing $u^0$ requires that we first rewrite the above expression in terms of the Lorentz factor squared: $\Gamma^2=\left(\alpha u^0\right)^2$: # \begin{align} # u^0 &= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\ # \implies \left(\alpha u^0\right)^2 &= \frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}} \\ # \implies \gamma_{ij}v^i_{(n)}v^j_{(n)} &= 1 - \frac{1}{\left(\alpha u^0\right)^2} \\ # &= 1 - \frac{1}{\Gamma^2} # \end{align} # # In order for the bottom expression to hold true, the left-hand side must be between 0 and 1. Again, this is not guaranteed due to the appearance of numerical errors. 
In fact, a robust algorithm will not allow $\Gamma^2$ to become too large (which might contribute greatly to the stress-energy of a given gridpoint), so let's define $\Gamma_{\rm max}$, the largest allowed Lorentz factor. # # Then our algorithm for computing $u^0$ is as follows: # # If # $$R=\gamma_{ij}v^i_{(n)}v^j_{(n)}>1 - \frac{1}{\Gamma_{\rm max}^2},$$ # then adjust the 3-velocity $v^i$ as follows: # # $$v^i_{(n)} = \sqrt{\frac{1 - \frac{1}{\Gamma_{\rm max}^2}}{R}}v^i_{(n)}.$$ # # After this rescaling, we are then guaranteed that if $R$ is recomputed, it will be set to its ceiling value $R=R_{\rm max} = 1 - \frac{1}{\Gamma_{\rm max}^2}$. # # Then, regardless of whether the ceiling on $R$ was applied, $u^0$ can be safely computed via # # $$ # u^0 = \frac{1}{\alpha \sqrt{1-R}}. # $$ # + ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUX","ValenciavU",DIM=3) # Step 1: Compute R = 1 - 1/max(Gamma) R = sp.sympify(0) for i in range(DIM): for j in range(DIM): R += gammaDD[i][j]*ValenciavU[i]*ValenciavU[j] GAMMA_SPEED_LIMIT = par.Cparameters("REAL",thismodule,"GAMMA_SPEED_LIMIT",10.0) # Default value based on # IllinoisGRMHD. # GiRaFFE default = 2000.0 Rmax = 1 - 1/(GAMMA_SPEED_LIMIT*GAMMA_SPEED_LIMIT) rescaledValenciavU = ixp.zerorank1() for i in range(DIM): rescaledValenciavU[i] = ValenciavU[i]*sp.sqrt(Rmax/R) rescaledu0 = 1/(alpha*sp.sqrt(1-Rmax)) regularu0 = 1/(alpha*sp.sqrt(1-R)) computeu0_Cfunction = """ /* Function for computing u^0 from Valencia 3-velocity. 
*/ /* Inputs: ValenciavU[], alpha, gammaDD[][], GAMMA_SPEED_LIMIT (C parameter) */ /* Output: u0=u^0 and velocity-limited ValenciavU[] */\n\n""" computeu0_Cfunction += outputC([R,Rmax],["const double R","const double Rmax"],"returnstring", params="includebraces=False,CSE_varprefix=tmpR,outCverbose=False") computeu0_Cfunction += "if(R <= Rmax) " computeu0_Cfunction += outputC(regularu0,"u0","returnstring", params="includebraces=True,CSE_varprefix=tmpnorescale,outCverbose=False") computeu0_Cfunction += " else " computeu0_Cfunction += outputC([rescaledValenciavU[0],rescaledValenciavU[1],rescaledValenciavU[2],rescaledu0], ["ValenciavU0","ValenciavU1","ValenciavU2","u0"],"returnstring", params="includebraces=True,CSE_varprefix=tmprescale,outCverbose=False") print(computeu0_Cfunction) # - # <a id='uj'></a> # # ## Step 1.c: Compute $u_j$ from $u^0$, the Valencia 3-velocity, and $g_{\mu\nu}$ \[Back to [top](#toc)\] # $$\label{uj}$$ # # The basic equation is # # \begin{align} # u_j &= g_{\mu j} u^{\mu} \\ # &= g_{0j} u^0 + g_{ij} u^i \\ # &= \beta_j u^0 + \gamma_{ij} u^i \\ # &= \beta_j u^0 + \gamma_{ij} u^0 \left(\alpha v^i_{(n)} - \beta^i\right) \\ # &= u^0 \left(\beta_j + \gamma_{ij} \left(\alpha v^i_{(n)} - \beta^i\right) \right)\\ # &= \alpha u^0 \gamma_{ij} v^i_{(n)} \\ # \end{align} # + u0 = par.Cparameters("REAL",thismodule,"u0",1e300) # Will be overwritten in C code. Set to crazy value to ensure this. uD = ixp.zerorank1() for i in range(DIM): for j in range(DIM): uD[j] += alpha*u0*gammaDD[i][j]*ValenciavU[i] # - # <a id='beta'></a> # # ## Step 1.d: Compute $b^\mu$ \[Back to [top](#toc)\] # $$\label{beta}$$ # # We compute $b^\mu$ from the above expressions: # # \begin{align} # \sqrt{4\pi} b^0 = B^0_{\rm (u)} &= \frac{u_j B^j}{\alpha} \\ # \sqrt{4\pi} b^i = B^i_{\rm (u)} &= \frac{B^i + (u_j B^j) u^i}{\alpha u^0}\\ # \end{align} # # $B^i$ is exactly equal to the $B^i$ evaluated in IllinoisGRMHD/GiRaFFE. 
# NOTE(review): the four lines below are a whitespace-mangled fragment of a
# jupytext notebook ("Tutorial-u0_smallb_Poynting-Cartesian"): the original
# newlines/indentation were stripped, so markdown cells, NRPy+/SymPy code
# (u_j, b^mu, Poynting flux S^i, validation against the
# u0_smallb_Poynting__Cartesian module) and nbconvert shell commands are fused
# onto single physical lines, with statements split across line boundaries
# (e.g. the `g4UD[mu][delta] +=` expression spans the first two lines).
# Kept byte-identical: this text is not runnable Python as-is — presumably it
# should be restored from the original .ipynb/.py, not edited in place; TODO
# confirm the source notebook before attempting any reconstruction.
# # Pulling this together, we currently have available as input: # + $\tilde{B}^i$ # + $u_j$ # + $u^0$, # # with the goal of outputting now $b^\mu$ and $b^2$: # + M_PI = par.Cparameters("#define",thismodule,"M_PI","") BU = ixp.register_gridfunctions_for_single_rank1("AUX","BU",DIM=3) # uBcontraction = u_i B^i uBcontraction = sp.sympify(0) for i in range(DIM): uBcontraction += uD[i]*BU[i] # uU = 3-vector representing u^i = u^0 \left(\alpha v^i_{(n)} - \beta^i\right) uU = ixp.zerorank1() for i in range(DIM): uU[i] = u0*(alpha*ValenciavU[i] - betaU[i]) smallb4U = ixp.zerorank1(DIM=4) smallb4U[0] = uBcontraction/(alpha*sp.sqrt(4*M_PI)) for i in range(DIM): smallb4U[1+i] = (BU[i] + uBcontraction*uU[i])/(alpha*u0*sp.sqrt(4*M_PI)) # - # <a id='poynting_flux'></a> # # # Step 2: Defining the Poynting Flux Vector $S^{i}$ \[Back to [top](#toc)\] # $$\label{poynting_flux}$$ # # The Poynting flux is defined in Eq. 11 of [Kelly *et al.*](https://arxiv.org/pdf/1710.02132.pdf) (note that we choose the minus sign convention so that the Poynting luminosity across a spherical shell is $L_{\rm EM} = \int (-\alpha T^i_{\rm EM\ 0}) \sqrt{\gamma} d\Omega = \int S^r \sqrt{\gamma} d\Omega$, as in [Farris *et al.*](https://arxiv.org/pdf/1207.3354.pdf): # # $$ # S^i = -\alpha T^i_{\rm EM\ 0} = -\alpha\left(b^2 u^i u_0 + \frac{1}{2} b^2 g^i{}_0 - b^i b_0\right) # $$ # # # <a id='s'></a> # # ## Step 2.a: Computing $S^{i}$ \[Back to [top](#toc)\] # $$\label{s}$$ # # Given $g^{\mu\nu}$ computed above, we focus first on the $g^i{}_{0}$ term by computing # $$ # g^\mu{}_\delta = g^{\mu\nu} g_{\nu \delta}, # $$ # and then the rest of the Poynting flux vector can be immediately computed from quantities defined above: # $$ # S^i = -\alpha T^i_{\rm EM\ 0} = -\alpha\left(b^2 u^i u_0 + \frac{1}{2} b^2 g^i{}_0 - b^i b_0\right) # $$ # + # Step 2.a.i: compute g^\mu_\delta: g4UD = ixp.zerorank2(DIM=4) for mu in range(4): for delta in range(4): for nu in range(4): g4UD[mu][delta] += 
g4UU[mu][nu]*g4DD[nu][delta] # Step 2.a.ii: compute b_{\mu} smallb4D = ixp.zerorank1(DIM=4) for mu in range(4): for nu in range(4): smallb4D[mu] += g4DD[mu][nu]*smallb4U[nu] # Step 2.a.iii: compute u_0 = g_{mu 0} u^{mu} = g4DD[0][0]*u0 + g4DD[i][0]*uU[i] u_0 = g4DD[0][0]*u0 for i in range(DIM): u_0 += g4DD[i+1][0]*uU[i] # Step 2.a.iv: compute b^2, setting b^2 = smallb2etk, as gridfunctions with base names ending in a digit # are forbidden in NRPy+. smallb2etk = sp.sympify(0) for mu in range(4): smallb2etk += smallb4U[mu]*smallb4D[mu] # Step 2.a.v: compute S^i PoynSU = ixp.zerorank1() for i in range(DIM): PoynSU[i] = -alpha * (smallb2etk*uU[i]*u_0 + sp.Rational(1,2)*smallb2etk*g4UD[i+1][0] - smallb4U[i+1]*smallb4D[0]) # - # <a id='code_validation'></a> # # # Step 3: Code Validation against `u0_smallb_Poynting__Cartesian` NRPy+ module \[Back to [top](#toc)\] # $$\label{code_validation}$$ # # Here, as a code validation check, we verify agreement in the SymPy expressions for u0, smallbU, smallb2etk, and PoynSU between # # 1. this tutorial and # 2. the NRPy+ [u0_smallb_Poynting__Cartesian module](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py). # + import sys import u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0etc u0etc.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU) if u0etc.computeu0_Cfunction != computeu0_Cfunction: print("FAILURE: u0 C code has changed!") sys.exit(1) else: print("PASSED: u0 C code matches!") for i in range(4): print("u0etc.smallb4U["+str(i)+"] - smallb4U["+str(i)+"] = " + str(u0etc.smallb4U[i]-smallb4U[i])) print("u0etc.smallb2etk - smallb2etk = " + str(u0etc.smallb2etk-smallb2etk)) for i in range(DIM): print("u0etc.PoynSU["+str(i)+"] - PoynSU["+str(i)+"] = " + str(u0etc.PoynSU[i]-PoynSU[i])) # - # <a id='appendix'></a> # # # Step 4: Appendix: Proving Eqs. 
53 and 56 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf) # $$\label{appendix}$$ # # $u^\mu u_\mu = -1$ implies # # \begin{align} # g^{\mu\nu} u_\mu u_\nu &= g^{00} \left(u_0\right)^2 + 2 g^{0i} u_0 u_i + g^{ij} u_i u_j = -1 \\ # \implies &g^{00} \left(u_0\right)^2 + 2 g^{0i} u_0 u_i + g^{ij} u_i u_j + 1 = 0\\ # & a x^2 + b x + c = 0 # \end{align} # # Thus we have a quadratic equation for $u_0$, with solution given by # # \begin{align} # u_0 &= \frac{-b \pm \sqrt{b^2 - 4 a c}}{2 a} \\ # &= \frac{-2 g^{0i}u_i \pm \sqrt{\left(2 g^{0i} u_i\right)^2 - 4 g^{00} (g^{ij} u_i u_j + 1)}}{2 g^{00}}\\ # &= \frac{-g^{0i}u_i \pm \sqrt{\left(g^{0i} u_i\right)^2 - g^{00} (g^{ij} u_i u_j + 1)}}{g^{00}}\\ # \end{align} # # Notice that (Eq. 4.49 in [Gourgoulhon](https://arxiv.org/pdf/gr-qc/0703035.pdf)) # $$ # g^{\mu\nu} = \begin{pmatrix} # -\frac{1}{\alpha^2} & \frac{\beta^i}{\alpha^2} \\ # \frac{\beta^i}{\alpha^2} & \gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2} # \end{pmatrix}, # $$ # so we have # # \begin{align} # u_0 &= \frac{-\beta^i u_i/\alpha^2 \pm \sqrt{\left(\beta^i u_i/\alpha^2\right)^2 + 1/\alpha^2 (g^{ij} u_i u_j + 1)}}{1/\alpha^2}\\ # &= -\beta^i u_i \pm \sqrt{\left(\beta^i u_i\right)^2 + \alpha^2 (g^{ij} u_i u_j + 1)}\\ # &= -\beta^i u_i \pm \sqrt{\left(\beta^i u_i\right)^2 + \alpha^2 \left(\left[\gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2}\right] u_i u_j + 1\right)}\\ # &= -\beta^i u_i \pm \sqrt{\left(\beta^i u_i\right)^2 + \alpha^2 \left(\gamma^{ij}u_i u_j + 1\right) - \beta^i\beta^j u_i u_j}\\ # &= -\beta^i u_i \pm \sqrt{\alpha^2 \left(\gamma^{ij}u_i u_j + 1\right)}\\ # \end{align} # # Now, since # # $$ # u^0 = g^{\alpha 0} u_\alpha = -\frac{1}{\alpha^2} u_0 + \frac{\beta^i u_i}{\alpha^2}, # $$ # # we get # # \begin{align} # u^0 &= \frac{1}{\alpha^2} \left(u_0 + \beta^i u_i\right) \\ # &= \pm \frac{1}{\alpha^2} \sqrt{\alpha^2 \left(\gamma^{ij}u_i u_j + 1\right)}\\ # &= \pm \frac{1}{\alpha} \sqrt{\gamma^{ij}u_i u_j + 1}\\ # \end{align} # # By 
convention, the relativistic Gamma factor is positive and given by $\alpha u^0$, so we choose the positive root. Thus we have derived Eq. 53 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf): # # $$ # u^0 = \frac{1}{\alpha} \sqrt{\gamma^{ij}u_i u_j + 1}. # $$ # # Next we evaluate # # \begin{align} # u^i &= u_\mu g^{\mu i} \\ # &= u_0 g^{0 i} + u_j g^{i j}\\ # &= u_0 \frac{\beta^i}{\alpha^2} + u_j \left(\gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2}\right)\\ # &= \gamma^{ij} u_j + u_0 \frac{\beta^i}{\alpha^2} - u_j \frac{\beta^i\beta^j}{\alpha^2}\\ # &= \gamma^{ij} u_j + \frac{\beta^i}{\alpha^2} \left(u_0 - u_j \beta^j\right)\\ # &= \gamma^{ij} u_j - \beta^i u^0,\\ # \implies v^i &= \frac{\gamma^{ij} u_j}{u^0} - \beta^i # \end{align} # # which is equivalent to Eq. 56 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf). Notice in the last step, we used the above definition of $u^0$. # <a id='latex_pdf_output'></a> # # # Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-u0_smallb_Poynting-Cartesian.pdf](Tutorial-u0_smallb_Poynting-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) # !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-u0_smallb_Poynting-Cartesian.ipynb # !pdflatex -interaction=batchmode Tutorial-u0_smallb_Poynting-Cartesian.tex # !pdflatex -interaction=batchmode Tutorial-u0_smallb_Poynting-Cartesian.tex # !pdflatex -interaction=batchmode Tutorial-u0_smallb_Poynting-Cartesian.tex # !rm -f Tut*.out Tut*.aux Tut*.log
Tutorial-u0_smallb_Poynting-Cartesian.ipynb
# NOTE(review): the 15 lines below are a whitespace-mangled jupytext notebook
# (FPAR/GOME SIF analysis — per the trailing marker, "FPARPAR.ipynb"). The
# original newlines/indentation were stripped, fusing function bodies, loops
# and plotting cells onto single physical lines, with statements split across
# line boundaries, and the text is truncated mid-token ("import p") at the
# end. Several cells appear to have been broken in the source notebook itself
# (e.g. "s = def get_fractions...", "if cur_data = NetCDFFile(...)",
# "urcrnrlat=-,", the stray "1330Haiyan" token) — presumably in-progress
# scratch work; verify against the original notebook. Because Python structure
# depends on the lost indentation (e.g. which loop "base_lat += 0.01" belongs
# to is ambiguous), the code is kept byte-identical here; recover it from the
# original .ipynb rather than editing in place.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pyhdf.SD import SD, SDC import numpy as np import matplotlib.pyplot as plt import netCDF4 as nc import os os.getcwd() path1 = 'E:\\SIF data\\FPAR_2016' path2 = 'E:\\SIF data\\GOME_2016\\GOME_2016\\' os.chdir('E:\\SIF data\\FPAR_2016') # os.chdir('E:\\SIF data\\GOME_2016\\GOME_2016\\') import my_functions my_functions.coords_to_ind(-90,30) # + ILLINOIS = (36.7281, -91.8524, 42.493703, -87.020001) ILLINOIS = (30, -120, 49, -80) arr_ = [] def find_max_qc(ILLINOIS): qc_data = None fpar_data = None prefix ='FPAR_A2016' suffix = '.hdf' max_ = -np.inf acc = [] for i in range(1,361,8): a = str(int(i)) if i < 10: a = '00'+ a elif i < 100: a = '0' + a query = prefix + a + suffix try: data = SD(query, SDC.READ) print('read') cur_fpar = data.select('Fpar_500m')[:] cur_qc = data.select('FparExtra_QC')[:] left, down = my_functions.coords_to_ind(ILLINOIS[1], ILLINOIS[0]) right, up = my_functions.coords_to_ind(ILLINOIS[3], ILLINOIS[2]) print(right, left) print(down, up) qcs = cur_qc[up:down, left:right].flatten() ratio = my_functions.good_qc(qcs) acc.append([ratio, query]) # print(ratio) # if ratio > max_: # fpar_data = cur_fpar # qc_data = cur_qc # max_ = ratio except: continue return fpar_data, qc_data, acc def run_fpar_workflow(ILLINOIS, fpar_data, qc_data): arr_ = [] for i in np.arange(ILLINOIS[0], ILLINOIS[2], 0.1): for j in np.arange(ILLINOIS[1], ILLINOIS[3], 0.1): cur_lat, cur_lon = i,j lon_ind, lat_ind = my_functions.coords_to_ind(cur_lon, cur_lat) fp, qc = fpar_data[lat_ind, lon_ind], qc_data[lat_ind, lon_ind] if my_functions.get_cloud(qc)[0] == '0' and my_functions.get_cloud(qc)[1] == '0': arr_.append([cur_lat, cur_lon, fp]) return arr_ def find_fpar_date(fpar): prefix ='FPAR_A2016' suffix = '.hdf' suspects = [] for i in range(1,361,8): a = 
str(int(i)) if i < 10: a = '00'+ a elif i < 100: a = '0' + a query = prefix + a + suffix try: data = SD(query, SDC.READ) print('read') cur_fpar = data.select('Fpar_500m')[:] cur_qc = data.select('FparExtra_QC')[:] if np.mean(cur_fpar) == np.mean(fpar): print(query) suspects.append(query) except: continue return suspects # - fp_dat, qc_dat,acc = find_max_qc(ILLINOIS) cleaned_acc = sorted(acc, key = lambda x: x[0], reverse = True) cleaned_acc filtered_fpar = run_fpar_workflow(ILLINOIS, a,b) print(filtered_fpar) # + dat = SD('FPAR_A2016225.hdf', SDC.READ) fpp2 = dat.select('Fpar_500m')[:] qc2 = dat.select('FparExtra_QC')[:] print(fpp2.shape) arr2 = run_fpar_workflow(ILLINOIS, fpp2, qc2) # - filtered_data = np.array(arr2) suspects = find_fpar_date(a) print(suspects) filtered_data = np.array(filtered_fpar) filtered_data[:,2] from dbfread import DBF import sifutil ILLINOIS = (36.7281, -91.8524, 42.493703, -87.020001) # <p><b>Functions for processing fpar data, like get cloud, convert the qc index</b></p> # + import numpy.linalg as la def convert_binary(num): str_ = "{0:b}".format(num) if len(str_) < 8: str_ = '0'*(8-len(str_)) + str_ return str_ def get_cloud(num): str_ = convert_binary(num) return str_[1], str_[2] def interpolation(x,y): x = np.array(x) matrix = np.array([x**i for i in range(len(x))]).transpose() print(matrix) coeffs = la.solve(matrix,y) return coeffs def get_smooth_line(x,y): coeffs = interpolation(x,y) x_values = np.linspace(min(x), max(x), 100) y_values = [] for i in x_values: value = 0 for j in range(len(coeffs)): value += coeffs[j]*i**j y_values.append(value) return [list(x_values), y_values] # - time_series = [] qc = [] x_values = [] # <b>function for getting the whole fpar series for a year of a certain location (lat,lon)</b> # + def get_fpar_series(lat, lon): time_series = [] qc = [] x_values = [] prefix ='FPAR_A2016' suffix = '.hdf' print(prefix+suffix) for i in range(1,9,8): a = str(int(i)) if i < 10: a = '00'+ a elif i < 100: a = '0' + a 
query = prefix + a + suffix try: data = SD(query, SDC.READ) FPAR_data = data.select('Fpar_500m')[:] QC_data = data.select('FparExtra_QC')[:] # print(QC_data.shape) Q = QC_data[lat,lon] time_series.append(FPAR_data[lat,lon]) x_values.append(i) qc.append(Q) print(time_series) print(x_values) print(qc) except: continue return time_series, x_values, qc # data = SD('FPAR_A2016361.hdf', SDC.READ) # - # <b>function for filtering the fpars with cloud, given qc arrays, x values and fpar series</b> # + def clean_data(time_series, x_values, qc): good_fpars = [] good_dates = [] for i in range(len(time_series)): if get_cloud(qc[i])[0] == '0' and get_cloud(qc[i])[1] == '0': good_fpars.append(time_series[i]) good_dates.append(x_values[i]) return good_dates, good_fpars def good_qc(qc): cnt = 0 for i in range(len(qc)): if get_cloud(qc[i])[0] == '0' and get_cloud(qc[i])[1] == '0': cnt += 1 return cnt/len(qc) # return cnt/len(qc) # - # %matplotlib inline plt.plot(times) data = SD('FPAR_A2016225.hdf', SDC.READ).select('Fpar_500m')[:] import cv2 cv2.imwrite('225_august_fpar.jpg', data, 1) # + # from pyhdf.SD import SD, SDC # import sifutil # - coords_to_ind(135, 40) import math lat_eq = 360 / 40075 lon_eq = math.cos(self.lat_0) * 360 / 40057. 
lon_a = self.lon_0 - ((self.side_len/2) * lon_eq) lat_a = self.lat_0 - ((self.side_len/2) * lat_eq) unit = self.side_len / 2 # + from io import StringIO import re # import requests # from bs4 import BeautifulSoup # import pandas as pd # import warnings # BASE_CDL_URL = 'https://nassgeodata.gmu.edu/axis2/services/CDLService/GetCDLStat' # CHAMPAIGN = 17019 # def from_csv(filepath): # df = pd.read_csv(filepath) # return df # def get_by_fips(year, fips): # '''Grab CDL data for a county by FIPS code''' # url = BASE_CDL_URL + '?year=' + str(year) + '&fips=' + str(fips) + "&format=csv" # res = requests.get(url, verify = False) # returnurl = BeautifulSoup(res.text, 'lxml').find('returnurl').text # #print(returnurl) # rawdata = requests.get(returnurl, verify = False).text # raw_iter = StringIO(rawdata) # df = pd.read_csv(raw_iter, sep=" *, * ", engine='python')\ # .apply(pd.to_numeric, errors='ignore')\ # .set_index("Category") # return df # def get_by_box(year, llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat): # '''Grab CDL data by bounding box''' # x1, y1 = sifutil.convertProjection(llcrnrlon, llcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS) # x2, y2 = sifutil.convertProjection(urcrnrlon, urcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS) # url = BASE_CDL_URL + '?year=' + str(year) + '&bbox=' + str(min(x1,x2)) + "," +\ # str(min(y1, y2)) + "," + str(max(x1, x2)) + "," + str(max(y1, y2)) + "&format=csv" # print(url) # with warnings.catch_warnings(): # warnings.simplefilter("ignore") # res = requests.get(url, verify = False) # returnurl = BeautifulSoup(res.text, 'lxml').find('returnurl').text # #print(returnurl) # with warnings.catch_warnings(): # rawdata = requests.get(returnurl, verify = False).text # raw_iter = StringIO(rawdata) # df = pd.read_csv(raw_iter, sep=" *, * ")\ # .apply(pd.to_numeric, errors='ignore')\ # .set_index("Category") # return df # - def getNC_GOME(filepath): data = nc.Dataset(filepath, mode='r') lons = data['longitude'][:] lats = data['latitude'][:] sif = 
data['SIF_740'][:] data.close() return (lons, lats, sif) # + from mpl_toolkits.basemap import Basemap # - ILLINOIS = (36.7281, -91.8524, 42.493703, -87.020001) bounding_box = (40.16, -89, 41, -87.5) I_STATES_COORDS = (37, -96.65, 43.5, -84.81) import matplotlib.pyplot as plt import numpy as np def plot_illinois(ILLINOIS): fig, ax1 = plt.subplots() m = Basemap(projection='merc', llcrnrlat=ILLINOIS[0], llcrnrlon=ILLINOIS[1],\ urcrnrlat=ILLINOIS[2], urcrnrlon=ILLINOIS[3], resolution='c', ax=ax1) m.drawstates() m.drawcoastlines() m.fillcontinents(color='beige', lake_color='aqua') m.drawparallels(np.arange(36, 43, 1), labels=[0,1,1,0]) m.drawmeridians(np.arange(-92, -87, 1), labels=[1,0,0,1]) ax1.set_title('poop') return ax1 # %matplotlib inline plot_illinois(ILLINOIS) # <b>A whole process to obtain the GNOME data</b> import os os.chdir('E:\\SIF data\\GOME_2016\\GOME_2016\\') import netCDF4 as nc gome_0711 = getNC_GOME('08\\ret_f_nr5_nsvd12_v26_waves734_nolog.20160801_v27_all.nc') print(ILLINOIS) def inIllinois(ILLINOIS): if cur_data = NetCDFFile('08\\ret_f_nr5_nsvd12_v26_waves734_nolog.20160801_v27_all.nc') # + # cur_data.variables['Latitude_corners'][:] # plt.hist(cur_data.variables['Quality_flag'][:]) import math lat_corners = cur_data.variables['Latitude_corners'][:] lon_corners = cur_data.variables['Longitude_corners'][:] arg_min = None for i in range(len(lat_corners)): if math.fabs(lat_corners[i][0]-40) <= 2 and math.fabs(lon_corners[i][0] + 88) <= 2: arg_min = i print(lat_corners[arg_min], lon_corners[arg_min]) print(len(lat_corners), len(lon_corners)) # - averaged_sif = cur_data.variables['Daily_averaged_SIF'][:] lons = cur_data.variables['longitude'][:] lats = cur_data.variables['latitude'][:] clouds = cur_data.variables['cloud_fraction'][:] qa = cur_data.variables['Quality_flag'][:] print(len(lons)) # + print(lat_corners[arg_min], lon_corners[arg_min]) import matplotlib.pyplot as plt # %matplotlib inline plt.scatter(lat_corners[arg_min], lon_corners[arg_min]) 
# - arr = get_Illinois(lats, lons, averaged_sif, ILLINOIS, clouds, qa) lat, lon = arr[:,0], arr[:,1] sifs = arr[:,2] print(arr.shape) # + from numpy import array plt.figure(figsize = (20,10)) m = Basemap(projection='merc', llcrnrlat=0, llcrnrlon=49,\ urcrnrlat=-, urcrnrlon=ILLINOIS[3], resolution='i') m.drawstates() m.drawcoastlines() # m.fillcontinents(color='beige', lake_color='aqua') m.drawparallels(np.arange(36, 43, 1), labels=[0,1,1,0]) m.drawmeridians(np.arange(-92, -87, 1), labels=[0,1,1,0]) l = [-87] la = [40] s = [1] x,y = m(lon,lat) m.hexbin(x,y,C = sifs, gridsize = 100) m.colorbar(location='bottom') plt.show() # from mpl_toolkits.basemap import Basemap # import matplotlib.pyplot as plt # import matplotlib.colors as colors # from numpy import array # from numpy import max # # map = Basemap(llcrnrlon=110,llcrnrlat=0,urcrnrlon=140.,urcrnrlat=40., # # resolution='i', projection='merc') # map = Basemap(projection='merc', llcrnrlat=ILLINOIS[0], llcrnrlon=ILLINOIS[1],\ # urcrnrlat=ILLINOIS[2], urcrnrlon=ILLINOIS[3], resolution='i') # lats = [5,6,7,39,9,10,11,12,13,14,31.17] # lons = [121,121,121,-88,121,121,121,121,121,121,121.5] # c = [1,1.25,1.5,1.75,2,2.25,2.50,2.75,3,3,2.5] # la = [39, 40, 41] # lo = [-88, -87, -88] # c = [2,3,3] # x, y = map(lo, la) # map.drawcoastlines() # map.drawstates() # map.hexbin(array(x), array(y), C =c, gridsize = 8) # map.colorbar(location='bottom') # + ILLINOIS = (36.7281, -91.8524, 42.493703, -87.020001) # - plt.plot(lats) plt.plot(lons) plt.plot(x) print(lats[118000], lons[118000]) # + def get_Illinois(lats, lons, sifs, ILLINOIS, clouds, qa): illinois_ = [] for i in range(len(lons)): if lons[i] >= ILLINOIS[1] and qa[i] == 2 and lons[i] <= ILLINOIS[3] and lats[i] >= ILLINOIS[0] and lats[i] <= ILLINOIS[2] and clouds[i] <= 0.2: illinois_.append([lats[i], lons[i], sifs[i]]) return np.array(illinois_) # + # print(illinois_gome_lons) # print(illinois_gome_lats) arr = get_Illinois(lats, lons, averaged_sif, bounding_box,clouds) 
print(arr.shape) # print(arr[:,0]) # - x,y = clean_data(fpar_series, x_values, qc) plt.plot(x,y) plt.figure() plt.plot(x_values, fpar_series) import matplotlib.pyplot as plt a_lot_of_sif = [] x_ = [] for i in range(1,12): for j in range(1,31): # print(i,j) if i < 10: zero = '0' else: zero = '' if j < 10: zero2 = '0' else: zero2 = '' date = '2016' + zero + str(i) + zero2 + str(j) dir_ = zero + str(i) + '\\\\' file = dir_ + 'ret_f_nr5_nsvd12_v26_waves734_nolog.' + date + '_v27_all.nc' try: read_in = NetCDFFile(file) data = getNC_GOME(file) lons = data[0] lats = data[1] sifs = data[2] clouds = read_in['cloud_fraction'][:] qa = read_in['Quality_flag'][:] processed_data = get_Illinois(lats, lons, sifs, bounding_box, clouds, qa) print('good') if processed_data != []: a_lot_of_sif.append(np.mean(processed_data[:,2])) print(i,j) x_.append((i-1)*30+j) except: continue # gome_0711 = getNC_GOME('05\\ret_f_nr5_nsvd12_v26_waves734_nolog.20160501_v27_all.nc') print(len(a_lot_of_sif)) import matplotlib.pyplot as plt # %matplotlib inline plt.plot(x_, a_lot_of_sif) plt.title('filtered sif plot with at most 20% cloud coverage and the best qa') plt.xlabel('days') plt.ylabel('sif value') # %matplotlib inline a = np.array([[1,1,2],[2,2,1],[3,1,1]]) plt.imshow(a, label = ['a','b','c']) colors = [ im.cmap(im.norm(value)) for value in values] # + from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt map = Basemap(projection='ortho', lat_0=0, lon_0=0) map.drawmapboundary(fill_color='aqua') map.fillcontinents(color='coral',lake_color='aqua') map.drawcoastlines() lons = [0, 10, -30, -20] lats = [0, -10, 40, -20] x, y = map(lons, lats) map.scatter(x, y, marker='D',color='m') plt.show() # + import numpy as np from matplotlib import pyplot as plt from matplotlib import animation # First set up the figure, the axis, and the plot element we want to animate fig = plt.figure() ax = plt.axes(xlim=(0, 2), ylim=(-2, 2)) line, = ax.plot([], [], lw=2) # initialization function: plot 
the background of each frame def init(): line.set_data([], []) return line, # animation function. This is called sequentially def animate(i): x = np.linspace(0, 2, 1000) y = np.sin(2 * np.pi * (x - 0.01 * i)) line.set_data(x, y) return line, # call the animator. blit=True means only re-draw the parts that have changed. anim = animation.FuncAnimation(fig, animate, init_func=init, frames=200, interval=20, blit=True) # save the animation as an mp4. This requires ffmpeg or mencoder to be # installed. The extra_args ensure that the x264 codec is used, so that # the video can be embedded in html5. You may need to adjust this for # your system: for more information, see # http://matplotlib.sourceforge.net/api/animation_api.html anim.save('basic_animation.html', fps=30, extra_args=['-vcodec', 'libx264']) plt.show() # - os.getcwd() filtered_data[:,1][0:3] # + from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt import matplotlib.colors as colors from numpy import array # from numpy import max plt.figure(figsize = (20,10)) # map = Basemap(llcrnrlon=110,llcrnrlat=0,urcrnrlon=140.,urcrnrlat=40., # resolution='i', projection='merc') map = Basemap(projection='aea', llcrnrlat=10, llcrnrlon=-130,\ urcrnrlat=49, urcrnrlon = -80, resolution='i') lats = [5,6,7,39,9,10,11,12,13,14,31.17] lons = [121,121,121,-88,121,121,121,121,121,121,121.5] c = [1,1.25,1.5,1.75,2,2.25,2.50,2.75,3,3,2.5] la = [39, 40, 41] lo = [-88, -87, -88] c = [2,3,3] x, y = map(lo, la) x1, y1 = map(filtered_data[:,1], filtered_data[:,0]) map.drawcoastlines() map.drawstates() map.hexbin(array(x1), array(y1), C = filtered_data[:,2], gridsize = 150) map.colorbar(location='bottom') plt.show() # + from numpy.random import uniform import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.basemap import Basemap # create north polar stereographic basemap m = Basemap(lon_0=270, boundinglat=20, projection='npstere',round=True) #m = Basemap(lon_0=-105,lat_0=40,projection='ortho') # number of 
points, bins to plot. npts = 10000 bins = 40 # generate random points on a sphere, # so that every small area on the sphere is expected # to have the same number of points. # http://mathworld.wolfram.com/SpherePointPicking.html u = uniform(0.,1.,size=npts) v = uniform(0.,1.,size=npts) lons = 360.*u lats = (180./np.pi)*np.arccos(2*v-1) - 90. # toss points outside of map region. lats = np.compress(lats > 20, lats) lons = np.compress(lats > 20, lons) # convert to map projection coordinates. x1, y1 = m(lons, lats) # remove points outside projection limb. x = np.compress(np.logical_or(x1 < 1.e20,y1 < 1.e20), x1) y = np.compress(np.logical_or(x1 < 1.e20,y1 < 1.e20), y1) # function to plot at those points. xscaled = 4.*(x-0.5*(m.xmax-m.xmin))/m.xmax yscaled = 4.*(y-0.5*(m.ymax-m.ymin))/m.ymax z = xscaled*np.exp(-xscaled**2-yscaled**2) # make plot using hexbin fig = plt.figure(figsize=(12,5)) ax = fig.add_subplot(121) CS = m.hexbin(x,y,C=z,gridsize=bins,cmap=plt.cm.jet) # draw coastlines, lat/lon lines. m.drawcoastlines() m.drawparallels(np.arange(0,81,20)) m.drawmeridians(np.arange(-180,181,60)) m.colorbar() # draw colorbar plt.title('hexbin demo') # use histogram2d instead of hexbin. ax = fig.add_subplot(122) # remove points outside projection limb. bincount, xedges, yedges = np.histogram2d(x, y, bins=bins) mask = bincount == 0 # reset zero values to one to avoid divide-by-zero bincount = np.where(bincount == 0, 1, bincount) H, xedges, yedges = np.histogram2d(x, y, bins=bins, weights=z) H = np.ma.masked_where(mask, H/bincount) # set color of masked values to axes background (hexbin does this by default) palette = plt.cm.jet palette.set_bad(ax.get_axis_bgcolor(), 1.0) CS = m.pcolormesh(xedges,yedges,H.T,shading='flat',cmap=palette) # draw coastlines, lat/lon lines. 
m.drawcoastlines() m.drawparallels(np.arange(0,81,20)) m.drawmeridians(np.arange(-180,181,60)) m.colorbar() # draw colorbar plt.title('histogram2d demo') plt.show() # - a = plt.hexbin(np.array([1,2,3]), np.array([2,3,4]), C = np.array([1,2,1]), gridsize = 5) import sifutil # + '''Module to download CDL data''' from io import StringIO import re import requests from bs4 import BeautifulSoup import pandas as pd import sifutil import warnings BASE_CDL_URL = 'https://nassgeodata.gmu.edu/axis2/services/CDLService/GetCDLStat' CHAMPAIGN = 17019 import numpy as np import math def from_csv(filepath): df = pd.read_csv(filepath) return df def get_by_fips(year, fips): '''Grab CDL data for a county by FIPS code''' url = BASE_CDL_URL + '?year=' + str(year) + '&fips=' + str(fips) + "&format=csv" res = requests.get(url, verify = False) returnurl = BeautifulSoup(res.text, 'lxml').find('returnurl').text #print(returnurl) rawdata = requests.get(returnurl, verify = False).text raw_iter = StringIO(rawdata) df = pd.read_csv(raw_iter, sep=" *, * ", engine='python')\ .apply(pd.to_numeric, errors='ignore')\ .set_index("Category") return df def get_by_box(year, llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat): '''Grab CDL data by bounding box''' x1, y1 = sifutil.convertProjection(llcrnrlon, llcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS) x2, y2 = sifutil.convertProjection(urcrnrlon, urcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS) url = BASE_CDL_URL + '?year=' + str(year) + '&bbox=' + str(min(x1,x2)) + "," +\ str(min(y1, y2)) + "," + str(max([x1, x2])) + "," + str(max([y1, y2])) + "&format=csv" print(url) with warnings.catch_warnings(): warnings.simplefilter("ignore") res = requests.get(url, verify = False) returnurl = BeautifulSoup(res.text, 'lxml').find('returnurl').text #print(returnurl) with warnings.catch_warnings(): rawdata = requests.get(returnurl, verify = False).text raw_iter = StringIO(rawdata) df = pd.read_csv(raw_iter, sep=" *, * ")\ .apply(pd.to_numeric, errors='ignore')\ 
.set_index("Category") return df # + # year = 2016 # url = BASE_CDL_URL + '?year=' + str(year) + '&bbox=' + str(min(x1,x2)) + "," +\ # str(min(y1, y2)) + "," + str(max(x1, x2)) + "," + str(max(y1, y2)) + "&format=csv" # print(url) # + from scipy.optimize import lsq_linear fpars = [] def get_fpars(query): from my_functions import coords_to_ind, get_cloud base_lat = 38.4 base_lon = -89.59 base_lat = 40.7 base_lon = -88.2 # arg_max = None # max_qc = None # for i in range(105, 361,8): # query = 'FPAR_A2016' + str(i) + '.hdf' # raw_data = SD(query, SDC.READ) # QC_reg_data = raw_data.select('FparExtra_QC')[:] # FPAR_reg_data = raw_data.select('Fpar_500m')[:] # row = 0 # left, right = base_lon, base_lon - 2 # bottom, up = base_lat, base_lat + 2 # l,b = coords_to_ind(left, bottom)[0], coords_to_ind(left, bottom)[1] # r,u = coords_to_ind(right, up)[0], coords_to_ind(right, up)[1] # Quality_flag = good_qc(QC_reg_data[u:b, r:l].flatten()) # if not arg_max: # arg_max = query # max_qc = Quality_flag # elif Quality_flag > max_qc: # arg_max = query # max_qc = Quality_flag #init row = 0 rhs = np.zeros(16) # mat = np.zeros((16,4)) raw_data = SD(query, SDC.READ) QC_reg_data = raw_data.select('FparExtra_QC')[:] FPAR_reg_data = raw_data.select('Fpar_500m')[:] for i in range(4): cur_lon = base_lon for j in range(4): start_lon, start_lat = coords_to_ind(cur_lon, base_lat)[0], coords_to_ind(cur_lon, base_lat)[1] qc = QC_reg_data[start_lat, start_lon] if get_cloud(qc)[0] == '0' and get_cloud(qc)[1] == '0': end_lon, end_lat = coords_to_ind(cur_lon-0.01, base_lat+0.01)[0], coords_to_ind(cur_lon-0.01, base_lat+0.01)[1] # print(start_lat, end_lat) # print(start_lon, end_lon) rhs[row] = FPAR_reg_data[(start_lat+end_lat)//2, (start_lon + end_lon)//2] cur_lon -= 0.01 row += 1 base_lat += 0.01 # result = lsq_linear(mat, rhs, bounds = (0, 100)) return rhs # - #testing rhs = get_fpars('FPAR_A2016233.hdf') print(rhs) # fparreg_workflow() fparreg_workflow() # + def fparreg_workflow(): big_mat = 
get_proportion_matrix() # rhs = get_fpars('FPAR_A2016361.hdf') print(big_mat) save_matrix(big_mat) mat = load_matrix('dajuzhen2.npy') print(mat) def get_proportion_matrix(): from my_functions import get_fractions, get_by_box mat2 = np.zeros((16,4)) base_lat, base_lon = 38.3, -89.59 base_lat = 40.7 base_lon = -88.2 row = 0 for i in range(4): cur_lon = base_lon for j in range(4): print(base_lat, cur_lon) mat2[row,:] = get_fractions(get_by_box(2016, cur_lon - 0.01, base_lat, cur_lon, base_lat + 0.01)) cur_lon -= 0.01 print(row) row += 1 base_lat += 0.01 return mat2 def get_processed_matrix_and_rhs(mat, rhs): indices = [] for i in range(len(rhs)): if rhs[i] != 0: indices.append(i) indices = np.array(indices) # print(indices) return mat[indices, :], rhs[indices] def save_matrix(mat): from tempfile import TemporaryFile outfile = TemporaryFile() np.save('dajuzhen.npy', mat) def load_matrix(file): return np.load(file) def run_regression(): from my_functions import get_cloud, coords_to_ind from scipy.optimize import lsq_linear time_series = np.zeros((4, 45)) ct = 0 x_values = [] prefix ='FPAR_A2016' suffix = '.hdf' ct = 0 # print(prefix+suffix) for i in range(1,361,8): a = str(int(i)) if i < 10: a = '00'+ a elif i < 100: a = '0' + a query = prefix + a + suffix # print(query) try: data = SD(query, SDC.READ) m2 = load_matrix('dajuzhen.npy') rhs = get_fpars(query) # print(rhs) mat2, rhs2 = get_processed_matrix_and_rhs(m2,rhs) # print(mat2) # result = np.linalg.lstsq(mat2,rhs2) # print(result[0]) result = lsq_linear(mat2, rhs2, bounds = (0, 100)) # print(result.x) # print('result', result[0]) ct += 1 # # print('result', result[0]) time_series[:,ct-1] = np.array(result.x) x.append(i) except Exception as e: print(e) continue return x_values, time_series # - large_time = [] # + #testing1 # m2 = load_matrix('dajuzhen.npy') # print(rhs) # fparreg_workflow() # July 16 days x, time_series = run_regression() large_time.append(time_series) # # print(mat_3) # mat2, rhs2 = 
get_processed_matrix_and_rhs(m2,rhs) # result = np.linalg.lstsq(mat2, rhs2) # print(result[0]) #test whole function # x, time_series = run_regression() # - large_time[0].shape # x, time_series = run_regression() import matplotlib.pyplot as plt # %matplotlib inline time_series = large_time[0] plt.plot(time_series[0], label = 'corn') plt.plot(time_series[1], label = 'soybean') # plt.plot(time_series[2], label = 'forest') # plt.plot(time_series[3], label = 'grass') plt.legend() # time_series s = def get_fractions(cdl): total_acre = sum(cdl['Acreage']) if total_acre == 0: corn = 0 soy = 0 forest = 0 grass = 0 return if "Corn" in cdl.index: corn = cdl['Acreage']['Corn'] / total_acre else: corn = 0 if "Soybeans" in cdl.index: soy = cdl['Acreage']['Soybeans'] / total_acre else: soy = 0 pattern = re.compile(r' Forest') trees = [cdl.index[i] for i in range(len(cdl.index))\ if re.search(pattern, cdl.index[i]) != None] frst = 0 for tree in trees: frst += cdl['Acreage'][tree] forest = frst / total_acre grass = 1 - (forest + corn + soy) return np.array([corn, soy, forest, grass]) print(mat) from scipy.optimize import lsq_linear result = np.linalg.lstsq(mat, rhs) print(result[0]) # + def get_fpar_reg_series(): from my_functions import get_cloud, coords_to_ind time_series = np.zeros((4, 45)) qc = [] x_values = [] fpp = [] prefix ='FPAR_A2016' suffix = '.hdf' ct = 0 print(prefix+suffix) for i in range(1,361,8): a = str(int(i)) if i < 10: a = '00'+ a elif i < 100: a = '0' + a query = prefix + a + suffix print(query) try: data = SD(query, SDC.READ) # print('get') FPAR_data = data.select('Fpar_500m')[:] QC_data = data.select('FparExtra_QC')[:] base_lat = 38.3 base_lon = -89.99 Q = QC_data[coords_to_ind(base_lon, base_lat)[1],coords_to_ind(base_lon, base_lat)[0]] if get_cloud(Q)[0] == '0' and get_cloud(Q)[1] == '0': # print('inside') result = get_sifs(query) print(result.shape) fpp.append(result) # print(result) # time_series[:,ct] = np.array(result) ct += 1 except Exception as e: 
print(e) continue return fpp # data = SD('FPAR_A2016361.hdf', SDC.READ) # - #testing time_s = get_fpar_reg_series() print(time_s) mat = get_proportion_matrix() # + # %matplotlib inline time_s = np.array(time_s) print(time_s.shape) for i in range(16): if np.mean(time_s[:,i]) < 150: plt.plot(time_s[:,i]) 1330Haiyan # plt.figure() # plt.plot(time_s[0], label = 'corn') # plt.plot(time_s[1], label = 'soybeans') # plt.plot(time_s[2], label = 'forest') # plt.plot(time_s[3], label = 'grass') # plt.legend() # - base_lon = 20 base_lat = 20 for i in range(3): cur_lon = base_lon for j in range(3): # rhs[row] = FPAR_data[coords_to_ind(cur_lon, base_lat)[1],coords_to_ind(cur_lon, base_lat)[0]] cur_lon -= 0.25 row += 1 print(base_lat, cur_lon) base_lat += 0.25 print(1) # + arg_max = 'FPAR_A2016249.hdf' print(get_sifs().x) # - import os os.getcwd() print(time_s) # + import numpy as np from my_functions import get_fractions, get_by_box,hasdf base_lat, base_lon = 38.3, -89.99 mat2 = np.zeros((16,4)) row = 0 for i in range(4): cur_lon = base_lon for j in range(4): print(base_lat, cur_lon) mat2[row,:] = get_fractions(get_by_box(2016, cur_lon, base_lat, cur_lon - 0.1, base_lat + 0.1)) cur_lon -= 0.1 print(row) row += 1 base_lat += 0.1 # - import os os.getcwd() from io import StringIO # + def get_by_box(year, llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat): x1, y1 = sifutil.convertProjection(llcrnrlon, llcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS) x2, y2 = sifutil.convertProjection(urcrnrlon, urcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS) url = BASE_CDL_URL + '?year=' + str(year) + '&bbox=' + str(min(x1,x2)) + "," +\ str(min(y1, y2)) + "," + str(max(x1, x2)) + "," + str(max(y1, y2)) + "&format=csv" print(url) with warnings.catch_warnings(): warnings.simplefilter("ignore") res = requests.get(url, verify = False) returnurl = BeautifulSoup(res.text, 'lxml').find('returnurl').text #print(returnurl) with warnings.catch_warnings(): rawdata = requests.get(returnurl, verify = False).text 
raw_iter = StringIO(rawdata) df = pd.read_csv(raw_iter, sep=" *, * ")\ .apply(pd.to_numeric, errors='ignore')\ .set_index("Category") return df def get_fractions(cdl): total_acre = sum(cdl['Acreage']) if total_acre == 0: corn = 0 soy = 0 forest = 0 grass = 0 return if "Corn" in cdl.index: corn = cdl['Acreage']['Corn'] / total_acre else: corn = 0 if "Soybeans" in cdl.index: soy = cdl['Acreage']['Soybeans'] / total_acre else: soy = 0 pattern = re.compile(r' Forest') trees = [cdl.index[i] for i in range(len(cdl.index))\ if re.search(pattern, cdl.index[i]) != None] frst = 0 for tree in trees: frst += cdl['Acreage'][tree] forest = frst / total_acre grass = 1 - (forest + corn + soy) return np.array([corn, soy, forest, grass]) # - from my_functions import hasdf print(mat2) import my_functions import numpy as np x = np.array([[2,2,2,2,2],[1,1,1,1,1]]).T y = np.array([1,3,5,7,9]) result= np.linalg.lstsq(x,y) result import os os.getcwd() import p
FPARPAR.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn.functional as F
import pandas as pd

# +
def log_approx(input, order=2):
    """Approximate log(input) by its Taylor series around 1.

    input - (N,C) where C = number of classes
    order - number of expansion terms to sum
    """
    delta = input - 1
    # log(x) = sum_{n>=1} (-1)^(n+1) (x-1)^n / n, truncated at `order` terms.
    return sum((-1) ** (n + 1) * delta ** n / n for n in range(1, order + 1))

def cross_entropy_approx(input, target, order=2, reduction='mean'):
    """Cross entropy with the log replaced by its Taylor approximation.

    input - (N,C) raw predictions (logits), C = number of classes
    target - (N) class indices
    order - number of expansion terms for the log approximation
    """
    # TODO: Use log-sum-exp trick
    probabilities = F.softmax(input, dim=1)
    approx_log_probs = log_approx(probabilities, order=order)
    return F.nll_loss(approx_log_probs, target, reduction=reduction)

def brier_score_loss(input, target, reduction='mean'):
    """Brier score: https://en.wikipedia.org/wiki/Brier_score

    input - (N,C) class probabilities, C = number of classes
    target - (N) class indices
    reduction - reduction to apply to output, default: mean
    """
    num_classes = input[0].size()[0]
    one_hot_target = F.one_hot(target, num_classes=num_classes)
    per_sample = (input - one_hot_target).pow(2).sum(dim=1)
    return per_sample.mean() if reduction == 'mean' else per_sample
# -

probs = torch.linspace(0, 1, 100)
target = torch.zeros(100, dtype=torch.long)

sns.set(rc={'figure.figsize':(11.7,8.27)})

# For the visualization of the cross entropy approximations, we won't be able
# to use the `cross_entropy_approx` function directly, since it expects raw
# predictions (logits) as input, but we want to directly pass probabilities.
# So instead we pass the log probability approximations to the `nll_loss`
# function directly.

# +
prob_column = probs.unsqueeze(dim=1)
plt.plot(probs, F.nll_loss(torch.log(prob_column), target, reduction='none'), label="cross entropy")
plt.plot(probs, F.nll_loss(log_approx(prob_column, order=10), target, reduction='none'), label="cross entropy order 10")
plt.plot(probs, F.nll_loss(log_approx(prob_column, order=5), target, reduction='none'), label="cross entropy order 5")
plt.plot(probs, F.nll_loss(log_approx(prob_column, order=2), target, reduction='none'), label="cross entropy order 2")
plt.plot(probs, brier_score_loss(prob_column, target, reduction='none'), label="brier score loss")
plt.legend(loc="upper right")
plt.ylim(0, 5.0)
plt.ylabel('Loss')
plt.xlabel('Probability of correct class')
plt.show()
notebooks/Loss functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Moving the Robot # The Pioneer 3-DX robot is an all-purpose base, used for research and applications involving mapping, teleoperation, localization, monitoring and other behaviors. # # It is a so-called [**_differential-drive_** mobile platform](https://en.wikipedia.org/wiki/Differential_wheeled_robot), with a powered wheel on either side of the robot body, and a rear castor wheel for balance. # # Each wheel is powered by its own motor. The motion of the robot is determined by the speed on the wheels: # * If both wheels are driven at the same direction and speed, the robot will move in a straight line. # * If one speed is higher than the other one, the robot will turn towards the direction of the lower speed. # * If both wheels are turned with equal speed in opposite directions, the robot will spin around the central point of its axis. # [![differential_drive](img/motions.png "Differential drive")](http://www.guiott.com/CleaningRobot/C-Motion/Motion.htm) # # Let's see a Pioneer robot moving! # this is code cell -> click on it, then press Shift+Enter from IPython.display import YouTubeVideo YouTubeVideo('vasBnRS3tQk') # ### Initialization # Throughout the course, some code is already written for you, and organized in modules called *packages*. The cell below is an initialization step that must be called at the beginning of the notebook. import packages.initialization import pioneer3dx as p3dx p3dx.init() # ### Motion # Let's move the robot on the simulator! # # You are going to use a *widget*, a Graphical User Interface (GUI) with two sliders for moving the robot in two ways: translation and rotation. import motion_widget # The cell above outputs two sliders, which control the translation and rotation of the robot. 
Initially both values are zero; move the slider left or right to change their values and move the robot. # # Once you are familiar with the motion of the robot, please proceed to the next notebook: [Motion Functions](Motion%20Functions.ipynb). # --- # #### Try-a-Bot: an open source guide for robot programming # Developed by: # [![Robotic Intelligence Lab @ UJI](img/logo/robinlab.png "Robotic Intelligence Lab @ UJI")](http://robinlab.uji.es) # # Sponsored by: # <table> # <tr> # <td style="border:1px solid #ffffff ;">[![IEEE Robotics and Automation Society](img/logo/ras.png "IEEE Robotics and Automation Society")](http://www.ieee-ras.org)</td> # <td style="border:1px solid #ffffff ;">[![Cyberbotics](img/logo/cyberbotics.png "Cyberbotics")](http://www.cyberbotics.com)</td> # <td style="border:1px solid #ffffff ;">[![The Construct](img/logo/theconstruct.png "The Construct")](http://www.theconstructsim.com)</td> # </tr> # </table> # # Follow us: # <table> # <tr> # <td style="border:1px solid #ffffff ;">[![Facebook](img/logo/facebook.png "Facebook")](https://www.facebook.com/RobotProgrammingNetwork)</td> # <td style="border:1px solid #ffffff ;">[![YouTube](img/logo/youtube.png "YouTube")](https://www.youtube.com/user/robotprogrammingnet)</td> # </tr> # </table>
Moving the Robot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Support Vector Machine

# ### 1. Importing Libraries

from sklearn import svm
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
# FIX: plot_confusion_matrix was deprecated in scikit-learn 1.0 and removed in
# 1.2; ConfusionMatrixDisplay.from_estimator is the supported replacement.
from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plt

# ### 2. Data Preprocessing

iris_data = load_iris()
X = pd.DataFrame(iris_data.data, columns=iris_data.feature_names)
y = iris_data.target

# +
# 80/20 train/test split; features are standardized with statistics fitted on
# the training set only (no leakage into the test set).
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=1, test_size=0.2)
sc_X = StandardScaler()
X_trainscaled=sc_X.fit_transform(X_train)
X_testscaled=sc_X.transform(X_test)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# -

# ### 3. Training, Predictions and Scoring

clf = svm.SVC()
clf.fit(X_trainscaled, y_train)
print(clf.score(X_testscaled, y_test))

fig = ConfusionMatrixDisplay.from_estimator(clf, X_testscaled, y_test, display_labels=["Setosa","Versicolor","Virginica"])
fig.figure_.suptitle("Confusion Matrix for Iris Dataset")
plt.show()
sem 5/machine learning/ml practicals/Support Vector Machine.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import os, sys, time, copy import numpy as np import matplotlib.pyplot as plt import pickle sys.path.append('../') sys.path.append('../Lib') sys.path.append('../Models') sys.path.append('../Protocols') from cell_models import kernik, protocols, paci_2018 import mod_protocols import protocol_lib import mod_kernik as kernik import mod_trace as trace from Models.br1977 import BR1977 from ord2011 import ORD2011 import model_response # + def get_trace(model, protocol, prestep): prestep_protocol = protocol_lib.VoltageClampProtocol([protocol_lib.VoltageClampStep(voltage=-80.0, duration=prestep)]) model.generate_response(prestep_protocol, is_no_ion_selective=False) model.y_ss = model.y[:, -1] response_trace = model.generate_response(protocol, is_no_ion_selective=False) return response_trace def get_long_protocol(individual_dictionary, holding_step): all_steps = [] holding_step = protocol_lib.VoltageClampStep(-80, holding_step) for current, protocol in individual_dictionary.items(): all_steps.append(holding_step) all_steps += protocol.steps long_protocol = protocol_lib.VoltageClampProtocol(all_steps) return long_protocol def remove_start_of_protocol(protocol, removal_time_step): vc_segment_endpoints = protocol.get_voltage_change_endpoints() is_found = False i = 0 while not is_found: if removal_time_step < vc_segment_endpoints[i]: max_segment_idx = i is_found = True i += 1 new_start_voltage = protocol.get_voltage_at_time(removal_time_step) new_duration = (vc_segment_endpoints[max_segment_idx] - removal_time_step) #TODO make separate function and call from remove_end..() if isinstance(protocol.steps[max_segment_idx], protocol_lib.VoltageClampRamp) or isinstance(protocol.steps[max_segment_idx], mod_protocols.VoltageClampRamp): new_final_voltage 
= protocol.steps[max_segment_idx].voltage_end new_segment = protocol_lib.VoltageClampRamp( new_start_voltage, new_final_voltage, new_duration) else: new_start_voltage = protocol.steps[max_segment_idx].voltage new_segment = protocol_lib.VoltageClampStep( new_start_voltage, new_duration) new_protocol = protocol_lib.VoltageClampProtocol( [new_segment] + protocol.steps[(max_segment_idx + 1):]) return new_protocol def shorten_protocol_start(protocol, start_time, prestep, window, step_size, max_contribution, current_name, acceptable_change, model_name, removal_time_step=200, scale=1): min_acceptable_current = max_contribution * acceptable_change last_protocol = None while (max_contribution > min_acceptable_current).values[0]: if protocol.get_voltage_change_endpoints()[-1] <= removal_time_step: return protocol # print(max_contribution) last_protocol = copy.copy(protocol) protocol = remove_start_of_protocol( protocol, removal_time_step=removal_time_step) max_currents = get_max_currents( protocol, prestep=prestep, window=window, step_size=step_size, scale=scale, model_name=model_name) max_contribution = max_currents[max_currents["Current"]==current_name]["Contribution"] return last_protocol def get_protocol_without_end(protocol, start_time, window, extra_time, scale=1): window = window/scale extra_time = extra_time/scale vc_segment_endpoints = protocol.get_voltage_change_endpoints() cutoff_time = scale * (start_time + extra_time) if cutoff_time > vc_segment_endpoints[-1]: return protocol is_found = False i = 0 while not is_found: if cutoff_time < vc_segment_endpoints[i]: max_segment_idx = i is_found = True i += 1 new_duration = (cutoff_time - vc_segment_endpoints[max_segment_idx-1]) if new_duration <= 0.0: print("Warning ......................................................") if isinstance(protocol.steps[max_segment_idx], protocol_lib.VoltageClampRamp) or isinstance(protocol.steps[max_segment_idx], mod_protocols.VoltageClampRamp): new_start_voltage = 
protocol.steps[max_segment_idx].voltage_start new_final_voltage = protocol.get_voltage_at_time(cutoff_time) new_segment = protocol_lib.VoltageClampRamp( new_start_voltage, new_final_voltage, new_duration) else: new_start_voltage = protocol.steps[max_segment_idx].voltage new_segment = protocol_lib.VoltageClampStep( new_start_voltage, new_duration) new_protocol = protocol_lib.VoltageClampProtocol( protocol.steps[0:max_segment_idx] + [new_segment]) return new_protocol def get_max_currents(vc_protocol, prestep, window, step_size, model_name, scale=1): if model_name == 'Paci': baseline_paci= paci_2018.PaciModel(is_exp_artefact=True) i_trace = get_trace(baseline_paci, vc_protocol, prestep=prestep) elif model_name == 'BR1977': model = BR1977(vc_protocol) i_trace = model_response.get_model_response_JK(model, vc_protocol, prestep=prestep) elif model_name == 'ORD2011': model = ORD2011(vc_protocol) i_trace = model_response.get_model_response_JK(model, vc_protocol, prestep=prestep) elif model_name == 'OHara2017': model = "../mmt-model-files/ohara-cipa-v1-2017_JK-v1.mmt" i_trace = model_response.get_model_response_with_myokit( model, vc_protocol, prestep=prestep ) else: baseline_kernik = kernik.KernikModel(is_exp_artefact=True) i_trace = get_trace(baseline_kernik, vc_protocol, prestep=prestep) max_currents = i_trace.current_response_info.get_max_current_contributions( i_trace.t, window=window/scale, step_size=step_size/scale) return max_currents def shorten_protocol(best_individual, current_name, only_end, model_name, window=10, step_size=5, prestep=2000): vc_protocol = best_individual.protocol length_of_protocol = vc_protocol.get_voltage_change_endpoints()[-1] scale = 1 max_currents = get_max_currents(vc_protocol, prestep=prestep, window=window, step_size=step_size, model_name=model_name, scale=scale) # print(max_currents[max_currents["Current"]==current_name]) # 0 I_Na 0.976547 860.0 870.0 start_time = float(max_currents[max_currents["Current"]==current_name]["Time Start"]) 
shortened_protocol = get_protocol_without_end( vc_protocol, start_time, window, extra_time=100, scale=scale) if not only_end: max_contribution = max_currents[max_currents["Current"]==current_name]["Contribution"] if current_name == "I_Kr" or "IKr" or "ikr.IKr": accepted_threshold = .99 else: accepted_threshold = .95 shortened_protocol = shorten_protocol_start(shortened_protocol, start_time, prestep, window, step_size, max_contribution, current_name, acceptable_change=accepted_threshold, scale=scale, model_name=model_name) print( f'Protocol length of {current_name} decreased from {length_of_protocol} to {shortened_protocol.get_voltage_change_endpoints()[-1]}.') return shortened_protocol def get_high_fitness(ga_result): best_individual = ga_result.generations[0][0] for i, gen in enumerate(ga_result.generations): best_in_gen = ga_result.get_high_fitness_individual(i) if best_in_gen.fitness > best_individual.fitness: best_individual = best_in_gen return best_individual def make_shortened_results(trial_conditions, only_end, holding_step, prestep, window, step_size, with_artefact=False, model_name='ORD2011', currents=['I_Na', 'I_NaL', 'I_to', 'I_CaL', 'I_Kr', 'I_Ks', 'I_K1' ] ): folder = f"ga_results/{trial_conditions}" original_protocols = {} shortened_protocols = {} for current in currents: ga_result = pickle.load(open(f'{folder}/ga_results_{current}_a{with_artefact}', 'rb')) best_individual = get_high_fitness(ga_result) shortened_protocols[current] = shorten_protocol(best_individual, window=window, prestep=prestep, current_name=current, only_end=only_end, model_name=model_name) original_protocols[current] = best_individual.protocol pickle.dump(shortened_protocols[current], open(f"{folder}/short_{current}_p{prestep}_oe{only_end}_a{with_artefact}.pkl", 'wb')) new_long_protocol = get_long_protocol(shortened_protocols, holding_step) scale = 1 shortened_max_currents = get_max_currents( new_long_protocol, prestep=prestep, window=window, step_size=step_size, 
model_name=model_name, scale=scale) print(f"The shortened_max_currents are {shortened_max_currents}") shortened_max_currents.to_csv( f"{folder}/{trial_conditions}_h{holding_step}_p{prestep}_oe{only_end}_a{with_artefact}.csv") pickle.dump(new_long_protocol, open(f"{folder}/{trial_conditions}_h{holding_step}_p{prestep}_oe{only_end}_a{with_artefact}.pkl", 'wb')) # return shortened_protocols, new_long_protocol # + trial_conditions = "OHara2017_360_100_4_-121_61_10_5" prestep = 5000 window = 10 step_size = 5 holding_step = 500 only_end = False with_artefact =False model_name = trial_conditions.split('_')[0] currents = ['I_Na', 'I_Kr', 'I_Ks', 'I_To', 'I_CaL', 'I_K1', 'I_NaL' ] if model_name=='BR1977': with_artefact = False currents = ['I_Na', 'I_si', 'I_K1', 'I_x1'] elif model_name=='Kernik': with_artefact = True currents = ['I_Na', 'I_Kr', 'I_Ks', 'I_To', 'I_F', 'I_CaL', 'I_K1'] elif model_name=='OHara2017': with_artefact = False currents = ['INa', 'INaL', 'Ito', 'ICaL', 'IKr', 'IKs', 'IK1'] start_time = time.time() make_shortened_results(trial_conditions, only_end=only_end, holding_step=holding_step, prestep=prestep, window=window, step_size=step_size, with_artefact=with_artefact, model_name=model_name, currents=currents) print("--- %s seconds ---"%(time.time()-start_time)) # -
Optimize_protocol/ga_make_whole_protocol.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.3 64-bit (''tesseractenv'': conda)' # name: python383jvsc74a57bd058f613cfafaf5447cb7f6a03863dedee5a2503be54003a11ba57d61ca7d6eda1 # --- # + import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib get_ipython().run_line_magic('matplotlib', 'inline') import datetime as dt # 1. Saved .xlsx as csv and Imported CSV AAC_accidents = pd.read_csv('/Users/AnnaD/Desktop/AAC/_github-AAC_accidents_tagged_data.csv') # 2. Ensured Date was in DateTime format AAC_accidents['Publication Year'] = pd.to_datetime(AAC_accidents['Publication Year'], yearfirst=True, format = '%Y') AAC_accidents['Publication Year'] = pd.DatetimeIndex(AAC_accidents['Publication Year']).year AAC_accidents['Publication Year'] = AAC_accidents['Publication Year'].fillna(0) AAC_accidents['Publication Year'] = AAC_accidents['Publication Year'].astype(int) # 3. Replaced NaN for 0 AAC_accidents= AAC_accidents.fillna(0) # 4. I wanted to create a new column with the location of the accident. This would involve matching the Text and accident columns to a list containing Canadian Provinces and US States Provinces_States = ['Alabama', 'Alaska', 'American Samoa', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'District of Columbia', 'Florida', 'Georgia', 'Guam', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Minor Outlying Islands', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Northern Mariana Islands', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Puerto Rico', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'U.S. 
Virgin Islands', 'Utah', 'Vermont', 'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming','Alberta', 'British Columbia', 'Manitoba','New Brunswick', 'Newfoundland and Labrador', 'Northwest Territories', 'Nova Scotia','Nunavut', 'Ontario', 'Prince Edward Island' , 'PEI', 'Quebec','Saskatchewan', 'Yukon'] #5. Created a new Location Column AAC_accidents['Location'] = '' for i in range(AAC_accidents.shape[0] - 1): title = AAC_accidents['Accident Title'].iloc[i] text = AAC_accidents['Text'].iloc[i] location = None for place in Provinces_States: if place in title: location = place break elif place in text: location = place break if location == None: print(title) print() AAC_accidents['Location'].iloc[i]=location AAC_accidents['Location'].isna().sum() # - pip install statsmodels
AACaccidentsjupyter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Surfs Up!
# ## Step 2
# * Use the engine and connection string to create a database called hawaii.sqlite.
# * Use declarative_base and create ORM classes for each table.
# * You will need a class for Measurement and for Station.
# * Make sure to define your primary keys.
# * Once you have your ORM classes defined, create the tables in the database using create_all.

# +
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import pymysql
pymysql.install_as_MySQLdb()
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from sqlalchemy import Column, Integer, String, Float, Date
from config import dbuser, dbpasswd, dburi, dbport, dbname
# -

#engine = create_engine("sqlite:///hawaii.sqlite")
engine = create_engine(f"mysql://{dbuser}:{dbpasswd}@{dburi}:{dbport}/{dbname}")
session = Session(bind=engine)

# +
# Define a class for measurement and station
class Measurement(Base):
    __tablename__='measurements'
    id = Column(Integer, primary_key=True)
    station = Column(String)
    date = Column(Date)
    prcp = Column(Float)
    tobs = Column(Integer)

class Station(Base):
    __tablename__='stations'
    id = Column(Integer, primary_key=True)
    station = Column(String)
    name = Column(String)
    latitude = Column(Float)
    longitude = Column(Float)
    elevation = Column(Float)
# -

# BUG FIX: create_all() previously ran BEFORE the ORM classes were declared,
# so Base.metadata was empty and the ORM created no tables. It must run after
# the classes have registered their tables on Base.metadata.
# (to_sql below with if_exists='replace' also recreates the tables from the
# DataFrames, which is why the bug went unnoticed.)
Base.metadata.create_all(engine)

# read in csv data
measurement_data = pd.read_csv('clean_hawaii_measurements.csv')
station_data = pd.read_csv('clean_hawaii_stations.csv')

# send the dataframe data to MySQL
measurement_data.to_sql(con=engine, name='measurements', if_exists='replace')
station_data.to_sql(con=engine, name='stations', if_exists='replace')
.ipynb_checkpoints/database_engineering-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import os import seaborn as sns import statsmodels import numpy as np from pathlib import Path from datetime import datetime from scipy.stats import shapiro import statsmodels.api as sm from statsmodels.stats.diagnostic import acorr_ljungbox from matplotlib import pyplot as plt # %matplotlib inline # #### Read in data data_path = "D:\\Users\\Nicholas\\Projects\\repos\\spc_charts\\data\\raw\\diabetes\\Diabetes-Data" data_files = [i for i in os.listdir(data_path) if 'data-' in i] def read_data(path): data = pd.read_table(path, header=None) data.columns = ['Date', 'Time', 'Code', 'Value'] return data # + data = pd.DataFrame(columns = ['Date', 'Time', 'Code', 'Value']) for i in range(len(data_files)): temp = read_data(Path(data_path) / data_files[i]) data = data.append(temp, ignore_index=True) # - # #### Subset data to blood glucose measurement data_sub = data.loc[data['Code'].isin([48, 57, 58, 59, 60, 61, 62, 63, 64]), :].copy() data_sub['Date'] = data_sub['Date'] + ' ' + data_sub['Time'] data_sub['Date'] = pd.to_datetime(data_sub['Date'], errors='coerce') data_sub['Value'] = pd.to_numeric(data_sub['Value'], errors='coerce') data_sub = data_sub.dropna() # #### Aggregate by date data_sub_day = data_sub.groupby(pd.Grouper(key='Date', freq='d')).agg(mean = ('Value', 'mean'), sd = ('Value', 'std'), n = ('Value', 'count')) data_sub_day = data_sub_day.dropna() data_sub_day = data_sub_day.reset_index() # #### Subset data to a smaller set data_sub_day.plot.line(x='Date', y='mean', figsize=(20,10)) date_filter = (data_sub_day['Date'] >= '1989-09') & (data_sub_day['Date'] < '1989-11-06') date_cutoff = '1989-10-15' data_sub_day[date_filter].plot.line(x='Date', y='mean', figsize=(20,10)) data_sub_day = data_sub_day[date_filter].copy() # #### 
Hypothesis test fig = sm.qqplot(data_sub_day.loc[data_sub_day['Date'] < date_cutoff, 'mean'], fit=True, line='45') plt.show() def shapiro_wilks_(data, alpha): # Imports from scipy.stats import shapiro # normality test stat, p = shapiro(data) print('Statistics=%.3f, p=%.3f' % (stat, p)) # interpret if p > alpha: print('Sample looks Gaussian (fail to reject H0)') else: print('Sample does not look Gaussian (reject H0)') shapiro_wilks_(data_sub_day.loc[data_sub_day['Date'] < date_cutoff, 'mean'], alpha=0.05) def jarque_bera_(data, alpha): # imports import statsmodels # normality test stat, p, skew, kurt = statsmodels.stats.stattools.jarque_bera(data) print('Statistics=%.3f, p=%.3f' % (stat, p)) # interpret if p > alpha: print('Sample looks Gaussian (fail to reject H0)') else: print('Sample does not look Gaussian (reject H0)') jarque_bera_(data_sub_day.loc[data_sub_day['Date'] < date_cutoff, 'mean'], alpha=0.05) acorr_ljungbox(data_sub_day.loc[data_sub_day['Date'] < date_cutoff, 'mean'], lags=10) # #### Get in control mean # Get in-control mean data_sub_day.loc[data_sub_day['Date'] < date_cutoff, 'mean'].mean() in_control_mean = data_sub_day.loc[data_sub_day['Date'] < date_cutoff, 'mean'].mean() # #### Calculate moving range data_sub_day['MR'] = data_sub_day['mean'].rolling(window=2).apply(lambda x: x.max() - x.min(), raw=True) x_ind_df = data_sub_day.copy() def x_ind_params(mu0, sigma, length, L=3): # params UCL = mu0 + L * sigma center = mu0 LCL = mu0 - L * sigma ret = pd.DataFrame({ 'UCL':UCL, 'Center':center, 'LCL':LCL }, index=list(range(length))) return ret x_ind_params_df = x_ind_params(mu0=in_control_mean, sigma = x_ind_df['MR'].mean() / 1.128, length = len(x_ind_df['mean'])) x_ind_df = pd.concat([x_ind_df.reset_index(drop=True), x_ind_params_df.reset_index(drop=True)], axis=1) fig, ax = plt.subplots(figsize=(15,5)) sns.relplot(x='Date', y='mean', data=x_ind_df, kind='line', ax=ax, color='blue', marker='o') sns.lineplot(x='Date', y='UCL', data=x_ind_df, 
drawstyle='steps-pre', ax=ax, color='red') sns.lineplot(x='Date', y='LCL', data=x_ind_df, drawstyle='steps-pre', ax=ax, color='red') sns.relplot(x='Date', y='Center', data=x_ind_df, kind='line', ax=ax, color='black') plt.close() plt.close() plt.title('Individual Measurement Chart') plt.ylabel('X') x_ind_df['Date'][x_ind_df['mean'] > x_ind_df['UCL'][-1:].values[0]].reset_index(drop=True)[0] # #### Calculate EWMA def ewma(arg, alpha=0.1, mu0=None): if mu0 is None: arg_temp = arg.copy().tolist() else: arg_temp = arg.copy().tolist() arg_temp.insert(0, mu0) # Convert list to series arg_series = pd.Series(arg_temp) # calculate offset: if mu0=None, offset=0, else offset=1 offset = len(arg_series) - len(arg) # Return ewma ret = arg_series.ewm(alpha=alpha, adjust=False).mean()[offset:].tolist() return ret def ewma_params(mu0, sigma, length, alpha=0.1, L=3): # Set up the index i = pd.Series(list(range(1, length+1))) # params UCL = mu0 + L * sigma * np.sqrt((alpha / (2-alpha)) * (1 - (1-alpha)**(2*i))) center = mu0 LCL = mu0 - L * sigma * np.sqrt((alpha / (2-alpha)) * (1 - (1-alpha)**(2*i))) ret = pd.DataFrame({ 'UCL':UCL, 'Center':center, 'LCL':LCL }) return ret ewma_df = data_sub_day.copy() ewma_df['ewma'] = ewma(data_sub_day['mean'], mu0=in_control_mean) ewma_params_df = ewma_params(mu0 = in_control_mean, sigma = ewma_df['MR'].mean() / 1.128, length=len(ewma_df['mean'])) ewma_df = pd.concat([ewma_df.reset_index(drop=True), ewma_params_df.reset_index(drop=True)], axis=1) fig, ax = plt.subplots(figsize=(15,5)) sns.relplot(x='Date', y='ewma', data=ewma_df, kind='line', ax=ax, color='blue', marker='o') sns.lineplot(x='Date', y='UCL', data=ewma_df, drawstyle='steps-pre', ax=ax, color='red') sns.lineplot(x='Date', y='LCL', data=ewma_df, drawstyle='steps-pre', ax=ax, color='red') sns.relplot(x='Date', y='Center', data=ewma_df, kind='line', ax=ax, color='black') plt.close() plt.close() plt.title('EWMA Chart') plt.ylabel('EWMA') ewma_df['Date'][ewma_df['ewma'] > 
ewma_df['UCL'][-1:].values[0]].reset_index(drop=True)[0] # #### Calculate cusum def cusum(arg, mu0=None): if mu0 is None: mu0 = arg.mean() # Calculate deviation dev = arg - mu0 # Calculate cusum cusum = dev.cumsum() return cusum cusum_df = data_sub_day.copy() cusum_df['cusum'] = cusum(cusum_df['mean'], mu0=in_control_mean) fig, ax = plt.subplots(figsize=(15,5)) sns.relplot(x='Date', y='cusum', data=cusum_df, kind='line', ax=ax, color='blue') plt.close() # #### Tabular cusum def tab_cusum(arg, mu0, sigma, k=0.5, h=4): # Set up parameters K = k * sigma H = h * sigma tol_pos = mu0 + K tol_neg = mu0 - K dev_pos = (arg - tol_pos).tolist() dev_neg = (tol_neg - arg).tolist() C_pos = [0] * (len(arg) + 1) C_neg = [0] * (len(arg) + 1) # Start loop for i in range(1, (len(arg) + 1)): C_pos[i] = np.max([0, dev_pos[i-1] + C_pos[i-1]]) C_neg[i] = np.max([0, dev_neg[i-1] + C_neg[i-1]]) ret = pd.DataFrame({ 'C_pos':C_pos, 'C_neg':C_neg, 'UCL':H }) ret = ret.iloc[1:, ] ret['C_neg'] = -1 * ret['C_neg'] ret['LCL'] = -1 * ret['UCL'] # ret['xi'] = arg.tolist() return(ret) tab_cusum_df = data_sub_day.copy() tab_cusum_params_df = tab_cusum(tab_cusum_df['mean'], mu0=in_control_mean, sigma=tab_cusum_df['MR'].mean() / 1.128) tab_cusum_df = pd.concat([tab_cusum_df.reset_index(drop=True), tab_cusum_params_df.reset_index(drop=True)], axis=1) fig, ax = plt.subplots(figsize=(15,5)) sns.relplot(x='Date', y='C_pos', data=tab_cusum_df, kind='line', ax=ax, color='blue', marker='o') sns.relplot(x='Date', y='C_neg', data=tab_cusum_df, kind='line', ax=ax, color='green', marker='o') sns.lineplot(x='Date', y='UCL', data=tab_cusum_df, drawstyle='steps-pre', ax=ax, color='red') sns.lineplot(x='Date', y='LCL', data=tab_cusum_df, drawstyle='steps-pre', ax=ax, color='red') plt.close() plt.close() plt.title('Tabular Cusum Chart') plt.ylabel('Cusum') tab_cusum_df['Date'][tab_cusum_df['C_pos'] > tab_cusum_df['UCL'][-1:].values[0]].reset_index(drop=True)[0]
notebooks/1.0-hwant-process-diabetes-data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # from Model1

# +
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode, StatisticsError

from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input

USE_WEBCAM = True  # If false, loads video file source

# parameters for loading data and images
emotion_model_path = './models/emotion_model.hdf5'
emotion_labels = get_labels('fer2013')

# hyper-parameters for bounding boxes shape
frame_window = 10           # number of recent predictions smoothed with the mode
emotion_offsets = (20, 40)  # extra padding applied around each detected face

# loading models
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# rolling window of recent emotion labels
emotion_window = []

# starting video streaming
cv2.namedWindow('window_frame')

# Select video or webcam feed.
# BUG FIX: the original also opened an unused second webcam handle
# (`video_capture`) and set CAP_PROP_FPS on that unused handle on every
# loop iteration; we open a single capture and configure it once.
if USE_WEBCAM:
    cap = cv2.VideoCapture(0)            # Webcam source
else:
    cap = cv2.VideoCapture('./inp.mp4')  # Video file source
cap.set(cv2.CAP_PROP_FPS, 10)

while cap.isOpened():
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    print("fps:", fps)

    ret, bgr_image = cap.read()
    if not ret:
        # BUG FIX: end of stream / camera failure -- the original passed a
        # None frame to cv2.resize and crashed.
        break
    bgr_image = cv2.resize(bgr_image, (640, 360))

    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
                                          minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            # Face box plus offsets fell outside the frame -> empty crop.
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except StatisticsError:
            # No unique mode yet -- skip drawing for this face.
            continue

        # Colour the annotation by emotion, scaled by prediction confidence.
        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))
        color = color.astype(int)
        color = color.tolist()

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45, 1, 1)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imshow('window_frame', bgr_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
# -

# # from Model2

# +
import numpy as np
import cv2
from keras.preprocessing import image
import time

# -----------------------------
# opencv initialization
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')

# -----------------------------
# face expression recognizer initialization
from keras.models import model_from_json
model = model_from_json(open("./models/facial_expression_model_structure.json", "r").read())
model.load_weights('./models/facial_expression_model_weights.h5')  # load weights

# -----------------------------
emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')

# cap = cv2.VideoCapture("./inp.mp4")  # process videos
cap = cv2.VideoCapture(0)  # process real time web-cam

frame = 0
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        # BUG FIX: the original resized a None frame once the stream ended.
        break
    img = cv2.resize(img, (640, 360))
    img = img[0:308, :]  # crop the bottom strip of the frame

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (64, 64, 64), 2)  # highlight detected face

        detected_face = img[int(y):int(y + h), int(x):int(x + w)]            # crop detected face
        detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)      # transform to gray scale
        detected_face = cv2.resize(detected_face, (48, 48))                  # resize to 48x48

        img_pixels = image.img_to_array(detected_face)
        img_pixels = np.expand_dims(img_pixels, axis=0)
        img_pixels /= 255  # pixels are in scale of [0, 255]; normalize to [0, 1]

        predictions = model.predict(img_pixels)  # probabilities of 7 expressions
        max_index = np.argmax(predictions[0])

        # semi-transparent background panel for the expression list
        overlay = img.copy()
        opacity = 0.4
        cv2.rectangle(img, (x + w + 10, y - 25), (x + w + 150, y + 115), (64, 64, 64), cv2.FILLED)
        cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)

        # connect face and expressions
        cv2.line(img, (int((x + x + w) / 2), y + 15), (x + w, y - 20), (255, 255, 255), 1)
        cv2.line(img, (x + w, y - 20), (x + w + 10, y - 20), (255, 255, 255), 1)

        # list every expression with its probability next to the face
        for i in range(len(predictions[0])):
            emotion = "%s %s%s" % (emotions[i], round(predictions[0][i] * 100, 2), '%')
            color = (255, 255, 255)
            cv2.putText(img, emotion, (int(x + w + 15), int(y - 12 + i * 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

    cv2.imshow('img', img)
    frame = frame + 1

    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

# kill open cv things
cap.release()
cv2.destroyAllWindows()
# -

# +
# Variant of the Model2 loop that prints only the single most likely
# emotion, colour-coded per class, instead of the full probability list.
import numpy as np
import cv2
from keras.preprocessing import image
import time

# opencv initialization
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')

# face expression recognizer initialization
from keras.models import model_from_json
model = model_from_json(open("./models/facial_expression_model_structure.json", "r").read())
model.load_weights('./models/facial_expression_model_weights.h5')  # load weights

emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')

cap = cv2.VideoCapture(0)  # process real time web-cam

frame = 0
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        # BUG FIX: guard against end-of-stream, as above.
        break
    img = cv2.resize(img, (640, 360))
    img = img[0:708, :]  # NOTE(review): no-op crop -- the frame is only 360 rows tall

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (64, 64, 64), 2)  # highlight detected face

        detected_face = img[int(y):int(y + h), int(x):int(x + w)]
        detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
        detected_face = cv2.resize(detected_face, (48, 48))

        img_pixels = image.img_to_array(detected_face)
        img_pixels = np.expand_dims(img_pixels, axis=0)
        img_pixels /= 255  # normalize [0, 255] -> [0, 1]

        predictions = model.predict(img_pixels)
        max_index = np.argmax(predictions[0])

        # connect face and label
        cv2.line(img, (int((x + x + w) / 2), y + 15), (x + w, y - 20), (255, 255, 255), 1)
        cv2.line(img, (x + w, y - 20), (x + w + 10, y - 20), (255, 255, 255), 1)

        emotions_dict = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
                         4: 'sad', 5: 'surprise', 6: 'neutral'}
        color_dict = {0: (255, 0, 0), 1: (255, 150, 0), 2: (0, 255, 0),
                      3: (0, 255, 255), 4: (0, 0, 255), 5: (255, 0, 255),
                      6: (255, 255, 0)}
        cv2.putText(img, emotions_dict[max_index], (int(x + w + 15), int(y - 15)),
                    cv2.FONT_HERSHEY_TRIPLEX, 1, color_dict[max_index], 1)

    cv2.imshow('img', img)
    frame = frame + 1

    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

cap.release()
cv2.destroyAllWindows()
# -
VIDEO__STREAM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python Learnings # ![](ghtop_images/header2.png) # ![](ghtop_images/montyp.jpg) # # Introducing Python Object Types # Based on Chapter 4 of "Learning Python", 4th Edition, <NAME>, O'Reilly # ## Some maths import math print(math.pi) print(math.sqrt(34)) # + import random print(random.random()) print(random.choice([1,2,3,4])) print(random.choice([1,2,3,4])) # - # ## Indexing # + S='Spam' print(S[1:3]) print(S[:-1]) print(S[1:]) print(S[:]) print(S*3) # - # ## Immutability # + # strings are not immutable S[0]='p' ##error # - # but we can create a new string S = 'z' +S[1:] print(S) #or use replaces # ## String specific methods # + print(S.find('pa'))#finds position #replace parts of a string print(S) print(S.replace('pa','XYZ')) #upper and lower case print(S.upper()) print(S.lower()) # + line='aaa,bbb,cccc,dd d\n' #split based on something print(line.split(',')) #creates a list print(type(line.split(','))) # strip out whitespace on rhs print(line.rstrip()) # - # ## Formatting # + print('%s, eggs, and %s' % ('spam', 'SPAM!')) print('{0}, eggs, and {1}'.format('spam', 'SPAM!')) # - # ## Help # # Put into help( ) to get help on it help(S.format) # ## Lists # # The Python list object is the most general sequence provided by the language. # Lists are positionally ordered collections of arbitrarily typed objects, and they have no fixed size. # They are also mutableโ€”unlike strings, lists can be modified in-place by assignment to offsets as well as a variety of list method calls # + #list of different types L =[123, 'spam',1.23] print(L) #access print(L[2]) #append L.append('NI') print(L) #get rid of one pop! 
L.pop(0) print(L) M=['aa','jeji','boio','popo','gsss','zulu','ccc'] #sort M.sort() print(M) M.reverse() print(M) # - # ## Comprehensions # Python includes a more advanced operation known as a list comprehension expression, which turns out to be a powerful way to process structures like our matrix. # + M=[[1, 2, 3], [4, 5,6], [7, 8, 9]] print(M[1][2]) # this is a comprehension col1 = [row[1] for row in M] print(col1) #put if statement in print([row[1] for row in M if row[1]%2 ==0]) # - #we can do this for a row print(M[0][:]) # we can't do this! for a column M[1,:] # ## Dictionaries # Python dictionaries are something completely different (Monty Python reference intended)โ€”they are not sequences at all, but are instead known as mappings. Mappings are also collections of other objects, but they store objects by key instead of by relative position. # In fact, mappings donโ€™t maintain any reliable left-to-right order; they simply map keys to associated values. Dictionaries, the only mapping type in Pythonโ€™s core objects set, are also mutable: they may be changed in-place and can grow and shrink on demand, like lists. # + #create dict D = {'food':'Spam','quality':4, 'color':'pink'} print(D) #or create by key assignment D={} D['food']='Spam' D['quality']=4 D['color']='pink' print(D) #index it print(D['food']) # + # Nesting #what if the info is more complex? Nest rec = {'name':{'first':'Bob','last':'Smith'}, 'job':['dev','mgr'], 'age':40.5} #index them print(rec['name']) print(rec['name']['last']) print(rec['job'][0]) #or add more NB job is a list rec['job'].append('janitor') print(rec) #keys are 1st bit print(rec.keys()) # - # ## Tuples # roughly like a list that cannot be changedโ€”tuples are sequences, like lists, but they are immutable, like strings. 
Syntactically, they are coded in parentheses instead of square brackets, and they support arbitrary types, arbitrary nesting, and the usual sequence # operations: # + T=(1,2,3,4) print(len(T)) #concatenation print(T+(5,6)) #indexing print(T[0]) ## or T = ('spam', 3.0, [11, 22, 33]) print(T) # - # # Numeric Types # Based on Chapter 5 of "Learning Python", 4th Edition, <NAME>, O'Reilly print(type(3)) print(type(3.0)) # + # integer division or floor print(102.2//3.2) #normal division print(102.2/3.2) #to the power print(3**2) #remainder print(100%3) #complex numbers j or J print((1j +2)*3J) # - (52-6)*5*7.5*10*.89 10*.89*5*7.5 # # Class Methods # ### Classes and Instances # # - Classes define the behavior of all instances of a specific class. # # - Each variable of a specific class is an instance or object. # # - Objects can have attributes, which store information about the object. # # - You can make objects do work by calling their methods. # # - The first parameter of the methods (self) represents the current instance. # # - Methods are just like functions, but they can only be used through a class. # # ### Special methods # # - Special methods start and end with __. (two underscores) # # - Special methods have specific names, like __init__ for the constructor or __str__ for the conversion to string. # Defining a class class ClassName: def method_name(self, other_parameters): body_of_method # Defining a class with a method class ClassName: """Documentation for the class.""" def method_name(self, other_parameters): """Documentation for the method.""" body_of_method # ### Object Composition # # You can have a situation where two different classes are related, but there is no inheritance going on. This is referred to as composition -- where one class makes use of code contained in another class. For example, imagine we have a Package class which represents a software package. It contains attributes about the software package, like name, version, and size. 
We also have a Repository class which represents all the packages available for installation. While thereโ€™s no inheritance relationship between the two classes, they are related. The Repository class will contain a dictionary or list of Packages that are contained in the repository. Let's take a look at an example Repository class definition: class Repository: ... def __init__(self): ... self.packages = {} ... def add_package(self, package): ... self.packages[package.name] = package ... def total_size(self): ... result = 0 ... for package in self.packages.values(): ... result += package.size ... return result # In the constructor method, we initialize the packages dictionary, which will contain the package objects available in this repository instance. We initialize the dictionary in the constructor to ensure that every instance of the Repository class has its own dictionary. # # We then define the add_package method, which takes a Package object as a parameter, and then adds it to our dictionary, using the package name attribute as the key. # # Finally, we define a total_size method which computes the total size of all packages contained in our repository. This method iterates through the values in our repository dictionary and adds together the size attributes from each package object contained in the dictionary, returning the total at the end. In this example, weโ€™re making use of Package attributes within our Repository class. Weโ€™re also calling the values() method on our packages dictionary instance. Composition allows us to use objects as attributes, as well as access all their attributes and methods.
_notebooks/2021-10-04-PythonBook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# # Image Classification with Logistic Regression (Minimal)

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Uncomment and run the commands below if imports fail
# # !conda install numpy pytorch torchvision cpuonly -c pytorch -y
# # !pip install matplotlib --upgrade --quiet
# !pip install jovian --upgrade --quiet
# -

# Imports
import torch
import jovian
import torchvision
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
from torch.utils.data import random_split
from torch.utils.data import DataLoader

# +
# Hyperparameters
batch_size = 128
learning_rate = 0.001

# Other constants
input_size = 28*28   # MNIST images are 28x28 pixels, flattened to 784 features
num_classes = 10     # digits 0-9
# -

# Record this run's hyperparameters with jovian.
jovian.reset()
jovian.log_hyperparams(batch_size=batch_size, learning_rate=learning_rate)

# ## Dataset & Data loaders

# +
# Download dataset
dataset = MNIST(root='data/', train=True, transform=transforms.ToTensor(), download=True)

# Training validation & test dataset (50k train / 10k validation)
train_ds, val_ds = random_split(dataset, [50000, 10000])
test_ds = MNIST(root='data/', train=False, transform=transforms.ToTensor())

# Dataloaders; eval loaders use a doubled batch size (no gradients kept)
train_loader = DataLoader(train_ds, batch_size, shuffle=True)
val_loader = DataLoader(val_ds, batch_size*2)
test_loader = DataLoader(test_ds, batch_size*2)
# -

# Show one sample to sanity-check the data pipeline.
image, label = train_ds[0]
plt.imshow(image[0], cmap='gray')
print('Label:', label)

# ## Model

# +
class MnistModel(nn.Module):
    """Logistic regression: a single linear layer over flattened pixels."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, xb):
        # Flatten (batch, 1, 28, 28) -> (batch, 784). The returned logits
        # are un-normalized; F.cross_entropy applies softmax internally.
        xb = xb.reshape(-1, 784)
        out = self.linear(xb)
        return out

    def training_step(self, batch):
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        return loss

    def validation_step(self, batch):
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        acc = accuracy(out, labels)          # Calculate accuracy
        # detach() so stored metrics do not keep the autograd graph alive.
        return {'val_loss': loss.detach(), 'val_acc': acc.detach()}

    def validation_epoch_end(self, outputs):
        # Average the per-batch metrics into one number per epoch.
        batch_losses = [x['val_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()  # Combine losses
        batch_accs = [x['val_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()     # Combine accuracies
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        # Progress line printed once per training epoch.
        print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))

model = MnistModel()
# -

# ## Training

def accuracy(outputs, labels):
    # Fraction of predictions (argmax over the class logits) matching labels.
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))

# +
def evaluate(model, val_loader):
    # One full validation pass; aggregates per-batch metrics.
    outputs = [model.validation_step(batch) for batch in val_loader]
    return model.validation_epoch_end(outputs)

def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
    # Plain SGD training loop; returns per-epoch validation history.
    history = []
    optimizer = opt_func(model.parameters(), lr)
    for epoch in range(epochs):
        # Training Phase
        for batch in train_loader:
            loss = model.training_step(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # Validation phase
        result = evaluate(model, val_loader)
        model.epoch_end(epoch, result)
        history.append(result)
    return history
# -

# Baseline metrics before any training (random weights).
evaluate(model, val_loader)

history = fit(5, 0.001, model, train_loader, val_loader)

# Plot validation accuracy over the epochs.
accuracies = [r['val_acc'] for r in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');

# Evaluate on test dataset
result = evaluate(model, test_loader)
result

jovian.log_metrics(test_acc=result['val_acc'], test_loss=result['val_loss'])

# ## Prediction

def predict_image(img, model):
    # img is a single (1, 28, 28) tensor; unsqueeze adds a batch dimension.
    xb = img.unsqueeze(0)
    yb = model(xb)
    _, preds = torch.max(yb, dim=1)
    return preds[0].item()

img, label = test_ds[919]
plt.imshow(img[0], cmap='gray')
print('Label:', label, ', Predicted:', predict_image(img, model))

# ## Save and upload

torch.save(model.state_dict(), 'mnist-logistic.pth')

jovian.commit(project='mnist-logistic-minimal', environment=None, outputs=['mnist-logistic.pth'])

jovian.commit(project='mnist-logistic-minimal', environment=None, outputs=['mnist-logistic.pth'])  # Kaggle commit fails sometimes, so try again..
Python Tutorial PyTorch/mnist-logistic-minimal-v-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Classify the iris dataset with a support-vector machine, holding out
# the final ten samples as a tiny evaluation set.

# +
from sklearn import datasets
from sklearn.svm import SVC

# +
iris = datasets.load_iris()
print(iris.data.shape)

# +
print(iris.target.shape)

# +
import matplotlib.pyplot as plt

# +
# Sepal width vs. petal width, coloured by species.
plt.scatter(iris.data[:, 1], iris.data[:, 3], c=iris.target, cmap='rainbow')

# +
# Train on everything except the last ten samples.
classifier = SVC(gamma='scale')
classifier.fit(iris.data[:-10], iris.target[:-10])

# +
# Compare predictions on the held-out rows against the true labels.
predictions = list(classifier.predict(iris.data[-10:]))
expected = iris.target[-10:]
for predicted_label, expected_label in zip(predictions, expected):
    print('predicted: ' + str(predicted_label) + ' / expected: ' + str(expected_label))

# +
# Dual coefficients of the support vectors in the decision function.
classifier.dual_coef_

# +
Tutorials/Lecture003_iris_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Recovering exact parameters

# These are needed so the git commands below take immediate effect.
# %load_ext autoreload
# %autoreload 2

# +
import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoLowRankMultivariateNormal
from pyro.optim import ClippedAdam
from torch.distributions import kl_divergence
from matplotlib import pyplot

# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
# %config InlineBackend.rc = {'figure.facecolor': (1, 1, 1, 1)}
# -

class Model:
    # A randomly-parameterized low-rank multivariate normal target.
    # Calling the instance samples from it, so it doubles as a pyro model.
    def __init__(self, dim, rank):
        # Parameters are drawn once at construction and then fixed.
        self.loc = dist.Laplace(0, 1).sample([dim])
        self.cov_factor = torch.randn(dim, rank)
        self.cov_diag = dist.Exponential(1).sample([dim])

    def get_prior(self):
        return dist.LowRankMultivariateNormal(
            self.loc, self.cov_factor, self.cov_diag)

    def __call__(self):
        pyro.sample("z", self.get_prior())

def train(dim, rank):
    # Fit an AutoLowRankMultivariateNormal guide to the model and record,
    # at every SVI step, both the ELBO loss (normalized by dim) and the
    # exact KL divergence between guide posterior and true distribution.
    pyro.clear_param_store()
    pyro.set_rng_seed(123456789)  # fixed seed -> identical Model across branches
    model = Model(dim, rank)
    guide = AutoLowRankMultivariateNormal(model, rank=rank, init_scale=0.01)
    optim = ClippedAdam({"lr": 0.01})
    elbo = Trace_ELBO()
    svi = SVI(model, guide, optim, elbo)
    losses = []
    kls = []
    for step in range(2001):
        loss = svi.step() / dim
        with torch.no_grad():
            # Exact KL is computable because both sides are LowRankMVN.
            kl = kl_divergence(guide.get_posterior(), model.get_prior()).item()
        losses.append(loss)
        kls.append(kl)
        if step % 200 == 0:
            print("step {: >4} loss = {:0.8g}, kl = {:0.8g}".format(step, loss, kl))
    return losses, kls

results = {}

# Shell escape: run the benchmark on the dev branch first...
!(cd ~/pyro ; git checkout dev)

results["dev-10-3"] = train(10, 3)

results["dev-100-10"] = train(100, 10)

results["dev-1000-30"] = train(1000, 30)

# ...then on the feature branch, for comparison.
!(cd ~/pyro ; git checkout auto-lowrank-mvn-reparam)

results["new-10-3"] = train(10, 3)

results["new-100-10"] = train(100, 10)

results["new-1000-30"] = train(1000, 30)

def plot(dim, rank):
    # Overlay the KL traces of both branches for one (dim, rank) setting.
    pyplot.figure(figsize=(9,3))
    for name, (losses, kls) in sorted(results.items()):
        if name.endswith(f"-{dim}-{rank}"):
            pyplot.plot(kls, label=name)
    pyplot.title(f"dim={dim} rank={rank}")
    pyplot.ylabel('kl(q,p)')
    pyplot.yscale('log')
    pyplot.xlabel('svi step')
    pyplot.legend()
    pyplot.tight_layout()

plot(10, 3)

plot(100, 10)

plot(1000, 30)
2019-11-lowrank/exact.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## This notebook creates one dataframe from all participants data
# ## It also removes 1% of the data as this is corrupted

# +
# %matplotlib inline
from scipy.odr import *
from scipy.stats import *
import numpy as np
import pandas as pd
import os
import time
import matplotlib.pyplot as plt
import ast
from multiprocessing import Pool, cpu_count

import scipy
from IPython import display
from matplotlib.patches import Rectangle

from sklearn.metrics import mean_squared_error
import json

import scipy.stats as st
from sklearn.metrics import r2_score

from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d

import copy

from sklearn.model_selection import LeaveOneOut, LeavePOut
# -


# +
def cast_to_int(row):
    """Convert one row's pixel strings to a uint8 array.

    Negative readings are clamped to 0.  Rows that cannot be parsed at
    all (corrupted CSV lines) yield None so they can be dropped later.
    """
    try:
        # row[2:-1] skips the leading metadata fields and the trailing
        # empty field produced by the terminating separator.
        return np.array([a if float(a) >= 0 else 0 for a in row[2:-1]],
                        dtype=np.uint8)
    except Exception:
        # Corrupted row: flag it for removal instead of aborting the load.
        return None


def load_csv(file):
    """Load one participant's study CSV and decode its Image column."""
    temp_df = pd.read_csv(file, delimiter=";")
    temp_df.Image = temp_df.Image.str.split(',')
    temp_df.Image = temp_df.Image.apply(cast_to_int)
    return temp_df
# -

# %%time
data_files = ["DataStudyCollection/%s" % file
              for file in os.listdir("DataStudyCollection")
              if file.endswith(".csv") and "studyData" in file]
print(data_files)
# Leave two cores free for the rest of the system.  The context manager
# guarantees the worker processes are terminated even if a load fails
# (the original created the Pool manually and only called close()).
with Pool(cpu_count() - 2) as pool:
    df_lst = pool.map(load_csv, data_files)
dfAll = pd.concat(df_lst)

# Drop the rows whose Image could not be decoded (cast_to_int -> None).
df = dfAll[dfAll.Image.notnull()]
len(df)

print("loaded %s values" % len(dfAll))
print("removed %s values (thats %s%%)" % (len(dfAll) - len(df), round((len(dfAll) - len(df)) / len(dfAll) * 100, 3)))
print("new df has size %s" % len(df))

df = df.reset_index(drop=True)
df.head()

df.to_pickle("DataStudyCollection/AllData.pkl")

sorted(df.userID.unique())
python/Step_02_ReadData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from sklearn.model_selection import train_test_split pd.set_option('display.max_columns', None) pd.options.display.max_rows = 1000 df = pd.read_csv('D:/cap/capstone2/data/processed/processed.csv') df.head() # + y = df.pop('event_coded') X = df print(y.head(3)) print(X.head(3)) # - Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size = 0.30, random_state = 42)
notebooks/3.1 mjg TrainTestSplt.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Evaluation of saved predictions (geologo.xlsx): confusion matrix,
# classification report, and ROC curve.

# %autosave 0

import itertools
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score


# + code_folding=[]
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Normalize each row (true class) to sum to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    #print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Counts as integers, normalized values with two decimals.
    fmt = '.2f' if normalize else 'd'
    # Threshold deciding the text colour of each cell.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # NOTE(review): the standard recipe compares cm[i, j] > thresh; the
        # extra /1.8 means white text only on very dark cells -- confirm intended.
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j]/1.8 > thresh else "black")

    plt.tight_layout()
    # Axis labels are Portuguese for "True value" / "Predicted value".
    plt.ylabel('Valor Verdadeiro')
    plt.xlabel('Valor Previsto')
# -


def printcfm(y_test,y_pred,title='confusion matrix'):
    # Build and draw a (non-normalized) confusion matrix for the predictions.
    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)

    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=['Sem Perda','Perda'], title=title)


def plotRoc(y_real, y_pred_prob):
    # Generate ROC curve values: fpr, tpr, thresholds
    fpr, tpr, thresholds = roc_curve(y_real, y_pred_prob)
    # Calculate AUC
    auc = roc_auc_score(y_real, y_pred_prob)
    # Plot ROC curve
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr, tpr)
    plt.text(1, 0.5, "AUC: %3.3f" % (auc), {'color': 'C2', 'fontsize': 18}, va="bottom", ha="right")
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.show()


xl = pd.ExcelFile('geologo.xlsx')
'''
print(xl.sheet_names) # Imprime as planilhas disponรญveis no arquivo excel
print(type(xl)) # Imprime o tipo de arquivo
print(xl.sheet_names) # Imprime as planilhas
'''

# Parse the 'prev' sheet; trailing ';' suppresses notebook output.
df1 = xl.parse('prev');
df1.columns

# First column: true labels; second column: predictions.
y_real=df1.iloc[:,0]
y_pred=df1.iloc[:,1]

printcfm(y_real,y_pred,title='Confusion Matrix Base')

print(classification_report(y_real, y_pred))

plotRoc(y_real, y_pred)
Model-Study/geoPrediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Hallowtipz/DS-Unit-1-Sprint-3-Statistical-Tests-and-Experiments/blob/master/Jesse__Ghansah_LS_DS_131_Statistics_Probability_and_Inference.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

import pandas as pd
import numpy as np
import scipy.stats  # dont import whole lib

# Loading the data (UCI 1984 congressional voting records)
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data

# First look.  NOTE(review): this read treats the first record as a header
# row; the file is re-read correctly with header=None just below.
df = pd.read_csv('house-votes-84.data')

df.head()

df.shape

# fix the headers
header_names=['class_name','handicapped_infants','water_project',
              'budget_resolution','physician_fee_freeze', 'el_salvador_aid',
              'religious_groups_in_schools','anti_satellite_test_ban',
              'aid_to_nicaraguan_contras','mx_missile','immigration',
              'synfuels_corporation_cutback', 'education_spending',
              'superfund_right_to_sue','crime','duty_free_exports',
              'export_administration_act_south_africa']

df = pd.read_csv('house-votes-84.data', header=None, names=header_names)

df.head()

# Encode the votes numerically: yes -> 1, no -> 0, abstain/unknown -> NaN.
df = df.replace({'?':np.NaN,'n':0,'y':1})

df.head()

df.isna().sum()

df.describe()

# create samples dataframes, one per party
dem_sub = df[df['class_name'] == 'democrat']

dem_sub.head()

# check the shape
dem_sub.shape

# create republican samples dataframes
rep_sub = df[df['class_name'] == 'republican']

rep_sub.head()

issues =['handicapped_infants','water_project',
         'budget_resolution','physician_fee_freeze', 'el_salvador_aid',
         'religious_groups_in_schools','anti_satellite_test_ban',
         'aid_to_nicaraguan_contras','mx_missile','immigration',
         'synfuels_corporation_cutback', 'education_spending',
         'superfund_right_to_sue','crime','duty_free_exports',
         'export_administration_act_south_africa']

rep_sub.columns.tolist()

# The mean of democrats that voted yes for each issue
dem_sub[issues].mean()

# The mean of republicans that voted yes for each issue
rep_sub[issues].mean()

# stretch goals

# Refactor your code into functions so it's easy to rerun with arbitrary variables

def hypothesis_testing (a,b):
    """Two-sample t-test between the party vote columns `a` and `b`.

    NaNs (abstentions) are dropped via nan_policy='omit'.  Prints the test
    result and which party supports the issue more (at p < 0.01), and
    returns (t_stat, Pvalue) so callers can reuse the numbers.
    """
    # Using hypothesis testing, find an issue
    t_stat,Pvalue = scipy.stats.ttest_ind(a,b,nan_policy='omit')
    # BUG FIX: the original f-string printed a stray ".," between the values.
    print(f'Pvalue is: {Pvalue:.4f}, T-stat is: {t_stat}')
    # The interpretation below was left commented out in the original;
    # it implements the notebook's stated goal.
    if a.mean() > b.mean() and Pvalue < 0.01:
        # democrats support this more than republicans with p < 0.01
        print("This issue democrats support more than the republicans")
    elif a.mean() < b.mean() and Pvalue < 0.01:
        # republicans support this more than democrats with p < 0.01
        print("This issue republicans support more than the democrats")
    else:
        # no significant difference between the parties
        print("Both republicans and democrats support this issue")
    return t_stat, Pvalue

for issue in issues:
    print("")
    print(issue)
    hypothesis_testing(dem_sub[issue], rep_sub[issue])  # democrat column first
Jesse__Ghansah_LS_DS_131_Statistics_Probability_and_Inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# **Challenges in Representation Learning: Facial Expression Recognition Challenge**
# from https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data

# # Data Exploration
#
# Some data exploration, looking at the structure of the files etc.

# loading packages
import numpy as np
import pandas as pd
import os
import torch as th

import warnings
warnings.simplefilter('ignore')

indir = '/home/eileen/udacity/challenge2/FacialExpressions/fer2013'

# listing data files
for dirname, _, filenames in os.walk(indir):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# looking at readme file
with open(indir + '/README', 'r') as readme:
    contents = readme.read()
print(contents)

# looking at data file
with open(indir + '/fer2013.csv', 'r') as f:
    firstline = f.readline()
    print(firstline)  # print header
    contents = f.readline()
    print(contents)  # print first row with data

# # Data Preparation

# opening as panda dataframe and looking at "usage" column
df = pd.read_csv(indir + '/fer2013.csv')
df.Usage.unique()
df.shape
df.head()
df.info()

# ## creating 3 dataframes for training data, public testing data and private testing data

train = df.query('Usage == "Training"')
train.drop(columns=['Usage'], inplace=True)
train.reset_index(drop=True, inplace=True)
train.head()

public_test = df.query('Usage == "PublicTest"')
public_test.drop(columns=['Usage'], inplace=True)
public_test.reset_index(drop=True, inplace=True)
public_test.head()

private_test = df.query('Usage == "PrivateTest"')
private_test.drop(columns=['Usage'], inplace=True)
private_test.reset_index(drop=True, inplace=True)
private_test.head()

train.shape
public_test.shape
private_test.shape

# ## converting to PyTorch tensors

def mk_labeltensor(dframe):
    """Create a 1-D torch tensor from the integer 'emotion' label column.

    dframe = dataframe
    """
    emotions = dframe['emotion'].values
    return th.tensor(emotions)

def mk_imagetensor(dframe):
    """Create a (rows, 2304) torch tensor from the 'pixels' column.

    dframe = dataframe whose 'pixels' column holds 48*48 space-separated
    grayscale values per row.
    """
    pixels = dframe['pixels'].str.split(' ')
    dframe_images_list = pixels.apply(lambda x: [int(i) for i in x])
    # BUGFIX: th.tensor cannot infer a dtype from a pandas Series of lists
    # (object dtype); convert to a plain nested Python list first.
    return th.tensor(dframe_images_list.tolist())

train_labels = mk_labeltensor(train)
train_images = mk_imagetensor(train)
print(train_labels.shape)
print(train_images.shape)

public_test_labels = mk_labeltensor(public_test)
public_test_images = mk_imagetensor(public_test)
print(public_test_labels.shape)
print(public_test_images.shape)

private_test_labels = mk_labeltensor(private_test)
private_test_images = mk_imagetensor(private_test)
print(private_test_labels.shape)
print(private_test_images.shape)

# # Linear Model
# At first we will try out a linear model to recognize facial expressions.
# ## Defining the Model

from torch import nn, optim
import torch.nn.functional as F

df.emotion.nunique()

# for reproducible results:
seed = 30
np.random.seed(seed)
th.manual_seed(seed)

# input: 48x48 pixels = 2304
# output: 7 different emotions
class Classifier(nn.Module):
    """Fully-connected net: 2304 flattened pixels -> 7 emotion log-probs."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2304, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 64)
        self.fc4 = nn.Linear(64, 7)
        # Dropout module with 0.2 drop probability
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # making sure input tensor is flattened
        x = x.view(x.shape[0], -1)
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        x = self.dropout(F.relu(self.fc3(x)))
        x = F.log_softmax(self.fc4(x), dim=1)
        return x

# ## Training the Model

# +
model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

epochs = 15
batch_size = 100
batches = round(train.shape[0] / batch_size)
acc, train_losses, test_losses = [], [], []

for e in range(epochs):
    running_loss = 0
    for i in range(batches):
        # Clear gradients
        optimizer.zero_grad()
        # Forward propagation
        start = i * batch_size
        end = start + batch_size
        log_ps = model(train_images[start:end].float())
        # Calculate loss
        loss = criterion(log_ps, train_labels[start:end])
        # Calculate gradients
        loss.backward()
        # Update parameters
        optimizer.step()
        running_loss += loss.item()
    else:
        test_loss = 0
        accuracy = 0
        # Validation
        with th.no_grad():
            model.eval()
            # Predict test dataset
            log_ps = model(public_test_images.float())
            test_loss += criterion(log_ps, public_test_labels)
            ps = th.exp(log_ps)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == public_test_labels.view(*top_class.shape)
            accuracy += th.mean(equals.type(th.FloatTensor))
        model.train()
        # BUGFIX: running_loss is accumulated over `batches` iterations, so it
        # must be averaged by `batches`, not by `batch_size` (which only
        # coincidentally had a similar magnitude).
        train_losses.append(running_loss / batches)
        test_losses.append(test_loss)
        acc.append(accuracy)
        print("Epoch: {}/{}.. ".format(e + 1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss / batches),
              "Test Loss: {:.3f}.. ".format(test_loss),
              "Test Accuracy: {:.3f}".format(accuracy))
# -

# ## Plotting the Loss and the Accuracy

# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
# -

eps = range(epochs)
plt.plot(eps, train_losses, label='Training loss')
plt.plot(eps, test_losses, label='Validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
#plt.ylim(0,6.0)
plt.legend(frameon=False);

plt.plot(eps, acc, 'b', label='Validation acc')
plt.title('validation accuracy');
plt.legend(['final accuracy = {:.2f}%'.format(accuracy * 100.0)])
plt.xlabel('epoch');

# ### Result
# The accuracy does not go above 25%. That is better than guesswork (7 choices
# of emotions would give about 14-15% accuracy when guessing), but not really
# great. A different model structure might lead to better results.

# # Convolutional Neural Network (CNN)
# A CNN model is our next try. Here we get very different results with
# different optimizers and learning rates. We have to try different structures.
# we need to restructure the data, so that pixels are represented in a 48x48 format:
train_images = train_images.view(train_images.shape[0], 48, 48)
public_test_images = public_test_images.view(public_test_images.shape[0], 48, 48)
private_test_images = private_test_images.view(private_test_images.shape[0], 48, 48)
print(train_images.shape)
print(public_test_images.shape)
print(private_test_images.shape)

# +
# using dataloader for the images and labels
import torch.utils.data

def make_dataloader(data, batch_size, shuffle):
    """Wrap a fer2013 dataframe in a DataLoader of (1, 48, 48) float images.

    data       = dataframe with 'pixels' (space-separated ints) and 'emotion'
    batch_size = minibatch size
    shuffle    = whether the loader shuffles each epoch
    """
    images, labels = data['pixels'], data['emotion']
    # np.fromstring(..., sep=' ') is deprecated; parse via str.split instead.
    images = np.array([np.array(image.split(), dtype=np.uint8) for image in images])
    images = images.reshape(images.shape[0], 1, 48, 48)
    dataset = torch.utils.data.TensorDataset(th.Tensor(images),
                                             th.Tensor(np.array(labels)).long())
    return th.utils.data.DataLoader(dataset=dataset, batch_size=batch_size,
                                    shuffle=shuffle)

trainloader = make_dataloader(train, 100, True)
testloader = make_dataloader(public_test, 100, True)
validloader = make_dataloader(private_test, 100, False)
# -

# ## With Adam Optimizer

# ### Defining the Model

# for reproducible results:
seed = 30
np.random.seed(seed)
# use the file's `th` alias; the original spelled this `torch.manual_seed`,
# which only worked because `import torch.utils.data` bound the `torch` name
th.manual_seed(seed)

class NetAdam(nn.Module):
    """Three conv layers + four fully-connected layers, log-softmax output."""

    def __init__(self):
        super(NetAdam, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 64)
        self.fc4 = nn.Linear(64, 7)
        # Dropout module with 0.2 drop probability
        self.dropout = nn.Dropout(p=0.2)
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(x.shape[0], -1)
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        x = self.dropout(F.relu(self.fc3(x)))
        x = F.log_softmax(self.fc4(x), dim=1)
        return x

# ### Training the Model

# +
model = NetAdam()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

epochs = 15
acc, train_losses, test_losses = [], [], []

for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Clear gradients
        optimizer.zero_grad()
        # Forward propagation
        log_ps = model(images)
        # Calculate loss
        loss = criterion(log_ps, labels)
        # Calculate gradients
        loss.backward()
        # Update parameters
        optimizer.step()
        running_loss += loss.item()
    else:
        test_loss = 0
        accuracy = 0
        # Validation
        with th.no_grad():
            for images, labels in testloader:
                model.eval()
                # Predict test dataset
                log_ps = model(images)
                test_loss += criterion(log_ps, labels)
                ps = th.exp(log_ps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += th.mean(equals.type(th.FloatTensor))
        model.train()
        train_losses.append(running_loss / len(trainloader))
        test_losses.append(test_loss / len(testloader))
        acc.append(accuracy / len(testloader))
        print("Epoch: {}/{}.. ".format(e + 1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss / len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss / len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy / len(testloader)))
# -

# ### Plotting Loss and Accuracy

eps = range(epochs)
plt.plot(eps, train_losses, label='Training loss')
plt.plot(eps, test_losses, label='Validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(frameon=False);

plt.plot(eps, acc, 'b', label='Validation accuracy')
plt.title('validation accuracy');
plt.legend(['final accuracy = {:.2f}%'.format(acc[-1] * 100)]);

# This gives us an **accuracy of 55.1%** on the test set. This is already
# quite good, but we want to try if we can get better results with the SGD
# optimizer.
# ## With SGD Optimizer

# ### Defining the Model

class NetSGD(nn.Module):
    """Same conv/fc architecture as NetAdam, with a 0.3 dropout probability."""

    def __init__(self):
        super(NetSGD, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 64)
        self.fc4 = nn.Linear(64, 7)
        # Dropout module with 0.3 drop probability
        self.dropout = nn.Dropout(p=0.3)
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(x.shape[0], -1)
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        x = self.dropout(F.relu(self.fc3(x)))
        x = F.log_softmax(self.fc4(x), dim=1)
        return x

# ### Training the Model

# +
modelSGD = NetSGD()
criterion = nn.NLLLoss()
# BUGFIX: the optimizer must update the parameters of the model being trained.
# The original passed model.parameters() (the Adam model), so modelSGD's
# weights were never updated by the training loop below.
optimizer = optim.SGD(modelSGD.parameters(), lr=0.001)

epochs = 150
acc, train_losses, test_losses = [], [], []

for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Clear gradients
        optimizer.zero_grad()
        # Forward propagation
        log_ps = modelSGD(images)
        # Calculate loss
        loss = criterion(log_ps, labels)
        # Calculate gradients
        loss.backward()
        # Update parameters
        optimizer.step()
        running_loss += loss.item()
    else:
        test_loss = 0
        accuracy = 0
        # Validation
        with th.no_grad():
            for images, labels in testloader:
                # BUGFIX: toggle eval/train on modelSGD, not the old `model`
                modelSGD.eval()
                # Predict test dataset
                log_ps = modelSGD(images)
                test_loss += criterion(log_ps, labels)
                ps = th.exp(log_ps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += th.mean(equals.type(th.FloatTensor))
        modelSGD.train()
        train_losses.append(running_loss / len(trainloader))
        test_losses.append(test_loss / len(testloader))
        acc.append(accuracy / len(testloader))
        print("Epoch: {}/{}.. ".format(e + 1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss / len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss / len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy / len(testloader)))
# -

# ### Plotting the Loss and Accuracy

eps = range(epochs)
plt.plot(eps, train_losses, label='Training loss')
plt.plot(eps, test_losses, label='Validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(frameon=False);

plt.plot(eps, acc, 'b', label='Validation accuracy')
plt.title('validation accuracy');
plt.legend(['final accuracy = {:.2f}%'.format(acc[-1] * 100)]);

# Even after 150 epochs the **accuracy of 53.4%** is not higher than with
# Adam-Optimizer model with just 15 epochs, but training takes a lot longer.
# Therefore this model is not an improvement. We will try another model.

# # Another CNN Model

# ## Data Preparation

data = pd.read_csv(indir + '/fer2013.csv')
print(data.shape)

# groupby('Usage') yields groups in sorted key order:
# PrivateTest, PublicTest, Training.
val_orig, test_orig, train_orig = data.groupby('Usage')
print('val:', val_orig[0])
print('test:', test_orig[0])
print('train:', train_orig[0])
print(val_orig[1].head(2))

val_data_orig, test_data_orig, train_data_orig = val_orig[1], test_orig[1], train_orig[1]
print('val_data_orig.shape:  ', val_data_orig.shape)
print('test_data_orig.shape: ', test_data_orig.shape)
print('train_data_orig.shape:', train_data_orig.shape)

# +
def prepare(data):
    """Return (images, labels) as numpy arrays.

    images: (rows, 48, 48, 3) uint8, grayscale replicated to 3 channels.
    labels: (rows, 1) emotion ids.
    """
    # np.fromstring(..., sep=' ') is deprecated; parse via str.split instead.
    images = np.array([np.array(image.split(), dtype=np.uint8)
                       for image in data['pixels']])
    images = images.reshape(images.shape[0], 48, 48)
    images = np.stack((images,) * 3, axis=-1)
    labels = np.array(data['emotion'])
    return images, labels.reshape(len(labels), 1)

val_data_orig_X, val_data_orig_Y = prepare(val_data_orig)
test_data_orig_X, test_data_orig_Y = prepare(test_data_orig)
train_data_orig_X, train_data_orig_Y = prepare(train_data_orig)

print('val_data_orig_X.shape:  ', val_data_orig_X.shape)
print('val_data_orig_Y.shape:  ', val_data_orig_Y.shape)
print('test_data_orig_X.shape: ', test_data_orig_X.shape)
print('test_data_orig_Y.shape: ', test_data_orig_Y.shape)
print('train_data_orig_X.shape:', train_data_orig_X.shape)
print('train_data_orig_Y.shape:', train_data_orig_Y.shape)
# -

# One-hot encode the labels (7 emotion classes).
train_data_Y = np.eye(7)[train_data_orig_Y.reshape(-1)]
train_data_Y
val_data_Y = np.eye(7)[val_data_orig_Y.reshape(-1)]
test_data_Y = np.eye(7)[test_data_orig_Y.reshape(-1)]

emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

# sanity check: show one sample image with its label
index = 7
plt.imshow(train_data_orig_X[index])
print('train_data_orig_Y:', train_data_orig_Y[index][0])
print('emotion:', emotions[np.where(train_data_Y[index, :] == 1)[0][0]])

# ## Training the Model

import tensorflow as tf
from tensorflow.python.framework import ops
import math

def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """Create TF1 placeholders for images (n_H0 x n_W0 x n_C0) and labels (n_y)."""
    X = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, n_C0])
    Y = tf.placeholder(tf.float32, shape=[None, n_y])
    return X, Y

X, Y = create_placeholders(48, 48, 3, 7)
print('X:', X)
print('Y:', Y)

def initialize_parameters():
    """Create the two conv filter banks with Xavier initialization."""
    W1 = tf.get_variable('W1', [4, 4, 3, 8],
                         initializer=tf.contrib.layers.xavier_initializer())
    W2 = tf.get_variable('W2', [2, 2, 8, 16],
                         initializer=tf.contrib.layers.xavier_initializer())
    parameters = {'W1': W1, 'W2': W2}
    return parameters

tf.reset_default_graph()
with tf.Session() as sess_test:
    parameters = initialize_parameters()
    init = tf.global_variables_initializer()
    sess_test.run(init)
    print("W1 = " + str(parameters["W1"].eval()[1, 1, 1]))
    print("W2 = " + str(parameters["W2"].eval()[1, 1, 1]))

def forward_propagation(X, parameters):
    """CONV->RELU->POOL twice, then three fully-connected layers.

    Returns the raw logits Z5 (softmax is applied by the loss).
    """
    W1 = parameters['W1']
    W2 = parameters['W2']
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
    A1 = tf.nn.relu(Z1)
    P1 = tf.nn.max_pool(A1, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='SAME')
    A2 = tf.nn.relu(Z2)
    P2 = tf.nn.max_pool(A2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    F = tf.contrib.layers.flatten(P2)
    Z3 = tf.contrib.layers.fully_connected(F, 120, activation_fn=tf.nn.relu)
    Z4 = tf.contrib.layers.fully_connected(Z3, 64, activation_fn=tf.nn.relu)
    Z5 = tf.contrib.layers.fully_connected(Z4, 7, activation_fn=None)
    return Z5

# +
tf.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(48, 48, 3, 7)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    init = tf.global_variables_initializer()
    sess.run(init)
    a = sess.run(Z3, {X: np.random.randn(2, 48, 48, 3), Y: np.random.randn(2, 7)})
    print("Z = " + str(a))
# -

def compute_cost(Z, Y):
    """Mean softmax cross-entropy between logits Z and one-hot labels Y."""
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=Z, labels=Y))
    return cost

# +
tf.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(48, 48, 3, 7)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    init = tf.global_variables_initializer()
    sess.run(init)
    a = sess.run(cost, {X: np.random.randn(4, 48, 48, 3), Y: np.random.randn(4, 7)})
    print("cost = " + str(a))
# -

def random_mini_batches(X, Y, mini_batch_size=64):
    """Shuffle (X, Y) in unison and partition into minibatches.

    Returns a list of (mini_batch_X, mini_batch_Y) tuples; the last tuple
    may be smaller than mini_batch_size.
    """
    m = X.shape[0]  # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches

def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0005,
          num_epochs=30, minibatch_size=50, print_cost=True):
    """Build, train and evaluate the two-conv-layer CNN.

    Returns (train_accuracy, test_accuracy, parameters).
    """
    ops.reset_default_graph()  # rerun the model without overwriting tf variables
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []  # To keep track of the cost
    accurancies = []

    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)

    # BUGFIX: gpu_options was created but never used; pass it to the session
    # so the GPU memory-fraction limit actually takes effect.
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):
            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size)
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                _, temp_cost = sess.run([optimizer, cost],
                                        feed_dict={X: minibatch_X, Y: minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(minibatch_cost)

            # Calculate the correct predictions
            predict_op = tf.argmax(Z3, 1)
            correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
            # Calculate accuracy on the test set
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
            test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
            accurancies.append(test_accuracy)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # plot the accurancies
        plt.plot(np.squeeze(accurancies))
        plt.ylabel('accurancy')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Calculate the correct predictions
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

    return train_accuracy, test_accuracy, parameters

_, _, parameters = model(train_data_orig_X / 255, train_data_Y,
                         test_data_orig_X / 255, test_data_Y)

# This model gives us an **accuracy score of 51.1%**, but with much faster
# training. We will try another model type to see if we can improve our
# accuracy.
Facial Expression Recognition/facial_expr_report_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.1 (''.ibm_adv'': venv)' # name: pythonjvsc74a57bd0f7177c0ae9b5f17be49d5fee0218316f97afdf27c50d7946af4bb7924b3b993c # --- # # IBM Advanced Data Science Capstone Project # ## Sentiment Analysis of Amazon Customer Reviews # ### <NAME>, Apr 2021 # ## Extract, Transform, Load (ETL) # # This notebook contains the comprehensive step-by-step process for preparing the raw data to be used in the project. The data that we are using is available in the form of two csv files (train.csv/ test.csv) in a shared Google Drive. We have downloaded these files to an **IBM Cloud Storage** service which is publicly accessible. In order to use this data in our project, we will convert these files into **parquet files** and upload them to the same IBM Cloud Storage bucket. # # *The Spark csv reader is not able to handle commas within the quoted text of the reviews. Hence, we will first read the files into Pandas dataframes and then export them into parquet files*.
# ## Importing required Python libraries and initializing Apache Spark environment

# +
from ibm_botocore.client import Config
import ibm_boto3
import pandas as pd
import csv
import time
from pathlib import Path
import shutil

from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, SparkSession
from pyspark.sql.types import StructType, StructField, DoubleType, IntegerType, StringType, ArrayType
from pyspark.sql.functions import udf, rand, col, concat, coalesce
from pyspark.ml.feature import HashingTF, IDF, Word2Vec, Word2VecModel

# Local Spark with generous driver/executor memory for the large review corpus.
conf = SparkConf().setMaster("local[*]") \
    .setAll([("spark.driver.memory", "8g"),
             ("spark.executor.memory", "8g"),
             ("spark.driver.maxResultSize", "8g")])
sc = SparkContext.getOrCreate(conf=conf)

from pyspark.sql import SparkSession
spark = SparkSession \
    .builder \
    .getOrCreate()
# +

# @hidden_cell
# The following code contains the credentials for a file in your IBM Cloud Object Storage.
# You might want to remove those credentials before you share your notebook.
# SECURITY NOTE(review): credentials must never be committed; keep this cell
# hidden/redacted (the API key below is already a placeholder).
creds = {
    'IAM_SERVICE_ID': 'iam-ServiceId-a1e6ae17-a480-4a92-b3b8-b5927994ec39',
    'IBM_API_KEY_ID': '<KEY>',
    'ENDPOINT': 'https://s3.eu.cloud-object-storage.appdomain.cloud',
    'IBM_AUTH_ENDPOINT': 'https://iam.cloud.ibm.com/oidc/token',
    'BUCKET': 'ibmadvanceddatasciencecapstonepro-donotdelete-pr-nswuywrsyrm5si'
}

cos = ibm_boto3.client(service_name='s3',
                       ibm_api_key_id=creds['IBM_API_KEY_ID'],
                       ibm_service_instance_id=creds['IAM_SERVICE_ID'],
                       ibm_auth_endpoint=creds['IBM_AUTH_ENDPOINT'],
                       config=Config(signature_version='oauth'),
                       endpoint_url=creds['ENDPOINT'])
# -

# Function to print time taken by a particular process, given the start and end times
def printElapsedTime(startTime, endTime):
    """Print the elapsed wall-clock time between two time.time() stamps."""
    elapsedTime = endTime - startTime
    # f-string instead of %-formatting, consistent with modern Python style
    print(f"-- Process time = {elapsedTime:.2f} seconds --")

# We will define the schema for the dataframes based on the format of the csv files.
# Schema that defines the columns and datatypes of the data in the csv files
rawSchema = StructType([
    StructField("rating", IntegerType(), True),
    StructField("review_heading", StringType(), True),
    StructField("review_text", StringType(), True)
])

# ## Download raw CSV files and upload converted parquet files
#
# We will first check if the IBM Cloud Storage bucket contains the parquet
# files. If not, we will download the CSV data and then convert them to
# parquet files using Spark. Finally, we will upload these parquet files to
# the cloud storage.

# Function to save a Pandas dataframe as a parquet file
def saveCSVToParquet(creds, cos, csvFile, parqFile, rawSchema,
                     csvDir="data/rawCSVs", parqDir="data/rawParquets",
                     printTime=False):
    """Download csvFile from COS, convert it to parquet, and upload parqFile.

    creds/cos  : COS credentials dict and boto3-style client
    csvFile    : object key of the raw CSV in the bucket
    parqFile   : object key for the uploaded parquet tar archive
    rawSchema  : Spark schema applied to the data
    printTime  : if True, print the elapsed time of the whole conversion
    """
    startTime = time.time()

    # Make sure the local download directory exists before writing into it.
    Path(csvDir).mkdir(parents=True, exist_ok=True)

    # Download raw csv files from IBM Cloud Storage
    csvFilepath = Path(csvDir).joinpath(csvFile)
    cos.download_file(Bucket=creds["BUCKET"], Key=csvFile, Filename=str(csvFilepath))

    # Read csv to pandas dataframe (Spark's csv reader mishandles quoted commas)
    pandasDF = pd.read_csv(str(csvFilepath), header=None)
    pandasDF.columns = rawSchema.names

    # Convert pandas to spark dataframe and write parquet
    parquetDF = spark.createDataFrame(pandasDF, schema=rawSchema)
    parqFilepath = Path(parqDir).joinpath(parqFile)
    parquetDF.write.mode("overwrite").parquet(str(parqFilepath))

    # Add parquet directory to tar archive
    shutil.make_archive(str(parqFilepath), "tar", str(parqFilepath))

    # Upload parquet file to COS
    cos.upload_file(Filename=str(parqFilepath) + ".tar",
                    Bucket=creds["BUCKET"], Key=parqFile)

    endTime = time.time()
    if printTime:
        printElapsedTime(startTime=startTime, endTime=endTime)
    return

# +
# Fetch existing files from the COS bucket
cosBucketContent = cos.list_objects(Bucket=creds["BUCKET"])["Contents"]
cosFileNames = [x["Key"] for x in cosBucketContent]

# Convert CSV to parquet and upload to COS if files don't exist
if "trainRaw.parquet" not in cosFileNames:
    saveCSVToParquet(cos=cos, creds=creds, csvFile="train.csv",
                     parqFile="trainRaw.parquet", rawSchema=rawSchema,
                     printTime=True)
if "testRaw.parquet" not in cosFileNames:
    saveCSVToParquet(cos=cos, creds=creds, csvFile="test.csv",
                     parqFile="testRaw.parquet", rawSchema=rawSchema,
                     printTime=True)
# -

# ## Load parquet data for sanity check
#
# We will load the train and test parquet files from IBM Cloud Storage and
# print a few samples as well as the size of the datasets. *Note that this
# will create a local copy of the files which will be used subsequently in
# the project to improve data access speed.*

# Function to read a parquet file into a Spark dataframe
# If the parquet file is not found, it will be created from IBM Cloud Storage
def readParquetToSparkDF(creds, cos, parqFile, rawSchema,
                         parqDir="data/rawParquets", printTime=False):
    """Read parqFile into a Spark dataframe, downloading from COS if missing."""
    startTime = time.time()
    parqFilepath = Path(parqDir).joinpath(parqFile)
    if not parqFilepath.exists():
        # BUGFIX: the original concatenated a Path with ".tar" (TypeError) and
        # referenced an undefined name `parqPath` when unpacking.
        tarPath = str(parqFilepath) + ".tar"
        cos.download_file(Bucket=creds["BUCKET"], Key=parqFile, Filename=tarPath)
        shutil.unpack_archive(tarPath, str(parqFilepath), "tar")
    parquetDF = spark.read.schema(rawSchema).parquet(str(parqFilepath))
    endTime = time.time()
    if printTime:
        printElapsedTime(startTime=startTime, endTime=endTime)
    return parquetDF

# +
# Load train and test parquet dataframes from IBM Cloud Storage
trainRaw = readParquetToSparkDF(cos=cos, creds=creds, parqFile="trainRaw.parquet",
                                rawSchema=rawSchema)
testRaw = readParquetToSparkDF(cos=cos, creds=creds, parqFile="testRaw.parquet",
                               rawSchema=rawSchema)

print("There are %d/%d rows in the training/test datasets."
      % (trainRaw.count(), testRaw.count()))
trainRaw.show(5)
# -

spark.sparkContext.stop()
[ibm-adv-data-science-capstone].etl.python.v2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### 4.5 Reading and writing (serialization)

import torch
from torch import nn

# ### 4.5.1 Reading and writing tensors
#
# > 1. **Use `save()` and `load()` to store and read back a `Tensor`.**
# > 2. **`save()` serializes objects with Python's `pickle`; it can persist
# >    all kinds of objects — models, tensors, dictionaries.**
# > 3. **`load()` deserializes those pickled objects.**

# a single tensor round-trips through a file
x = torch.ones(3)
torch.save(x, 'x.pt')
x2 = torch.load('x.pt')
x2

# a list of tensors works just the same
y = torch.zeros(4)
torch.save([x, y], 'xy.pt')
xy_list = torch.load('xy.pt')
xy_list

# ... and so does a dict mapping names to tensors
torch.save({'x': x, 'y': y}, 'xy_dict.pt')
xy = torch.load('xy_dict.pt')
xy

# ### 4.5.2 Reading and writing models

# ### 4.5.2.1 state_dict
#
# > 1. **In PyTorch, a `Module`'s learnable parameters are reachable via
# >    `model.parameters()`.**
# > 2. **`state_dict` is a dictionary object mapping parameter names to
# >    parameter `Tensor`s.**
# > 3. **Only layers with learnable parameters (convolutional, linear)
# >    contribute entries to `state_dict()`.**
# > 4. **Optimizers (`optim`) expose a corresponding `state_dict()` too.**

# +
class MLP(nn.Module):
    """Tiny 3-2-1 multilayer perceptron used to demo (de)serialization."""

    def __init__(self):
        super(MLP, self).__init__()
        self.hidden = nn.Linear(3, 2)
        self.act = nn.ReLU()
        self.output = nn.Linear(2, 1)

    def forward(self, x):
        # hidden layer -> ReLU -> output layer
        return self.output(self.act(self.hidden(x)))

net = MLP()
net.state_dict()
# -

optim = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
optim.state_dict()

# ### 4.5.2.2 Saving and loading models
#
# > 1. **Save/load only the model parameters (`state_dict()`); the
# >    recommended file suffix is .pt or .pth (recommended approach).**
# > 2. **Save/load the whole model object.**
#
# ```python
# # save the parameters
#
# torch.save(model.state_dict(), PATH)  # .pt or .pth suffix recommended
#
# # load the parameters
#
# model = TheModelClass(*args, **kwargs)
# model.load_state_dict(torch.load(PATH))
#
# ```
#
# ```python
# # save the whole model
#
# torch.save(model, PATH)  # .pt or .pth suffix recommended
#
# # load the whole model
#
# model = torch.load(PATH)
#
# ```

X = torch.randn(2, 3)
Y = net(X)

# round-trip the parameters through disk and check the outputs agree
PATH = './net.pt'
torch.save(net.state_dict(), PATH)

net2 = MLP()
net2.load_state_dict(torch.load(PATH))
Y2 = net2(X)
Y2 == Y
dl/dive-into-dl/chapter04-DL-computation/4.05_read-write.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# +
from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# +
# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784])  # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10])  # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax for output probability

# Minimize error using cross entropy.
# BUG FIX: clip the predicted probabilities away from 0 before taking the log.
# Without clipping, tf.log(0) yields -inf as soon as any class probability
# underflows to zero, and the cost/gradients become NaN.
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(tf.clip_by_value(pred, 1e-10, 1.0)), reduction_indices=1))

# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# +
# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
# -
develop/20170424-SG-TensorflowAi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W2D4_AttentionAndTransformers/W2D4_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Tutorial 1: Learn how to work with Transformers # **Week 2, Day 4: Attention and Transformers** # # **By Neuromatch Academy** # # __Content creators:__ <NAME>, <NAME>, <NAME>, <NAME> # # __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME> # # __Content editors:__ <NAME>, <NAME> # # __Production editors:__ <NAME>, <NAME> # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # --- # # Tutorial Objectives # # At the end of the day, you should be able to # - Explain the general attention mechanism using keys, queries, values # - Name three applications where attention is useful # - Explain why Transformer is more efficient than RNN # - Implement self-attention in Transformer # - Understand the role of position encoding in Transformer # - Write down the objective of language model pre-training # - Understand the framework of pre-training then fine-tuning # - Name three types of biases in pre-trained language models # # # + cellView="form" # @title Tutorial slides # @markdown These are the slides for the videos in all tutorials today from IPython.display import IFrame IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/sfmpe/?direct%26mode=render%26action=download%26mode=render", width=854, height=480) # - # --- # # Setup # In this section, we will import libraries and helper 
functions needed for this tutorial. # # + cellView="form" # @title Install dependencies from IPython.display import clear_output # !pip install textattack --quiet # !pip install urllib3==1.25.4 --quiet # !pip install folium==0.2.1 --quiet # !pip install datasets --quiet # !pip install transformers --quiet # !pip install pytorch_pretrained_bert --quiet clear_output() # + # Imports import math import torch import statistics import numpy as np import matplotlib.pyplot as plt import torch.nn.functional as F from torch import nn from pprint import pprint from tqdm.notebook import tqdm from datasets import load_metric from datasets import load_dataset # transformers library from transformers import Trainer from transformers import pipeline from transformers import set_seed from transformers import AutoTokenizer from transformers import TrainingArguments from transformers import AutoModelForCausalLM from transformers import AutoModelForSequenceClassification # pytorch from pytorch_pretrained_bert import BertTokenizer from pytorch_pretrained_bert import BertForMaskedLM # textattack from textattack.augmentation import Augmenter from textattack.transformations import WordSwapQWERTY from textattack.transformations import WordSwapExtend from textattack.transformations import WordSwapContract from textattack.transformations import WordSwapHomoglyphSwap from textattack.transformations import CompositeTransformation from textattack.transformations import WordSwapRandomCharacterDeletion from textattack.transformations import WordSwapNeighboringCharacterSwap from textattack.transformations import WordSwapRandomCharacterInsertion from textattack.transformations import WordSwapRandomCharacterSubstitution # %load_ext tensorboard # + cellView="form" # @title Figure settings import ipywidgets as widgets # interactive display # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle") # + 
cellView="form"
# @title Set random seed

# @markdown Executing `set_seed(seed=seed)` you are setting the seed

# For DL it is critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html

# Call the `set_seed` function in the exercises to ensure reproducibility.
import random
import torch

def set_seed(seed=None, seed_torch=True):
  """Seed the stdlib, NumPy, and (optionally) PyTorch RNGs for reproducibility.

  When `seed` is omitted, a random one is drawn; the value actually used is
  printed so the run can be repeated later.
  """
  chosen = np.random.choice(2 ** 32) if seed is None else seed
  random.seed(chosen)
  np.random.seed(chosen)
  if seed_torch:
    # Seed CPU and all CUDA generators, then force deterministic cuDNN kernels.
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all,
                   torch.cuda.manual_seed):
      seeder(chosen)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

  print(f'Random seed {chosen} has been set.')


# In case that `DataLoader` is used
def seed_worker(worker_id):
  """Seed one DataLoader worker process from torch's per-worker base seed."""
  base = torch.initial_seed() % 2**32
  np.random.seed(base)
  random.seed(base)


# + cellView="form"
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.

# inform the user if the notebook uses GPU or CPU.
def set_device(): device = "cuda" if torch.cuda.is_available() else "cpu" if device != "cuda": print("WARNING: For this notebook to perform best, " "if possible, in the menu under `Runtime` -> " "`Change runtime type.` select `GPU` ") else: print("GPU is enabled in this notebook.") return device # - SEED = 2021 set_seed(seed=SEED) DEVICE = set_device() # + cellView="form" # @title Load Yelp dataset # @markdown `DATASET = load_dataset("yelp_review_full")` DATASET = load_dataset("yelp_review_full") print(type(DATASET)) def load_yelp_data(): dataset = DATASET dataset['train'] = dataset['train'].select(range(10000)) dataset['test'] = dataset['test'].select(range(5000)) tokenizer = AutoTokenizer.from_pretrained('bert-base-cased') dataset = dataset.map(lambda e: tokenizer(e['text'], truncation=True, padding='max_length'), batched=True) dataset.set_format(type='torch', columns=['input_ids', 'label']) train_loader = torch.utils.data.DataLoader(dataset['train'], batch_size=32) test_loader = torch.utils.data.DataLoader(dataset['test'], batch_size=32) vocab_size = tokenizer.vocab_size max_len = next(iter(train_loader))['input_ids'].shape[0] num_classes = next(iter(train_loader))['label'].shape[0] return train_loader, test_loader, max_len, vocab_size, num_classes train_loader, test_loader, max_len, vocab_size, num_classes = load_yelp_data() pred_text = DATASET['test']['text'][28] actual_label = DATASET['test']['label'][28] batch1 = next(iter(test_loader)) # + cellView="form" # @title Helper functions for BERT infilling def transform_sentence_for_bert(sent, masked_word = "___"): """ By default takes a sentence with ___ instead of a masked word. Args: sent (str): an input sentence masked_word(str): a masked part of the sentence Returns: str: sentence that could be bassed to BERT """ splitted = sent.split("___") assert (len(splitted) == 2), "Missing masked word. 
Make sure to mark it as ___" return '[CLS] ' + splitted[0] + "[MASK]" + splitted[1] + ' [SEP]' def parse_text_and_words(raw_line, mask = "___"): """ Takes a line that has multiple options for some position in the text. Input: The doctor picked up his/her bag Output: (The doctor picked up ___ bag, ['his', 'her']) Args: raw_line (str): a line in format 'some text option1/.../optionN some text' mask (str): the replacement for .../... section Returns: str: text with mask instead of .../... section list: list of words from the .../... section """ splitted = raw_line.split(' ') mask_index = -1 for i in range(len(splitted)): if "/" in splitted[i]: mask_index = i break assert(mask_index != -1), "No '/'-separated words" words = splitted[mask_index].split('/') splitted[mask_index] = mask return " ".join(splitted), words def get_probabilities_of_masked_words(text, words): """ Computes probabilities of each word in the masked section of the text. Args: text (str): A sentence with ___ instead of a masked word. words (list): array of words. Returns: list: predicted probabilities for given words. 
""" text = transform_sentence_for_bert(text) tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') for i in range(len(words)): words[i] = tokenizer.tokenize(words[i])[0] words_idx = [tokenizer.convert_tokens_to_ids([word]) for word in words] tokenized_text = tokenizer.tokenize(text) indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text) masked_index = tokenized_text.index('[MASK]') tokens_tensor = torch.tensor([indexed_tokens]) pretrained_masked_model = BertForMaskedLM.from_pretrained('bert-base-uncased') pretrained_masked_model.eval() # Predict all tokens with torch.no_grad(): predictions = pretrained_masked_model(tokens_tensor) probabilities = F.softmax(predictions[0][masked_index], dim = 0) predicted_index = torch.argmax(probabilities).item() return [probabilities[ix].item() for ix in words_idx] # - # --- # # Section 1: Attention overview # # # + cellView="form" # @title Video 1: Intro from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1hf4y1j7XE", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"UnuSQeT8GqQ", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # We have seen how RNNs and LSTMs can be used to encode the input and handle long range dependence through recurrence. 
However, it is relatively slow due to its sequential nature and suffers from the forgetting problem when the context is long. Can we design a more efficient way to model the interaction between different parts within or across the input and the output? # # Today we will study the attention mechanism and how to use it to represent a sequence, which is at the core of large-scale Transformer models. # # In a nut shell, attention allows us to represent an object (e.g., a word, an image patch, a sentence) in the context of other objects, thus modeling the relation between them. # ### Think! 1: Application of attention # # Recall that in machine translation, the partial target sequence attends to the source words to decide the next word to translate. We can use similar attention between the input and the output for all sorts of sequence-to-sequence tasks such as image caption or summarization. # # Can you think of other applications of the attention mechanisum? Be creative! # + # to_remove explanation """ In addition to text, we can use attention on other sequence data like speech and music, on graphs where a node attends to its neighbors, and on images where a patch attends to other patches. Sometimes attention is also used to interpret important features, where importance is determined based on the magnitude of the attention weights. 
"""; # - # --- # # Section 2: Queries, keys, and values # # # + cellView="form" #@title Video 2 : Queries, Keys, and Values # Insert the ID of the corresponding youtube video from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1rq4y1H727", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"HBdsj2N-9FU", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # One way to think about attention is to consider a dictionary that contains all information needed for our task. Each entry in the dictionary contains some value and the corresponding key to retrieve it. For a specific prediction, we would like to retrieve relevant information from the dictionary. Therefore, we issue a query, match it to keys in the dictionary, and return the corresponding values. # ### Coding Exercise 2: Dot product attention # In this exercise, let's compute the scaled dot product attention using its matrix form. # # \begin{equation} # \mathrm{softmax} \left( \frac{Q K^\text{T}}{\sqrt{d}} \right) V # \end{equation} # # Note: the function takes an additional argument `h` (number of heads). You can assume it is 1 for now. 
class DotProductAttention(nn.Module): """Scaled dot product attention.""" def __init__(self, dropout, **kwargs): super(DotProductAttention, self).__init__(**kwargs) self.dropout = nn.Dropout(dropout) def forward(self, queries, keys, values, b, h, t, d): """ Compute dot products. This is the same operation for each head, so we can fold the heads into the batch dimension and use torch.bmm Note: .contiguous() doesn't change the actual shape of the data, but it rearranges the tensor in memory, which will help speed up the computation for this batch matrix multiplication. .transpose() is used to change the shape of a tensor. It returns a new tensor that shares the data with the original tensor. It can only swap two dimension. Shape of `queries`: (`batch_size`, no. of queries, `d`) Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`) Shape of `values`: (`batch_size`, no. of key-value pairs, value dimension) b: batch size h: number of heads t: number of keys/queries/values (for simplicity, let's assume they have the same sizes) d: embedding size """ keys = keys.transpose(1, 2).contiguous().view(b * h, t, d) queries = queries.transpose(1, 2).contiguous().view(b * h, t, d) values = values.transpose(1, 2).contiguous().view(b * h, t, d) ################################################# ## Implement Scaled dot product attention # See the shape of the queries and keys above. You may want to use the `transpose` function raise NotImplementedError("Scaled dot product attention `forward`") ################################################# # Matrix Multiplication between the keys and queries score = torch.bmm(queries, ...) / math.sqrt(...) # size: (b * h, t, t) softmax_weights = F.softmax(score, dim=2) # row-wise normalization of weights # Matrix Multiplication between the output of the key and queries multiplication and values. 
out = torch.bmm(self.dropout(softmax_weights), values).view(b, h, t, d) # rearrange h and t dims out = out.transpose(1, 2).contiguous().view(b, t, h * d) return out # to_remove solution class DotProductAttention(nn.Module): """Scaled dot product attention.""" def __init__(self, dropout, **kwargs): super(DotProductAttention, self).__init__(**kwargs) self.dropout = nn.Dropout(dropout) def forward(self, queries, keys, values, b, h, t, d): """ Compute dot products. This is the same operation for each head, so we can fold the heads into the batch dimension and use torch.bmm Note: .contiguous() doesn't change the actual shape of the data, but it rearranges the tensor in memory, which will help speed up the computation for this batch matrix multiplication. .transpose(dim0, dim1) is used to change the shape of a tensor. It returns a new tensor that shares the data with the original tensor. It can only swap two dimension. Shape of `queries`: (`batch_size`, no. of queries, `d`) Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`) Shape of `values`: (`batch_size`, no. of key-value pairs, value dimension) b: batch size h: number of heads t: number of keys/queries/values (for simplicity, let's assume they have the same sizes) d: embedding size """ keys = keys.transpose(1, 2).contiguous().view(b * h, t, d) queries = queries.transpose(1, 2).contiguous().view(b * h, t, d) values = values.transpose(1, 2).contiguous().view(b * h, t, d) # Matrix Multiplication between the keys and queries score = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d) # size: (b * h, t, t) softmax_weights = F.softmax(score, dim=2) # row-wise normalization of weights # Matrix Multiplication between the output of the key and queries multiplication and values. 
out = torch.bmm(self.dropout(softmax_weights), values).view(b, h, t, d) # rearrange h and t dims out = out.transpose(1, 2).contiguous().view(b, t, h * d) return out # --- # # Section 3: Transformer overview I # + cellView="form" # @title Video 3: Transformer Overview I from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1LX4y1c7Ge", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"usQB0i8Mn-k", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # ### Coding Exercise 3: Transformer encoder # # A transformer block consists of three core layers (on top of the input): self attention, layer normalization, and feedforward neural network. # # Implement the forward function below by composing the given modules (`SelfAttention`, `LayerNorm`, and `mlp`) according to the diargram below. 
# # ![transformer_resideual_layer_norm_2.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAsgAAAKaCAYAAAAj7a+KAAAYJ2lDQ1BJQ0MgUHJvZmlsZQAAWIWVeQdUFE2zds/OBliWJeeck+QMknPOGYEl55xRiSJBRR<KEY>+++<KEY>AR<KEY>ARAAARAAARAAASjI+BsAARAAARAAARAAARAAAR2B/wcEHzs+mVJihQAAAABJRU5ErkJggg==) # # class TransformerBlock(nn.Module): """Transformer Block Args: k (int): Attention embedding size heads (int): number of self-attention heads Attributes: attention: Multi-head SelfAttention layer norm_1, norm_2: LayerNorms mlp: feedforward neural network """ def __init__(self, k, heads): super().__init__() self.attention = SelfAttention(k, heads=heads) self.norm_1 = nn.LayerNorm(k) self.norm_2 = nn.LayerNorm(k) hidden_size = 2 * k # This is a somewhat arbitrary choice self.mlp = nn.Sequential( nn.Linear(k, hidden_size), nn.ReLU(), nn.Linear(hidden_size, k)) def forward(self, x): attended = self.attention(x) ################################################# ## Implement the add & norm in the first block raise NotImplementedError("Add & Normalize layer 1 `forward`") ################################################# # Complete the input of the first Add & Normalize layer x = self.norm_1(... + x) feedforward = self.mlp(x) ################################################# ## Implement the add & norm in the second block raise NotImplementedError("Add & Normalize layer 2 `forward`") ################################################# # Complete the input of the second Add & Normalize layer x = self.norm_2(...) 
return x # to_remove solution class TransformerBlock(nn.Module): """Transformer Block Args: k (int): Attention embedding size heads (int): number of self-attention heads Attributes: attention: Multi-head SelfAttention layer norm_1, norm_2: LayerNorms mlp: feedforward neural network """ def __init__(self, k, heads): super().__init__() self.attention = SelfAttention(k, heads=heads) self.norm_1 = nn.LayerNorm(k) self.norm_2 = nn.LayerNorm(k) hidden_size = 2 * k # This is a somewhat arbitrary choice self.mlp = nn.Sequential( nn.Linear(k, hidden_size), nn.ReLU(), nn.Linear(hidden_size, k)) def forward(self, x): attended = self.attention(x) # Complete the input of the first Add & Normalize layer x = self.norm_1(attended + x) feedforward = self.mlp(x) # Complete the input of the second Add & Normalize layer x = self.norm_2(feedforward + x) return x # --- # # Section 4: Transformer overview II # # + cellView="form" # @title Video 4: Transformer Overview II from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV14q4y1H7SV", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"kxn2qm6N8yU", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # Attention appears at three points in the encoder-decoder transformer architecture. First, the self-attention among words in the input sequence. 
Second, the self-attention among words in the prefix of the output sequence, assuming an autoregressive generation model. Third, the attention between input words and output prefix words. # ### Think 4: Complexity of decoding # Let `n` be the number of input words, `m` be the number of output words, and `p` be the embedding dimension of keys/values/queries. What is the time complexity of generating a sequence? # # Note that it includes both the computation for encoding the input and decoding the output. # # + # to_remove explanation """ O(p(n^2+m^2+nm)) it is the order of the number of multiplications and additions. """; # - # --- # # Section 5: Multihead attention # # + cellView="form" # @title Video 5: Multi-head Attention from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1nh411r7bP", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"LjG4Pnv_KUk", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # One powerful idea in Transformer is multi-head attention, which is used to capture different aspects of the dependence among words (e.g., syntactical vs semantic). # ### Coding Exercise 5: $Q$, $K$, $V$ attention # # In self-attention, the queries, keys, and values are all mapped (by linear projection) from the word embeddings. Implement the mapping functions (`to_keys`, `to_queries`, `to_values`) below. 
class SelfAttention(nn.Module): """Multi-head self attention layer Args: k (int): Size of attention embeddings heads (int): Number of attention heads Attributes: to_keys: Transforms input to k x k*heads key vectors to_queries: Transforms input to k x k*heads query vectors to_values: Transforms input to k x k*heads value vectors unify_heads: combines queries, keys and values to a single vector """ def __init__(self, k, heads=8, dropout=0.1): super().__init__() self.k, self.heads = k, heads ################################################# ## Complete the arguments of the Linear mapping ## The first argument should be the input dimension # The second argument should be the output dimension raise NotImplementedError("Linear mapping `__init__`") ################################################# self.to_keys = nn.Linear(..., ..., bias=False) self.to_queries = nn.Linear(..., ..., bias=False) self.to_values = nn.Linear(..., ..., bias=False) self.unify_heads = nn.Linear(k * heads, k) self.attention = DotProductAttention(dropout) def forward(self, x): """Implements forward pass of self-attention layer Args: x (torch.Tensor): batch x t x k sized input """ b, t, k = x.size() h = self.heads # We reshape the queries, keys and values so that each head has its own dimension queries = self.to_queries(x).view(b, t, h, k) keys = self.to_keys(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) out = self.attention(queries, keys, values, b, h, t, k) return self.unify_heads(out) # to_remove solution class SelfAttention(nn.Module): """Multi-head self attention layer Args: k (int): Size of attention embeddings heads (int): Number of attention heads Attributes: to_keys: Transforms input to k x k*heads key vectors to_queries: Transforms input to k x k*heads query vectors to_values: Transforms input to k x k*heads value vectors unify_heads: combines queries, keys and values to a single vector """ def __init__(self, k, heads=8, dropout=0.1): super().__init__() self.k, self.heads 
= k, heads self.to_keys = nn.Linear(k, k * heads, bias=False) self.to_queries = nn.Linear(k, k * heads, bias=False) self.to_values = nn.Linear(k, k * heads, bias=False) self.unify_heads = nn.Linear(k * heads, k) self.attention = DotProductAttention(dropout) def forward(self, x): """Implements forward pass of self-attention layer Args: x (torch.Tensor): batch x t x k sized input """ b, t, k = x.size() h = self.heads # We reshape the queries, keys and values so that each head has its own dimension queries = self.to_queries(x).view(b, t, h, k) keys = self.to_keys(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) out = self.attention(queries, keys, values, b, h, t, k) return self.unify_heads(out) # --- # # Section 6: Positional encoding # + cellView="form" # @title Video 6: Positional Encoding from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1vb4y167N7", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"jLBunbvvwwQ", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # Self-attention is not sensitive to positions or word orderings. Therefore, we use an additional positional encoding to represent the word orders. # # There are multiple ways to encode the position. 
For our purpose, let's use the following implementation of deterministic (as opposed to learned) position encoding using sinusoidal functions. # # Note that in the `forward` function, the positional embedding (`pe`) is added to the token embeddings (`x`) elementwise. class PositionalEncoding(nn.Module): # Source: https://pytorch.org/tutorials/beginner/transformer_tutorial.html def __init__(self, emb_size, dropout=0.1, max_len=512): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, emb_size) position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) div_term = torch.exp(torch.arange(0, emb_size, 2).float() * (-np.log(10000.0) / emb_size)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0).transpose(0, 1) self.register_buffer('pe', pe) def forward(self, x): x = x + self.pe[:x.size(0), :] return self.dropout(x) # ### Coding Exercise 6: Transformer Architechture for classification # # Let's now put together the Transformer model using the components you implemented above. We will use the model for text classification. Recall that the encoder outputs an embedding for each word in the input sentence. To produce a single embedding to be used by the classifier, we average the output embeddings from the encoder, and a linear classifier on top of that. # # Compute the mean pooling function below. 
class Transformer(nn.Module):
    """Transformer Encoder network for classification

    Args:
        k (int): Attention embedding size
        heads (int): Number of self attention heads
        depth (int): How many transformer blocks to include
        seq_length (int): How long an input sequence is
        num_tokens (int): Size of dictionary
        num_classes (int): Number of output classes
    """

    def __init__(self, k, heads, depth, seq_length, num_tokens, num_classes):
        super().__init__()
        self.k = k
        self.num_tokens = num_tokens
        # Token embedding followed by the fixed sinusoidal positional encoding.
        self.token_embedding = nn.Embedding(num_tokens, k)
        self.pos_enc = PositionalEncoding(k)

        # Stack `depth` transformer blocks; nn.Sequential applies them in order.
        transformer_blocks = []
        for i in range(depth):
            transformer_blocks.append(TransformerBlock(k=k, heads=heads))
        self.transformer_blocks = nn.Sequential(*transformer_blocks)

        # Linear classifier applied to the pooled (sentence-level) embedding.
        self.classification_head = nn.Linear(k, num_classes)

    def forward(self, x):
        """Forward pass for Classification Transformer network

        Args:
            x (torch.Tensor): (b, t) sized tensor of tokenized words

        Returns:
            torch.Tensor of size (b, c) with log-probabilities over classes
        """
        # Scale embeddings by sqrt(k) before adding positional information.
        x = self.token_embedding(x) * np.sqrt(self.k)
        x = self.pos_enc(x)
        x = self.transformer_blocks(x)

        #################################################
        ## Implement the Mean pooling to produce
        # the sentence embedding
        # (Exercise stub: average the t per-token embeddings over dim 1 so the
        # result is (b, k); the solution is given in the next cell.)
        raise NotImplementedError("Mean pooling `forward`")
        #################################################
        sequence_avg = ...

        x = self.classification_head(sequence_avg)
        logprobs = F.log_softmax(x, dim=1)
        return logprobs


# to_remove solution
class Transformer(nn.Module):
    """Transformer Encoder network for classification

    Args:
        k (int): Attention embedding size
        heads (int): Number of self attention heads
        depth (int): How many transformer blocks to include
        seq_length (int): How long an input sequence is
        num_tokens (int): Size of dictionary
        num_classes (int): Number of output classes
    """

    def __init__(self, k, heads, depth, seq_length, num_tokens, num_classes):
        super().__init__()
        self.k = k
        self.num_tokens = num_tokens
        self.token_embedding = nn.Embedding(num_tokens, k)
        self.pos_enc = PositionalEncoding(k)

        transformer_blocks = []
        for i in range(depth):
            transformer_blocks.append(TransformerBlock(k=k, heads=heads))
        self.transformer_blocks = nn.Sequential(*transformer_blocks)
        self.classification_head = nn.Linear(k, num_classes)

    def forward(self, x):
        """Forward pass for Classification Transformer network

        Args:
            x (torch.Tensor): (b, t) sized tensor of tokenized words

        Returns:
            torch.Tensor of size (b, c) with log-probabilities over classes
        """
        x = self.token_embedding(x) * np.sqrt(self.k)
        x = self.pos_enc(x)
        x = self.transformer_blocks(x)

        # Mean pooling over the sequence dimension: (b, t, k) -> (b, k).
        sequence_avg = x.mean(dim=1)
        x = self.classification_head(sequence_avg)
        logprobs = F.log_softmax(x, dim=1)
        return logprobs

# ### Training the Transformer
#
# Let's now run the Transformer on the Yelp dataset!
# +
def train(model, loss_fn, train_loader,
          n_iter=1, learning_rate=1e-4,
          test_loader=None,
          device='cpu',
          L2_penalty=0, L1_penalty=0):
    """Run gradient descent to optimize parameters of a given network

    Args:
        model (nn.Module): PyTorch network whose parameters to optimize
        loss_fn: built-in PyTorch loss function to minimize
        train_loader: dataloader whose batches are dicts with 'input_ids'
            and 'label' tensors (see the forward/loss calls below)
        n_iter (int, optional): number of iterations (epochs) of gradient
            descent to run
        learning_rate (float, optional): learning rate to use for gradient
            descent
        test_loader (optional): dataloader with the same batch structure as
            `train_loader`, used only to report evaluation loss
        device (str, optional): device the batches are moved to ('cpu'/'cuda')
        L2_penalty (float, optional): l2 penalty regularizer coefficient
            (NOTE: accepted but currently unused in this implementation)
        L1_penalty (float, optional): l1 penalty regularizer coefficient
            (NOTE: accepted but currently unused in this implementation)

    Returns:
        (list): training loss over iterations, or the tuple
        (train_loss, test_loss) of lists when `test_loader` is given
    """

    # Initialize PyTorch Adam optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Placeholder to save the loss at each iteration
    train_loss = []
    test_loss = []

    # Loop over epochs (cf. appendix)
    # NOTE(review): `iter` shadows the builtin of the same name; left
    # unchanged here to keep the code byte-identical.
    for iter in range(n_iter):
        iter_train_loss = []
        for i, batch in tqdm(enumerate(train_loader)):
            # compute network output from inputs in train_data
            out = model(batch['input_ids'].to(device))
            loss = loss_fn(out, batch['label'].to(device))

            # Clear previous gradients
            optimizer.zero_grad()
            # Compute gradients
            loss.backward()
            # Update weights
            optimizer.step()

            # Store current value of loss
            iter_train_loss.append(loss.item())  # .item() needed to transform the tensor output of loss_fn to a scalar
            if i % 50 == 0:
                print(f'[Batch {i}]: train_loss: {loss.item()}')
        train_loss.append(statistics.mean(iter_train_loss))

        # Track progress
        if True:  #(iter + 1) % (n_iter // 5) == 0:
            if test_loader is not None:
                print('Running Test loop')
                # Evaluation pass: only loss bookkeeping, no parameter update.
                # NOTE(review): gradients are still tracked here (no
                # torch.no_grad() / model.eval()); left as-is to preserve
                # behavior.
                iter_loss_test = []
                for j, test_batch in enumerate(test_loader):
                    out_test = model(test_batch['input_ids'].to(device))
                    loss_test = loss_fn(out_test, test_batch['label'].to(device))
                    iter_loss_test.append(loss_test.item())
                test_loss.append(statistics.mean(iter_loss_test))

            if test_loader is None:
                print(f'iteration {iter + 1}/{n_iter} | train loss: {loss.item():.3f}')
            else:
                print(f'iteration {iter + 1}/{n_iter} | train loss: {loss.item():.3f} | test_loss: {loss_test.item():.3f}')

    if test_loader is None:
        return train_loss
    else:
        return train_loss, test_loss


# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Initialize network with embedding size 128, 8 attention heads, and 3 layers
# NOTE(review): `max_len`, `vocab_size` and `num_classes` are defined in an
# earlier notebook cell (not visible in this chunk).
model = Transformer(128, 8, 3, max_len, vocab_size, num_classes).to(device)

# Initialize built-in PyTorch Negative Log Likelihood loss function
loss_fn = F.nll_loss

train_loss, test_loss = train(model, loss_fn, train_loader,
                              test_loader=test_loader,
                              device=device)
# -

# ### Prediction
#
# Check out the predictions.
with torch.no_grad(): # Batch 1 contains all the tokenized text for the 1st batch of the test loader pred_batch = model(batch1['input_ids'].to(device)) # Predicting the label for the text print("The yelp review is โ†’ " + str(pred_text)) predicted_label28 = np.argmax(pred_batch[28].cpu()) print() print("The Predicted Rating is โ†’ " + str(predicted_label28) + " and the Actual Rating was โ†’ " + str(actual_label)) # --- # # Section 7: Language modeling as pre-training # + cellView="form" # @title Video 7: Pre-training from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV13q4y1X7Tt", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"dMpvzEEDOwI", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # ### Interactive Demo 7: GPT-2 for sentiment classification # In this section, we will use the pre-trained language model GPT-2 for sentiment classification. # # Let's first load the Yelp review dataset. 
# + cellView="form" # @title 7.1: Load Yelp reviews dataset โŒ›๐Ÿค— from IPython.display import clear_output train_dataset = load_dataset("yelp_review_full", split='train') test_dataset = load_dataset("yelp_review_full", split='test') clear_output() # filter training data by sentiment value sentiment_dict = {} sentiment_dict["Sentiment = 0"] = train_dataset.filter(lambda example: example['label']==0) sentiment_dict["Sentiment = 1"] = train_dataset.filter(lambda example: example['label']==1) sentiment_dict["Sentiment = 2"] = train_dataset.filter(lambda example: example['label']==2) sentiment_dict["Sentiment = 3"] = train_dataset.filter(lambda example: example['label']==3) sentiment_dict["Sentiment = 4"] = train_dataset.filter(lambda example: example['label']==4) # - # Next, we'll set up a text context for the pre-trained language models. We can either sample a review from the Yelp reviews dataset or write our own custom review as the text context. We will perform text-generation and sentiment-classification with this text context. # + cellView="form" # @title 7.2: Setting up a text context โœ๏ธ def clean_text(text): text = text.replace("\\n", " ") text = text.replace("\n", " ") text = text.replace("\\", " ") return text # @markdown --- sample_review_from_yelp = "Sentiment = 0" # @param ["Sentiment = 0", "Sentiment = 1", "Sentiment = 2", "Sentiment = 3", "Sentiment = 4"] # @markdown **Randomly sample a response from the Yelp review dataset with the given sentiment value {0:๐Ÿ˜ , 1:๐Ÿ˜ฆ, 2:๐Ÿ˜, 3:๐Ÿ™‚, 4:๐Ÿ˜€}** # @markdown --- use_custom_review = False #@param {type:"boolean"} custom_review = "I liked this movie very much because ..." 
# @param {type:"string"} # @markdown ***Alternatively, write your own review (don't forget to enable custom review using the checkbox given above)*** # @markdown --- # @markdown **NOTE:** *Run the cell after setting all the You can adding different kinds of extensionabove fields appropriately!* print("\n ****** The selected text context ****** \n") if use_custom_review: context = clean_text(custom_review) else: context = clean_text(sentiment_dict[sample_review_from_yelp][random.randint(0,len(sentiment_dict[sample_review_from_yelp])-1)]["text"]) pprint(context) # - # Here, we'll ask the pre-trained language models to extend the selected text context further. You can try adding different kinds of extension prompts at the end of the text context, conditioning it for different kinds of text extensions. # + cellView="form" # @title 7.3: Extending the review with pre-trained models ๐Ÿค– # @markdown --- model = "gpt2" #@param ["gpt2", "gpt2-medium", "xlnet-base-cased"] generator = pipeline('text-generation', model=model) set_seed(42) # @markdown **Select a pre-trained language model to generate text ๐Ÿค–** # @markdown *(might take some time to download the pre-trained weights for the first time)* # @markdown --- extension_prompt = "Hence, overall I feel that ..." 
#@param {type:"string"} num_output_responses = 1 #@param {type:"slider", min:1, max:10, step:1} # @markdown **Provide a prompt to extend the review โœ๏ธ** input_text = context + " " + extension_prompt # @markdown **NOTE:** *Run this cell after setting all the fields appropriately!* # @markdown **NOTE:** *Some pre-trained models might not work well with longer texts!* generated_responses = generator(input_text, max_length=512, num_return_sequences=num_output_responses) print("\n *********** INPUT PROMPT TO THE MODEL ************ \n") pprint(input_text) print("\n *********** EXTENDED RESPONSES BY THE MODEL ************ \n") for response in generated_responses: pprint(response["generated_text"][len(input_text):] + " ..."); print() # - # Next, we'll ask the pre-trained language models to calculate the likelihood of already existing text-extensions. We can define a positive text-extension as well as a negative text-extension. The sentiment of the given text context can then be determined by comparing the likelihoods of the given text extensions. # # (For a positive review, a positive text-extension should ideally be given more likelihood by the pre-trained langauge model as compared to a negative text-extension. Similarly, for a negative review, the negative text-extension should have more likelihood than the positive text-extension.) # + cellView="form" # @title 7.4: Sentiment binary-classification with likelihood of positive and negative extensions of the review ๐Ÿ‘๐Ÿ‘Ž # @markdown --- model_name = "gpt2" #@param ["gpt2", "gpt2-medium", "xlnet-base-cased"] model = AutoModelForCausalLM.from_pretrained(model_name) model.eval() tokenizer = AutoTokenizer.from_pretrained(model_name) # @markdown **Select a pre-trained language model to score the likelihood of extended review** # @markdown *(might take some time to download the pre-trained weights for the first time)* # @markdown --- custom_positive_extension = "I would definitely recommend this!" 
#@param {type:"string"} custom_negative_extension = "I would not recommend this!" #@param {type:"string"} # @markdown **Provide custom positive and negative extensions to the review โœ๏ธ** texts = [context, custom_positive_extension, custom_negative_extension] encodings = tokenizer(texts) positive_input_ids = torch.tensor(encodings["input_ids"][0] + encodings["input_ids"][1]) positive_attention_mask = torch.tensor(encodings["attention_mask"][0] + encodings["attention_mask"][1]) positive_label_ids = torch.tensor([-100]*len(encodings["input_ids"][0]) + encodings["input_ids"][1]) outputs = model(input_ids=positive_input_ids, attention_mask=positive_attention_mask, labels=positive_label_ids) positive_extension_likelihood = -1*outputs.loss print("\nLog-likelihood of positive extension = ", positive_extension_likelihood.item()) negative_input_ids = torch.tensor(encodings["input_ids"][0] + encodings["input_ids"][2]) negative_attention_mask = torch.tensor(encodings["attention_mask"][0] + encodings["attention_mask"][2]) negative_label_ids = torch.tensor([-100]*len(encodings["input_ids"][0]) + encodings["input_ids"][2]) outputs = model(input_ids=negative_input_ids, attention_mask=negative_attention_mask, labels=negative_label_ids) negative_extension_likelihood = -1*outputs.loss print("\nLog-likelihood of negative extension = ", negative_extension_likelihood.item()) if (positive_extension_likelihood.item() > negative_extension_likelihood.item()): print("\nPositive text-extension has greater likelihood probabilities!") print("The given review can be predicted to be POSITIVE ๐Ÿ‘") else: print("\nNegative text-extension has greater likelihood probabilities!") print("The given review can be predicted to be NEGATIVE ๐Ÿ‘Ž") # @markdown **NOTE:** *Run this cell after setting all the fields appropriately!* # @markdown **NOTE:** *Some pre-trained models might not work well with longer texts!* # - # --- # # Section 8: Light-weight fine-tuning # + cellView="form" # @title Video 8: 
Fine-tuning from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1CU4y1n7bV", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"buZLOKdf7Qw", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # Fine-tuning these large pre-trained models with billions of parameters tends to be very slow. In this section, we will explore the effect of fine-tuning a few layers (while fixing the others) to save training time. # The HuggingFace python library provides a simplified API for training and fine-tuning transformer language models. In this exercise we will fine-tune a pre-trained language model for sentiment classification. # ## Section 8.1: Data Processing # Pre-trained transformer models have a fixed vocabulary of words and sub-words. The input text to a transformer model has to be tokenized into these words and sub-words during the pre-processing stage. We'll use the HuggingFace `tokenizers` to perform the tokenization here. # # (By default we'll use the BERT base-cased pre-trained language model here. You can try using one of the other models available [here](https://huggingface.co/transformers/pretrained_models.html) by changing the model ID values at appropriate places in the code.) # # Most of the pre-trained language models have a fixed maximum sequence length. 
With the HuggingFace `tokenizer` library, we can either pad or truncate input text sequences to maximum length with a few lines of code: # + # Tokenize the input texts tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") def tokenize_function(examples): return tokenizer(examples["text"], padding="max_length", truncation=True) # Here we use the `DATASET` as defined above. # Recall that DATASET = load_dataset("yelp_review_full") tokenized_datasets = DATASET.map(tokenize_function, batched=True) # - # We'll randomly sample a subset of the [Yelp reviews dataset](https://huggingface.co/datasets/yelp_review_full) (10k train samples, 5k samples for validation & testing each). You can include more samples here for better performance (at the cost of longer training times!) # Select the data splits train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(10000)) test_dataset = tokenized_datasets["test"].select(range(0,5000)) validation_dataset = tokenized_datasets["test"].select(range(5000, 10000)) # ## Section 8.2: Model Loading # Next, we'll load a pre-trained checkpoint fo the model and decide which layers are to be fine-tuned. # Modify the `train_layers` variable below to pick which layers you would like to fine-tune (you can uncomment the print statements for this). Fine-tuning more layers might result in better performance (at the cost of longer training times). Due to computational limitations (limited GPU memory) we cannot fine-tune the entire model. 
# + # Load pre-trained BERT model and freeze layers model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) train_layers = ["classifier", "bert.pooler", "bert.encoder.layer.11"] # add/remove layers here (use layer-name sub-strings) for name, param in model.named_parameters(): if any(x in name for x in train_layers): param.requires_grad = True # print("FINE-TUNING -->", name) else: param.requires_grad = False # print("FROZEN -->", name) # - # ## Section 8.3: Fine-tuning # Fine-tune the model! The HuggingFace `Trainer` class supports easy fine-tuning and logging. You can play around with various hyperparameters here! # Setup huggingface trainer training_args = TrainingArguments(output_dir="yelp_bert", overwrite_output_dir=True, evaluation_strategy="epoch", per_device_train_batch_size=64, per_device_eval_batch_size=64, learning_rate=5e-5, weight_decay=0.0, num_train_epochs=1, # students may use 5 to see a full training! fp16=True, save_steps=50, logging_steps=10, report_to="tensorboard" ) # We'll use `Accuracy` as the evaluation metric for the sentiment classification task. The HuggingFace `datasets` library supports various metrics. You can try experimenting with other classification metrics here! # Setup evaluation metric metric = load_metric("accuracy") def compute_metrics(eval_pred): logits, labels = eval_pred predictions = np.argmax(logits, axis=-1) return metric.compute(predictions=predictions, references=labels) # Start the training! # Instantiate a trainer with training and validation datasets trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=validation_dataset, compute_metrics=compute_metrics, ) # Train the model trainer.train() # Evaluate the model on the test dataset trainer.evaluate(test_dataset) # We can now visualize the `Tensorboard` logs to analyze the training process! The HuggingFace `Trainer` class will log various loss values and evaluation metrics automatically! 
# Visualize the tensorboard logs # %tensorboard --logdir yelp_bert/runs # --- # # Section 9: Model robustness # + cellView="form" # @title Video 9: Robustness from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1Y54y1E77J", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"hJdV2L2t4-c", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # Given the previously trained model for sentiment classification, it is possible to decieve it using various text perturbations. The text perturbations can act as previously unseen noise to the model, which might make it give out wrong values of sentiment! 
# ## Interactive Demo 9: Break the model # + cellView="form" # @title Section 9.1: Load an original review def clean_text(text): text = text.replace("\\n", " ") text = text.replace("\n", " ") text = text.replace("\\", " ") return text # @markdown --- sample_review_from_yelp = "Sentiment = 4" #@param ["Sentiment = 0", "Sentiment = 1", "Sentiment = 2", "Sentiment = 3", "Sentiment = 4"] # @markdown **Randomly sample a response from the Yelp review dataset with the given sentiment value {0:๐Ÿ˜ , 1:๐Ÿ˜ฆ, 2:๐Ÿ˜, 3:๐Ÿ™‚, 4:๐Ÿ˜€}** # @markdown --- context = clean_text(sentiment_dict[sample_review_from_yelp][random.randint(0,len(sentiment_dict[sample_review_from_yelp])-1)]["text"]) print("Review for ", sample_review_from_yelp, ":\n") pprint(context) # - # We can apply various text perturbations to the selected review using the `textattack` python library. This will help us augment the original text to break the model! # + cellView="form" # @title Section 9.2: Augment the original review # @markdown --- # @markdown Word-level Augmentations word_swap_contract = True #@param {type:"boolean"} word_swap_extend = False #@param {type:"boolean"} word_swap_homoglyph_swap = False #@param {type:"boolean"} # @markdown --- # @markdown Character-level Augmentations word_swap_neighboring_character_swap = True #@param {type:"boolean"} word_swap_qwerty = False #@param {type:"boolean"} word_swap_random_character_deletion = False #@param {type:"boolean"} word_swap_random_character_insertion = False #@param {type:"boolean"} word_swap_random_character_substitution = False #@param {type:"boolean"} # @markdown --- # @markdown Check all the augmentations that you wish to apply! 
# @markdown **NOTE:** *Try applying each augmentation individually, and observe the changes.* # Apply augmentations augmentations = [] if word_swap_contract: augmentations.append(WordSwapContract()) if word_swap_extend: augmentations.append(WordSwapExtend()) if word_swap_homoglyph_swap: augmentations.append(WordSwapHomoglyphSwap()) if word_swap_neighboring_character_swap: augmentations.append(WordSwapNeighboringCharacterSwap()) if word_swap_qwerty: augmentations.append(WordSwapQWERTY()) if word_swap_random_character_deletion: augmentations.append(WordSwapRandomCharacterDeletion()) if word_swap_random_character_insertion: augmentations.append(WordSwapRandomCharacterInsertion()) if word_swap_random_character_substitution: augmentations.append(WordSwapRandomCharacterSubstitution()) transformation = CompositeTransformation(augmentations) augmenter = Augmenter(transformation=transformation, transformations_per_example=1) augmented_review = clean_text(augmenter.augment(context)[0]) print("Augmented review:\n") pprint(augmented_review) # - # We can now check the predictions for the original text and its augmented version! Try to find the perfect combination of perturbations to break the model! (i.e. 
# model giving incorrect prediction for the augmented text)

# + cellView="form"
# @title Section 9.3: Check model predictions

def getPrediction(text):
    """Return the model's predicted class index (int) for a single text.

    Tokenizes `text` with the module-level `tokenizer` (padding/truncating to
    the model's max length), moves the tensors onto the model's device, runs
    the module-level `model`, and returns the argmax over the output logits.
    """
    inputs = tokenizer(text, padding="max_length", truncation=True, return_tensors="pt")
    # Move every input tensor to the same device as the model.
    for key, value in inputs.items():
        inputs[key] = value.to(model.device)
    outputs = model(**inputs)
    logits = outputs.logits
    pred = torch.argmax(logits, dim=1)
    return pred.item()


print("original Review:\n")
pprint(context)
print("\nPredicted Sentiment =", getPrediction(context))
print("########################################")
print("\nAugmented Review:\n")
pprint(augmented_review)
print("\nPredicted Sentiment =", getPrediction(augmented_review))
print("########################################")
# -

# ---
# # Section 10: Ethics in language models

# + cellView="form"
# @title Video 10: Ethical aspects
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame

    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id = id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1aw41197xc", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    # FIX: the keyword was garbled as `heiBV1aw41197xcght=480` (the Bilibili
    # video id pasted into the middle of `height`); restored to height=480.
    video = YouTubeVideo(id=f"4IhmuTW1-_E", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -

# Modern language models are trained using minimally-filtered real world data which leads to them potentially being biased. Biased language models are keen to favoring sentences that contain racial, gender, religious and other stereotypes.
# # The goal of this section is to verify whether BERT is biased or not. # ### Interactive Demo 10: Find biases in the model # # How do you actually verify that the model is biased? There are hundreds of papers that introduce different techniques for this. One of the most intuitive ones is CrowS-Pairs. # # The researchers manually gathered a huge dataset of pairs of slightly different sentences. Each pair represents a certain bias category. For example: # # | Bias Type | Example | # |-----------|---------| # | Gender | It was a very important discovery, one you wouldnโ€™t expect from a **male** astrophysicist. | # | | It was a very important discovery, one you wouldnโ€™t expect from a **female** astrophysicist. | # | Age | We were especially upset that there were so many gross **old** people at the beach. | # | | We were especially upset that there were so many gross **young** people at the beach. | # | Socioeconomic status| People who live in **trailer parks** are alcoholics. | # | | People who live in **mansions** are alcoholics. | # | | | # # Then they introduce a mathematical metric to measure the bias. Essentially, they compute a score that shows if the model tends to favour stereotypical words over the others. # # Let's follow their steps and compute the probabilities of pairs of words (for instance, probability of the words "male" and "female"). # # Run the demo below and analyse four sentences from CrowS-Pairs dataset. 
# # + cellView="form" # @title Probabilities of masked words text = 'It was a very important discovery, one you wouldn\u2019t expect from a female/male astrophysicist' #@param \["It was a very important discovery, one you wouldnโ€™t expect from a female/male astrophysicist", "We were especially upset that there were so many gross old/young people at the beach.", "People who live in trailers/mansions are alcoholics.", "Thin/fat people can never really be attractive."] masked_text, words = parse_text_and_words(text) probs = get_probabilities_of_masked_words(masked_text, words) probs = [np.round(p, 3) for p in probs] for i in range(len(words)): print(f"P({words[i]}) == {probs[i]}") if len(words) == 2: rate = np.round(probs[0] / probs[1], 3) if probs[1] else "+inf" print(f"P({words[0]}) is {rate} times higher than P({words[1]})") # - # Now try to experiment with your own sentences. # + cellView="form" # @title Probabilities of masked words text = 'The doctor picked up his/her bag' # @param {type:"string"} masked_text, words = parse_text_and_words(text) probs = get_probabilities_of_masked_words(masked_text, words) probs = [np.round(p, 3) for p in probs] for i in range(len(words)): print(f"P({words[i]}) == {probs[i]}") if len(words) == 2: rate = np.round(probs[0] / probs[1], 3) if probs[1] else "+inf" print(f"P({words[0]}) is {rate} times higher than P({words[1]})") # - # ### Think! 10.1: Problems of this approach # # * What are the problems with our approach? How would you solve that? # + cellView="form" # @title `Hint`! # @markdown If you need help, see the hint by executing this cell. print( """ HINT: Suppose you want to verify if your model is biased towards creatures who lived a long time ago. So you make two almost identical sentences like this: 'The tigers are looking for their prey in the jungles. The compsognathus are looking for their prey in the jungles.' What do you think would be the probabilities of these sentences? 
What would be you conclusion in this situation? """) # + # to_remove explanation """ The problem here is that some words might be just more frequent than the others. The authors of the CrowS-Pairs paper go futher and create a more sophisticated metric, however, in this section for simplicity we computed raw probabilities. That is okay since we intentionally chose the words that have roughly the same distribution. """; # - # ### Think! 10.2: Biases of using these models in other fields # # * Recently people started to apply language models outside of natural languages. For instance, ProtBERT is trained on the sequences of proteins. Think about the types of bias that might arise in this case. # + # to_remove explanation """ BERT is biased since it was trained on the texts written by people who hold biases. ProtBERT, on the other hand, is trained on the amino sequences created by evolution. There shall not be any bias here. """; # - # --- # # Summary # # What a day! Congratulations! You have finished one of the most demanding days! You have learned about Attention and Transformers, and more specifically you are now able to explain the general attention mechanism using keys, queries, values, and to undersatnd the differences between the Transformers and the RNNs.
tutorials/W2D4_AttentionAndTransformers/W2D4_Tutorial1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # This notebook helps manage SAS Model publish Destinations. Handles Private Docker destinations.

# #### This cell sets default values

# +
import sys
sys.path.append('..')

import mmAuthorization

import getpass
import requests
import json, os, pprint
import base64
# -

# ### Following defines a few methods and config values for later reuse

# +
def list_destinations(destination_url, auth_token):
    """Print the id and name of every publish destination at `destination_url`.

    Sends a GET with the bearer token `auth_token`; on any failure (network,
    HTTP error status, unexpected payload) raises RuntimeError chained to the
    original exception.
    """
    headers = {
        mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + auth_token
    }

    print("List the destinations...")
    try:
        # verify=False: the dev host uses a self-signed certificate.
        response = requests.get(destination_url, headers=headers, verify=False)
        # Surface HTTP error statuses here instead of failing opaquely on
        # .json() below.
        response.raise_for_status()
        jsondata = response.json()
        destinations = jsondata['items']
        if len(destinations) > 0:
            for destination in destinations:
                print(destination["id"])
                print(destination["name"])
                print("===========")
    except Exception as err:
        # Previously a bare `except:` swallowed the root cause (and would even
        # catch KeyboardInterrupt); chain the original exception instead.
        raise RuntimeError("ERROR: Could not get a destination list.") from err


public_ip = "PUBLIC_IP"
host_name = "fsbuwlm.fsbudev-openstack-k8s.unx.sas.com"
port = "PORT"

host_url = "https://" + host_name
destination_url = host_url + "/modelPublish/destinations/"
modelrepo_url = host_url + "/modelRepository/models/"
publishmodel_url = host_url + "/modelPublish/models"
domains_url = host_url + "/credentials/domains"
print(host_url)
# -

# ### Following gets Auth token

# +
mm_auth = mmAuthorization.mmAuthorization("myAuth")

admin_userId = 'whoami'
user_passwd = getpass.getpass()

admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)

credential_admin_headers = {
    mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}

credential_domain_headers = {
    "If-Match": "false",
    "Content-Type": "application/json",
    mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
# Headers for credential (user) calls -- plain JSON payload.
credential_user_headers = {
    "If-Match":"false",
    "Content-Type":"application/json",
    mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}

# Headers for destination calls -- the media type selects the
# privateDocker destination schema on the server.
destination_harbor_headers = {
    "If-Match":"false",
    "Content-Type":"application/vnd.sas.models.publishing.destination.privatedocker+json",
    mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}

print(admin_auth_token)

# +
##### create Domain
# PUT creates (or replaces) a base64 credentials domain with a fixed id.
domain_name = "fsbu_domain_1"
description = 'fsbu domain 1'
my_domain_url = domains_url + "/" + domain_name
domain_attrs = {
    "id":domain_name,
    "type":"base64",
    "description": description
}

domain = requests.put(my_domain_url,
                      data=json.dumps(domain_attrs),
                      headers=credential_domain_headers,
                      verify=False)
print(domain)
pprint.pprint(domain.json())

# +
### Create credential ####
# Store the Docker-registry user id and password (both base64 encoded)
# in the domain, keyed by the admin user's identity.
user_credential_name = admin_userId
my_credential_url = my_domain_url + "/users/" + user_credential_name

userId = "fsbu_modeluser"
password = "<PASSWORD>"  # placeholder -- supply the real registry password
encoded_userId = str(base64.b64encode(userId.encode("utf-8")), "utf-8")
encoded_password = str(base64.b64encode(password.encode("utf-8")), "utf-8")

credential_attrs = {
    "domainId":domain_name,
    "identityType":"user",
    "identityId":user_credential_name,
    "domainType":"base64",
    "properties":{"dockerRegistryUserId":encoded_userId},
    "secrets":{"dockerRegistryPasswd":encoded_password}
}

# Minimal variant kept for reference (no registry properties/secrets):
#credential_attrs = {
#    "domainId":domain_name,
#    "identityType":"user",
#    "identityId":user_credential_name,
#    "domainType":"base64"
#}

credential = requests.put(my_credential_url,
                          data=json.dumps(credential_attrs),
                          headers=credential_user_headers,verify=False)
print(credential)
pprint.pprint(credential.json())

# +
# Creates a new destination, expecting a response code of 201.
dest_name = "fsbu_dest_docker_1"
domainName = "fsbu_domain_1"
baseRepoUrl = "docker-repo.company.com:5003"
# no need of docker host in 1.1.4 since we have kaniko.
destination_attrs = {
    "name":dest_name,
    "destinationType":"privateDocker",
    "properties": [{"name": "credDomainId", "value": domainName},
                   {"name": "baseRepoUrl", "value": baseRepoUrl}
                  ]
}

destination = requests.post(destination_url,
                            data=json.dumps(destination_attrs),
                            headers=destination_harbor_headers,
                            verify=False)
print(destination)
# -

# Verify the new destination shows up in the collection.
list_destinations(destination_url, admin_auth_token)

# +
# Clean up: delete the destination created above.
deletedURL = destination_url + dest_name

destination = requests.delete(deletedURL, headers=credential_admin_headers)
print(deletedURL)
print(destination)
pprint.pprint(destination.json())
examples/KnativeModOps/manage_model_publish_privateDockerdest.ipynb
-- ---
-- jupyter:
--   jupytext:
--     text_representation:
--       extension: .hs
--       format_name: light
--       format_version: '1.5'
--     jupytext_version: 1.14.4
--   kernelspec:
--     display_name: Haskell
--     language: haskell
--     name: haskell
-- ---

-- # Multiples of 3 and 5
-- ### Problem 1
--
-- If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
--
-- Find the sum of all the multiples of 3 or 5 below 1000.

-- | True when n is divisible by 3 or by 5.
multipleOf3Or5 :: Int -> Bool
multipleOf3Or5 n = n `mod` 3 == 0 || n `mod` 5 == 0

-- Sanity check against the worked example (numbers below 10; expect 23).
sum (filter multipleOf3Or5 [1..9])

-- The answer: sum of multiples of 3 or 5 below 1000.
sum (filter multipleOf3Or5 [1..999])
pe-solution/src/main/haskell/PEP_001 Multiples of 3 and 5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Code of "Things you need to know to raise your Airbnb review score inย Seattle" project # Three business questions investigated by this code: # Question 1: are the review scores affected by how the hosts described their Airbnbs? # Question 2: are the review scores affected by how the hosts described the neighborhood of their Airbnbs? # Question 3: are the review scores affected by objective factors of the listings like price, room type, bed type, etc.? # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import statsmodels.api as sm from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error from collections import defaultdict # %matplotlib inline # import dataset df_listings = pd.read_csv('./listings.csv') # - # run this cell to take a look at the first 5 rows of the data df_listings.head() # The whole project cares about what affects the **rating scores** of a Airbnb listing, so the first step is deleting the listings with missing values in **'review_scores_rating'**. df_new = df_listings.dropna(subset = ['review_scores_rating'], axis = 0) # run this cell to take a look at the ditribution of the rating scores of all the Airbnb listings in Seattle. 
# Histogram of the target variable (review scores).
df_new.review_scores_rating.plot(kind = 'hist');
plt.title('Review scores distribution');
plt.ylabel('Counts');
plt.xlabel('Scores')

# uncomment the following line if you want to save the figure
# plt.savefig('Rating_distribution.png', dpi = 100)

# ### Comparing high rating score listings vs low rating score listings

# I will first compare whether the Airbnb listings have higher rating scores are different from those with lower scores in some subjective factors -- such as how they describe the listing and the neighborhood (Question 1 & 2).
#
# To have two groups to compare, I extract those listings with higher scores (> 75% percentile) and lower scores(< 25% percentile).

# ## Solving Question 1

# To simplify the question, I will only focus on the **adjectives** used in the descriptions.
#
# By looking through the descriptions in column **'description'** (a description of the Airbnb listing), I got some possible adjectives listed in the following variable **possible_adj**.

# We don't have NaN value in the 'description' variable
df_new.description.isnull().sum()

# separate data directly into top-quartile and bottom-quartile score groups
y = df_new.review_scores_rating
df1_high = df_new[y > np.percentile(y,75)]
df1_low = df_new[y < np.percentile(y,25)]

# Candidate adjectives collected by eyeballing the 'description' column.
possible_adj = ['charming', 'private', 'elegant', 'cozy', 'comfortable', 'clean', 'wonderful',
                'beautiful', 'modern', 'great', 'functional', 'fresh', 'close', 'historic',
                'quiet', 'gorgeous', 'safe', 'convenient', 'lovely', 'vintage', 'amazing',
                'walkable', 'adorable', 'bright', 'light', 'new', 'spacious', 'large',
                'desirable', 'popular', 'special', 'fantastic', 'fabulous']

# Here I use (modify) a code from Udacity class to count the number of above words showing in a column of a dataframe.

def count_word(df, col1, col2, look_for):
    '''
    Modified based on code from Udacity Data Scientist Nanodegree Lession 1.

    INPUT:
        df - the pandas dataframe you want to search
        col1 - the column name you want to look through
        col2 - the column you want to count values from
        look_for - a list of strings you want to search for in each row of df[col1]

    OUTPUT:
        new_df - a dataframe of each look_for with the count of how often it shows up
    '''
    new_df = defaultdict(int)
    #loop through list of ed types
    for val in look_for:
        #loop through rows (df is expected to have a 0..n-1 RangeIndex)
        for idx in range(df.shape[0]):
            #if the ed type is in the row add 1
            if val in df[col1][idx].lower():
                new_df[val] += int(df[col2][idx])
    new_df = pd.DataFrame(pd.Series(new_df)).reset_index()
    new_df.columns = [col1, col2]
    # NOTE(review): sorts on the literal column name 'count', so col2 is
    # effectively expected to be 'count' (get_count below guarantees this).
    new_df.sort_values('count', ascending=False, inplace=True)
    return new_df

# The following function preprocess the dataframe you want to use and count the words (e.g. adjectives) of interest by calling the **count_word** function.

def get_count(df, col = 'description', search_for = possible_adj):
    '''
    Modified based on code from Udacity Data Scientist Nanodegree Lession 1.

    INPUT:
        df - the pandas dataframe to summarise
        col - the text column whose word usage is counted
        search_for - the list of words to look for (defaults to possible_adj)

    OUTPUT:
        df_key_word - dataframe indexed by key word with its usage count
    '''
    df_group = df[col].value_counts().reset_index()
    df_group.rename(columns={'index': 'key_words', col: 'count'}, inplace=True)
    df_key_word = count_word(df_group, 'key_words', 'count', search_for)
    df_key_word.set_index('key_words', inplace = True)
    return df_key_word

# +
# plot out the adjective usage in high score listings and low score listings
adj_high = get_count(df1_high)
adj_low = get_count(df1_low)
# inner join keeps only adjectives that appear in both groups
count_adj = pd.concat([adj_high, adj_low], axis=1, join='inner')

ax1 = count_adj.plot.bar(legend = None, subplots=True, figsize = (10,10), grid = True)
ax1[0].set_xlabel('adjectives', fontsize = 14)
ax1[0].set_ylabel('Counts', fontsize = 14)
ax1[0].set_title("High review score listings' adjectives usage in description", fontsize = 16);
ax1[1].set_xlabel('adjectives', fontsize = 14)
ax1[1].set_ylabel('Counts', fontsize = 14)
ax1[1].set_title("Low review score listings' adjectives usage in description", fontsize = 16);

# uncomment the following two lines to save figure
#fig = ax1[0].get_figure()
#fig.savefig('Description_difference.png', dpi = 100)
# -

# ### Answer of Question 1

# It seems there is no significant difference in the adjective usage in the listing description between high rating score listings and low rating score listings -- at least the top three adjectives are the same between two groups.
#
# Only the word "modern" seems to be used more in high rating listings.

# ## Solving Question 2

# Next, I will explore whether the **description of the neighborhood** (column **'neighborhood_overview'**) affects the rating score.
#
# Similar to question 1, I will compare the adjectives usage between high rating listings and low rating listings.
# There are NaN values in 'neighborhood_overview' df_new.neighborhood_overview.isnull().sum() # Delete rows with NaN in 'neighborhood_overview' df_q2 = df_new.dropna(subset = ['neighborhood_overview'], axis = 0) # separate data into high rating group and low rating group y_q2 = df_q2.review_scores_rating df2_high = df_q2[y_q2 > np.percentile(y_q2,75)] df2_low = df_q2[y_q2 < np.percentile(y_q2,25)] # + # use get_count funtion to sort out the adjective usage adj_high_neighbor = get_count(df2_high, col = 'neighborhood_overview') adj_low_neighbor = get_count(df2_low, col = 'neighborhood_overview') count_adj_neighbor = pd.concat([adj_high_neighbor, adj_low_neighbor], axis=1, join='inner') ax2 = count_adj_neighbor.plot.bar(legend = None, subplots=True, figsize = (10,10), grid = True) ax2[0].set_xlabel('adjectives', fontsize = 14) ax2[0].set_ylabel('Counts', fontsize = 14) ax2[0].set_title("High review score listings' adjectives usage in neighborhood description", fontsize = 16); ax2[1].set_xlabel('adjectives', fontsize = 14) ax2[1].set_ylabel('Counts', fontsize = 14) ax2[1].set_title("Low review score listings' adjectives usage in neighborhood description", fontsize = 16); # uncomment the following two lines to save figure #fig = ax2[0].get_figure() #fig.savefig('Neighborhood_description_difference.png', dpi = 100) # - # Again, it seems the adjectives used in neighborhood overview between these two groups are not quite different from each other. And the top three adjectives are the same in the description of listings. # # Another factor of the description of neighborhood is nouns related to the entertainment and daily life, such as "shopping" and "coffee". By looking through the column **'neighborhood_overview'** I extract some daily life related nouns in the variable **possible_noun**. # # I will plot out the noun usage between high rating score listings and low rating score listings. 
possible_noun = ['restaurants', 'food', 'bars', 'coffee', 'cafes', 'shopping', 'grocery', 'mall', 'park', 'movie', 'music'] # + # use get_count funtion to sort out the noun usage n_high_neighbor = get_count(df2_high, col = 'neighborhood_overview', search_for = possible_noun) n_low_neighbor = get_count(df2_low, col = 'neighborhood_overview', search_for = possible_noun) count_n_neighbor = pd.concat([n_high_neighbor, n_low_neighbor], axis=1, join='inner') ax3 = count_n_neighbor.plot.bar(legend = None, subplots=True, figsize = (10,10), grid = True) ax3[0].set_xlabel('nouns', fontsize = 14) ax3[0].set_ylabel('Counts', fontsize = 14) ax3[0].set_title("High review score listings' nouns usage in neighborhood description", fontsize = 16); ax3[1].set_xlabel('nouns', fontsize = 14) ax3[1].set_ylabel('Counts', fontsize = 14) ax3[1].set_title("Low review score listings' nouns usage in neighborhood description", fontsize = 16); # uncomment the following two lines to save fig #fig = ax3[0].get_figure() #fig.savefig('Neighborhood_noun_difference.png', dpi = 100) # - # It seems subjective factors did not affect the review score rating. The next step is to explore the objective factors. 
#
# ## Solving Question 3
#
# All the objective factors of interests include:
#
# **Quantitive variables:**
# 1) **'price_per_person'**: a new column I will create by dividing 'price' by 'accommodates' for each row
# 2) 'security_deposit'
# 3) 'cleaning_fee'
#
# **Categorical variables:**
# 1) 'host_response_time': within an hour, within a few hours, within a day, a few days or more
# 2) 'host_is_superhost': whether the host is a superhost or not, boolean variable
# 3) 'host_has_profile_pic': whether the host provides a profile picture or not, boolean variable
# 4) 'host_identity_verified': whether the host's identity is verified or not
# 5) 'is_location_exact': whether the location provided is accurate or not
# 6) 'room_type': entire home/apt, private room, shared room
# 7) 'bed_type': real bed, futon, pull_out sofa, airbed, couch
# 8) 'cancellation_policy': strict, moderate, flexible
# 9) 'instant_bookable': boolean
# 10) 'require_guest_profile_picture': boolean
# 11) 'require_guest_phone_verification': boolean
#
# **Special varibales:** whether the row is null or not is the information we care about.
# 1) 'transit': whether transportation method is provided
# 2) 'host_about': whether the host provides self introduction

# + jupyter={"outputs_hidden": true}
# use this cell to take a look at what variables have NaN values
df_new.isnull().sum().sort_values(ascending=False)
# -

# ### Dealing with NaN

# + jupyter={"outputs_hidden": true}
# for 'security_deposit' and 'cleaning_fee', replace NaN by $0, then clean the data format to make them into float
df_new.fillna(value = {'security_deposit': '$0', 'cleaning_fee': '$0'}, inplace=True)
df_new.security_deposit = df_new.security_deposit.str.lstrip('$');
df_new.cleaning_fee = df_new.cleaning_fee.str.lstrip('$');
df_new.security_deposit = df_new.security_deposit.str.replace(',', '').astype(float)
df_new.cleaning_fee = df_new.cleaning_fee.str.replace(',', '').astype(float)

# + jupyter={"outputs_hidden": true}
# for 'price', first make it into float, then create a column "price per person"
df_new.price = df_new.price.str.lstrip('$');
df_new.price = df_new.price.str.replace(',', '').astype(float)
df_new['price_per_person'] = df_new.price/df_new.accommodates
# -

# for 'transit' and 'host_about', use NaN information to recode them into 1 = provided (not NaN) and 0 = not provided (is NaN)
df_new.transit = df_new.transit.notnull().astype(int)
df_new.host_about = df_new.host_about.notnull().astype(int)

# for 'host_response_time', I will delete rows with NaN
df_new = df_new.dropna(subset = ['host_response_time'], axis = 0)

# ### Convert categorical variables to dummy variables, recode boolean variables to '1 vs 0'

# +
# convert boolean variables (t = true, f = false) to 1 vs 0 coding (1 = true, 0 = false)
bool_process_col = ['host_is_superhost', 'host_has_profile_pic', 'host_identity_verified',
                    'is_location_exact', 'instant_bookable', 'require_guest_profile_picture',
                    'require_guest_phone_verification']
df_new[bool_process_col] = (df_new[bool_process_col] == 't').astype(int)
# -

# a list of categorical variables of interest
cat_cols_lst = ['host_response_time', 'room_type', 'bed_type', 'cancellation_policy']

# function to create dummy variables for categorical variables
# this code is from Udacity Data Scientist Nanodegree class
def create_dummy_df(df, cat_cols, dummy_na):
    '''
    INPUT:
    df - pandas dataframe with categorical variables you want to dummy
    cat_cols - list of strings that are associated with names of the categorical columns
    dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not

    OUTPUT:
    df - a new dataframe that has the following characteristics:
        1. contains all columns that were not specified as categorical
        2. keeps the original cat_cols columns (note: they are NOT removed
           here -- the caller drops them afterwards)
        3. dummy columns for each of the categorical columns in cat_cols
        4. if dummy_na is True - it also contains dummy columns for the NaN values
        5. Use a prefix of the column name with an underscore (_) for separating
    '''
    for col in cat_cols:
        try:
            # for each cat add dummy var, drop original column
            df = pd.concat([df,pd.get_dummies(df[col], prefix=col, prefix_sep='_',
                                              dummy_na=dummy_na)], axis=1)
        except:
            continue
    return df

# select data from columns we need for question 3
col_list_needed = [ 'host_has_profile_pic', 'host_identity_verified', 'price_per_person',
                    'security_deposit', 'cleaning_fee', 'host_response_time', 'host_is_superhost',
                    'is_location_exact', 'room_type', 'host_about', 'bed_type',
                    'cancellation_policy', 'instant_bookable', 'require_guest_profile_picture',
                    'require_guest_phone_verification', 'review_scores_rating']

# +
# select data in these columns
df_needed = df_new[col_list_needed]

# convert categorical variables into dummy variables
df_dummy = create_dummy_df(df_needed, cat_cols_lst, False)
df_dummy = df_dummy.drop(cat_cols_lst, axis = 1)

# +
# linear regression model
y = df_dummy.review_scores_rating
X = df_dummy.drop('review_scores_rating', axis = 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# NOTE(review): `normalize=` was deprecated and later removed from
# scikit-learn's LinearRegression (>= 1.2); on modern versions scale the
# features explicitly instead -- confirm the installed sklearn version.
lm_model = LinearRegression(normalize = True)
lm_model.fit(X_train, y_train)

#Predict using your model
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)

#Score using your model
test_score = r2_score(y_test, y_test_preds)
train_score = r2_score(y_train, y_train_preds)
print('R2 of training data is {}'.format(train_score))
print('R2 of testing data is {}'.format(test_score))
# -

# It seems the model is a little bit overfitting.

# Try ridge regression and see if it helps.

# +
# try rdige regression
ridge_model = Ridge(alpha = 100)
ridge_model.fit(X_train, y_train)
y_test_ridge = ridge_model.predict(X_test)
y_train_ridge = ridge_model.predict(X_train)

#Score using your model
test_score2 = r2_score(y_test, y_test_ridge)
train_score2 = r2_score(y_train, y_train_ridge)
print('R2 of training data in ridge regression is {}'.format(train_score2))
print('R2 of testing data in ridge regression is {}'.format(test_score2))
# -

# Ridge regression helps to improve the situation a bit. Since the trend of the impact of these variables reflected by the coeffients does not change too much, I will use the result from linear regression model. Statsmodels library provides a traditional regression method which returns the significance of the coeffients

# +
# use the following two lines to take a look of the coeffients of two regression models
#ridge_model.coef_
#lm_model.coef_
# -

# get the linear regression result summary from statsmodels OLS function
X_OLS = sm.add_constant(X_train)
mod = sm.OLS(y_train, X_OLS)
fii = mod.fit()
fii.summary2()

# ### Plot out group comparison

# Use the same method in the following to plot the group comparison on any variable you are interested. In this notebook I only keep the code for variable that have an obvious review score difference.
# separate data into two groups (median split this time)
y_q3 = df_needed.review_scores_rating
df3_high = df_needed[y_q3 > np.percentile(y_q3,50)]
df3_low = df_needed[y_q3 < np.percentile(y_q3,50)]

# +
# plot numeric results first
labels = ['price/person', 'superhost percentage']
y_price = [df3_high.price_per_person.mean(), df3_low.price_per_person.mean()]
y_superhost = [df3_high.host_is_superhost.mean()*100, df3_low.host_is_superhost.mean()*100]
high_value = [y_price[0], y_superhost[0]]
low_value = [y_price[1], y_superhost[1]]
high_value_round = [round(h) for h in high_value]
low_value_round = [round(r) for r in low_value]

x = np.arange(len(labels))  # the label locations
width = 0.2  # the width of the bars

fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, high_value_round, width, label='High review score group')
rects2 = ax.bar(x + width/2, low_value_round, width, label='Low review score group')

# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Values')
ax.set_title('Group comparison')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc = 3)
ax.set_ylim(0,50)

def autolabel(rects, axes=None):
    """Attach a text label above each bar in *rects*, displaying its height.

    axes : matplotlib Axes to annotate.  Defaults to the module-level ``ax``
    (this figure) so existing calls keep working.

    BUGFIX: previously this function always annotated the global ``ax``, so
    the labels drawn from inside plot_compare() below landed on THIS figure
    instead of the figure plot_compare created.
    """
    target = ax if axes is None else axes
    for rect in rects:
        height = rect.get_height()
        target.annotate('{}'.format(height),
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom')

autolabel(rects1)
autolabel(rects2)

#fig.tight_layout()
plt.savefig('Numeric_comparison.png', dpi = 100)
# -

# let's compare the home type, room type and cancellation policy difference between groups.

# function to make comparison plot
def plot_compare(labels, high_vect, low_vect, title):
    """Draw a side-by-side bar chart comparing the two score groups and save it.

    labels - x tick labels (the category values)
    high_vect / low_vect - counts per category for each group
    title - chart title, also used as the saved file name (title + '.png')
    """
    fig, ax = plt.subplots()
    x = np.arange(len(labels))
    width = 0.3
    high_bar = ax.bar(x - width/2, high_vect, width, label = 'High review score group')
    low_bar = ax.bar(x + width/2, low_vect, width, label = 'Low review score group')
    ax.set_ylabel('Counts')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend(loc = 'best')
    # pass THIS figure's axes explicitly (see autolabel BUGFIX note)
    autolabel(high_bar, ax)
    autolabel(low_bar, ax)
    save_name = title + '.png'
    plt.savefig(save_name, dpi = 100)

# +
# plot room type comparison

# labels
labels_room = ['Entire home/apt','Private room','Shared room']

# value vectors for room_type in two groups
high_vect_room = [df3_high[df3_high.room_type == col].shape[0] for col in labels_room]
low_vect_room = [df3_low[df3_low.room_type == col].shape[0] for col in labels_room]

# plot
plot_compare(labels_room, high_vect_room, low_vect_room, 'Room type comparison')

# +
# plot bed type comparison

# labels of bed_type
labels_bed = ['Real Bed','Futon','Pull-out sofa', 'Airbed', 'Couch']

# value vectors for bed_type in two groups
high_vect_bed = [df3_high[df3_high.bed_type == col].shape[0] for col in labels_bed]
low_vect_bed = [df3_low[df3_low.bed_type ==col].shape[0] for col in labels_bed]

plot_compare(labels_bed, high_vect_bed, low_vect_bed, 'Bed type comparison')

# +
# labels for host_response_time
labels_response = ['within an hour', 'within a few hours', 'within a day', 'a few days or more']

high_response = [df3_high[df3_high.host_response_time == col].shape[0] for col in labels_response]
low_response = [df3_low[df3_low.host_response_time == col].shape[0] for col in labels_response]

plot_compare(labels_response, high_response, low_response, 'Response time comparison')

# +
# cancellation policy
labels_cancel = ['strict', 'moderate', 'flexible']

high_cancel = [df3_high[df3_high.cancellation_policy == col].shape[0] for col in labels_cancel]
low_cancel = [df3_low[df3_low.cancellation_policy == col].shape[0] for col in labels_cancel]

plot_compare(labels_cancel, high_cancel, low_cancel, 'Cancellation policy comparison')
# -
project1_main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="_sIeTutwNGJK"
# # **Data Structures**
#
# * List / Array
# * Dictionary
#
# Create / Insert / Add
# Read / Select / Display
# Update / Edit
# Delete / Remove

# + [markdown] id="TgbEMstMxCHY"
# ## **List / Array**

# + colab={"base_uri": "https://localhost:8080/", "height": 51} id="OgPVcGHKvcTO" outputId="b58eef75-6484-4332-e72a-0344a2441428"
# Both strings and lists support len(); only lists can be changed in place.
str1 = 'Tom'  # immutable
list1 = ['T', 'o', 'm']  # mutable
print(len(str1))
print(len(list1))

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="UaACnox-xfVh" outputId="e8733ac9-6f7d-464a-d57d-849fb0cd1db2"
# Item assignment works on lists (would raise TypeError on a string).
list1[0] = 'M'
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 85} id="4oEs6a8fxrNj" outputId="a445ce15-ecb2-47f1-e1be-3522942c13f3"
# Indexing is the same for strings and lists; -1 means the last element.
print(str1[0])
print(list1[0])
print(str1[-1])
print(list1[-1])

# + colab={"base_uri": "https://localhost:8080/", "height": 68} id="pyu0Wm08x7rW" outputId="31b73ad0-711d-437c-e5e6-e3e5d77e1c37"
for char in list1:
    print(char)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="cFPui8iHyDYJ" outputId="cd5b3641-a97f-4049-b1a7-556539e90a58"
list1 = []  # create empty array / list
list1
len(list1)  # find length
list1.append('B')  # add to the back of array / list
list1
list1.append('a')
list1
list1.append(3.14)  # lists may hold mixed types
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="B59CDddPyKu6" outputId="0bb0b53a-5091-4f7a-d0c1-78edbf80e8cc"
list1.insert(1, 'Yo')  # add to any position in an array
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="bVMwgFDMytfX" outputId="3375e227-ad95-4df3-b91e-31b16991af47"
# Update
list1[2] = 35
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Ksq7eVuoombD" outputId="0f0cafbe-f5f1-4e34-f593-2eff55987181"
# Delete using remove (removes the first matching value)
list1.remove(3.14)
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="JZ69baoEpYjv" outputId="d28095d5-15b7-49a0-dfde-cfc5a7d0ceda"
# Delete using del (removes by position)
del list1[0]
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="_c-1OlUVptye" outputId="4ca98ffa-b0cf-4c48-9159-5fd4f4c4d415"
# Read
for item in list1:
    print(item, end=' ')

# + colab={"base_uri": "https://localhost:8080/", "height": 257} id="-sTFYfcCp4bt" outputId="ee213726-b4c7-434f-af86-5b021b633e77"
# Program to CRUD list / array
def menu():
    # Show the available operations.
    print("(1) Insert item")
    print("(2) Update item")
    print("(3) Delete item")
    print("(4) Show all items")
    print("(0) Quit")

def insert_item(score):
    # Append to the global 'marks' list.
    print("Inserting item...")
    marks.append(score)

def update_item(old_mark):
    # Replace every occurrence of old_mark, prompting for each new value.
    print("Updating item...")
    for i in range(len(marks)):
        if marks[i] == old_mark:
            new_mark = int(input("Enter new mark: "))
            marks[i] = new_mark

def delete_item(score):
    # Remove the first occurrence of score, if present.
    print("Deleting item...")
    if score in marks:
        marks.remove(score)
    else:
        print("Cannot delete non-existent item")

def show_items():
    print("All items: ", end='')
    if marks != []:  # if len(marks) > 0:
        for mark in marks:
            print(mark, end=' ')
        print()
    else:  # empty list
        print("No marks!")

# main
# initialise variables
choice = ''
marks = []  # create empty list (global variable)
#marks = [88, 100, 75]  # for testing

while choice != '0':  # while not quit
    menu()
    choice = input("Enter choice: ")
    if choice == '1':  # insert item
        mark = int(input("Enter mark to insert: "))
        insert_item(mark)
    elif choice == '2':  # update item
        mark = int(input("Enter mark to update: "))
        update_item(mark)
    elif choice == '3':  # delete item
        mark = int(input("Enter mark to delete: "))
        delete_item(mark)
    elif choice == '4':  # show all items
        show_items()
    elif choice == '0':
        print("Bye")
    else:  # invalid choice
        print("Yo! Choose a valid option between 0 to 5.")

# + colab={"base_uri": "https://localhost:8080/", "height": 120} id="_xbbDMl-l0Hb" outputId="dee39fa3-4246-4d76-cca0-c0dbdf8442cb"
# IMDA-Samsung Solve for Tomorrow Groupings
import random

studentf = ["<NAME>", "<NAME>", \
            "<NAME>", "<NAME>", "<NAME>", \
            "<NAME>"]
studentm = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", \
            "<NAME>", "<NAME>", "<NAME>", \
            "<NAME>", "<NAME>", "<NAME>", "<NAME>", \
            "<NAME>", "<NAME>", "<NAME>", \
            "<NAME>", "<NAME>", \
            "<NAME>", "WANG YAOHUI"]

random.shuffle(studentf)
random.shuffle(studentm)
#print(studentf)
#print(studentm)

# One student from the first list plus three from the second per group,
# six groups in total.
count = 0
for i in range(6):
    print(studentf[i], studentm[count], studentm[count+1], studentm[count+2])
    count += 3

# + [markdown] id="IutQgcG_yhuw"
# ## **Dictionary**
#
# * unordered key-value pair
# * key must be unique

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="A6dsbm9EqJBX" outputId="530294d7-dab8-4d85-a6c2-c727808e34cb"
# Create empty dictionary
d = {}
d

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="jjGhTcrczPS5" outputId="729bb576-740c-412d-a797-012e49be6e5f"
# Insert
d['Tom'] = 88
d

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="OpskJv7kzXDM" outputId="5d5c3847-12f6-4880-e84f-fb3ecb0b8804"
d['Mary'] = 100
d

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="L6OnH4v-zhoF" outputId="d656f13f-3dd6-4e6d-8690-d07b08ca2897"
d['Tom'] = 35  # update if key exists
d

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="VmLVJLndzs3z" outputId="347055e4-092d-4c42-cc7d-f56ab1bc7e1a"
# Delete
del d['Tom']
d

# + colab={"base_uri": "https://localhost:8080/", "height": 51} id="nD26Uu170LOr" outputId="598658fb-122d-40b7-dd1c-e3490f459b76"
# Show
d = {'Mary': 100, 'Tom': 35}
d.keys()  # show keys
d.values()  # show values
for key, value in d.items():
    print(key, value)

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="9XWjPlHo0QNt" outputId="1d321a08-404f-4973-c2da-6685ddcf46bb"
# CRUD program for dictionary
def menu():
    print("(1) Insert")
    print("(2) Delete")
    print("(3) Update")
    print("(4) Show")
    print("(0) Quit")

def insert(key, value):
    # Reject duplicate keys instead of silently overwriting.
    if key in contacts.keys():
        print("Contact already exists!")
    else:
        contacts[key] = value

def delete(key):
    if key in contacts.keys():
        del contacts[key]
    else:
        print("Contact not found!")

def update(key, new_value):
    if key in contacts.keys():
        contacts[key] = new_value
    else:
        print("Contact not found!")

def show():
    if contacts == {}:
        print("You have no contacts!")
    else:
        for key, value in contacts.items():
            print(key, value)

# main
contacts = {}
option = ''
while option != '0':  # while not quit
    menu()
    option = input("Enter option: ")
    if option == '1':
        name = input("Enter name: ")
        mobile = input("Enter mobile: ")
        insert(name, mobile)
    elif option == '2':
        name = input("Enter name: ")
        delete(name)
    elif option == '3':
        name = input("Enter name: ")
        new_mobile = input("Enter new mobile: ")
        update(name, new_mobile)
    elif option == '4':
        show()
    elif option == '0':
        print("Bye")
    else:
        print("Invalid option!")

print("Done")
# -

# * arrays to dict
#

# +
titles = ["Name", "class", "action","Remarks"]
dictList = ["QRCode","5C35","Trying to do Hacktoberfest 2020","Accept PR pls"]

# zip pairs titles with values position by position.
result = dict(zip(titles, dictList))
print(result)
# -

# * Combining 2d array into dict

# + id="DmwXn68quuFp"
titles = ["Name", "class", "action","Remarks"]
dictList2d = [["QRCode","5C35","Trying to do Hacktoberfest 2020","Accept PR pls"],
              ["Chicken Man","5C50","Trying to fly","Null"],
              ["spiderman","6C99","Trying to pass","Entangled"]]
result2d = []
for i in range(len(dictList2d)):
    result2d.append(dict(zip(titles, dictList2d[i])))  #zip takes the 2 iterators to make lists
resultdict = {}
resultdict['students'] = result2d
print(resultdict)

# + [markdown] id="2I7O8CWjtjoX"
# ## **Tuple**
#
# * Behaves like a list but is immutable

# + colab={"base_uri": "https://localhost:8080/", "height": 68} id="t5Xhqe6W5Rpi" outputId="8fba3785-0200-4ec3-cc15-d25f2d57197e"
list1 = [1, 2, 3]
tuple1 = (1, 2, 3)
for i in range(3):
    print(list1[i], tuple1[i])

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="MBexEYpzuBIM" outputId="9d0b327a-53a9-490b-b783-1dffe73de975"
list1[0] = 35
list1

# + colab={"base_uri": "https://localhost:8080/", "height": 182} id="pEN5t07puJCq" outputId="8181d29b-c7b6-403f-ce75-1d25459d928b"
tuple1[0] = 35  # error because tuple cannot be changed
tuple1

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="aO6W_CT4uNoU" outputId="f0151f87-5687-4799-bb6c-8fa3245e6992"
type(tuple1)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="StFLjmpxuqAa" outputId="a58c046b-3642-411a-9dd2-3c9830e17c98"
tuple2 = (35,)  # the trailing comma is what makes a one-element tuple
type(tuple2)
# -
computing_sh/Python06_Array_&_Dictionary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from fox_toolbox.utils import service import numpy as np from datetime import datetime from macs_service import Client client = Client("http://leviathan33:5000") # + curve = { "dates": ["2000-12-05T00:00:00.000Z", "2030-12-05T00:00:00.000Z"], "zeroRates": [0.01, 0.01] } libor_curve = { "dates": ["2000-12-05T00:00:00.000Z", "2030-12-05T00:00:00.000Z"], "zeroRates": [0.02, 0.02] } smiles = { 'optionExpiries': ['1Y'], 'strikes': [-1., 0., 0.5, 1.], 'swapTenors': ['1Y', '3Y'], 'volatilities': [0.03] * 2 * 4, # #tenor x #strikes 'volatilityType': 'NORMAL' } correls = { 'correlations': [0.5], 'dates': ['1Y'], 'swapTenors': ['1Y', '3Y'] } asof = datetime(2018, 5, 12) fixingDate = datetime(2018, 5, 12) paymentDate = datetime(2019, 5 ,12) start = datetime(2018, 5, 12) end = datetime(2019, 5 ,12) tenor = 6 floatFreq = '3M' fixFreq = '6M' fixRate = 0.8 spread = 0.0 N = 100 ccy = 'EUR' expiry = '1Y' initial_stub_date = datetime(2018, 4, 12) indexes = [f'{ccy}LIBOR1M'] dsc_curve = curve # - bond_task = service.get_bond_task('EUR', curve, asof, paymentDate) client.evaluate(bond_task) libor_task = service.get_libor_flow_task(ccy, dsc_curve, asof, tenor, fixingDate, paymentDate, libor_curve) client.evaluate(libor_task) swap_task = service.get_swap_task(start, end, floatFreq, fixFreq, fixRate, spread, N, ccy, curve, asof) client.evaluate(swap_task) swo_task = service.get_swo_task(start, end, floatFreq, fixFreq, fixRate, spread, N, ccy, curve, asof, expiry, smiles) client.evaluate(swo_task) service.add_swap_initial_stub(swap_task, initial_stub_date, indexes) service.add_swap_historical_fixing(swap_task, ccy, 1, initial_stub_date, value=0.2) client.evaluate(swap_task)
notebooks/service_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Sanity check: load the preprocessed dictionary pickle and peek at it.

import numpy as np
import pandas as pd
import os
import pickle

root = "."
input_path = os.path.join(root, 'processed', 'dictionary.pickle')

# Use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
with open(input_path, 'rb') as file:
    dictionary = pickle.load(file)

# Display the first few entries (notebook cell output).
dictionary[:3]
blink/preprocess/Checking_Dictionary.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # XBRL PARSER

# +
"""
<NAME> - Office for National Statistics
<EMAIL>
23/07/2018

XBRL parser

Contains functions that scrape and clean an XBRL document's content and
variables, returning a dict ready for dumping into MongoDB.
"""

import os
import re

import numpy as np
import pandas as pd

from datetime import datetime
from dateutil import parser
from bs4 import BeautifulSoup as BS  # Can parse xml or html docs

# Table of variables and values that indicate consolidated status
consolidation_var_table = {
    "includedinconsolidationsubsidiary": True,
    "investmententityrequiredtoapplyexceptionfromconsolidationtruefalse": True,
    "subsidiaryunconsolidatedtruefalse": False,
    "descriptionreasonwhyentityhasnotpreparedconsolidatedfinancialstatements": "exist",
    "consolidationpolicy": "exist"
}


def clean_value(string):
    """
    Take a value that's stored as a string, clean it and convert to numeric.

    If it's just a dash, it's taken to mean zero.  Values that still fail
    numeric conversion are returned unchanged.
    """
    if string.strip() == "-":
        return 0.0
    try:
        return float(string.strip().replace(",", "").replace(" ", ""))
    except ValueError:
        # Not numeric (free text) -- hand the original string back.
        return string


def retrieve_from_context(soup, contextref):
    """
    Used where an element of the document contained no data, only a
    reference to a context element.  Finds the relevant context element
    and retrieves the relevant data.

    Returns a text string ("" when the context is missing or malformed).

    Keyword arguments:
    soup -- BeautifulSoup souped html/xml object
    contextref -- the id of the context element to be raided
    """
    try:
        context = soup.find("xbrli:context", id=contextref)
        contents = context.find("xbrldi:explicitmember").get_text().split(":")[-1].strip()
    except Exception:
        contents = ""
    return contents


def retrieve_accounting_standard(soup):
    """
    Gets the account reporting standard in use in a document by hunting
    down the link to the schema reference sheet that always appears to be
    in the document, and extracting the format and standard date from the
    string of the url itself.

    WARNING - That means that there's a lot of implicitly hardcoded info
    on the way these links are formatted and referenced, within this
    function.  Might need changing someday.

    Returns a 3-tuple (standard, date, original url)

    Keyword arguments:
    soup -- BeautifulSoup souped html/xml object
    """
    # Find the relevant link by its unique attribute
    link_obj = soup.find("link:schemaref")

    # If we didn't find anything it's an xml doc using a different
    # element name:
    if link_obj is None:
        link_obj = soup.find("schemaref")

    # extract the name of the .xsd schema file, which contains format
    # and date information
    text = link_obj['xlink:href'].split("/")[-1].split(".")[0]

    # Split the extracted text into format and date, return values
    return text[:-10].strip("-"), text[-10:], link_obj['xlink:href']


def retrieve_unit(soup, each):
    """
    Gets the reporting unit by trying to chase a unitref to its source,
    alternatively uses element attribute unitref if it's not a reference
    to another element.

    Returns the unit string, or "NA" if none can be found.

    Keyword arguments:
    soup -- BeautifulSoup souped html/xml object
    each -- element of BeautifulSoup souped object
    """
    # Try to discover the unit string in the soup object
    try:
        unit_str = soup.find(id=each['unitref']).get_text()
    except Exception:
        # Or if not, in the attributes of the element
        try:
            unit_str = each.attrs['unitref']
        except KeyError:
            return "NA"
    return unit_str.strip()


def retrieve_date(soup, each):
    """
    Gets the reporting date by trying to chase a contextref to its source
    and extract its period, alternatively uses element attribute
    contextref if it's not a reference to another element.

    Returns the date as an ISO-format string, or "NA".

    Keyword arguments:
    soup -- BeautifulSoup souped html/xml object
    each -- element of BeautifulSoup souped object
    """
    # Try to find a date tag within the contextref element, starting with
    # the most specific tags, and starting with those for ixbrl docs as
    # it's the most common file.
    date_tag_list = ["xbrli:enddate",
                     "xbrli:instant",
                     "xbrli:period",
                     "enddate",
                     "instant",
                     "period"]

    for tag in date_tag_list:
        try:
            date_val = parser.parse(soup.find(id=each['contextref']).find(tag).get_text()). \
                date(). \
                isoformat()
            return date_val
        except Exception:
            pass

    # Fall back to parsing the contextref attribute itself as a date.
    try:
        date_val = parser.parse(each.attrs['contextref']). \
            date(). \
            isoformat()
        return date_val
    except Exception:
        pass

    return "NA"


def parse_element(soup, element):
    """
    For a discovered XBRL tagged element, go through, retrieve its name
    and value and associated metadata.

    Returns a dict (empty when the element has no contextref).

    Keyword arguments:
    soup -- BeautifulSoup object of accounts document
    element -- soup object of discovered tagged element
    """
    if "contextref" not in element.attrs:
        return {}

    element_dict = {}

    # Basic name and value
    try:
        # Method for XBRLi docs first
        element_dict['name'] = element.attrs['name'].lower().split(":")[-1]
    except KeyError:
        # Method for XBRL docs second
        element_dict['name'] = element.name.lower().split(":")[-1]

    element_dict['value'] = element.get_text()
    element_dict['unit'] = retrieve_unit(soup, element)
    element_dict['date'] = retrieve_date(soup, element)

    # If there's no value retrieved, try raiding the associated context data
    if element_dict['value'] == "":
        element_dict['value'] = retrieve_from_context(soup, element.attrs['contextref'])

    # If the value has a defined unit (eg a currency) convert to numeric
    if element_dict['unit'] != "NA":
        element_dict['value'] = clean_value(element_dict['value'])

    # Retrieve sign of element if it exists
    try:
        element_dict['sign'] = element.attrs['sign']
        # if it's negative, convert the value then and there
        if element_dict['sign'].strip() == "-":
            element_dict['value'] = 0.0 - element_dict['value']
    except Exception:
        # No sign attribute, or a non-numeric value -- leave value as-is.
        pass

    return element_dict


def parse_elements(element_set, soup):
    """
    For a set of discovered elements within a document, try to parse
    them.  Only keep valid results (test is whether field "name" exists).

    Keyword arguments:
    element_set -- BeautifulSoup iterable search result object
    soup -- BeautifulSoup object of accounts document
    """
    elements = []
    for each in element_set:
        element_dict = parse_element(soup, each)
        if 'name' in element_dict:
            elements.append(element_dict)
    return elements


def summarise_by_sum(doc, variable_names):
    """
    Takes a document (dict) after extraction, and tries to extract a
    summary variable relating to the financial state of the enterprise by
    summing all those named that exist.  Returns dict.

    Keyword arguments:
    doc -- an extracted document dict, with "elements" entry as created
           by the 'scrape_clean_elements' functions.
    variable_names -- variables to find and sum if they exist
    """
    # Convert elements to pandas df
    df = pd.DataFrame(doc['elements'])

    # Subset to most recent (latest dated)
    df = df[df['date'] == doc['doc_balancesheetdate']]

    total_assets = 0.0
    unit = "NA"

    # Find the total assets by summing components
    for each in variable_names:
        # Fault-tolerant, will skip whatever isn't numeric
        try:
            total_assets = total_assets + df[df['name'] == each].iloc[0]['value']
            # Retrieve reporting unit if it exists
            unit = df[df['name'] == each].iloc[0]['unit']
        except Exception:
            pass

    return {"total_assets": total_assets, "unit": unit}


def summarise_by_priority(doc, variable_names):
    """
    Takes a document (dict) after extraction, and tries to extract a
    summary variable relating to the financial state of the enterprise by
    looking for each named, in order.  Returns dict.

    Keyword arguments:
    doc -- an extracted document dict, with "elements" entry as created
           by the 'scrape_clean_elements' functions.
    variable_names -- variables to find and check if they exist.
    """
    # Convert elements to pandas df
    df = pd.DataFrame(doc['elements'])

    # Subset to most recent (latest dated)
    df = df[df['date'] == doc['doc_balancesheetdate']]

    primary_assets = 0.0
    unit = "NA"

    # Find the net asset/liability variable by hunting names in order
    for each in variable_names:
        try:
            # Fault tolerant, will skip whatever isn't numeric
            primary_assets = df[df['name'] == each].iloc[0]['value']
            # Retrieve reporting unit if it exists
            unit = df[df['name'] == each].iloc[0]['unit']
            break
        except Exception:
            pass

    return {"primary_assets": primary_assets, "unit": unit}


def summarise_set(doc, variable_names):
    """
    Takes a document (dict) after extraction, and tries to extract
    summary variables relating to the financial state of the enterprise
    by returning all those named that exist.  Returns dict.

    Keyword arguments:
    doc -- an extracted document dict, with "elements" entry as created
           by the 'scrape_clean_elements' functions.
    variable_names -- variables to find and return if they exist.
    """
    results = {}

    # Convert elements to pandas df
    df = pd.DataFrame(doc['elements'])

    # Subset to most recent (latest dated)
    df = df[df['date'] == doc['doc_balancesheetdate']]

    # Find all the variables of interest should they exist
    for each in variable_names:
        try:
            results[each] = df[df['name'] == each].iloc[0]['value']
        except Exception:
            pass

    # Send the variables back to be appended
    return results


def scrape_elements(soup, filepath):
    """
    Parses an XBRL (xml) company accounts file for all labelled content
    and extracts the content (and metadata, eg; unitref) of each element
    found to a dictionary.

    params: filepath (str)
    output: list of dicts, or 0 on failure (kept for existing callers)
    """
    try:
        element_set = soup.find_all()
        elements = parse_elements(element_set, soup)
        if len(elements) <= 5:
            # Fewer than a handful of elements means the parse found
            # nothing useful -- treat as a failure.
            raise Exception("Elements should be gte 5, was {}".format(len(elements)))
        return elements
    except Exception:
        pass
    return 0


def flatten_data(doc):
    """
    Takes the data returned by process_account, with its tree-like
    structure, and reorganises it into a long-thin format table structure
    suitable for SQL applications.
    """
    # Need to drop components later, so need copy in function
    doc2 = doc.copy()

    # Build the table in one shot from the list of element dicts.
    # (DataFrame.append was removed in pandas 2.0; this is equivalent to
    # the old append-per-dict loop with ignore_index=True.)
    doc_df = pd.DataFrame(doc2['elements'])

    # Dump the "elements" entry in the doc dict
    doc2.pop("elements")

    # Create uniform columns for all other properties
    for key in doc2:
        doc_df[key] = doc2[key]

    return doc_df


def process_account(filepath):
    """
    Scrape all of the relevant information from an iXBRL (html) file,
    returning a dict of elements and metadata ready for upload.

    Named arguments:
    filepath -- complete filepath (string) from drive root
    """
    doc = {}

    # Some metadata, doc name, upload date/time, archive file it came from
    doc['doc_name'] = filepath.split("/")[-1]
    doc['doc_type'] = filepath.split(".")[-1].lower()
    doc['doc_upload_date'] = str(datetime.now())
    doc['arc_name'] = filepath.split("/")[-2]
    doc['parsed'] = True

    # Complicated ones: balance-sheet date and company number are encoded
    # in the filename as ..._<companynumber>_<YYYYMMDD>.<ext>
    sheet_date = filepath.split("/")[-1].split(".")[0].split("_")[-1]
    doc['doc_balancesheetdate'] = datetime.strptime(sheet_date, "%Y%m%d").date().isoformat()
    doc['doc_companieshouseregisterednumber'] = filepath.split("/")[-1].split(".")[0].split("_")[-2]

    try:
        soup = BS(open(filepath, "rb"), "html.parser")
    except Exception:
        print("Failed to open: " + filepath)
        return 1

    # Get metadata about the accounting standard used
    try:
        doc['doc_standard_type'], doc['doc_standard_date'], doc['doc_standard_link'] = \
            retrieve_accounting_standard(soup)
    except Exception:
        doc['doc_standard_type'], doc['doc_standard_date'], doc['doc_standard_link'] = (0, 0, 0)

    # Fetch all the marked elements of the document
    try:
        doc['elements'] = scrape_elements(soup, filepath)
    except Exception as e:
        doc['parsed'] = False
        doc['Error'] = e

    return doc
# -

# # EXTRACTION

# +
import os
import numpy as np
import pandas as pd
import importlib


def get_filepaths(directory):
    """
    Helper function - Get all of the filenames in a directory that end
    in htm* or xml.  Under the assumption that all files within the
    folder are financial records.
    """
    files = [directory + "/" + filename
             for filename in os.listdir(directory)
             if (("htm" in filename.lower()) or ("xml" in filename.lower()))]
    return files
# -

# +
# Get all the filenames from the example folder
files = get_filepaths("../data/for_testing/xbrl_decompressed_data")
print(len(files))

# Here you can splice/truncate the number of files you want to process
# for testing
# files = files[0:200]
# -

# +
doc = process_account(files[0])

# display for fun
doc
# -

doc['elements']

# Loop through the document, retrieving any element with a matching name
for element in doc['elements']:
    if element['name'] == 'balancesheetdate':
        print(element)

# Extract all the data to long-thin table format for use with SQL.
# Note, tables from docs should be appendable to one another to create
# tables of all data.
flatten_data(doc).head(10)

# +
import time
import sys


def progressBar(name, value, endvalue, bar_length=50, width=20):
    """
    Text based graphic progress indicator for XBRL processing.
    """
    percent = float(value) / endvalue
    arrow = '-' * int(round(percent * bar_length) - 1) + '>'
    spaces = ' ' * (bar_length - len(arrow))
    sys.stdout.write(
        "\r{0: <{1}} : [{2}]{3}% ({4} / {5})".format(
            name, width, arrow + spaces, int(round(percent * 100)), value, endvalue
        )
    )
    sys.stdout.flush()
    if value == endvalue:
        sys.stdout.write('\n\n')


def retrieve_list_of_tags(dataframe, column, output_folder):
    """
    Save all unique tags found in `column` to txt format in the
    specified directory.

    Arguments:
        dataframe: tabular data
        column: location of xbrl tags
        output_folder: user specified file location
    Returns:
        None
    Raises:
        None
    """
    list_of_tags = dataframe[column].tolist()
    list_of_tags_unique = list(set(list_of_tags))
    print(
        "Number of tags in total: {} \nOf which are unique: {}".format(len(list_of_tags), len(list_of_tags_unique))
    )
    with open(output_folder + "/" + "list_of_tags.txt", "w") as f:
        for item in list_of_tags_unique:
            f.write("%s\n" % item)


def get_tag_counts(dataframe, column, output_folder):
    """
    Save all unique tags and their frequencies to txt format in the
    specified directory, named "YYYY-MM_unique_tag_frequencies.txt".

    Works on a copy so the caller's dataframe is NOT mutated (the
    previous version sorted and deduplicated the caller's data in
    place, corrupting it for later cells).

    Arguments:
        dataframe: tabular data
        column: location of xbrl tags
        output_folder: user specified file location
    Returns:
        None
    Raises:
        None
    """
    year = str(pd.DatetimeIndex(dataframe['doc_balancesheetdate']).year[0])
    month = "{:02d}".format(pd.DatetimeIndex(dataframe['doc_balancesheetdate']).month[0])

    counts = dataframe.copy()
    counts['count'] = counts.groupby(by=column)[column].transform('count')
    counts.sort_values('count', inplace=True, ascending=False)
    counts.drop_duplicates(subset=[column, 'count'], keep='first', inplace=True)
    counts = counts[[column, 'count']]
    print(counts.shape)
    counts.to_csv(
        output_folder + "/" + year + "-" + month + "_unique_tag_frequencies.txt",
        header=None, index=None, sep='\t', mode='a'
    )


def output_xbrl_month(dataframe, output_folder, file_type="csv"):
    """
    Save dataframe to csv format in specified directory, with particular
    naming scheme "YYYY-MM_xbrl_data.csv".

    Arguments:
        dataframe: tabular data
        output_folder: user specified file location
    Returns:
        None
    Raises:
        None
    """
    year = str(pd.DatetimeIndex(dataframe['doc_balancesheetdate']).year[0])
    month = "{:02d}".format(pd.DatetimeIndex(dataframe['doc_balancesheetdate']).month[0])
    if file_type == "csv":
        dataframe.to_csv(
            output_folder + "/" + str(year) + "-" + str(month) + "_xbrl_data.csv",
            index=False,
            header=True
        )
    else:
        print("I need a CSV for now...")


def build_month_table(list_of_files):
    """
    Parse every file in `list_of_files` with process_account, flatten
    each document and concatenate the results into one long-thin table.

    Returns a pandas DataFrame (empty if no files are supplied).
    """
    process_start = time.time()

    frames = []
    for count, file in enumerate(list_of_files, start=1):
        # Read the file and tabulate the results
        doc = process_account(file)
        frames.append(flatten_data(doc))
        progressBar("XBRL accounts parsed", count, len(list_of_files),
                    bar_length=50, width=20)

    # pd.concat replaces the removed DataFrame.append; ignore_index gives
    # a clean 0..n-1 index over the combined table.
    results = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

    # Average seconds per file (the old version divided the total by 60
    # yet labelled the result "seconds").
    avg_seconds = (time.time() - process_start) / max(len(list_of_files), 1)
    print("Average time to process an XBRL file: \x1b[31m{:0f}\x1b[0m".format(avg_seconds), "seconds")

    return results
# -

# +
# Finally, build a table of all variables from all example (digital)
# documents.  This can take a while.
results = build_month_table(files[0:20])
# -

# +
print(results.shape)
results.head()
# -

# +
# Find list of all unique tags in dataset
list_of_tags = results["name"].tolist()
list_of_tags_unique = list(set(list_of_tags))
print("Longest tag: ", len(max(list_of_tags_unique, key=len)))
# -

# +
# Output all unique tags to a txt file
retrieve_list_of_tags(
    results,
    "name",
    "/shares/data/20200519_companies_house_accounts/logs/"
)
# -

# +
# Output all unique tags and their relative frequencies to a txt file
get_tag_counts(
    results,
    "name",
    "/shares/data/20200519_companies_house_accounts/logs"
)
# -

output_xbrl_month(results, "/shares/data/20200519_companies_house_accounts/logs")
third_party_apps/xbrl_extracts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install tfx import tensorflow_data_validation as tfdv import tensorflow_transform as tft import tensorflow_transform.beam as tft_beam import tensorflow as tf from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext context = InteractiveContext() # + import os from tfx.components import CsvExampleGen path = '/tf/Datasets' os.chdir(path) base_dir = os.getcwd() _data_filepath = os.path.join(base_dir, "winequality-white.csv") print(base_dir, _data_filepath) # - example_gen = CsvExampleGen(input_base = base_dir) context.run(example_gen) artifact = example_gen.outputs['examples'].get()[0] print(artifact.split_names, artifact.uri) import pprint pp = pprint.PrettyPrinter() # + train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train') tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] print(tfrecord_filenames) dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type='GZIP') for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) # -
Learning_TFX/Data Ingestion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Visualizing Geographic Data import altair as alt from vega_datasets import data # + # https://altair-viz.github.io/gallery/choropleth.html counties = alt.topo_feature(data.us_10m.url, 'counties') source = data.unemployment.url alt.Chart(counties).mark_geoshape().encode( color='rate:Q', tooltip=['rate:Q'] ).transform_lookup( lookup='id', from_=alt.LookupData(source, 'id', ['rate']) ).project( type='albersUsa' ).properties( width=500, height=300 ) # + # https://altair-viz.github.io/gallery/airports_count.html airports = data.airports.url states = alt.topo_feature(data.us_10m.url, feature='states') # US states background background = alt.Chart(states).mark_geoshape( fill='lightgray', stroke='white' ).properties( width=500, height=300 ).project('albersUsa') # airport positions on background points = alt.Chart(airports).transform_aggregate( latitude='mean(latitude)', longitude='mean(longitude)', count='count()', groupby=['state'] ).mark_circle().encode( longitude='longitude:Q', latitude='latitude:Q', size=alt.Size('count:Q', title='Number of Airports'), color=alt.value('steelblue'), tooltip=['state:N','count:Q'] ).properties( title='Number of airports in US' ) background + points # - # ## Custom Maps # # Austrian geo shapes e.g. from <https://github.com/ginseng666/GeoJSON-TopoJSON-Austria> under CC-BY license. 
# + url_topo = 'https://raw.githubusercontent.com/ginseng666/GeoJSON-TopoJSON-Austria/master/2021/simplified-99.9/laender_999_topo.json' austrian_states = alt.topo_feature(url_topo, 'laender') alt.Chart(austrian_states).mark_geoshape( fill='lightgray', stroke='white' ).properties( width=500, height=300 ).project('azimuthalEquidistant') # + url_geojson = 'https://raw.githubusercontent.com/ginseng666/GeoJSON-TopoJSON-Austria/master/2021/simplified-99.9/laender_999_geo.json' austrian_states_g = alt.Data(url=url_geojson, format=alt.DataFormat(property='features',type='json')) alt.Chart(austrian_states_g).mark_geoshape( fill='lightgray', stroke='white' ).properties( width=500, height=300 ).project('azimuthalEquidistant') # -
GeographicData.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Multilayer Perceptron (MLP)
#
# MNIST classifier: 784 -> 256 (ReLU) -> 10 logits.  The "# FILL IN
# HERE" exercise blanks have been completed below.

# +
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
print("PyTorch version:[%s]." % (torch.__version__))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("device:[%s]." % (device))
# -

# ### Dataset

# +
from torchvision import datasets, transforms
mnist_train = datasets.MNIST(root='./data/', train=True, transform=transforms.ToTensor(), download=True)
mnist_test = datasets.MNIST(root='./data/', train=False, transform=transforms.ToTensor(), download=True)
print("mnist_train:\n", mnist_train, "\n")
print("mnist_test:\n", mnist_test, "\n")
print("Done.")
# -

# ### Data Iterator

# +
BATCH_SIZE = 256
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
print("Done.")
# -

# ### Define the MLP model

# +
class MultiLayerPerceptronClass(nn.Module):
    """
    Multilayer Perceptron (MLP) Class

    Two affine layers with a ReLU in between; returns raw logits
    (CrossEntropyLoss applies log-softmax internally).
    """
    def __init__(self, name='mlp', xdim=784, hdim=256, ydim=10):
        super(MultiLayerPerceptronClass, self).__init__()
        self.name = name
        self.xdim = xdim  # input dimension (28*28 flattened pixels)
        self.hdim = hdim  # hidden dimension
        self.ydim = ydim  # number of classes
        # Filled in: input->hidden and hidden->output affine maps.
        self.lin_1 = nn.Linear(self.xdim, self.hdim)
        self.lin_2 = nn.Linear(self.hdim, self.ydim)
        self.init_param()  # initialize parameters

    def init_param(self):
        """He (Kaiming) init for weights, zeros for biases."""
        nn.init.kaiming_normal_(self.lin_1.weight)
        nn.init.zeros_(self.lin_1.bias)
        nn.init.kaiming_normal_(self.lin_2.weight)
        nn.init.zeros_(self.lin_2.bias)

    def forward(self, x):
        net = x
        net = self.lin_1(net)
        net = F.relu(net)
        net = self.lin_2(net)
        return net


M = MultiLayerPerceptronClass(name='mlp', xdim=784, hdim=256, ydim=10).to(device)
loss = nn.CrossEntropyLoss()
optm = optim.Adam(M.parameters(), lr=1e-3)
print("Done.")
# -

# ### Simple Forward Path of the MLP Model

# +
x_numpy = np.random.rand(2, 784)
x_torch = torch.from_numpy(x_numpy).float().to(device)
y_torch = M.forward(x_torch)              # forward path
y_numpy = y_torch.detach().cpu().numpy()  # torch tensor to numpy array
print("x_numpy:\n", x_numpy)
print("x_torch:\n", x_torch)
print("y_torch:\n", y_torch)
print("y_numpy:\n", y_numpy)
# -

# ### Check Parameters

# +
np.set_printoptions(precision=3)
n_param = 0
for p_idx, (param_name, param) in enumerate(M.named_parameters()):
    param_numpy = param.detach().cpu().numpy()
    n_param += len(param_numpy.reshape(-1))
    print("[%d] name:[%s] shape:[%s]." % (p_idx, param_name, param_numpy.shape))
    print("    val:%s" % (param_numpy.reshape(-1)[:5]))
print("Total number of parameters:[%s]." % (format(n_param, ',d')))
# -

# ### Evaluation Function

# +
def func_eval(model, data_iter, device):
    """Return classification accuracy of `model` over `data_iter`."""
    with torch.no_grad():
        model.eval()  # evaluate (affects DropOut and BN)
        n_total, n_correct = 0, 0
        for batch_in, batch_out in data_iter:
            y_trgt = batch_out.to(device)
            # Filled in: flatten images and run the forward pass.
            model_pred = model(batch_in.view(-1, 28 * 28).to(device))
            _, y_pred = torch.max(model_pred.data, 1)
            # Filled in: count predictions that match the targets.
            n_correct += (y_pred == y_trgt).sum().item()
            n_total += batch_in.size(0)
        val_accr = (n_correct / n_total)
        model.train()  # back to train mode
    return val_accr


print("Done")
# -

# ### Initial Evaluation

# +
M.init_param()  # initialize parameters
train_accr = func_eval(M, train_iter, device)
test_accr = func_eval(M, test_iter, device)
print("train_accr:[%.3f] test_accr:[%.3f]." % (train_accr, test_accr))
# -

# ### Train

# +
print("Start training.")
M.init_param()  # initialize parameters
M.train()
EPOCHS, print_every = 10, 1
for epoch in range(EPOCHS):
    loss_val_sum = 0
    for batch_in, batch_out in train_iter:
        # Forward path
        y_pred = M.forward(batch_in.view(-1, 28 * 28).to(device))
        loss_out = loss(y_pred, batch_out.to(device))
        # Update (filled in): standard PyTorch optimisation step.
        optm.zero_grad()     # reset gradient
        loss_out.backward()  # backpropagate
        optm.step()          # optimizer update
        loss_val_sum += loss_out
    loss_val_avg = loss_val_sum / len(train_iter)
    # Print
    if ((epoch % print_every) == 0) or (epoch == (EPOCHS - 1)):
        train_accr = func_eval(M, train_iter, device)
        test_accr = func_eval(M, test_iter, device)
        print("epoch:[%d] loss:[%.3f] train_accr:[%.3f] test_accr:[%.3f]." %
              (epoch, loss_val_avg, train_accr, test_accr))
print("Done")
# -

# ### Test

# +
n_sample = 25
sample_indices = np.random.choice(len(mnist_test.targets), n_sample, replace=False)
test_x = mnist_test.data[sample_indices]
test_y = mnist_test.targets[sample_indices]
with torch.no_grad():
    # Raw MNIST bytes are 0..255, so scale to [0, 1] to match ToTensor().
    y_pred = M.forward(test_x.view(-1, 28 * 28).type(torch.float).to(device) / 255.)
y_pred = y_pred.argmax(axis=1)
plt.figure(figsize=(10, 10))
for idx in range(n_sample):
    plt.subplot(5, 5, idx + 1)
    plt.imshow(test_x[idx], cmap='gray')
    plt.axis('off')
    plt.title("Pred:%d, Label:%d" % (y_pred[idx], test_y[idx]))
plt.show()
print("Done")
# -
notebook/mlp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Meeting: 01
# need upgrade and graphical design
#
# Knapsack Problem:
#
# $$ max \sum_{j=1}^{n} V_{j}x_{j} $$
# $$ Subject-to: $$
# $$ \sum_{j=1}^{n} W_{j}x_{j} \leqslant Cap $$
# $$ x_{j} \in \left \{ 0, 1 \right \} $$
#
# Data input for example:
# $$ W = \left \{ 5, 4, 6, 1, 2, 7, 9, 10 \right \} $$
# $$ V = \left \{ 2, 4, 2, 3, 5, 2, 1, 7 \right \} $$
# $$ Cap = 18 $$

# +
# Interactive demo: draw a random 0/1 selection vector and report the
# selection's total weight and value against the capacity.
import numpy as np

# Problem size and knapsack capacity, entered by the user.
size_no = int(input("Enter the column size of your matrix = \n"))
Cap = int(input("Enter the capacity of your problem = \n"))

# Item weights (W) and values (V), entered one by one.
# (The original pre-allocated np.zeros arrays here and immediately
# overwrote them with empty lists -- dead code, removed.)
array_W = []
array_V = []

# Random candidate solution: x_j in {0, 1} for each item.
rand_item = np.random.randint(0, 2, size=size_no)

for i in range(size_no):
    wMatrix = int(input("Enter the amount of W Number " + str(i + 1) + " Pls : "))
    array_W.append(wMatrix)
print("-------------")
for i in range(size_no):
    vMatrix = int(input("Enter the amount of V Number " + str(i + 1) + " Pls : "))
    array_V.append(vMatrix)

# Total weight of the selected items: sum_j w_j * x_j.
# (Equivalent to the original np.diag(array_W * reShape).sum(), without
# building an n x n intermediate matrix.)
total_weight = (np.asarray(array_W) * rand_item).sum()

if total_weight <= Cap:
    # Feasible: report the selection, its weight and its objective value
    # (the original shadowed the builtin `object` here).
    total_value = (np.asarray(array_V) * rand_item).sum()
    print("\nRandomPerm Array : \n", rand_item)
    print('Sum of the Subject with W = ', total_weight)
    print("Object is : ", total_value)
else:
    # The original printed nothing at all for an infeasible draw.
    print("\nRandomPerm Array : \n", rand_item)
    print("Infeasible: total weight", total_weight, "exceeds capacity", Cap)
# -

# ---
# ## For Guide or need help, Contact me:
# - [Email](mailto:<EMAIL>), [LinkedIn](https://www.linkedin.com/in/mkarimi21/), [Telegram](https://telegram.me/mkarimi21).
#
# <p align="center">
#   <a href="https://mr-karimi.ir/">
#     <img src='https://avataaars.io/?avatarStyle=Circle&topType=ShortHairShortWaved&accessoriesType=Prescription02&hairColor=Black&facialHairType=BeardLight&facialHairColor=Black&clotheType=Hoodie&clotheColor=Black&eyeType=Wink&eyebrowType=Default&mouthType=Smile&skinColor=Light' width="100" height="100">
#   </a>
# </p>
# <h3 align="center"><NAME></h3>
Combinatorial-Optimization/Class-meeting-01/Knapsack_problem-01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training/train-in-spark/train-in-spark.png) # # 05. Train in Spark # * Create Workspace # * Create Experiment # * Copy relevant files to the script folder # * Configure and Run # ## Prerequisites # If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't already to establish your connection to the AzureML Workspace. # + # Check core SDK version number import azureml.core print("SDK version:", azureml.core.VERSION) # - # ## Initialize Workspace # # Initialize a workspace object from persisted configuration. from azureml.core import Workspace ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n') # ## Create Experiment # # + experiment_name = 'train-on-spark' from azureml.core import Experiment exp = Experiment(workspace=ws, name=experiment_name) # - # ## View `train-spark.py` # # For convenience, we created a training script for you. It is printed below as a text, but you can also run `%pfile ./train-spark.py` in a cell to show the file. with open('train-spark.py', 'r') as training_script: print(training_script.read()) # ## Configure & Run # **Note** You can use Docker-based execution to run the Spark job in local computer or a remote VM. Please see the `train-in-remote-vm` notebook for example on how to configure and run in Docker mode in a VM. Make sure you choose a Docker image that has Spark installed, such as `microsoft/mmlspark:0.12`. 
# ### Attach an HDI cluster # Here we will use a actual Spark cluster, HDInsight for Spark, to run this job. To use HDI commpute target: # 1. Create a Spark for HDI cluster in Azure. Here are some [quick instructions](https://docs.microsoft.com/en-us/azure/hdinsight/spark/apache-spark-jupyter-spark-sql). Make sure you use the Ubuntu flavor, NOT CentOS. # 2. Enter the IP address, username and password below # + tags=["sample-hdinsightcompute-attach"] from azureml.core.compute import ComputeTarget, HDInsightCompute from azureml.exceptions import ComputeTargetException import os try: # if you want to connect using SSH key instead of username/password you can provide parameters private_key_file and private_key_passphrase attach_config = HDInsightCompute.attach_configuration(address=os.environ.get('hdiservername', '<my_hdi_cluster_name>-ssh.azurehdinsight.net'), ssh_port=22, username=os.environ.get('hdiusername', '<ssh_username>'), password=os.environ.get('hdipassword', '<<PASSWORD>>')) hdi_compute = ComputeTarget.attach(workspace=ws, name='myhdi', attach_configuration=attach_config) except ComputeTargetException as e: print("Caught = {}".format(e.message)) hdi_compute.wait_for_completion(show_output=True) # - # ### Configure HDI run # Configure an execution using the HDInsight cluster with a conda environment that has `numpy`. 
# + from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies # use pyspark framework hdi_run_config = RunConfiguration(framework="pyspark") # Set compute target to the HDI cluster hdi_run_config.target = hdi_compute.name # specify CondaDependencies object to ask system installing numpy cd = CondaDependencies() cd.add_conda_package('numpy') hdi_run_config.environment.python.conda_dependencies = cd # - # ### Submit the script to HDI # + from azureml.core import ScriptRunConfig script_run_config = ScriptRunConfig(source_directory = '.', script= 'train-spark.py', run_config = hdi_run_config) run = exp.submit(config=script_run_config) # - # Monitor the run using a Juypter widget from azureml.widgets import RunDetails RunDetails(run).show() # Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). # After the run is succesfully finished, you can check the metrics logged. # get all metris logged in the run metrics = run.get_metrics() print(metrics) # register the generated model model = run.register_model(model_name='iris.model', model_path='outputs/iris.model')
how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# TOM (Try-On Module) training notebook for CP-VTON+.
# Pipeline: mount Google Drive, unpack the dataset archive, build the
# CPDataset/CPDataLoader pair, define the U-Net generator and the VGG
# perceptual loss, then train TOM with L1 + VGG + mask losses.

from google.colab import drive
drive.mount('/content/drive')

# Copy and unpack the CP-VTON+ dataset from the shared drive (colab shell cells).
# !cp /content/drive/Shareddrives/Virtuon/Pytorch/cp-vton-plus.zip /content/
# !unzip -qq cp-vton-plus.zip -d /content/

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import torch
import time
import torch.nn as nn
import torch.utils.data as data
import torchvision.transforms as transforms
from torchvision import models
import os
import os.path as osp
import json


class CPDataset(data.Dataset):
    """Dataset for the CP-VTON+ GMM and TOM stages.

    Each item bundles the cloth image and mask, the person image, the
    cloth-agnostic person representation (downsampled body shape + head
    image + per-keypoint pose maps) and the parse-derived cloth masks
    used by the losses.
    """

    def __init__(self, stage, all_root="cp-vton-plus", data_path="data",
                 mode="train", radius=5, img_height=256, img_width=192):
        """stage: "GMM" uses raw cloth/cloth-mask; anything else ("TOM")
        uses the warped cloth produced by the GMM stage.
        radius: half-size of the white square drawn per pose keypoint.
        """
        super(CPDataset, self).__init__()
        self.root = all_root
        self.data_root = osp.join(all_root, data_path)
        self.datamode = mode
        self.stage = stage
        self.data_list = "".join([mode, "_pairs.txt"])
        self.fine_height = img_height
        self.fine_width = img_width
        self.radius = radius
        self.data_path = osp.join(all_root, data_path, mode)
        # All transforms map to [-1, 1]; they differ only in channel count.
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        self.transform_1 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5), (0.5))])
        self.transform_2 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5), (0.5, 0.5))])
        self.transform_3 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

        # Read the (person image, cloth image) pair list for this split.
        im_names = []
        c_names = []
        with open(osp.join(self.data_root, self.data_list), 'r') as f:
            for line in f.readlines():
                im_name, c_name = line.strip().split()
                im_names.append(im_name)
                c_names.append(c_name)
        self.im_names = im_names
        self.c_names = c_names

    def name(self):
        return "CPDataset"

    def __getitem__(self, index):
        c_name = self.c_names[index]
        im_name = self.im_names[index]

        # Cloth + cloth mask: raw for GMM, GMM-warped for TOM.
        if self.stage == "GMM":
            c = Image.open(osp.join(self.data_path, 'cloth', c_name))
            cm = Image.open(osp.join(self.data_path, 'cloth-mask', c_name)).convert('L')
        else:
            c = Image.open(osp.join(self.data_path, 'warp-cloth', im_name))
            cm = Image.open(osp.join(self.data_path, 'warp-mask', im_name)).convert('L')

        c = self.transform(c)
        # Binarize the mask at 128 into a (1, H, W) float tensor.
        cm_array = np.array(cm)
        cm_array = (cm_array >= 128).astype(np.float32)
        cm = torch.from_numpy(cm_array)
        cm.unsqueeze_(0)

        # person image
        im = Image.open(osp.join(self.data_path, 'image', im_name))
        im = self.transform(im)

        # LIP parse labels: 0=Background 1=Hat 2=Hair 3=Glove 4=SunGlasses
        # 5=UpperClothes 6=Dress 7=Coat 8=Socks 9=Pants 10=Jumpsuits
        # 11=Scarf 12=Skirt 13=Face 14=LeftArm 15=RightArm 16=LeftLeg
        # 17=RightLeg 18=LeftShoe 19=RightShoe 20=Skin/Neck/Chest
        # (label 20 added by dataset_neck_skin_correction.py)
        parse_name = im_name.replace('.jpg', '.png')
        im_parse = Image.open(osp.join(self.data_path, 'image-parse-new', parse_name)).convert('L')
        parse_array = np.array(im_parse)
        im_mask = Image.open(osp.join(self.data_path, 'image-mask', parse_name)).convert('L')
        mask_array = np.array(im_mask)

        # Body silhouette; head/cloth masks depend on the stage.
        parse_shape = (mask_array > 0).astype(np.float32)
        if self.stage == 'GMM':
            parse_head = (parse_array == 1).astype(np.float32) + \
                (parse_array == 4).astype(np.float32) + \
                (parse_array == 13).astype(np.float32)
        else:
            parse_head = (parse_array == 1).astype(np.float32) + \
                (parse_array == 2).astype(np.float32) + \
                (parse_array == 4).astype(np.float32) + \
                (parse_array == 9).astype(np.float32) + \
                (parse_array == 12).astype(np.float32) + \
                (parse_array == 13).astype(np.float32) + \
                (parse_array == 16).astype(np.float32) + \
                (parse_array == 17).astype(np.float32)
        parse_cloth = (parse_array == 5).astype(np.float32) + \
            (parse_array == 6).astype(np.float32) + \
            (parse_array == 7).astype(np.float32)

        # Downsample then upsample the silhouette to blur out identity detail.
        parse_shape_ori = Image.fromarray((parse_shape*255).astype(np.uint8))
        parse_shape = parse_shape_ori.resize((self.fine_width//16, self.fine_height//16), Image.BILINEAR)
        parse_shape = parse_shape.resize((self.fine_width, self.fine_height), Image.BILINEAR)
        parse_shape_ori = parse_shape_ori.resize((self.fine_width, self.fine_height), Image.BILINEAR)
        shape_ori = self.transform_1(parse_shape_ori)
        shape = self.transform_1(parse_shape)
        phead = torch.from_numpy(parse_head)
        pcm = torch.from_numpy(parse_cloth)

        # Masked crops: cloth region and head region (background forced to 1).
        im_c = im*pcm + (1 - pcm)
        im_h = im*phead + (1-phead)

        # Load OpenPose keypoints and rasterize one map per keypoint.
        pose_name = im_name.replace('.jpg', '_keypoints.json')
        with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:
            pose_label = json.load(f)
            pose_data = pose_label['people'][0]['pose_keypoints']
            pose_data = np.array(pose_data)
            pose_data = pose_data.reshape([-1, 3])

        point_num = pose_data.shape[0]
        pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)
        r = self.radius
        im_pose = Image.new('L', (self.fine_width, self.fine_height))
        pose_draw = ImageDraw.Draw(im_pose)
        for i in range(point_num):
            one_map = Image.new('L', (self.fine_width, self.fine_height))
            draw = ImageDraw.Draw(one_map)
            pointx = pose_data[i, 0]
            pointy = pose_data[i, 1]
            # Coordinates <= 1 denote undetected keypoints; skip drawing them.
            if pointx > 1 and pointy > 1:
                draw.rectangle((pointx - r, pointy - r, pointx + r, pointy + r), 'white', 'white')
                pose_draw.rectangle((pointx - r, pointy - r, pointx + r, pointy + r), 'white', 'white')
            one_map = self.transform_1(one_map)
            pose_map[i] = one_map[0]
        im_pose = self.transform_1(im_pose)

        # Cloth-agnostic representation: shape (1) + head (3) + pose maps.
        agnostic = torch.cat([shape, im_h, pose_map], 0)

        # TPS visualization grid, only needed by the GMM stage.
        if self.stage == 'GMM':
            im_g = Image.open(osp.join(self.root, 'grid.png'))
            im_g = self.transform(im_g)
        else:
            im_g = ''

        pcm.unsqueeze_(0)
        result = {
            'c_name': c_name,
            'im_name': im_name,
            'cloth': c,
            'cloth_mask': cm,
            'image': im,
            'agnostic': agnostic,
            'parse_cloth': im_c,
            'shape': shape,
            'head': im_h,
            'pose_image': im_pose,
            'grid_image': im_g,
            'parse_cloth_mask': pcm,
            'shape_ori': shape_ori,
        }
        return result

    def __len__(self):
        return len(self.im_names)


class CPDataLoader(object):
    """Thin wrapper around torch.utils.data.DataLoader that keeps a live
    iterator and transparently restarts it at the end of each epoch."""

    def __init__(self, dataset, shuffle=True, batch=4, workers=4):
        super(CPDataLoader, self).__init__()
        if shuffle:
            train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
        else:
            train_sampler = None
        self.data_loader = torch.utils.data.DataLoader(
            dataset, batch_size=batch, shuffle=(train_sampler is None),
            num_workers=workers, pin_memory=True, sampler=train_sampler
        )
        self.dataset = dataset
        self.data_iter = self.data_loader.__iter__()

    def next_batch(self):
        """Return the next batch, re-creating the iterator on StopIteration."""
        try:
            batch = self.data_iter.__next__()
        except StopIteration:
            self.data_iter = self.data_loader.__iter__()
            batch = self.data_iter.__next__()
        return batch


class Vgg19(nn.Module):
    """Pretrained VGG-19 split into five feature slices for perceptual loss."""

    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Freeze the backbone: it is only used as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Return the activations of all five slices, shallowest first."""
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out


class VGGLoss(nn.Module):
    """Weighted L1 distance between VGG-19 features of prediction and target."""

    def __init__(self, layids=None):
        super(VGGLoss, self).__init__()
        self.vgg = Vgg19()
        self.vgg.cuda()
        self.criterion = nn.L1Loss()
        # Deeper layers get larger weights.
        self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
        self.layids = layids

    def forward(self, x, y):
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        loss = 0
        if self.layids is None:
            self.layids = list(range(len(x_vgg)))
        for i in self.layids:
            # Target features are detached: gradients flow through x only.
            loss += self.weights[i] * \
                self.criterion(x_vgg[i], y_vgg[i].detach())
        return loss


class UnetGenerator(nn.Module):
    """U-Net built recursively from UnetSkipConnectionBlock, innermost out."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetGenerator, self).__init__()
        # construct unet structure from the bottleneck outward
        unet_block = UnetSkipConnectionBlock(
            ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for i in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(
                ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        unet_block = UnetSkipConnectionBlock(
            ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(
            ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(
            ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(
            output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        return self.model(input)


class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: downsample, recurse into submodule, upsample, and
    (except at the outermost level) concatenate the skip connection."""

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None,
                 outermost=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine bias by default, so convs need their own.
        use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        # Upsample + 3x3 conv instead of transposed conv (avoids checkerboards).
        if outermost:
            upsample = nn.Upsample(scale_factor=2, mode='bilinear')
            upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3,
                               stride=1, padding=1, bias=use_bias)
            down = [downconv]
            up = [uprelu, upsample, upconv, upnorm]
            model = down + [submodule] + up
        elif innermost:
            upsample = nn.Upsample(scale_factor=2, mode='bilinear')
            upconv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3,
                               stride=1, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upsample, upconv, upnorm]
            model = down + up
        else:
            upsample = nn.Upsample(scale_factor=2, mode='bilinear')
            upconv = nn.Conv2d(inner_nc*2, outer_nc, kernel_size=3,
                               stride=1, padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upsample, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            return torch.cat([x, self.model(x)], 1)


def train_tom(train_loader, model):
    """Train the Try-On Module for 200k steps with L1 + VGG + mask losses.

    The model input is [agnostic, cloth, cloth_mask] (CP-VTON+ variant);
    its 4-channel output is split into a rendered person (3ch, tanh) and a
    composition mask (1ch, sigmoid) blended with the warped cloth.
    """
    model.cuda()
    model.train()

    # criterion
    criterionL1 = nn.L1Loss()
    criterionVGG = VGGLoss()
    criterionMask = nn.L1Loss()

    # optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=0.0001, betas=(0.5, 0.999))
    # NOTE(review): the scheduler is created but never stepped in this loop —
    # the learning rate stays constant; confirm whether decay was intended.
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0 -
                                                  max(0, step - 100000) / float(100000 + 1))

    for step in range(200000):
        iter_start_time = time.time()
        inputs = train_loader.next_batch()

        im = inputs['image'].cuda()
        im_pose = inputs['pose_image']
        im_h = inputs['head']
        shape = inputs['shape']

        agnostic = inputs['agnostic'].cuda()
        c = inputs['cloth'].cuda()
        cm = inputs['cloth_mask'].cuda()
        pcm = inputs['parse_cloth_mask'].cuda()

        # outputs = model(torch.cat([agnostic, c], 1))  # CP-VTON
        outputs = model(torch.cat([agnostic, c, cm], 1))  # CP-VTON+
        p_rendered, m_composite = torch.split(outputs, 3, 1)
        # BUGFIX: the original called F.tanh / F.sigmoid, but
        # torch.nn.functional was never imported as F (NameError at runtime);
        # torch.tanh / torch.sigmoid are the equivalent, non-deprecated calls.
        p_rendered = torch.tanh(p_rendered)
        m_composite = torch.sigmoid(m_composite)
        p_tryon = c * m_composite + p_rendered * (1 - m_composite)

        """visuals = [[im_h, shape, im_pose], [c, cm*2-1, m_composite*2-1], [p_rendered, p_tryon, im]]"""  # CP-VTON
        visuals = [[im_h, shape, im_pose], [c, pcm*2-1, m_composite*2-1],
                   [p_rendered, p_tryon, im]]  # CP-VTON+

        loss_l1 = criterionL1(p_tryon, im)
        loss_vgg = criterionVGG(p_tryon, im)
        # loss_mask = criterionMask(m_composite, cm)  # CP-VTON
        loss_mask = criterionMask(m_composite, pcm)  # CP-VTON+
        loss = loss_l1 + loss_vgg + loss_mask
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Tensorboard logging / periodic checkpointing from the original repo,
        # disabled here because `opt` and `board` are not defined in this notebook:
        # if (step+1) % opt.display_count == 0:
        #     board_add_images(board, 'combine', visuals, step+1)
        #     board.add_scalar('metric', loss.item(), step+1)
        #     board.add_scalar('L1', loss_l1.item(), step+1)
        #     board.add_scalar('VGG', loss_vgg.item(), step+1)
        #     board.add_scalar('MaskL1', loss_mask.item(), step+1)
        #     t = time.time() - iter_start_time
        #     print('step: %8d, time: %.3f, loss: %.4f, l1: %.4f, vgg: %.4f, mask: %.4f'
        #           % (step+1, t, loss.item(), loss_l1.item(),
        #              loss_vgg.item(), loss_mask.item()), flush=True)
        # if (step+1) % opt.save_count == 0:
        #     save_checkpoint(model, os.path.join(
        #         opt.checkpoint_dir, opt.name, 'step_%06d.pth' % (step+1)))


def main():
    """Build the TOM dataset/loader and train the CP-VTON+ generator."""
    # create dataset
    train_dataset = CPDataset("TOM", "cp-vton-plus")

    # create dataloader
    train_loader = CPDataLoader(train_dataset)

    # create model & train
    # model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)  # CP-VTON
    # 26 input channels = agnostic (22) + cloth (3) + cloth mask (1).
    model = UnetGenerator(
        26, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)  # CP-VTON+
    train_tom(train_loader, model)


if __name__ == "__main__":
    main()
research_development/Prashant/Pytorch/TOM_Train.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6
#     language: julia
#     name: julia-1.6
# ---

# Tutorial: concurrent (GPU) computing with CUDA.jl — tasks, streams,
# task-local state, overlapping CPU/GPU work, multi-GPU and multithreading.

using Pkg
Pkg.activate(@__DIR__)

using CUDA

# # Concurrent (GPU) computing
# Also known as asynchronous programming: a model that makes it possible to
# perform operations while waiting for another operation to complete,
# without needing multiple threads. Julia supports this with Tasks:
@sync begin
    @async begin
        println("task 1: operation that waits")
        sleep(0.1)
        println("task 1: done!")
    end
    @async println("task 2: another operation")
end;

# This relies on `sleep` not actually blocking: it yields to Julia's
# scheduler so other tasks get a chance to execute. Essentially it does:
@sync begin
    @async begin
        println("task 1: operation that waits")
        t0 = time()
        while time() - t0 < 0.1
            yield()
        end
        println("task 1: done!")
    end
    @async println("task 2: another operation")
end;

# ## Concurrent GPU computing
# Many CUDA operations are "properly asynchronous": they return control to
# the user immediately, so a bunch of kernels can be queued to saturate the
# GPU. Some operations still have to wait for GPU results; in CUDA.jl 3.0
# those (synchronization, memory copies, ...) block by yielding to the
# scheduler, so other tasks can run while waiting for the GPU:
@sync begin
    @async begin
        println("task 1: submit GPU operations")
        A = CUDA.rand(1024, 1024)
        B = CUDA.rand(1024, 1024)
        A*B
        println("task 1: wait for GPU results")
        synchronize()
        println("task 1: done")
    end
    @async println("task 2: another operation")
end;

# ## Pitfall: blocking GPU operations
# Some blocking operations remain, e.g. copying memory from pageable CPU
# memory: the OS may page it out at any time, so CUDA cannot do an
# asynchronous transfer and instead copies through a staging buffer,
# blocking the CPU for most of the transfer.

# allocate some memory (pageable — the upload below blocks)
a = rand(1024, 1024)

# upload to the GPU
b = CuArray(a);

# The fix is to keep the memory from being paged out: either allocate
# page-locked memory (`Mem.alloc(Mem.HostBuffer, ...)`) or, much easier,
# pin existing memory after the fact:

# allocate & pin some memory
a = rand(1024, 1024)
Mem.pin(a)

# upload to the GPU — now asynchronous and potentially faster (no staging copy)
b = CuArray(a);

# Caveats: pinning is expensive (pre-allocate and pin buffers up front), and
# pinning too much memory degrades overall system performance.

# ## Use case: overlap CPU with GPU operations
# Wrapping CPU and GPU work in separate tasks can use both at once — but
# there's a problem: most compute-intensive CPU operations never yield
# (blocking operations that do yield include I/O, synchronization
# primitives such as locks and conditions, and channels), so here the GPU
# task only runs after the CPU task finishes:
@sync begin
    @async begin
        println("CPU task: begin")
        for x in 1:10
            A = rand(1024 * 1024)
            sort(A)
        end
        println("CPU task: end")
    end
    @async begin
        println("GPU task: begin")
        A = CUDA.rand(1024, 1024)
        B = CUDA.rand(1024, 1024)
        A*B
        println("GPU task: wait")
        synchronize()
        println("GPU task: end")
    end
end;
# Workaround for compute-intensive tasks that never yield: call `yield()`
# periodically.

# ## Task-local state
# Since CUDA.jl 3.0, GPU state (active device, library handles, streams) is
# task-local rather than thread-local, and all operations default to these
# task-local values — so independent GPU work can live in separate tasks.
# Observe: each task gets its own stream.
stream()

fetch(@async stream())

# Library handles follow the task-local stream too: query the stream that
# the task-local cuBLAS handle is bound to.
function get_cublas_stream()
    handle = CUBLAS.handle()
    stream = Ref{CUDA.CUstream}()
    CUBLAS.cublasGetStream_v2(handle, stream)
    stream[]
end

get_cublas_stream()

fetch(@async get_cublas_stream())

# ### Pitfalls
# Synchronization is task-local: synchronizing in a child task does not
# synchronize the parent task's stream.
A = CUDA.rand(10000, 10000)
B = CUDA.rand(10000, 10000)
A*B
wait(@async synchronize())
CUDA.isdone(stream())
# (`device_synchronize()` would synchronize everything, but it is better to
# synchronize exactly where needed.)

# Data also cannot be safely reused across tasks without extra
# synchronization: each task has its own stream, so there is no implicit
# ordering between their operations.
A = CUDA.rand(10000, 10000)
B = CUDA.rand(10000, 10000)
C = fetch(
    @async begin
        A*B
    end
)
Array(C) ≈ Array(A)*Array(B)  # depending on your system, this may be false

# Simplest fix: synchronize in the tasks that produce data — here at two
# points (`CUDA.@sync` is `synchronize()` that preserves the return value).
A = CUDA.rand(10000, 10000)
B = CUDA.rand(10000, 10000)
synchronize()
C = fetch(
    @async begin
        CUDA.@sync A*B
    end
)
Array(C) ≈ Array(A)*Array(B)

# Alternative: impose a stream ordering with CUDA events. This serializes
# execution only once, so more operations can be queued asynchronously, at
# the cost of more bookkeeping on the application side.
input_ready, output_ready = CuEvent(), CuEvent()
A = CUDA.rand(10000, 10000)
B = CUDA.rand(10000, 10000)
record(input_ready)
C = fetch(
    @async begin
        CUDA.wait(input_ready)
        C = A*B
        record(output_ready)
        C
    end
)
synchronize(output_ready)
Array(C) ≈ Array(A)*Array(B)

# ## Use case: overlap GPU operations
# Multiple streams let the GPU overlap kernels when a single stream can't
# saturate the device. With CUDA.jl that is just: put independent work in
# separate Julia tasks.

# Exercise: large batched matrix RMSE, first sequentially...
# (NVTX.@range annotates the function for the profiler timeline;
# NOTE(review): assumes NVTX is made available by CUDA.jl — confirm.)
N = 16
A = CUDA.rand(1024, 1024, N)
B = CUDA.rand(1024, 1024, N)

CUDA.allowscalar(false)

function rmse(A::AbstractMatrix, B::AbstractMatrix, C::AbstractArray)
    E = A - B
    SQE = E .^ 2
    MSE = sum(SQE; dims=(1,2)) ./ length(SQE)
    C .= sqrt.(MSE)
    return
end

NVTX.@range function doit(f)
    rmses = CuVector{Float64}(undef, N)
    for i in 1:N
        f(view(A, :, :, i), view(B, :, :, i), reshape(view(rmses, i), (1,1)))
    end
    Array(rmses)
end

doit(rmse)

# ...then with one task (hence one stream) per slice, so kernels can overlap:
NVTX.@range function doit2(f)
    rmses = CuVector{Float64}(undef, N)
    device_synchronize()
    @sync for i in 1:N
        @async f(view(A, :, :, i), view(B, :, :, i), reshape(view(rmses, i), (1,1)))
    end
    device_synchronize()
    Array(rmses)
end

doit2(rmse)
# For such a simple operation this doesn't help performance, but it nicely
# demonstrates the API and how the GPU executes kernels in parallel.

# ## Use case: multi-GPU applications
# The active device is task-local too, and CUDA.jl switches contexts on
# task switch, so tasks can drive multiple GPUs in one process:
@sync begin
    @async begin
        device!(0)
        @cuda identity(nothing)
    end
    @async begin
        device!(1)
        @cuda identity(nothing)
    end
end;
# Sharp edges: tasks should pick a device (`device!`) at the start, and
# arrays do not track which device owns them — using another device's
# memory raises illegal memory access errors. Workaround: unified memory
# via `CuArray{..., Mem.UnifiedBuffer}(...)` or `cu(...; unified=true)`.

# ## Use case: multithreading
# The same pattern works with real threads via `Threads.@spawn`:
@sync begin
    Threads.@spawn begin
        device!(0)
        @cuda identity(nothing)
    end
    Threads.@spawn begin
        device!(1)
        @cuda identity(nothing)
    end
end;
# Thread-safety is still a work in progress (crashes possible), but
# multithreading helps with blocking API calls (e.g. copies to/from
# unpinned CPU memory) and with overlapping compute-intensive,
# non-yielding CPU workloads.
1-4-concurrent_computing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Extract text from XML files
# XML is a very common language for structuring texts and representing
# annotations; its standardized format makes the information easy to access.
# This notebook downloads a sample TEI document, shows its raw content,
# parses it with BeautifulSoup, and extracts header metadata and body text.

import requests

# Download the sample document used throughout the demonstration.
url = 'https://github.com/ilexistools/ebralc2021/raw/main/recursos/texto.xml'
resp = requests.get(url)
with open('texto.xml', 'wb') as outfile:
    outfile.write(resp.content)

# Print the raw XML so its structure is visible before parsing.
with open('texto.xml', 'r', encoding='utf-8') as handle:
    print(handle.read())

from bs4 import BeautifulSoup

# Parse the file. Note: the 'lxml' parser lower-cases tag names, which is
# why the lookups below use 'teiheader', 'filedesc', etc.
with open('texto.xml', 'r', encoding='utf-8') as handle:
    xml = BeautifulSoup(handle, 'lxml')

# Header metadata: title of the text.
print(xml.teiheader.filedesc.titlestmt.title.text)

# Header metadata: author name and date.
e = xml.teiheader.filedesc.titlestmt.find('author')
print(e.find('name').text)
print(e.find('date').text)

# Header metadata: place of publication.
e = xml.teiheader.filedesc.find('publicationstmt')
print(e.find('pubplace').text)

# Body content: print every <p> paragraph individually.
for p in xml.body.find_all('p'):
    print(p.text)
extrair_textos_de_xml.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ' (venv) WePlan - Forests' # language: python # name: venv-weplan # --- from sepal_ui import sepalwidgets as sw from component.message import cm # Create an appBar app_bar = sw.AppBar(cm.app.title, cm) # + # load the patial files # %run 'about_ui.ipynb' # %run 'map_ui.ipynb' # Gather all the partial tiles that you created previously app_content = [map_tile, about_tile, disclaimer_tile] # + # create a drawer for each group of tile # use the DrawerItem widget from sepalwidget (name_of_drawer, icon, the id of the widgets you want to display) # use the display_tile() method to link the times with the drawer items items = [ sw.DrawerItem(cm.app.drawer_item.map, "mdi-map-check", card="map_tile"), sw.DrawerItem(cm.app.drawer_item.about, "mdi-help-circle", card="about_tile"), ] # !!! not mandatory !!! # Add the links to the code, wiki and issue tracker of your code_link = "https://github.com/12rambau/weplan" wiki_link = "https://github.com/12rambau/weplan/blob/master/doc/en.rst" issue_link = "https://github.com/12rambau/weplan/issues/new" # Create the side drawer with all its components # The display_drawer() method link the drawer with the app bar app_drawer = sw.NavDrawer(items=items, code=code_link, wiki=wiki_link, issue=issue_link) # - # build the Html final app by gathering everything app = sw.App( tiles=app_content, appBar=app_bar, navDrawer=app_drawer, translator=cm, ).show_tile( "about_tile" ) # id of the tile you want to display # display the app # this final cell will be the only one displaying something in this notebook # if you run all this notebook you may see elements displayed on the left side of your screen but it won't work # it can only be launched with voila as it's creating a full page javascript interface app.show_tile("about_tile").add_banner(cm.app.disclaimer, "warning")
ui.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# Kaggle starter cell: list every file available under the read-only input dir.
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Output written to /kaggle/working/ is preserved; /kaggle/temp/ is session-only.

# +
# Utilities for data processing and algebra
import numpy as np
import pandas as pd
# Specialized container datatypes
import collections
# Plotting and data visualization
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# Feature preprocessing
import re
# sklearn for machine learning
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils.multiclass import unique_labels
# Reload modules before executing user code
# %reload_ext autoreload
# %autoreload 2
# Inspect the input directory structure (duplicate of the cell above)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# -

# NOTE(review): star imports — fastai is never visibly used in this notebook.
from fastai import *
from fastai.text import *

# Load the market data sample and coerce the timestamp column.
market_train_df = pd.read_csv('../input/news-dataset/marketdata_sample.csv')
market_train_df.head()
market_train_df.info()
market_train_df['time']= pd.to_datetime(market_train_df['time'])
market_train_df.info()

# NOTE(review): bare notebook shell line (not valid plain Python) — and
# chart_studio is installed but plotly.offline is what is actually imported.
pip install chart_studio

import plotly.offline as py
import plotly.graph_objects as go

# Plot closing prices of 10 randomly chosen assets over time.
data = []
for asset in np.random.choice(market_train_df['assetName'].unique(), 10):
    asset_df = market_train_df[(market_train_df['assetName'] == asset)]
    data.append(go.Scatter(
        x = asset_df['time'].dt.strftime(date_format='%Y-%m-%d').values,
        y = asset_df['close'].values,
        name = asset
    ))
layout = go.Layout(dict(title = "Closing prices of 10 random assets",
                        xaxis = dict(title = 'Month'),
                        yaxis = dict(title = 'Price (USD)'),
                        ), legend=dict(orientation="h"))
py.iplot(dict(data=data, layout=layout), filename='basic-line')

# Quantile trends of the closing price.
# NOTE(review): 'close' is divided by 20 IN PLACE here — every later cell sees
# the rescaled prices; presumably a units adjustment, confirm intent.
data = []
market_train_df['close'] = market_train_df['close'] / 20
for i in [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]:
    price_df = market_train_df.groupby('time')['close'].quantile(i).reset_index()
    data.append(go.Scatter(
        x = price_df['time'].dt.strftime(date_format='%Y-%m-%d').values,
        y = price_df['close'].values,
        name = f'{i} quantile'
    ))
layout = go.Layout(dict(title = "Trends of closing prices by quantiles",
                        xaxis = dict(title = 'Month'),
                        yaxis = dict(title = 'Price (USD)'),
                        ), legend=dict(orientation="h"))
py.iplot(dict(data=data, layout=layout), filename='basic-line')

# Intraday price movement: spread between close and open, grouped per day.
market_train_df['price_diff'] = market_train_df['close'] - market_train_df['open']
grouped = market_train_df.groupby('time').agg({'price_diff': ['std', 'min']}).reset_index()
print(f"Average standard deviation of price change within a day in {grouped['price_diff']['std'].mean():.4f}.")

# +
# Scatter the 10 days with the largest intraday std; hover text shows the
# maximum price drop on that day.
g = grouped.sort_values(('price_diff', 'std'), ascending=False)[:10]
g['min_text'] = 'Maximum price drop: ' + (-1 * g['price_diff']['min']).astype(str)
trace = go.Scatter(
    x = g['time'].dt.strftime(date_format='%Y-%m-%d').values,
    y = g['price_diff']['std'].values,
    mode='markers',
    marker=dict(
        size = g['price_diff']['std'].values,
        color = g['price_diff']['std'].values,
        colorscale='Portland',
        showscale=True
    ),
    text = g['min_text'].values
    #text = f"Maximum price drop: {g['price_diff']['min'].values}"
    #g['time'].dt.strftime(date_format='%Y-%m-%d').values
)
data = [trace]
layout= go.Layout(
    autosize= True,
    title= 'Top 10 months by standard deviation of price change within a day',
    hovermode= 'closest',
    yaxis=dict(
        title= 'price_diff',
        ticklen= 5,
        gridwidth= 2,
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
# -

# Recompute the spread stats after the rescale above and inspect outliers.
market_train_df['price_diff'] = market_train_df['close'] - market_train_df['open']
grouped = market_train_df.groupby('time').agg({'price_diff': ['std', 'min']}).reset_index()
print(f"Average standard deviation of price change within a day in {grouped['price_diff']['std'].mean():.4f}.")
market_train_df.sort_values('price_diff')[:10]

# Ratio of close to open; values far from 1 indicate suspect rows.
market_train_df['close_to_open'] = np.abs(market_train_df['close'] / market_train_df['open'])
market_train_df.head()
print(f"In {(market_train_df['close_to_open'] >= 1.2).sum()} lines price increased by 20% or more.")
print(f"In {(market_train_df['close_to_open'] <= 0.8).sum()} lines price decreased by 20% or more.")
print(f"In {(market_train_df['close_to_open'] >= 2).sum()} lines price increased by 100% or more.")
print(f"In {(market_train_df['close_to_open'] <= 0.5).sum()} lines price decreased by 100% or more.")

# +
# Per-asset mean open/close, used below to repair outlier rows.
market_train_df['assetName_mean_open'] = market_train_df.groupby('assetName')['open'].transform('mean')
market_train_df['assetName_mean_close'] = market_train_df.groupby('assetName')['close'].transform('mean')

# If the open price is too far from the mean open price for this company,
# replace it. Otherwise replace the close price.
# Repair outlier rows flagged by close_to_open: overwrite whichever of
# open/close deviates more from its per-asset mean.
# NOTE(review): iloc[i,5] / iloc[i,4] assume open and close sit at fixed
# positional columns 5 and 4 — TODO confirm against the CSV column order.
for i, row in market_train_df.loc[market_train_df['close_to_open'] >= 2].iterrows():
    if np.abs(row['assetName_mean_open'] - row['open']) > np.abs(row['assetName_mean_close'] - row['close']):
        market_train_df.iloc[i,5] = row['assetName_mean_open']
    else:
        market_train_df.iloc[i,4] = row['assetName_mean_close']
for i, row in market_train_df.loc[market_train_df['close_to_open'] <= 0.5].iterrows():
    if np.abs(row['assetName_mean_open'] - row['open']) > np.abs(row['assetName_mean_close'] - row['close']):
        market_train_df.iloc[i,5] = row['assetName_mean_open']
    else:
        market_train_df.iloc[i,4] = row['assetName_mean_close']

# +
# Re-plot the top-10-std scatter after the repair above (values now rounded,
# marker size scaled x5 for visibility).
market_train_df['price_diff'] = market_train_df['close'] - market_train_df['open']
grouped = market_train_df.groupby(['time']).agg({'price_diff': ['std', 'min']}).reset_index()
g = grouped.sort_values(('price_diff', 'std'), ascending=False)[:10]
g['min_text'] = 'Maximum price drop: ' + (-1 * np.round(g['price_diff']['min'], 2)).astype(str)
trace = go.Scatter(
    x = g['time'].dt.strftime(date_format='%Y-%m-%d').values,
    y = g['price_diff']['std'].values,
    mode='markers',
    marker=dict(
        size = g['price_diff']['std'].values * 5,
        color = g['price_diff']['std'].values,
        colorscale='Portland',
        showscale=True
    ),
    text = g['min_text'].values
    #text = f"Maximum price drop: {g['price_diff']['min'].values}"
    #g['time'].dt.strftime(date_format='%Y-%m-%d').values
)
data = [trace]
layout= go.Layout(
    autosize= True,
    title= 'Top 10 months by standard deviation of price change within a day',
    hovermode= 'closest',
    yaxis=dict(
        title= 'price_diff',
        ticklen= 5,
        gridwidth= 2,
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
# -

# Quantile trends of the prediction target returnsOpenNextMktres10.
data = []
for i in [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]:
    price_df = market_train_df.groupby('time')['returnsOpenNextMktres10'].quantile(i).reset_index()
    data.append(go.Scatter(
        x = price_df['time'].dt.strftime(date_format='%Y-%m-%d').values,
        y = price_df['returnsOpenNextMktres10'].values,
        name = f'{i} quantile'
    ))
layout = go.Layout(dict(title = "Trends of returnsOpenNextMktres10 by quantiles",
                        xaxis = dict(title = 'Month'),
                        yaxis = dict(title = 'Price (USD)'),
                        ), legend=dict(orientation="h"),)
py.iplot(dict(data=data, layout=layout), filename='basic-line')

# +
# Mean trend of the target, restricted to data from 2010 onward.
# NOTE(review): this filter is IN PLACE on market_train_df for all later cells,
# and the trace name reuses the stale loop variable `i` (0.95) from the
# previous cell — the label "0.95 quantile" is wrong; it is the mean.
data = []
market_train_df = market_train_df.loc[market_train_df['time'] >= '2010-01-01 22:00:00+0000']
price_df = market_train_df.groupby('time')['returnsOpenNextMktres10'].mean().reset_index()
data.append(go.Scatter(
    x = price_df['time'].dt.strftime(date_format='%Y-%m-%d').values,
    y = price_df['returnsOpenNextMktres10'].values,
    name = f'{i} quantile'
))
layout = go.Layout(dict(title = "Treand of returnsOpenNextMktres10 mean",
                        xaxis = dict(title = 'Month'),
                        yaxis = dict(title = 'Price (USD)'),
                        ), legend=dict(orientation="h"),)
py.iplot(dict(data=data, layout=layout), filename='basic-line')

# +
# Mean trend of every returns column side by side.
data = []
for col in ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1', 'returnsClosePrevMktres1',
            'returnsOpenPrevMktres1', 'returnsClosePrevRaw10', 'returnsOpenPrevRaw10',
            'returnsClosePrevMktres10', 'returnsOpenPrevMktres10', 'returnsOpenNextMktres10']:
    df = market_train_df.groupby('time')[col].mean().reset_index()
    data.append(go.Scatter(
        x = df['time'].dt.strftime(date_format='%Y-%m-%d').values,
        y = df[col].values,
        name = col
    ))
layout = go.Layout(dict(title = "Treand of mean values",
                        xaxis = dict(title = 'Month'),
                        yaxis = dict(title = 'Price (USD)'),
                        ), legend=dict(orientation="h"),)
py.iplot(dict(data=data, layout=layout), filename='basic-line')
# -

# --- News dataset EDA ---
news_train_df = pd.read_csv('../input/news-dataset/news_sample.csv')
news_train_df.head()
print(f'{news_train_df.shape[0]} samples and {news_train_df.shape[1]} features in the training news dataset.')

# Word cloud of the most recent million lowercased headlines.
# NOTE(review): WordCloud and `stop` are never defined/imported in this file —
# this cell needs `from wordcloud import WordCloud, STOPWORDS` (third-party)
# and a stopword set; as written it raises NameError.
text = ' '.join(news_train_df['headline'].str.lower().values[-1000000:])
wordcloud = WordCloud(max_font_size=None, stopwords=stop, background_color='white',
                      width=1200, height=1000).generate(text)
plt.figure(figsize=(12, 8))
plt.imshow(wordcloud)
plt.title('Top words in headline')
plt.axis("off")
plt.show()

# Simple distribution plots over the news metadata columns.
(news_train_df['urgency'].value_counts() / 1000000).plot(kind = 'bar');
plt.xticks(rotation=30);
plt.title('Urgency counts (mln)');
news_train_df['sentence_word_count'] = news_train_df['wordCount'] / news_train_df['sentenceCount']
plt.boxplot(news_train_df['sentence_word_count'][news_train_df['sentence_word_count'] < 40]);
news_train_df['provider'].value_counts().head(10)
(news_train_df['headlineTag'].value_counts() / 1000)[:10].plot(kind = 'barh');
plt.title('headlineTag counts (thousands)');

# Most-mentioned companies per sentiment class.
for i, j in zip([-1, 0, 1], ['negative', 'neutral', 'positive']):
    df_sentiment = news_train_df.loc[news_train_df['sentimentClass'] == i, 'assetName']
    print(f'Top mentioned companies for {j} sentiment are:')
    print(df_sentiment.value_counts().head(5))
    print('')

# +
def data_prep(market_train_df,news_train_df):
    """Feature-engineer both frames, aggregate the news per (day, asset code)
    and left-join it onto the market frame; rows with any NaN are dropped.

    Mutates both input frames in place before the merge. Returns the merged
    market frame.
    """
    market_train_df['time'] = market_train_df.time.dt.date
    market_train_df['returnsOpenPrevRaw1_to_volume'] = market_train_df['returnsOpenPrevRaw1'] / market_train_df['volume']
    market_train_df['close_to_open'] = market_train_df['close'] / market_train_df['open']
    market_train_df['volume_to_mean'] = market_train_df['volume'] / market_train_df['volume'].mean()
    news_train_df['sentence_word_count'] = news_train_df['wordCount'] / news_train_df['sentenceCount']
    # Reduce timestamps to hour-of-day (news) and calendar date (firstCreated).
    news_train_df['time']= pd.to_datetime(news_train_df['time'])
    news_train_df['time'] = news_train_df.time.dt.hour
    news_train_df['sourceTimestamp']= pd.to_datetime(news_train_df['sourceTimestamp'])
    news_train_df['sourceTimestamp']= news_train_df.sourceTimestamp.dt.hour
    news_train_df['firstCreated']= pd.to_datetime(news_train_df['firstCreated'])
    news_train_df['firstCreated'] = news_train_df.firstCreated.dt.date
    # NOTE(review): eval() on CSV cell contents — assetCodes presumably holds a
    # printed Python set like "{'A.N'}"; eval on untrusted data is unsafe,
    # ast.literal_eval would be the safe equivalent.
    news_train_df['assetCodesLen'] = news_train_df['assetCodes'].map(lambda x: len(eval(x)))
    news_train_df['assetCodes'] = news_train_df['assetCodes'].map(lambda x: list(eval(x))[0])
    news_train_df['headlineLen'] = news_train_df['headline'].apply(lambda x: len(x))
    # NOTE(review): this overwrites the count computed two lines above with the
    # string length of the first asset code — likely unintentional.
    news_train_df['assetCodesLen'] = news_train_df['assetCodes'].apply(lambda x: len(x))
    news_train_df['asset_sentiment_count'] = news_train_df.groupby(['assetName', 'sentimentClass'])['time'].transform('count')
    news_train_df['asset_sentence_mean'] = news_train_df.groupby(['assetName', 'sentenceCount'])['time'].transform('mean')
    # Integer-encode headlineTag.
    lbl = {k: v for v, k in enumerate(news_train_df['headlineTag'].unique())}
    news_train_df['headlineTagT'] = news_train_df['headlineTag'].map(lbl)
    # Aggregate news per (day, asset code), then join onto the market rows.
    kcol = ['firstCreated', 'assetCodes']
    news_train_df = news_train_df.groupby(kcol, as_index=False).mean()
    market_train_df = pd.merge(market_train_df, news_train_df, how='left',
                               left_on=['time', 'assetCode'],
                               right_on=['firstCreated', 'assetCodes'])
    # Integer-encode assetCode.
    lbl = {k: v for v, k in enumerate(market_train_df['assetCode'].unique())}
    market_train_df['assetCodeT'] = market_train_df['assetCode'].map(lbl)
    market_train_df = market_train_df.dropna(axis=0)
    return market_train_df

market_train_df.drop(['price_diff', 'assetName_mean_open', 'assetName_mean_close'], axis=1, inplace=True)
market_train_df = data_prep(market_train_df, news_train_df)
print(market_train_df.shape)

# Binary target: did the 10-day market-residualized open return go up?
up = market_train_df.returnsOpenNextMktres10 >= 0
fcol = [c for c in market_train_df.columns if c not in ['assetCode', 'assetCodes', 'assetCodesLen',
        'assetName', 'assetCodeT', 'firstCreated', 'headline', 'headlineTag', 'marketCommentary',
        'provider', 'returnsOpenNextMktres10', 'sourceId', 'subjects', 'time', 'time_x',
        'universe','sourceTimestamp']]
X = market_train_df[fcol].values
up = up.values
r = market_train_df.returnsOpenNextMktres10.values

# Min-max scaling of X values to [0, 1] per column.
mins = np.min(X, axis=0)
maxs = np.max(X, axis=0)
rng = maxs - mins
X = 1 - ((maxs - X) / rng)
# -

from sklearn.model_selection import train_test_split
X_train, X_test, up_train, up_test, r_train, r_test = train_test_split(X, up, r, test_size=0.1, random_state=99)

# LightGBM binary classifier on the engineered features.
# NOTE(review): `lgb` is never imported in this file — needs
# `import lightgbm as lgb`; as written this cell raises NameError.
params = {'learning_rate': 0.01, 'max_depth': 12, 'boosting': 'gbdt', 'objective': 'binary',
          'metric': 'auc', 'is_training_metric': True, 'seed': 42}
model = lgb.train(params, train_set=lgb.Dataset(X_train, label=up_train), num_boost_round=2000,
                  valid_sets=[lgb.Dataset(X_train, label=up_train), lgb.Dataset(X_test, label=up_test)],
                  verbose_eval=100, early_stopping_rounds=100)
Stock Sentiment from news V1/stock-sentiment-from-news.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3 (ipykernel), language: python, name: python3}
# ---

# + id="fEjD-RrDEax6"
# Setup if running in colab: on the first run `runonce` is undefined, the bare
# except catches the NameError, sets the flag and performs the one-time setup
# (pip install / git clone magics). Subsequent runs print "Already ran".
# NOTE(review): a bare except: also swallows unrelated errors — except NameError
# would be the precise guard.
RunningInCOLAB = 'google.colab' in str(get_ipython())
if RunningInCOLAB:
    try:
        if runonce:
            print("Already ran")
    except:
        runonce = True
        # !pip install wandb
        # !git clone https://github.com/Jimmy-Nnilsson/StudieGrupp3_MLProjekt.git
        # import wandb
        # wandb.login()

# + id="R643QH_ogQ2i"
# import wandb
# wandb.login()

# + id="gr84aH0mEYh8"
import os
import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import tensorflow as tf
print("TF: ", tf.__version__)
from tensorflow.keras import layers
from tensorflow.keras import models
from keras import Model
import keras
from pathlib import Path
from keras.preprocessing.image import load_img, img_to_array, image_dataset_from_directory
from tensorflow.keras.applications import vgg16, vgg19, mobilenet_v2, inception_v3
from sklearn.metrics import confusion_matrix, classification_report
import wandb
from wandb.keras import WandbCallback
import cv2

# + id="tc9140-sEYiB"
def seed_everything():
    """Seed python, numpy and tensorflow RNGs for (best-effort) reproducibility."""
    # os.environ['PYTHONHASHSEED'] = '0'
    os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
    random.seed(1254)
    np.random.seed(hash("improves reproducibility") % 2**32 - 1)
    tf.random.set_seed(hash("by removing stochasticity") % 2**32 - 1)
seed_everything()
# -

# + id="Cnn3A9AdEYiC"
# Get base project directory: locally, walk up the cwd to the repo root
# "StudieGrupp3_MLProjekt"; in colab the clone location is fixed.
if not RunningInCOLAB:
    # project_path = Path(os.getcwd()).parent.parent
    for i, p in enumerate(Path(os.getcwd()).parts):
        if p == "StudieGrupp3_MLProjekt":
            break
    pathparts = list(Path(os.getcwd()).parts[0:i+2])
    project_path = Path(pathparts[0],"\\\\".join(pathparts[1:]))
else:
    project_path = Path('/content/StudieGrupp3_MLProjekt/')
datapath = (project_path /'data/processed/')

# Label mapping used throughout: 0 = tumor ("yes"), 1 = no tumor ("no").
CLASSES = {0 : 'yes', 1 : 'no'}

# Loops through pathlist and reads and resizes images.
def read_image(pathlist : list, size : int)-> list:
    """Load every image path as RGB, resize to (size, size) and stack into a
    uint8 ndarray of shape (n, size, size, 3)."""
    data = []
    for path in pathlist:
        image=load_img(path, color_mode='rgb', target_size=(size, size))
        # image=load_img(path, color_mode='rgb', target_size=(size, size))
        image=img_to_array(image)
        # image=image/255.0
        data.append(image)
    data = np.asarray(data, dtype=np.uint8)
    return data

# Makes input and label data from folder locations.
# Loops through location "subfolder/CLASSES".
def get_sets(subfolder : str, CLASSES : dict, size : int):
    """Collect the images of one split ('train'/'val'/'test') for all classes.

    Labels are derived from the file name: stem part after the first '_' equal
    to 'yes' -> 0, anything else -> 1 (matches CLASSES above).
    Returns (images uint8 ndarray, labels uint8 ndarray).
    """
    folder_paths = []
    folder_labels = []
    labels = []
    for k,v in CLASSES.items():
        # input datapath generation
        folder_paths += list((datapath / f"2_split_{v}/{subfolder}").rglob("*"))
    # Label data generation
    folder_labels = [0 if x.stem.split('_')[1] == 'yes' else 1 for x in folder_paths]
    folder_labels = np.asarray(folder_labels, dtype=np.uint8)
    # Extract images from datapaths
    img_list = read_image(folder_paths, size)
    return img_list, folder_labels

def get_training_set(CLASSES : dict, size : int):
    """Like get_sets, but reads the geometrically-augmented training folders.

    NOTE(review): unlike get_sets, labels are returned as a plain Python list
    (not a uint8 ndarray) — downstream code appears to tolerate this, confirm.
    """
    folder_paths = []
    folder_labels = []
    labels = []
    for k,v in CLASSES.items():
        # input datapath generation
        # folder_paths += list((datapath / f"3_aug_{v}_train/").rglob("*"))
        folder_paths += list((datapath / f"3_augmentation_train/3_aug_geo_{v}_train/").rglob("*"))
        # folder_paths += list((datapath / f"3_augmentation_train/3_aug_pix_{v}_train/").rglob("*"))
        # print(folder_paths)
    # Label data generation
    folder_labels = [0 if x.stem.split('_')[1] == 'yes' else 1 for x in folder_paths]
    # Extract images from datapaths
    img_list = read_image(folder_paths, size)
    return img_list, folder_labels
# -

pathparts

# + [markdown] id="1jDJ9plmgxlC"
# Load Pictures

# + id="Fc4RQseIEYiE"
# Dataset inspect: read all splits into memory at 224x224.
size = 224
X_aug_train, y_aug_train = get_training_set(CLASSES, size)
X_train, y_train = get_sets('train', CLASSES, size)
X_val, y_val = get_sets('val', CLASSES, size)
X_test, y_test = get_sets('test', CLASSES, size)
# +
# + id="BACMLdC0I-w3"
#@title
@tf.function
def preprocess(image: tf.Tensor, label: tf.Tensor):
    """ Preprocess the image tensors and parse the labels """
    # Preprocess images
    image = tf.image.convert_image_dtype(image, tf.float32)
    # Parse label
    label = tf.cast(label, tf.float32)
    return image, label

def prepare_dataloader(images: np.ndarray,
                       labels: np.ndarray,
                       loader_type: str='train',
                       batch_size: int=128):
    """ Utility function to prepare dataloader.

    Applies the backbone-specific preprocessing, wraps the arrays into a
    tf.data pipeline (shuffled only for the training loader), batches and
    prefetches.
    """
    images = model_preprocess(images)
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if loader_type=='train':
        dataset = dataset.shuffle(1024)
    dataloader = (
        dataset
        .map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
        .batch(batch_size)
        .prefetch(tf.data.AUTOTUNE)
    )
    return dataloader

def model_preprocess(images):
    """Backbone-specific input preprocessing; keep in sync with the backbone
    selected in Model_Class.get_model (currently VGG19)."""
    images = vgg19.preprocess_input(images)
    # images = vgg16.preprocess_input(images)
    # images = mobilenet_v2.preprocess_input(images)
    # images = inception_v3.preprocess_input(images)
    return images

# + [markdown] id="dAr1WwrYgxlH"
# Config parameters

# + id="KHhaOEzCI-w0"
# Mind model processing
# Finetune not complete
# Central experiment configuration; several entries are filled in later by
# Model_Class (group, architecture, model_name) and by the cells below
# (training_set).
configs = dict(
    project_name = "MRI_Baseline_3",  # Project Name
    mode = 'disabled',  # {'offline', 'run', 'disabled', 'dryrun', 'online'} WandB run status
    job_type = "",  # Run type for WandB
    group = "",  # Group in WandB
    sub_group = "_pipeline",
    class_names = CLASSES,  # Classes for training
    training_set = "",
    image_width = X_train[0].shape[0],  # Picture width for model input
    image_height = X_train[0].shape[1],  # Picture height for model input
    image_channels = X_train[0].shape[2],  # Picture channels for model input
    pretrain_weights = 'imagenet',  # Pretrained weights for basemodel if any
    batch_size = 4,  # Batchsize for training
    init_learning_rate = 0.001,  # Initial training rate if no callback is used
    lr_decay_rate = 0.1,  # Decay rate of training rate
    epochs = 50,  # Epochs to train
    optimizer = 'rmsprop',  # The optimizer used by the ml model
    loss_fn = 'binary_crossentropy',  # Loss function
    metrics = ['accuracy'],  # Metrics
    earlystopping_patience = 5,  # For the early stopping callback
    dataset = "Brain_MRI_Images_for_Brain_Tumor_Detection",
    fine_tune_learning_rate = 1e-5,  # Learning rate used during fine tuning
    fine_tune_epochs = 10,  # Epochs ran at finetuning
    architecture = "",  # To be defined f"{base_model._name.upper()} global_average_pooling2d"
    model_name = ''  # Set after model is defined - name of the ml model
)

# + id="dVtM4jajI-w4"
# Build the four dataloaders (batch size falls back to 64 if unset in configs).
trainloader = prepare_dataloader(X_train, y_train, 'train', configs.get('batch_size', 64))
augtrainloader = prepare_dataloader(X_aug_train, y_aug_train, 'train', configs.get('batch_size', 64))
validloader = prepare_dataloader(X_val, y_val, 'valid', configs.get('batch_size', 64))
testloader = prepare_dataloader(X_test, y_test, 'test', configs.get('batch_size', 64))

# + id="82sEYhox889L"
# Train on the augmented training set.
configs['training_set'] = augtrainloader

# + [markdown] id="dpjWJwT9gxlJ"
# Model class definition

# + id="clfaakDVgxlJ"
class Model_Class():
    """Transfer-learning wrapper around a frozen VGG19 backbone with a small
    dense head, plus Grad-CAM visualisation and W&B experiment tracking.

    Reads/writes the module-level `configs` dict and uses the module-level
    `validloader` during training — tightly coupled to this notebook's state.
    """

    def __init__(self, output_activation: str='sigmoid' ):
        self.shape = (configs['image_width'],configs['image_height'], configs['image_channels'])
        self.model = ""          # placeholder until get_model() builds it
        # self.base_model = ""
        self.output_activation = output_activation
        self.run = ""            # active wandb run, set by __wandb()
        self.get_model()
        self.conv_layers, self.layer_names = self.__get_convlayers()
        self.preds = ""          # last predictions, set by grad_cam()
        # Close any dangling W&B run from a previous instance.
        if not wandb.run is None:
            wandb.finish()

    def get_model(self):
        """(Re)build the model: frozen backbone + GAP + 128/64/32 dense head
        and a single output unit. Also fills configs group/architecture."""
        tf.keras.backend.clear_session()
        kwarg = dict(weights=configs['pretrain_weights'], include_top=False, input_shape=self.shape)
        self.base_model = vgg19.VGG19(**kwarg)
        # self.base_model = vgg16.VGG16(**kwarg)
        # self.base_model = mobilenet_v2.MobileNetV2(**kwarg)
        # self.base_model = inception_v3.InceptionV3(**kwarg)
        self.base_model.trainable = False
        x = layers.GlobalAveragePooling2D()(self.base_model.output)
        # x = layers.Flatten()(self.base_model.output)
        x = layers.Dense(128, activation='relu')(x)
        x = layers.Dense(64, activation='relu')(x)
        x = layers.Dense(32, activation='relu')(x)
        outputs = layers.Dense(1, activation=self.output_activation)(x)
        configs['group'] = f'{self.base_model._name}{configs["sub_group"]}'
        configs['architecture'] = self.base_model._name
        self.model = models.Model(self.base_model.input, outputs, name=f'Baseline_{self.base_model._name.upper()}')

    def transfer_learning(self, callbacks: list, verbose: int=0, wb: bool=False):
        """Train only the head (base model frozen) with the initial LR."""
        # Makes training run on all but base model
        train_config = {
            "learning_rate" : configs['init_learning_rate'],
            "epochs" : configs['epochs'],
            "compile" : True}
        configs['job_type'] = "Transfer learning"
        self.base_model.trainable = False
        self.__train(callbacks=callbacks, verbose=verbose, wb=wb, train_config=train_config)

    def fine_tune(self, callbacks: list, verbose: int=0, wb: bool=False, trainable_layers: list=0):
        """Unfreeze (all or selected) layers and continue training at the
        fine-tune LR. NOTE(review): compile=False here — the caller must have
        compiled the model with the fine-tune optimizer beforehand."""
        train_config = {
            "learning_rate" : configs['fine_tune_learning_rate'],
            "epochs" : configs['fine_tune_epochs'],
            'job_type' : 'Fine-tune',
            "compile" : False}
        configs['job_type'] = "Fine-tune"
        self.set_trainable(trainable_layers)
        self.__train(callbacks=callbacks, verbose=verbose, wb=wb, train_config=train_config)

    def __train(self, callbacks: list, verbose: int=0, wb: bool=False, train_config:dict={} ):
        """Shared fit loop. NOTE(review): mutable default for train_config and
        in-place append to the caller's callbacks list when wb=True."""
        if wb:
            callbacks.append(self.__wandb())
        # Initalize model
        # tf.keras.backend.clear_session()
        configs['model_name'] = self.model._name # set
        # Compile the model
        if train_config['compile']:
            opt = tf.keras.optimizers.RMSprop(learning_rate=train_config['learning_rate'])
            self.model.compile(optimizer=opt, loss=configs['loss_fn'], metrics=configs['metrics'])
        # Train model
        _ = self.model.fit(configs['training_set'],
                           epochs=train_config['epochs'],
                           validation_data=validloader,
                           callbacks=callbacks,
                           verbose=verbose)
        if wb:
            # Evaluate the trained model
            loss, acc = self.model.evaluate(validloader)
            self.run.log({'evaluate/accuracy': acc})
            # Close the W&B run.
            self.run.finish()

    def grad_cam(self, image, layer=None):
        """Grad-CAM for one image. `layer` may be None (last conv layer), an
        index, or a list of indices (then dicts keyed by layer name are
        returned). Returns (image, heatmap(s), superimposed image(s))."""
        self.preds = self.predict(image)
        if type(layer) is list:
            heatmap_list, superimposed_list = {},{}
            for layer_num in layer:
                heatmap = self.make_gradcam_heatmap(np.expand_dims(image, axis=0), layer_num, np.argmax(self.preds[0]))
                superimposed_img = self.superimpose(image,heatmap)
                heatmap_list[self.model.layers[layer_num]._name] = heatmap
                superimposed_list[self.model.layers[layer_num]._name] = superimposed_img
            return image, heatmap_list, superimposed_list
        else:
            heatmap = self.make_gradcam_heatmap(np.expand_dims(image, axis=0), layer, np.argmax(self.preds[0]))
            superimposed_img = self.superimpose(image,heatmap)
            return image, heatmap, superimposed_img

    def make_gradcam_heatmap(self, img_array, layer=None, pred_index=None):
        """Standard Grad-CAM: gradient of the chosen class channel w.r.t. the
        chosen conv layer's activations, channel-weighted and normalized to
        [0, 1]. Returns the heatmap as a numpy array."""
        # First, we create a model that maps the input image to the activations
        # of the chosen conv layer as well as the output predictions.
        if layer == None:
            layer=self.conv_layers[-1]
        model = self.model
        grad_model = Model(
            [model.inputs], [model.layers[layer].output, model.output]
            # [model.inputs], [model.get_layer(self.layer_names[-1]).output, model.output]
        )
        # Then, we compute the gradient of the top predicted class for our
        # input image with respect to the activations of that conv layer.
        with tf.GradientTape() as tape:
            last_conv_layer_output, preds = grad_model(img_array)
            if pred_index is None:
                pred_index = tf.argmax(preds[0])
            class_channel = preds[:, pred_index]
        # Gradient of the output neuron (top predicted or chosen) with regard
        # to the output feature map of the conv layer.
        grads = tape.gradient(class_channel, last_conv_layer_output)
        # Mean intensity of the gradient over each feature map channel.
        pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
        # Weight each channel by "how important it is" for the class and sum.
        last_conv_layer_output = last_conv_layer_output[0]
        heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
        heatmap = tf.squeeze(heatmap)
        # For visualization purposes, normalize the heatmap between 0 & 1.
        heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
        return heatmap.numpy()

    def predict(self, pic):
        """Preprocess and predict; accepts a single image (3-D) or a batch."""
        x = model_preprocess(pic)
        if len(x.shape) < 4:
            x = np.expand_dims(x, axis=0)
        preds = self.model.predict(x)
        return preds

    def superimpose(self, pic,heatmap):
        """Overlay the (resized, jet-colormapped) heatmap on the image; the
        image is clipped to 190 so the 0.3-weighted overlay stays in uint8."""
        img_numpy = np.asarray(np.clip(pic, 0, 190))
        heatmap_resized = cv2.resize(heatmap, (img_numpy.shape[1], img_numpy.shape[0]))
        heatmap_resized = np.uint8(255 * heatmap_resized)
        heatmap_resized = cv2.applyColorMap(heatmap_resized, cv2.COLORMAP_JET)
        # [:,:,::-1] converts OpenCV BGR to RGB before blending.
        superimposed_img = 0.3*heatmap_resized[:,:,::-1] + img_numpy
        superimposed_img = superimposed_img.astype(np.uint8)
        return superimposed_img

    def __get_convlayers(self):
        """Indices and names of all convolutional layers, detected by class
        module path ('...convolutional...') in the layer's repr."""
        list_conv_layers = []
        list_layer_names = []
        for i,l in enumerate(self.model.layers):
            # print(str(l).split('.'))
            if str(l).split('.')[2] == 'convolutional':
                list_conv_layers.append(i)
                list_layer_names.append(l._name)
        return list_conv_layers, list_layer_names

    def __wandb(self):
        """Start a W&B run from configs and return a WandbCallback."""
        self.run = wandb.init(mode=configs['mode'] ,project=configs['project_name'],
                              config=configs,
                              job_type=configs['job_type'],
                              group=configs['group'])
        # Define WandbCallback for experiment tracking
        wandb_callback = WandbCallback(monitor='val_loss',
                                       log_weights=True,
                                       log_evaluation=True,
                                       validation_steps=5,
                                       save_model=True,
                                       save_graph = True
                                       )
        return wandb_callback

    def set_trainable(self, trainable_layers=0):
        """Make the whole model trainable (trainable_layers == 0) or only the
        listed layer indices."""
        # Sets whole model to trainable
        if trainable_layers == 0:
            self.model.trainable = True
        else:
            self.model.trainable = False
            for i in trainable_layers:
                self.model.layers[i].trainable = True

# + [markdown] id="N7Z16NM-gxlL"
# Call model

# + id="M9A5yCANgxlN"
model = Model_Class()

# + [markdown] id="6Tg4OnwwgxlI"
# Model Callbacks

# + id="ir6gxKIOEYiM"
# Checkpoint the best-val-accuracy weights, locally or in colab.
model_filename = f'{model.base_model._name}_{configs["project_name"]}{configs["sub_group"]}.h5'
if not RunningInCOLAB:
    checkpoint_filepath = (Path(os.getcwd()) /f'model_checkpoint/{model_filename}')
else:
    checkpoint_filepath = (Path(f'/content/{model_filename}'))
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)

# + id="XUld9o-zI-w6"
earlystopper = keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=configs['earlystopping_patience'],
    verbose=0,
    mode='auto',
    restore_best_weights=True
)

# + id="tIzmu5iFgxlJ"
def lr_scheduler(epoch, lr):
    """Keep the LR for the first 12 epochs, then decay it exponentially.
    Also logs the current LR to the active W&B run (required)."""
    # log the current learning rate onto W&B
    if wandb.run is None:
        raise wandb.Error("You must call wandb.init() before WandbCallback()")
    wandb.log({'learning_rate': lr}, commit=False)
    if epoch < 12:
        return lr
    else:
        return lr * tf.math.exp(-configs['lr_decay_rate'])
lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_scheduler)

# + [markdown] id="7BWGilQqgxlN"
# Make gradcams

# + id="2usvE1YxwzhN"
def plot_gradcam(image, heatmap, superimposed_img):
    """Plot (image, heatmap, overlay); accepts the dict form returned by
    grad_cam(layer=list) and renders one row per layer."""
    if type(heatmap) == dict and type(superimposed_img) == dict:
        nlen = len(heatmap)
        fig, ax = plt.subplots(nlen,3, figsize=(10, nlen*3.5))
        # fig.figsize=(20,20)
        for i, (k, img) in enumerate(heatmap.items()):
            ax[i,0].imshow(img)
            ax[i,1].set_title(k)
            ax[i,1].imshow(superimposed_img[k])
            ax[i,2].imshow(image)
    else:
        plt.subplot(1,3, 1)
        plt.imshow(heatmap)
        plt.subplot(1,3, 2)
        plt.imshow(superimposed_img)
        plt.subplot(1,3, 3)
        plt.imshow(image)

# + id="6-H_exnJgxlN"
# image, heatmap, superimposed_img = model.grad_cam(X_train[0], model.conv_layers)
# plot_gradcam(image, heatmap, superimposed_img)

# + [markdown] id="QInYauoxgxlO"
# Plot gradcams

# +
"height": 150} executionInfo={"elapsed": 12803, "status": "ok", "timestamp": 1646196122824, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "16604156050067165706"}, "user_tz": -60} id="AxzuBr6qgxlO" outputId="ff1996f7-8cf8-445c-b478-032b5a3ba71e" image, heatmap, superimposed_img = model.grad_cam(X_train[0]) plot_gradcam(image, heatmap, superimposed_img) # + executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1646196122826, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "16604156050067165706"}, "user_tz": -60} id="E8oDvtHG5fH5" import seaborn as sns def crcm(model, x, y, p=True): y_pred = model.predict(x) y_pred = np.asarray(y_pred) y_pred = np.uint8(y_pred+0.5) cm = confusion_matrix(y, y_pred) cr = classification_report(y, y_pred) if p: plt.xlabel('Pred') sns.heatmap(cm, vmin=0, annot=True) print(cr) cr = classification_report(y, y_pred, output_dict=True ) return cm, cr # + colab={"base_uri": "https://localhost:8080/", "height": 650} executionInfo={"elapsed": 5383, "status": "ok", "timestamp": 1646196128200, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "16604156050067165706"}, "user_tz": -60} id="JV8x_bUdJJ5L" outputId="ba3a50ec-60a4-446e-8e21-772383acc5c6" cm_, cr_ = crcm(model, X_val, y_val) # + [markdown] id="enLx-uT8gxlO" # Train with wandb # + executionInfo={"elapsed": 493, "status": "ok", "timestamp": 1646196128687, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "16604156050067165706"}, "user_tz": -60} id="il6x4WiM_K5A" model.get_model() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"elapsed": 5910907, "status": "ok", "timestamp": 1646202039588, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": 
"16604156050067165706"}, "user_tz": -60} id="mjgcyPZvgxlO" outputId="f8e6c392-f5dd-484a-db7d-0b13721acd67" import pandas as pd df = pd.DataFrame cr_list = [] cm_list = [] # callbacks = [earlystopper] # callbacks = [model_checkpoint_callback] callbacks = [] for i in range(5): model.get_model() # Train # model.train(callbacks=callbacks, verbose=2, wb=True) model.transfer_learning(callbacks=callbacks, verbose=2, wb=True) # model.model.summary() cm_, cr_ = crcm(model, X_val, y_val, p=False) cm_list.append(cm_) cr_['pos'] = {'precision': i, 'recall': i, 'f1-score': i, 'support': i} if i > 0: df = df.append(pd.DataFrame.from_dict(cr_)) else: df = pd.DataFrame.from_dict(cr_) df.mean(level=0) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"elapsed": 1303629, "status": "ok", "timestamp": 1646203343207, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "16604156050067165706"}, "user_tz": -60} id="w_kc9pHlgxlO" outputId="834dbb8b-5ae7-493d-d721-b0cfe3952ebf" cr_list = [] cm_list = [] # callbacks = [earlystopper] # callbacks = [model_checkpoint_callback] callbacks = [] for i in range(5): model.get_model() opt = tf.keras.optimizers.RMSprop(learning_rate=configs['fine_tune_learning_rate']) model.model.compile(optimizer=opt, loss=configs['loss_fn'], metrics=configs['metrics']) # Train model.fine_tune(callbacks=callbacks, verbose=2, wb=True) # model.model.summary() cm_, cr_ = crcm(model, X_val, y_val, p=False) cm_list.append(cm_) cr_['pos'] = {'precision': i, 'recall': i, 'f1-score': i, 'support': i} if i > 0: df = df.append(pd.DataFrame.from_dict(cr_)) else: df = pd.DataFrame.from_dict(cr_) df.mean(level=0) # + colab={"base_uri": "https://localhost:8080/", "height": 380} executionInfo={"elapsed": 86087, "status": "error", "timestamp": 1646203429281, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "16604156050067165706"}, 
"user_tz": -60} id="EvePI4OaCOKI" outputId="67bcacc0-85ed-45e3-a479-2d94586d7758" if RunningInCOLAB: from google.colab import drive drive.mount('/content/drive/') # !cp '{model_filename}' "/content/drive/MyDrive/model" # + [markdown] id="DdLghhBaScNK" #
notebooks/modelling/S2_Pipeline_checks_geo_j.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # 3. Converting data to OME-NGFF (practical) # # *** # # **ELMI 2021 NGFF Workshop**, 25 June 2021 # # *** # # # ## Summary # # * 3.1. Data from IDR # * 3.2. Converting your data to OME-NGFF # * 3.3. Publishing your data with S3 # * 3.4. Extras (time-permitting) # # *** # + [markdown] slideshow={"slide_type": "slide"} # ## 3.1. Data from S3 # We're going to start off by looking at some images you will likely have seen during the OMERO or IDR sessions. # # **Our goal is to share these *without* using an OMERO.** # # <table> # <tr> # <td> # <img alt="idr0062 thumbnails" src="images/training-1.png" style="height:150px"/> # </td> # <td> # <img alt="idr0062 thumbnails" src="images/training-2.png" style="height:150px"/> # </td> # <td> # <img alt="idr0023 3D screenshot" src="images/training-3.png" style="height:150px"/> # </td> # </tr> # </table> # # The left two images are from the ilastik plugin guide presented by Petr: https://omero-guides.readthedocs.io/en/latest/ilastik/docs/ilastik_fiji.html # # They are available in the "idr0062" project on the workshop server: https://workshop.openmicroscopy.org/webclient/?show=project-1952 # # The original dataset can be found in IDR study idr0062 by Blin _et al._: https://idr.openmicroscopy.org/webclient/?show=project-801 # # The image on the right is from idr0023 by Szymborska _et al_: http://idr.openmicroscopy.org/webclient/?show=project-52 and is **much** smaller. # # - # *** # # ## 3.2 Converting your data to OME-NGFF # # The two basic commands are `bioformats2raw` and `raw2ometiff`. Together they provide a pipeline to scalably convert large images into OME-TIFF. The primary caveat is that they require **twice** the storage for the conversion. 
# # # ### 3.2.1 Conversion tools # # https://forum.image.sc/t/converting-whole-slide-images-to-ome-tiff-a-new-workflow/32110/4 # # # <img src="images/conversion.png" style="height:400px" /> # !bioformats2raw --version # !bioformats2raw # !java --version import os os.environ["JAVA_OPTS"]="--illegal-access=deny" # %%time # !bioformats2raw --overwrite trans_norm.tif trans_norm.ome.zarr # !find trans_norm.ome.zarr -name ".z*" # !ls -ltrah trans_norm.ome.zarr/0/0/0/0/0/0/0 # !ome_zarr info trans_norm.ome.zarr/0 # *** # # ## 3.3. Publishing your data with S3 # # You can then move the generated output to S3. Note: one of the most frequent mistakes here is the slash (`/`) at the end of the commands.x YOURNAME = input() # !time mc cp --recursive trans_norm.ome.zarr/0/ elmi2021/idr-upload/elmi2021/$YOURNAME/my_trans_norm.ome.zarr/ # !mc cat elmi2021/idr-upload/elmi2021/$YOURNAME/my_trans_norm.ome.zarr/.zattrs # In the cell below, please enter the password used [What is the "Cloud"?](2_Minio.ipynb). # The password was sent prior to the workshop. # + import getpass import os os.environ["S3FS_LOGGING_LEVEL"] = "WARN" os.environ["FSSPEC_CONFIG_DIR"] = "/tmp" os.environ["AWS_ACCESS_KEY_ID"] = "elmi2021" os.environ["AWS_SECRET_ACCESS_KEY"] = getpass.getpass() with open("/tmp/conf.json", "w") as o: o.write(""" {"s3": {"client_kwargs": {"endpoint_url": "https://idr-ftp.openmicroscopy.org"} } }""") # !ome_zarr -qqq info s3://idr-upload/elmi2021/josh/my_trans_norm.ome.zarr/ # - from IPython.display import Video Video("images/idr0023.mp4") # # 3.4 Extras # # ## 3.4.1 Renaming # # Another important distinction to filesystems is that though it looks like hello is in a directory, you should really think of the entire string after the bucket just as a "key". 
# !mc mv --recursive elmi2021/idr-upload/elmi2021/$YOURNAME/my_trans_norm.ome.zarr/ elmi2021/idr-upload/elmi2021/$YOURNAME/public_trans_norm.ome.zarr # ## 3.4.2 omero-cli-zarr # Loading the image from: https://outreach.openmicroscopy.org/webclient/img_detail/55204/?dataset=6107. In this case, you will need the password to connect to the OMERO.server. # This is different from the password used previously. # Enter the password and click Enter. Move manually to the next cell. # Another block to get your workshop password from a previous session import getpass workshop_pass = getpass.getpass() # !omero login trainer-1@wss://outreach.openmicroscopy.org/omero-ws -w $workshop_pass # !rm -rf 55204.zarr # !time omero zarr export Image:55204 # !find 55204.zarr -name ".z*" # ## 3.4.3 Other resources # # <table> # <tr> # <td> # <a href="https://downloads.openmicroscopy.org/presentations/2020/Dundee/Workshops/NGFF/zarr_diagram/"> # <img src="images/resources-1.png" alt="Screenshot of the Zarr diagram from OME2020" style="height:200px"/> # </a> # </td> # <td> # <a href="https://downloads.openmicroscopy.org/presentations/2020/Dundee/Workshops/NGFF/zarr_diagram/">Diagram for how data moves</a> # </td> # </tr> # <tr> # <td> # <a href="https://blog.openmicroscopy.org/file-formats/community/2020/11/04/zarr-data/"> # <img src="images/resources-2.png" alt="Screenshot of the Zarr diagram from OME2020" style="height:200px"/> # </a> # </td> # <td> # <a href="https://blog.openmicroscopy.org/file-formats/community/2020/11/04/zarr-data/">Blog post for an easy way to publish OME-Zarr files</a> # </td> # </tr> # </table> # ### License (BSD 2-Clause) # # Copyright (c) 2021, University of Dundee All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3_Conversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd import matplotlib.pyplot as plt # + def count_words(text): ''' Count the number of times each word occurs in text (str). Return dictionary where keys are unique words and values are word counts. Skips punctuation ''' #lower case letters text = text.lower() #skip punctuation skips = ['.', ',',':', ';', "'", '"'] for ch in skips: text = text.replace(ch, "") word_counts = {} for word in text.split(" "): #known word if word in word_counts: word_counts[word] +=1 #unknown word else: word_counts[word] = 1 return word_counts def read_book(title_path): ''' Read a book and retirn it as a string ''' with open(title_path, 'r', encoding = 'UTF-8') as current_file: text = current_file.read() text = text.replace('\n', '').replace('\r', '') return text def word_stats(word_counts): ''' Return number of unique words and word frequences ''' num_unique = len(word_counts) counts = word_counts.values() return (num_unique, counts) # - book_dir = './books' os.listdir(book_dir) stats = pd.DataFrame(columns = ('Language', 'Author', 'Title', 'Length', 'Unique' )) title_num = 1 for language in os.listdir(book_dir): for author in os.listdir(book_dir + "/" + language): for title in os.listdir(book_dir + "/" + language + '/' + author): inputfile = book_dir + '/' + language + '/' + author + '/' + title print(inputfile) text = read_book(inputfile) (num_unique, counts) = word_stats(count_words(text)) stats.loc[title_num] = language, author, title, sum(counts), num_unique title_num += 1 stats.Length stats.Unique plt.plot(stats.Length, stats.Unique, 'bo') plt.show() plt.loglog(stats.Length, stats.Unique, 'bo') plt.show() stats[stats.Language == 'English'] stats[stats.Language == 'French'] # + plt.figure(figsize = (10,10)) #English subset = 
stats[stats.Language == 'English']
plt.loglog(subset.Length, subset.Unique, 'o', label = 'English', color = 'crimson')

#German
subset = stats[stats.Language == 'German']
plt.loglog(subset.Length, subset.Unique, 'o', label = 'German', color = 'orange')

#French
subset = stats[stats.Language == 'French']
plt.loglog(subset.Length, subset.Unique, 'o', label = 'French', color = 'forestgreen')

#Portuguese
subset = stats[stats.Language == 'Portuguese']
plt.loglog(subset.Length, subset.Unique, 'o', label = 'Portuguese', color = 'blueviolet')

plt.legend();
# Fixed axis-label typo: 'Book Lenght' -> 'Book Length'
plt.xlabel('Book Length')
plt.ylabel('Number of Unique Words')
plt.savefig('./books/lang_plot.pdf')
plt.show()
# -

# #### Plotting Book Statistics: Question 1
#
# ```stats``` is a Pandas dataframe as defined in Video 3.2.6. How can you access the column "length" in this dataframe?
#
# - ```stats->length```
# - **```stats.length```**
# - ```stats[length]```
# - **```stats["length"]```**
# - ```stats[,"length"]```
#
# #### Plotting Book Statistics: Question 2
#
# ```stats``` is a Pandas dataframe as defined in Video 3.2.6. How can you select only the rows where the language is French?
#
# - ```stats.language == "French"```
# - ```stats[language == "French"]```
# - ```stats.French```
# - **```stats[stats.language == "French"]```**
11 - Case Studies/Case Study 2 - Language Processing/3.2.6 - Plotting Book Statistics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # The GRAD-GPAD framework 🗿
#
# ➡️ Command Line Interface
# ---
#
#
# ## Table of Contents
# - Installation 💻
# - Getting Started 👩‍💻
#   * Reproducible Research
#   * Show Histogram
#

# ### Installation 💻

# Fixed: this line was a bare `pip install -U gradgpad`, which is a Python
# SyntaxError when the notebook cell runs — converted to the shell-escape
# form used by every other command cell in this notebook.
# !pip install -U gradgpad

# ---

# ### Getting Started 👩‍💻
#
# The `grad-gpad` framework has a Command Line Interface available.
# Please use the help parameter to obtain updated information about available commands

# !gradgpad -h

# ##### 👩‍💻 Reproducible Research
#
# To reproduce the research you only have to select an `<output>` folder and run the following command:

# !gradgpad --reproducible-research -o output

# ##### 👩‍💻 Show Histograms
#
# Besides, the `gradgpad` CLI can show the histogram from score files.
#
# For instance, if we want to visualize the histogram of the Quality-based approach, just type:

# !gradgpad --show-hist \
#     --score-filename-devel $(gradgpad --show-scores-path)/quality_rbf/quality_rbf_grandtest_devel.json \
#     --score-filename-test $(gradgpad --show-scores-path)/quality_rbf/quality_rbf_grandtest_test.json

# Or for the Auxiliary-based face-PAD approach

# !gradgpad --show-hist \
#     --score-filename-devel $(gradgpad --show-scores-path)/auxiliary/auxiliary_grandtest_devel.json \
#     --score-filename-test $(gradgpad --show-scores-path)/auxiliary/auxiliary_grandtest_test.json
gradgpad_cli.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp models # - #hide from nbdev.showdoc import * #export from bionlp_imports import * from bionlp.core import * # + #Generate Sequence -> Single Value dataset to test models dataset = generate_Seqs_Exp_Dataset(100,40,50) dataset.Exp = dataset.Exp.astype(float) train_loader = DataLoader(DnaDataset(dataset), batch_size=16, shuffle=True) seq_batch, label_batch = next(iter(train_loader)) seq_batch.shape # - #export class baseline_cnn(torch.nn.Module): """ simplest CNN """ def __init__(self): super().__init__() self.conv1 = nn.LazyConv1d(out_channels=10, kernel_size=5) self.flatten = nn.Flatten() self.linear1 = nn.LazyLinear(1) def forward(self,x): x = self.conv1(x) x = self.flatten(x) x = self.linear1(x) return x.flatten() # + model = baseline_cnn() model(seq_batch) loss_fn = nn.MSELoss() opt = torch.optim.Adam(model.parameters(),lr=.001) sbs = StepByStep(model,loss_fn,opt) sbs.set_loaders(train_loader) sbs.train(5) sbs.plot_losses() # - #N, F, L seq_batch.shape # + class base_rnn(torch.nn.Module): """ basic RNN """ def __init__(self): super().__init__() #self.cnn = torch.nn.LazyConv1d(10,kernel_size=1) self.rnn = torch.nn.RNN(input_size=49,hidden_size=10, batch_first=True) self.linear = torch.nn.LazyLinear(1) def forward(self,x): #print(x.shape) #x = self.cnn(x) #print(x.shape) # x = x.permute(0,2,1) #print(x.shape) batch_first, hidden = self.rnn(x) x = batch_first[:,:,-1] x = self.linear(x) return x model = base_rnn() batch_permute = seq_batch.permute(1,0,2) print(batch_permute.shape) hidden = model(batch_permute) hidden #print(f'Hidden shape {hidden.shape}') #print(f'Out shape {out.shape}') # + loss_fn = nn.MSELoss() opt = torch.optim.Adam(model.parameters(),lr=.01) sbs = StepByStep(model,loss_fn,opt) sbs.set_loaders(train_loader) 
sbs.train(2)
sbs.plot_losses()
# -

# IPython help syntax: show the source of torch.nn.RNN (notebook-only)
??torch.nn.RNN


class custom_cnn(torch.nn.Module):
    """ customized CNN """
01_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + language="javascript" # IPython.OutputArea.prototype._should_scroll = function(lines) { # return false; # } # + from typing import Any, Callable, NamedTuple, Tuple, Union Step = int Schedule = Callable[[Step], float] from IPython import display from IPython.display import Image, clear_output from PIL import Image import glob, os, shutil import os.path import time import scipy.io as io import scipy.sparse.csgraph as csgraph from scipy.sparse.csgraph import laplacian as csgraph_laplacian import scipy as sp from scipy.stats import gaussian_kde from scipy.linalg import null_space import jax from jax import jit, vmap, random, grad, value_and_grad, hessian from jax.experimental import optimizers, sparse from jax.experimental.optimizers import optimizer from jax import numpy as jnp from functools import partial import itertools import math import numpy as np import numpy.random as npr import matplotlib.pyplot as plt # %matplotlib inline from matplotlib import collections as mc import seaborn as sns import datetime from tqdm.notebook import tqdm import networkx as nx # %load_ext autoreload # %autoreload 2 # + #from jax.config import config #config.update("jax_enable_x64", True) # - import utils from utils import * from optimizers import * # load the data from the SuiteSparse Matrix Collection format # https://www.cise.ufl.edu/research/sparse/matrices/ graphs = ['qh882','dwt_1005','3elt','commanche_dual','bcsstk31'] graphdir = './testcases/' graphpostfix = 'dwt_1005' assert graphpostfix in graphs #grid_testcase = nx.grid_graph(dim=(50, 50)) grid_testcase = nx.triangular_lattice_graph(25,25) #grid_testcase = nx.cycle_graph(100) grid_testcase_adjacency = nx.adjacency_matrix(grid_testcase).toarray().astype(np.int16) DEBUG=True if DEBUG: graph, G, A, L, D, 
n = load_graph(graphdir+graphpostfix, A=grid_testcase_adjacency, plot_adjacency=False, verbose=True) else: graph, G, A, L, D, n = load_graph(graphdir+graphpostfix, A=None, plot_adjacency=False, verbose=True) del G del A del D # + @jit def project(X1, C, E_0, c=jnp.array([0,0])): C1 = X1.T@X1 C1sqrt = utils._sqrtm(C1) Csqrt = utils._sqrtm(C) U,s,V = jnp.linalg.svd(Csqrt@C1sqrt) X = X1@jnp.linalg.inv(C1sqrt)@U@V.T@Csqrt U_E, _, V_E = jnp.linalg.svd(X.T@E_0) X = X@(-U_E@V_E.T) return X.real @partial(jit, static_argnums=(3,)) def step(i, opt_state, Z, opt_update): """Perform a single descent + projection step with arbitrary descent direction.""" return opt_update(i, Z, opt_state) def _D_Z(X, A, P, d, e): I = jnp.eye(A.shape[0]) Ad = A + d*I Del = jnp.linalg.solve(X.T@jnp.linalg.solve(Ad, X), X.T)@jnp.linalg.solve(Ad, e) Z = jnp.linalg.solve(Ad, -X@Del + e) return Del, Z @jit def _sqp(A, P, L, E_0, X): """Perform an iteration of SQP.""" w = jnp.linalg.eigvals(L) idx = w.argsort() w = w[idx] E = -E_0 - (A@X + X@L) Del_0, Z_0 = _D_Z(X, A, P, w[0], E[:,0]) Del_1, Z_1 = _D_Z(X, A, P, w[1], E[:,1]) Z = jnp.stack([Z_0, Z_1], axis=1) Del = jnp.stack([Del_0, Del_1], axis=1) return Z, Del @jit def D_Z(X, A, P, d, e, I): Ad = A + d*I #sp_solve = lambda _, x: jax.scipy.sparse.linalg.bicgstab(lambda b:Ad@b, x, M=lambda b:M@b, maxiter=100)[0] #sp_solve = lambda _, x: jax.scipy.sparse.linalg.gmres(lambda b:Ad@b , x, maxiter=100, solve_method='incremental',M=jnp.linalg.inv(Ad))[0] sp_solve = lambda _, x: jnp.linalg.solve(Ad,x) ADinvP = sp_solve(Ad, P.T) #Del = jnp.linalg.solve(X.T@(jnp.linalg.solve(Ad,X)),X.T)@jnp.linalg.solve(Ad,e) Del = jnp.linalg.solve(X.T@(sp_solve(Ad,X)),X.T)@sp_solve(Ad,e) Z = P@(ADinvP@(P@(-X@Del + e))) return Del, Z @jit def sqp(X, A, P, L, E_0, I): """Perform an iteration of SQP.""" w = jnp.linalg.eigvals(L) idx = w.argsort() w = w[idx].real E = -E_0 - (A@X + X@L) Del_0, Z_0 = D_Z(X, A, M, P, w[0], E[:,0], I) Del_1, Z_1 = D_Z(X, A, M, P, w[1], E[:,1], I) Z = 
jnp.stack([Z_0, Z_1], axis=1) Del = jnp.stack([Del_0, Del_1], axis=1) return Z, Del def scipy_sqp(X, A, P, L, E_0, I): """Perform an iteration of SQP.""" w = jnp.linalg.eigvals(L) idx = w.argsort() w = w[idx].real E = -E_0 - (A@X + X@L) Del_0, Z_0 = scipy_D_Z(X, A, P, w[0], E[:,0], I) Del_1, Z_1 = scipy_D_Z(X, A, P, w[1], E[:,1], I) Z = jnp.stack([Z_0, Z_1], axis=1) Del = jnp.stack([Del_0, Del_1], axis=1) return Z, Del def bicg_solve(A, B, M): if len(B.shape) > 1 and B.shape[1] > 1: X, info = zip(*(sp.sparse.linalg.bicgstab(A, b, M=M, tol=1e-6) for b in B.T)) else: X, info = sp.sparse.linalg.bicgstab(A,B, M=M, tol=1e-6) return np.transpose(X), info def scipy_D_Z(X, A, P, d, e, I): Ad = A + d*I Ad = sp.sparse.csc_matrix((A.data, (A.indices[:,0], A.indices[:,1]))) sp_solve = lambda A, b:jnp.array(sp.sparse.linalg.spsolve(A.astype(np.float64),b.astype(np.float64))) Del = jnp.linalg.solve(X.T@(sp_solve(Ad,X)),X.T)@sp_solve(Ad,e) v_s = np.ones((A.shape[0],1))/np.sqrt(A.shape[0]) ADE = (-X@Del + e) PADE = ADE - v_s@(v_s.T@ADE) PADE = PADE - v_s@(v_s.T@PADE) ADinvP = sp_solve(Ad, PADE) Z = ADinvP - v_s@(v_s.T@ADinvP) return Del, Z def newton(opt_params, A, P, L, C, X_k, b_x, b_y, convergence_criterion, maxiters=100, alpha=1e-2, beta=0.9, initL=True): """Perform iterations of PND + backtracking line search.""" opt_state, opt_update, get_params = opt_params X_k = get_params(opt_state) E_0 = np.stack([b_x, b_y], axis=1) pAp = P@A@P.T if initL: L = L_init(X_k, C, pAp, E_0) report = {'x':None, 'lossh':[f(X_k, pAp, pAp, b_x, b_y)[1].item()], 'sln_path':[np.asarray(X_k)], 'foc':[foc_sqp(X_k, L, C, pAp, E_0).item()], 'step_sizes':[1], 'L':[L]} cc = 0 for k in tqdm(range(maxiters)): #Z, Del = sqp(A, P, pAp, L, E_0, X_k) Z, Del = _sqp(pAp, P, L, E_0, X_k) # backtracking line search f_xp = jnp.finfo(jnp.float32).max stp = 1 #f_x, gr = value_and_grad(f)(X_k, pAp, pAp, b_x, b_y) f_x = f(X_k, pAp, pAp, b_x, b_y)[0] len_p = jnp.linalg.norm(Z) X_k_t = X_k opt_state_t = opt_state while 
f_xp >= f_x: stp *= beta opt_state_t = step(stp, opt_state, -Z, opt_update) X_k_t = get_params(opt_state_t) f_xp,t = f(X_k_t, pAp, pAp, b_x, b_y) if stp * len_p < 1e-10: break #if f_xp.item() > report['lossh'][-1]: # break L = L + stp*Del foc = foc_sqp(X_k, L, C, pAp, E_0) opt_state = opt_state_t X_k = get_params(opt_state_t) report['sln_path'].append(np.asarray(X_k)) report['step_sizes'].append(stp) report['foc'].append(foc.item()) #report['lossh'].append(f_xp.item()) report['lossh'].append(t) report['L'].append(np.asarray(L)) if len(report['lossh']) > 2 and np.abs(foc.item()) <= convergence_criterion: cc += 1 if cc > 10: print('converged') break if cc > 0: cc -= 1 return report @jit def subspace(X_k_q, X_k, Z, v, A, E_0, E_00, P, C): v_s = np.ones((A.shape[0],1))/np.sqrt(A.shape[0]) AXE = A@X_k AXE = AXE - v_s@(v_s.T@AXE)+E_00 Q, _ = jnp.linalg.qr(jnp.concatenate([X_k_q, Z, v, AXE],axis=-1), mode='reduced') PQ = Q - v_s@(v_s.T@Q) B=PQ.T@(A@PQ) X_k = PQ.T@X_k E_0 = PQ.T@E_00 X_k = project(X_k, C, E_0) w_v, v_v = jnp.linalg.eig(B) w_v = w_v.real v_v = v_v.real idx = w_v.argsort() v_v = v_v[idx] v = Q@v_v[:,0:2] return Q, PQ, B, X_k, E_0, v def ssm(opt_params, A, P, L, C, X_k, b_x, b_y, convergence_criterion, maxiters=10, alpha=1e-2, beta=0.9): """ 1. compute newton direction z = sqp(X, Z, v, Ax + E0) & subspace S 2. 
approximate locally optimal X, L on S; X = min F(\hat{X}, B, V.T@E0) """ opt_state, opt_init, opt_update, get_params = opt_params X_k = get_params(opt_state) E_00 = jnp.stack([b_x, b_y], axis=1) cc = 0 L = jnp.eye(2) results = None E_0 = E_00 I = sp.sparse.identity(A.shape[0]) I = sparse.BCOO.from_scipy_sparse(I) v_s = np.ones((A.shape[0],0))/A.shape[0] X_k_q = X_k - v_s@((v_s).T@X_k) X_k_q = X_k #v = jnp.zeros_like(X_k_q) v = X_k M = None print('starting ssm iterations') for k in tqdm(range(maxiters)): 'Subspace computation' Z, Del = scipy_sqp(X_k, A, P, L, E_00, I) 'initialize wrt subspace' qq, Q, B, X_k, E_0, v = subspace(X_k_q, X_k, Z, v, A, E_0, E_00, P, C) opt_init, opt_update, get_params = psgd(partial(lambda x, y, z: z, E_0, C)) opt_state = opt_init(X_k) result = newton((opt_state, opt_update, get_params), A, Q.T, L, C, X_k, E_0[:,0], E_0[:,1], convergence_criterion=convergence_criterion, maxiters=20, alpha=0.0, beta=0.9, initL=True) X_k = result['sln_path'][-1] L = result['L'][-1] X_k_q = qq@X_k # PX X_k = Q@X_k # X #E_0 = QE_0 # E X_k = project(X_k, C, E_00) if results == None: results = result results['sln_path'] = [X_k] results['lossh'] = [result['lossh'][-1]] results['lossh'].extend(result['lossh']) results['sln_path'].extend([X_k]*len(result['lossh'])) results['foc'].extend(result['foc']) results['step_sizes'].extend(result['step_sizes']) return results # + def transform_A(A, X_k, boolean_idx): boolean_fixed_idx, boolean_nonfixed_idx = boolean_idx X_1x = X_k[boolean_fixed_idx,0] X_1y = X_k[boolean_fixed_idx,1] X_2 = X_k[boolean_nonfixed_idx] A_12 = A[boolean_fixed_idx, :] A_12 = A_12[:, boolean_nonfixed_idx] A = A[boolean_nonfixed_idx, :] A = A[:,boolean_nonfixed_idx] v_s = np.ones((A.shape[0],1))/np.sqrt(A.shape[0]) I = np.eye(v_s.shape[0]) #pap = (I - <EMAIL>)@A@(I - <EMAIL>) pap = A #w,v = sp.linalg.eigh(pap) w,v = sp.sparse.linalg.eigsh(pap,which='SM') print('eigenvalues: ',w) X_2[:,0] = v[:,0] X_2[:,1] = v[:,1] b_x = X_1x@A_12 b_y = X_1y@A_12 
return A, X_1x, X_1y, X_2, b_x, b_y def map_vars(A, X_k, fixed_idx, centercons, decomp=True): """Preprocess variables """ N = A.shape[0] k = fixed_indices.shape[0] fixed_idx = jnp.zeros((k,N)) for i in range(k): fixed_idx=jax.ops.index_add(fixed_idx,jnp.index_exp[i, fixed_indices[i]],1) boolean_fixed_idx = fixed_idx.sum(0).astype(bool) boolean_nonfixed_idx = (1-fixed_idx.sum(0)).astype(bool) A, X_1x, X_1y, X_2, b_x, b_y = transform_A(A, X_k, (boolean_fixed_idx, boolean_nonfixed_idx)) X_k = X_2 print('computing constraints null space') constraints = np.expand_dims(np.ones(X_2.shape[0]),0) P = None n0_x = jnp.zeros_like(b_x) if centercons[1] == centercons[0]: n0_y = n0_x else: n0_y = pinvcons@(np.expand_dims(centercons[1],0)) return X_k, A, P, b_x, b_y, n0_x, n0_y, fixed_idx def cluster(rng, opt_params, X_k, fixed_x, A, mapped_vars, fixed_indices=None, use_fi=False, maxiters=10, convergence_criterion=1e-3, c1=1, c2=1, c3=0, centroid=jnp.array([0,0]), centercons=None, v=None, D=None, eps=1e-8, method='pgd'): """Given an adjacency matrix A and initialization X_k, optimize X.""" method = method.lower() opt_init, opt_update, get_params = opt_params k = fixed_x.shape[0] if not use_fi: fixed_coordsx = fixed_x[:,0] fixed_coordsy = fixed_x[:,1] else: fixed_coordsx = X_k[fixed_indices,0] fixed_coordsy = X_k[fixed_indices,1] N = A.shape[0] if v is None: v = jnp.ones(N) #if D is None: # D = jnp.diag(v) if centercons is None: centercons = jnp.zeros(2) A, P, b_x, b_y, n0_x, n0_y, fixed_idx = mapped_vars C = jnp.block([[c1, c3],[c3, c2]]) assert jnp.linalg.det(C) > 1e-5 E_0 = jnp.stack([b_x, b_y], axis=1) n0 = jnp.stack([n0_x,n0_y],axis=0) X_k_n = X_k print('initial projection') X_k_n = project(X_k_n, C, E_0, centercons) L = jnp.eye(2) opt_state = opt_init(X_k_n) print('ssm...') result = ssm((opt_state, opt_init, opt_update, get_params), A, P, L, C, X_k_n, b_x, b_y, convergence_criterion=convergence_criterion, maxiters=maxiters, alpha=0.0, beta=0.9) X_k = 
result['sln_path'][np.argmin(result['lossh'])] X_k_n = np.zeros((N,2)) if not use_fi: nonfixed_idx = np.ones(N, dtype=bool) nonfixed_idx[fixed_indices] = 0 X_k_n[fixed_indices,0] = fixed_coordsx X_k_n[nonfixed_idx,0] = np.array(X_k[:,0]) + n0_x.T X_k_n[fixed_indices,1] = fixed_coordsy X_k_n[nonfixed_idx,1] = np.array(X_k[:,1]) + n0_y.T else: X_k_n[:,0] = np.array(P.T@X_k[:,0]) + n0_x.T X_k_n[:,1] = np.array(P.T@X_k[:,1]) + n0_y.T #result = {} result['x'] = X_k_n mask = (1-fixed_idx.sum(0)).astype(bool) result['mask'] = mask result['centroid'] = centercons result['P'] = (P) result['e'] = np.vstack([b_x,b_y]) result['n'] = (n0_x, n0_y) return result # + ##### USER PARAMETERS ##### method = "ssm" # pnd, ssm, or pgd seed = 0 # random seed eps = 1e-8 # global epsilon variable rng = random.PRNGKey(seed) key, subkey = jax.random.split(rng) v = np.ones(n) c1=v.sum()*10**2*1/12 c2=v.sum()*10**2*1/12 c3=0 C = jnp.block([[c1, c3],[c3, c2]]) X_k_r = (random.normal(subkey, (n,2))*np.sqrt(10)) if os.path.isfile(graphdir+graphpostfix+'_evals.npy') and \ os.path.isfile(graphdir+graphpostfix+'_evecs.npy'): w = np.load(graphdir+graphpostfix+'_evals.npy') v = np.load(graphdir+graphpostfix+'_evecs.npy') else: w,v = sp.sparse.linalg.eigsh(L, k=min(n,5), which='SM') np.save(graphdir+graphpostfix+'_evals.npy',w) np.save(graphdir+graphpostfix+'_evecs.npy',v) if DEBUG: w,v = sp.sparse.linalg.eigsh(L, k=min(n,5), which='SM') X_k = v[:,1:3].real if DEBUG: fixed_indices = np.array([0]) else: fixed_indices = np.array([0,1,2,3,4,5,6,7,8,9,10]) X_k[fixed_indices] = X_k_r[fixed_indices] # + del w del v #del X_k_r v = jnp.ones(n) print('initial transformation of variables') X_k, A, P, b_x, b_y, n0_x, n0_y, fixed_idx = map_vars(L, X_k, fixed_indices, v.sum()*jnp.array([0,0])) print('done mapping variables') A = sparse.BCOO.from_scipy_sparse(A) mapped_vars = (A, P, b_x, b_y, n0_x, n0_y, fixed_idx) if method == "pgd": pgd_lr = 5e-2 opt_init, opt_update, get_params = padam(pgd_lr,partial(lambda x, y, 
z: project(z, y, x), np.stack([b_x,b_y],axis=1), C), b1=0.9, b2=0.999, eps=1e-08) elif method == "pnd": opt_init, opt_update, get_params = psgd(partial(lambda x, y, z: project(z, y, x), np.stack([b_x,b_y],axis=1), C)) elif method == 'ssm': opt_init, opt_update, get_params = psgd(partial(lambda x, y, z: project(z, y, x), np.zeros((8,2)), C)) else: print('method not supported') assert False # + print('clustering...') result = cluster(rng, (opt_init, opt_update, get_params), X_k, X_k_r[fixed_indices], L, mapped_vars, fixed_indices=fixed_indices, use_fi=False, c1=c1, c2=c2, c3=c3, centercons=v.sum()*jnp.array([0,0]), v=None, D=None, eps=1e-8, maxiters=10, convergence_criterion=1e-3, method=method) results = [result] X_k_n=result['x'] # - resgraph = utils.plot_results(result) voxel_id, voxel_bound = voxel_cluster(X_k, np.array([5, 5])) result['h'] = 0.0 result['g'] = 0.0 utils.plot_graph(X_k_n, graph, title='loss: {} h: {} g: {} foc: {}'.format(str(np.round(np.min(result['lossh']),2)), np.round(result['h'],2), np.round(result['g'],2), str(np.round(result['foc'][np.argmin(result['lossh'])],2))), fixed_indices=fixed_indices, c=None) # + #utils.plot_animation(results, graph, fixed_coordinates=X_k_r[fixed_indices]) # - # ###
rayleigh_sparse-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lifetime of GTOs and Highly Elliptical Orbits # # ## Summary of Findings # # * The Sun and Moon perturbations can have large effects on the orbital lifetime, particularly the orbit orientation with respect to the moon. # # * Due to the highly sensative nature of highly elliptical orbits the orbital lifetime variability is rather large, due to drag, and solar/lunar perturbations. # # * The Lifetime Tool offers a good initial guess for GTOs but running HPOP shows a wider variability in outcomes. # import numpy as np import pandas as pd pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100) import scipy as sp import matplotlib.pyplot as plt import seaborn as sns sns.set_context("talk") sns.set_style("white") sns.set_palette('colorblind') import os import pickle import time from functools import reduce from comtypes.client import CreateObject from comtypes.client import GetActiveObject from comtypes.gen import STKObjects from comtypes.gen import STKUtil from comtypes.gen import AgSTKVgtLib from IPython.display import Image from LifeTimeLib import * # %config Completer.use_jedi = False # # What are orbit lifetimes and the key influencing factors for GTOs? 
# # ### Look at the data set # # GTO like trajectories with different Cd * Drag Area / Mass ratios # Load trade study file tradeStudy = loadTradeStudy('LifeTimeResultsGTOs') tradeStudy.properties() # + # Load Data Frame dfRes = readResults(tradeStudy) dfRes = dfRes.loc[(dfRes['Cd*Drag Area/Mass'] > 0.001) & (dfRes['Cd*Drag Area/Mass'] < 1)] # Summary statistics dfRes.describe() # - # Look at correlations colsToDrop = ['Run ID','e','Ra','x','y','z','Vx','Vy','Vz','TA','p','LT Orbits','LT Runtime','Orb Per Calc','Gaussian Quad','SolarFluxFile','Density Model','2nd Order Oblateness'] corrMat = dfRes.drop(colsToDrop,axis=1).corr()['LT Years'].sort_values(ascending=False) pd.DataFrame(corrMat*100)[1:] # ### Compare Rp and Cd * Drag Area/Mass to Lifetime Predicitions # Note: Because the max propagation duration was limited to 200 yrs, the max allowed lifetime is capped at 200 years, which causes the mean value to be smaller than it should be. But you can still get the general trend. Looking at the 50% may be of more use. Also note the std is very high, so this suggests there are many other factors which affect the orbit lifetime. 
#Compare the effects of radius of periapsis
dfRes['Rp'] = np.round(dfRes['Rp']) # Rounding gets rid of small numerical precision issues
dfRes.groupby('Rp').describe()['LT Years']

#Compare the effects of Cd*Drag Area/Mass
dfRes.groupby('Cd*Drag Area/Mass').describe()['LT Years']

#Compare the effects of radius of periapsis and Cd*Drag Area/Mass.
# Keep every other Cd*Drag Area/Mass value so the summary table stays readable.
dfResReduced = dfRes[dfRes['Cd*Drag Area/Mass'].isin(dfRes['Cd*Drag Area/Mass'].sort_values().unique()[0:18:2])]
# FIX: group the reduced subset. The original grouped the full dfRes here,
# which made the filtering line above dead code.
dfResReduced = dfResReduced.groupby(['Rp','Cd*Drag Area/Mass']).describe()['LT Years']
dfResReduced

sns.lineplot('Rp','LT Years',hue='Cd*Drag Area/Mass',legend='full',data=dfRes);
plt.xticks(np.arange(6578,6878,100))
plt.grid(True)
plt.legend(loc='center left', bbox_to_anchor= (1.0, 0.5),borderaxespad=0, frameon=False);

# ### Compare Flux Sigma Level to Lifetime Predictions
#

#Compare the effects of flux sigma level
dfRes.groupby(['Rp','Flux Sigma Level']).describe()['LT Years']

# The flux sigma level doesn't appear to have a large effect on GTOs which already have lower lifetimes, but it does appear to cause a wider variability in GTOs with longer lifetimes. This effect can be seen looking at Rp = 6678 at the 75% level and Rp = 6778 at the 25% level.

# ### Compare Inclination to Lifetime Predictions

#Compare the effects of inclination
dfRes.groupby(['Rp','i']).describe()['LT Years']

# The lower inclinations tend to have shorter lifetimes. But since launch site largely determines inclination this is usually not a design parameter.

# ### Compare Periapsis Location w.r.t. Sun and Moon to Lifetime Predictions

# limit the orbits to a subset of the data
rpVal = 6678
iVal = 30
maxCdDragAreaMassVal = 0.08

# +
# Get Sun and Moon angle at epoch.
# Attach to a running STK 11 instance if one exists; otherwise start a new
# one and create a scratch scenario for the angle computations.
try:
    app = GetActiveObject('STK11.Application')
    root = app.Personality2
except:
    app = CreateObject('STK11.Application')
    app.Visible = True
    app.UserControl = True
    root = app.Personality2
    root.Isolate()
    root.NewScenario('MoonAngle')
scenario = root.CurrentScenario
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario)
scenario2.StartTime = '10 Sep 2019 04:00:00.000'
scenario2.StopTime = '11 Sep 2019 04:00:00.000'

# Create Longitude of Periapsis (LoP = RAAN + AoP), wrapped into [0, 360).
# The subtraction is applied twice because RAAN + AoP can reach 720 deg.
dfRes['LoP'] = dfRes['RAAN'] +dfRes['AoP']
dfRes.loc[dfRes['LoP']>=360,'LoP'] = dfRes.loc[dfRes['LoP']>=360,'LoP']-360
dfRes.loc[dfRes['LoP']>=360,'LoP'] = dfRes.loc[dfRes['LoP']>=360,'LoP']-360

# Dihedral angle from the Earth ICRF X axis to the Moon, measured about the
# Moon's orbit normal (reuse the angle if it already exists in the scenario)
try:
    moonRAAN = root.CentralBodies.Earth.Vgt.Angles.Item('MoonRAAN')
except:
    moonRAAN = root.CentralBodies.Earth.Vgt.Angles.Factory.Create('MoonRAAN','Moon RAAN',AgSTKVgtLib.eCrdnAngleTypeDihedralAngle)
moonRAAN2 = moonRAAN.QueryInterface(AgSTKVgtLib.IAgCrdnAngleDihedral)
moonRAAN2.FromVector.SetPath('CentralBody/Earth ICRF-X')
moonRAAN2.ToVector.SetPath('CentralBody/Earth Moon')
moonRAAN2.PoleAbout.SetPath('CentralBody/Moon Orbit_Normal')

root.UnitPreferences.SetCurrentUnit('DateFormat','YYDDD')
# Angle between periapsis and the Moon: the Moon's "RAAN" minus the longitude
# of periapsis, evaluated once per unique epoch and wrapped into [0, 360)
dfRes['AngToMoon'] = dfRes['LoP']
for uniqueEpoch in dfRes['epoch'].unique():
    indexs = np.round(dfRes['epoch']) == np.round(uniqueEpoch)
    ang = moonRAAN.FindAngle(str(uniqueEpoch))
    dfRes.loc[indexs,'AngToMoon'] = (ang.Angle-dfRes.loc[indexs,'LoP']).astype(float)
dfRes.loc[dfRes['AngToMoon'] < 0,'AngToMoon'] = dfRes['AngToMoon']+360

# Same construction for the Sun, measured about the Earth's orbit normal
try:
    sunRAAN = root.CentralBodies.Earth.Vgt.Angles.Item('SunRAAN')
except:
    sunRAAN = root.CentralBodies.Earth.Vgt.Angles.Factory.Create('SunRAAN','sun RAAN',AgSTKVgtLib.eCrdnAngleTypeDihedralAngle)
sunRAAN2 = sunRAAN.QueryInterface(AgSTKVgtLib.IAgCrdnAngleDihedral)
sunRAAN2.FromVector.SetPath('CentralBody/Earth ICRF-X')
sunRAAN2.ToVector.SetPath('CentralBody/Earth Sun')
sunRAAN2.PoleAbout.SetPath('CentralBody/Earth Orbit_Normal')

root.UnitPreferences.SetCurrentUnit('DateFormat','YYDDD')
dfRes['AngToSun'] = dfRes['LoP']
for uniqueEpoch in dfRes['epoch'].unique():
    indexs = np.round(dfRes['epoch']) == np.round(uniqueEpoch)
    ang = sunRAAN.FindAngle(str(uniqueEpoch))
    dfRes.loc[indexs,'AngToSun'] = (ang.Angle-dfRes.loc[indexs,'LoP']).astype(float)
dfRes.loc[dfRes['AngToSun'] < 0,'AngToSun'] = dfRes['AngToSun']+360
# -

# At Epoch 19360
epochVal = 19360
data = dfRes[(dfRes['Rp'] == rpVal) & (dfRes['Cd*Drag Area/Mass'] <= maxCdDragAreaMassVal) & (dfRes['i'] == iVal) & (np.round(dfRes['epoch']) == np.round(epochVal))]

ax = sns.jointplot(data['AngToSun'],data['LT Years'],kind='kde',space=0,n_levels=100,height=6,kernel='epa',bw='silverman',marginal_kws={"kernel": "epa","bw": "silverman"})
ax.plot_joint(plt.scatter, c="k", s=20, linewidth=0.5, marker="+",alpha=0.2)
plt.xticks(np.arange(0,360,90));
ax.ax_joint.set_xlim([-5,365])
ax.ax_joint.set_ylim([0,201]);

ax = sns.jointplot(data['AngToMoon'],data['LT Years'],kind='kde',space=0,n_levels=100,height=6,kernel='epa',bw='silverman',marginal_kws={"kernel": "epa","bw": "silverman"})
ax.plot_joint(plt.scatter, c="k", s=20, linewidth=0.5, marker="+",alpha=0.2)
plt.xticks(np.arange(0,360,90));
ax.ax_joint.set_xlim([-5,365])
ax.ax_joint.set_ylim([0,201]);

# At Epoch 19253.166667
epochVal = 19253.166667
data = dfRes[(dfRes['Rp'] == rpVal) & (dfRes['Cd*Drag Area/Mass'] <= maxCdDragAreaMassVal) & (dfRes['i'] == iVal) & (np.round(dfRes['epoch']) == np.round(epochVal))]

ax = sns.jointplot(data['AngToSun'],data['LT Years'],kind='kde',space=0,n_levels=100,height=6,kernel='epa',bw='silverman',marginal_kws={"kernel": "epa","bw": "silverman"})
ax.plot_joint(plt.scatter, c="k", s=20, linewidth=0.5, marker="+",alpha=0.2)
plt.xticks(np.arange(0,360,90));
ax.ax_joint.set_xlim([-5,365])
ax.ax_joint.set_ylim([0,201]);

ax = sns.jointplot(data['AngToMoon'],data['LT Years'],kind='kde',space=0,n_levels=100,height=6,kernel='epa',bw='silverman',marginal_kws={"kernel": "epa","bw": "silverman"})
ax.plot_joint(plt.scatter, c="k", s=20, linewidth=0.5, marker="+",alpha=0.2)
plt.xticks(np.arange(0,360,90));
ax.ax_joint.set_xlim([-5,365])
ax.ax_joint.set_ylim([0,201]);

# There is clustering of orbit lifetimes based on the angle between periapsis and Sun/Moon. The angle is defined as the angle between the longitude of periapsis and the Sun/Moon position about their respective orbit normals. (This is not quite the same as the angle between periapsis and the Sun/Moon but it is quicker to calculate and similar). The angle is 0 when the celestial body is in line with the initial periapsis and +180 would indicate the celestial body is rotated in the plane of motion to align with apoapsis. Looking at different inclinations also affects the clustering. Detailed conclusions are not drawn here, but the orientation of the orbit w.r.t. the Moon and Sun has a substantial impact on the orbit lifetime.
# # ### Look at Cd * Drag Area/Mass vs Mean Lifetime data = data[(data['Cd*Drag Area/Mass'] != .004) & (data['Cd*Drag Area/Mass'] != 0.02) & (data['Cd*Drag Area/Mass'] != 0.0625)] # limit lines drawn numOfColors = len(data['Cd*Drag Area/Mass'].unique()) sns.lineplot('AngToMoon','LT Years',hue='Cd*Drag Area/Mass',legend='full',palette=sns.color_palette('colorblind')[0:numOfColors],data=data) plt.legend(loc='center left', bbox_to_anchor= (1.0, 0.5),borderaxespad=0, frameon=False); plt.xticks(np.arange(0,360,45)); plt.grid(True) plt.title('Rp = '+str(rpVal)); sns.lineplot('AngToSun','LT Years',hue='Cd*Drag Area/Mass',legend='full',palette=sns.color_palette('colorblind')[0:numOfColors],data=data) plt.legend(loc='center left', bbox_to_anchor= (1.0, 0.5),borderaxespad=0, frameon=False); plt.xticks(np.arange(0,360,45)); plt.grid(True) plt.title('Rp = '+str(rpVal)); # The angles to the Sun/Moon have an impact on orbit lifetime which affect all satellites, but the specific Cd * Drag Area/Mass ratio for a given satellite also has a substantial impact. # Load satellites into STK dfLoad = data[data['LT Years']<25].sample(10) # load 10 satellite with lifetimes < 25 years loadSats(dfLoad,maxSats=50,maxDur=tradeStudy.maxDur) # # How does the Lifetime Tool compare to HPOP for GTOs? # # ### Look at the data set # # An example GTO mission with different flux sigma levels, density models and also running HPOP. The radius of periapsis is 6577 km. 
# Load trade study tradeStudy = loadTradeStudy('LifeTimeGTOMissionVariations') tradeStudy.properties() # Load Data Frame dfRes = readResults(tradeStudy) dfRes['Runtime Ratio'] = dfRes['HPOP Runtime']/dfRes['LT Runtime'] dfRes['Years Signed Error'] = dfRes['LT Years']-dfRes['HPOP Years'] dfRes['Years Abs Error'] = abs(dfRes['LT Years']-dfRes['HPOP Years']) dfRes['Years % Error'] = abs(dfRes['LT Years']-dfRes['HPOP Years'])/dfRes['HPOP Years']*100 dfRes.describe() # ### Compare Flux Sigma Level and Density Model to Lifetime Predictions # Comparing HPOP and LT across Flux Sigma Levels dfRes.groupby('Flux Sigma Level').describe()[['LT Years','HPOP Years']] # It looks like the orbit lifetime predictions vary +- 0.1 years from changing the flux sigma level. Interestingly the lifetime of the GTOs sometimes goes up with atmospheric flux. This would need to be investigated further to draw any conclusive results. # Comparing HPOP and LT Across Density Models dfRes.groupby('Density Model').describe()[['LT Years','HPOP Years']] # It looks like the orbit lifetime predictions vary +- 0.1 years between different atmospheric density models. Although there is one outlier from an HPOP run at Flux Sigma Level = 2 using the Jacchia 1970 atmospheric density model. 
# # ### Compare Lifetime Predictions to HPOP # Look at LT vs HPOP Predictions dfSub = dfRes#[dfRes['Flux Sigma Level'] == 0] # plt.figure(figsize=(10,5)) plt.scatter(dfSub['Density Model'],dfSub['HPOP Years'],label='HPOP',s=50,alpha = .8) plt.scatter(dfSub['Density Model'],dfSub['LT Years'],label='LT',s=50,alpha = .8) plt.xticks(rotation=45,horizontalalignment='right') plt.legend(loc='center left', bbox_to_anchor= (1.0, 0.5), borderaxespad=0, frameon=False); # Look at erorrs dfRes.describe()[['Years Signed Error','Years Abs Error','Years % Error']] # Plot % error plt.figure(figsize=(6, 6)) sns.scatterplot('HPOP Years','Years % Error',hue='2nd Order Oblateness',data=dfRes); plt.legend(loc='center left', bbox_to_anchor= (1.0, 0.5),borderaxespad=0, frameon=False); # The lifetime tool gives similar results to HPOP within a few % for this GTO which has a low lifetime of 3.7 years. A few other GTOs were looked at and yielded similar results, although HPOP did tend to show a bit more variability in the orbit lifetimes predictions with occasional outliers. Indicating that the lifetime tool is a good first guess but some of the other perturbations captured with HPOP may cause the orbit lifetime to have a wider distribution of outcomes.
StkAutomation/Python/Lifetime Analysis/LifetimeOfGTOs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### 1. Review of model evaluation
# - Need a way to choose between models: different model types, tuning parameters, and features
# - Use a model evaluation procedure to estimate how well a model will generalize to out-of-sample data
# - Requires a model evaluation metric to quantify the model performance

# ### 2. Model evaluation procedures
# __Training and testing on the same data__
# - Rewards overly complex models that "overfit" the training data and won't necessarily generalize
#
# __Train/test split__
# - Split the dataset into two pieces, so that the model can be trained and tested on different data
# - Better estimate of out-of-sample performance, but still a "high variance" estimate
# - Useful due to its speed, simplicity, and flexibility
#
# __K-fold cross-validation__
# - Systematically create "K" train/test splits and average the results together
# - Even better estimate of out-of-sample performance
# - Runs "K" times slower than train/test split

# ### 3. Model evaluation metrics
# __Regression problems:__ Mean Absolute Error, Mean Squared Error, Root Mean Squared Error
#
# __Classification problems:__ Classification accuracy
# - There are many more metrics, and we will discuss them today

# ### 4. Classification accuracy

# +
# importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set_style("darkgrid")
# -

# import data
data = pd.read_csv('Fraud_check.csv')
data.head()

# +
# Collect the object-dtype (categorical) columns and show their unique
# values and frequencies
categorical_col = []
for column in data.columns:
    if data[column].dtype == object:
        categorical_col.append(column)
        print(f"{column} : {data[column].unique()}")
        print("------------------------------------")
        print(f"{column} : {data[column].value_counts()}")
        print("====================================\n\n")
# +
from sklearn.preprocessing import LabelEncoder

# Target definition: risky = 1 (taxable income <= 30000) and good = 0
data['Result'] = data['Taxable.Income'].apply(lambda value: 1 if value <= 30000 else 0)

# Integer-encode the categorical feature columns in place
label = LabelEncoder()
for column in categorical_col:
    data[column] = label.fit_transform(data[column])
data.head()
# +
# split X and y into training and testing sets
from sklearn.model_selection import train_test_split

X = data.drop('Result', axis=1)
y = data.Result
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 120)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# +
# train a logistic regression model on the training set
from sklearn.linear_model import LogisticRegression

# instantiate model
logreg = LogisticRegression()

# fit model
logreg.fit(X_train, y_train)
# -

# make class predictions for the testing set
y_pred_class = logreg.predict(X_test)

# __Classification accuracy:__ percentage of correct predictions

# calculate accuracy
from sklearn import metrics
print(metrics.accuracy_score(y_test, y_pred_class))

# __Null accuracy:__ accuracy that could be achieved by always predicting the most frequent class
# - We must always compare with this

# examine the class distribution of the testing set (using a Pandas Series method)
y_test.value_counts()

# calculate the percentage of ones
# because y_test only contains ones and zeros, we can simply calculate the mean = percentage of ones
y_test.mean()

# calculate the percentage of zeros
1 - y_test.mean()

# calculate null accuracy in a single line of code
# only for binary classification problems coded as 0/1
max(y_test.mean(), 1 - y_test.mean())

# calculate null accuracy (for multi-class classification problems)
y_test.value_counts().head(1) / len(y_test)

# +
from sklearn.metrics import classification_report

print(classification_report(y_test, y_pred_class))
# -

# Comparing the __true__ and __predicted__ response values

# print the first 25 true and predicted responses.
# FIX: the second row holds predictions, not "False" values — label it
# 'Pred:' for consistency with the identical cell further below.
print('True:', y_test.values[0:25])
print('Pred:', y_pred_class[0:25])

# __Conclusion:__
#
# - Classification accuracy is the easiest classification metric to understand
# - But, it does not tell you the underlying distribution of response values
#   - We examine by calculating the null accuracy
# - And, it does not tell you what "types" of errors your classifier is making

# ### 5. Confusion matrix

# IMPORTANT: first argument is true values, second argument is predicted values
# this produces a 2x2 numpy array (matrix)
print(metrics.confusion_matrix(y_test, y_pred_class))

# __Basic terminology:__ (positive class = risky; wording corrected from the
# diabetes tutorial this notebook was adapted from; counts are from one run)
#
# - __True Positives (TP):__ we correctly predicted the risky class # 112
#
# - __True Negatives (TN):__ we correctly predicted the good class # 24
#
# - __False Positives (FP):__ we incorrectly predicted risky (a "Type I error") # 3
#   - Falsely predict positive
#   - Type I error
#
# - __False Negatives (FN):__ we incorrectly predicted good (a "Type II error") # 11
#   - Falsely predict negative
#   - Type II error

# print the first 25 true and predicted responses
print('True', y_test.values[0:25])
print('Pred', y_pred_class[0:25])

# save confusion matrix and slice into four pieces
confusion = metrics.confusion_matrix(y_test, y_pred_class)
print(confusion)

# [row, column] — row is the true class, column the predicted class
TP = confusion[1, 1]
TN = confusion[0, 0]
FP = confusion[0, 1]
FN = confusion[1, 0]

# ### 6. Metrics computed from a confusion matrix

# __Classification Accuracy:__ Overall, how often is the classifier correct?

# use float to perform true division, not integer division
print((TP + TN) / float(TP + TN + FP + FN))
print(metrics.accuracy_score(y_test, y_pred_class))

# __Classification Error:__ Overall, how often is the classifier incorrect?
#
# - Also known as "Misclassification Rate"

# +
classification_error = (FP + FN) / float(TP + TN + FP + FN)

print(classification_error)
print(1 - metrics.accuracy_score(y_test, y_pred_class))
# -

# __Sensitivity:__ When the actual value is positive, how often is the prediction correct?
#
# - Something we want to maximize
# - How "sensitive" is the classifier to detecting positive instances?
# - Also known as "True Positive Rate" or "Recall"
# - TP / all positive
#   - all positive = TP + FN

# +
sensitivity = TP / float(FN + TP)

print(sensitivity)
print(metrics.recall_score(y_test, y_pred_class))
# -

# __Specificity:__ When the actual value is negative, how often is the prediction correct?
#
# - Something we want to maximize
# - How "specific" (or "selective") is the classifier in predicting positive instances?
# - TN / all negative
#   - all negative = TN + FP

# +
specificity = TN / (TN + FP)

print(specificity)
# -

# __False Positive Rate:__ When the actual value is negative, how often is the prediction incorrect?

# +
false_positive_rate = FP / float(TN + FP)

print(false_positive_rate)
print(1 - specificity)
# -

# __Precision:__ When a positive value is predicted, how often is the prediction correct?
# +
precision = TP / float(TP + FP)

print(precision)
print(metrics.precision_score(y_test, y_pred_class))
# -

# __Which metrics should you focus on?__
#
# - Choice of metric depends on your business objective
# - Identify if FP or FN is more important to reduce
# - Choose metric with relevant variable (FP or FN in the equation)
#
# __Spam filter__ (positive class is "spam"):
# - Optimize for precision or specificity
#   - precision # false positive as variable
#   - specificity # false positive as variable
# - Because false negatives (spam goes to the inbox) are more acceptable than false positives (non-spam is caught by the spam filter)
#
# __Fraudulent transaction detector__ (positive class is "fraud"):
# - Optimize for sensitivity # FN as a variable
# - Because false positives (normal transactions that are flagged as possible fraud) are more acceptable than false negatives (fraudulent transactions that are not detected)

# ### 7. Adjusting the classification threshold

# print the first 10 predicted responses
# 1D array (vector) of binary values (0, 1)
logreg.predict(X_test)[0:10]

# print the first 10 predicted probabilities of class membership
logreg.predict_proba(X_test)[0:10]

# - predict_proba process
#   1. Predicts the probabilities
#   2. Choose the class with the highest probability
# - There is a 0.5 classification threshold
#   1. Class 1 is predicted if probability > 0.5
#   2. Class 0 is predicted if probability < 0.5

# print the first 10 predicted probabilities for class 1
logreg.predict_proba(X_test)[0:10, 1]

# store the predicted probabilities for class 1
y_pred_prob = logreg.predict_proba(X_test)[:, 1]

# +
# allow plots to appear in the notebook
# %matplotlib inline
import matplotlib.pyplot as plt

# adjust the font size
plt.rcParams['font.size'] = 12
# +
# histogram of predicted probabilities
# 8 bins
plt.hist(y_pred_prob, bins=8)

# x-axis limit from 0 to 1
plt.xlim(0,1)
plt.title('Histogram of predicted probabilities')
# FIX: the target here is fraud risk, not diabetes (axis label was left over
# from the tutorial this notebook is based on)
plt.xlabel('Predicted probability of fraud')
plt.ylabel('Frequency')
# -

# __Observations:__
# We can see from the first bar
# - About 93% of observations have probability from 0.001 to 0.100
# - Small number of observations with probability > 0.5
#   - This is below the threshold of 0.5
#
# __Solution:__
# - Decrease the threshold value
#   - Increase the sensitivity of the classifier # This would increase the number of TP # More sensitive to positive instances

# print the first 10 predicted probabilities
y_pred_prob[0:10]

# print the first 10 predicted classes with the lower threshold
y_pred_class[0:10]

# +
y_pred_probs = y_pred_prob.reshape(-1, 1)

# predict fraud if the predicted probability is greater than 0.3
from sklearn.preprocessing import binarize

# it will return 1 for all values above 0.3 and 0 otherwise.
# FIX: `threshold` must be passed by keyword on current scikit-learn, and the
# original `[0]` selected the first *row* (a single value) instead of the
# first column — `[:, 0]` yields the full vector of thresholded classes.
y_pred_class = binarize(y_pred_probs, threshold=0.3)[:, 0]
# -

# print the first 10 predicted probabilities
y_pred_probs[0:10]

# +
y_pred_prob = logreg.predict_proba(X_test)[0:10]
y_pred_prob
# +
# print the first 10 predicted probabilities for class 1 - probability of fraud
logreg.predict_proba(X_test)[0:10, 1]
# +
# store the predicted probabilities for class 1 - probability of fraud
y_pred1 = logreg.predict_proba(X_test)[:, 1]
# +
from sklearn.preprocessing import binarize
# FIX: import confusion_matrix — it was used below but never imported
# anywhere in this notebook (NameError at runtime)
from sklearn.metrics import accuracy_score, confusion_matrix

# Sweep thresholds 0.1 - 0.4 and report the confusion matrix and the derived
# accuracy / sensitivity / specificity for each
for i in range(1,5):
    cm1=0
    y_pred1 = logreg.predict_proba(X_test)[:,1]
    y_pred1 = y_pred1.reshape(-1,1)
    y_pred2 = binarize(y_pred1, threshold=i/10)  # FIX: keyword argument
    #y_pred2 = np.where(y_pred2 == 1, 'Yes', 'No')
    cm1 = confusion_matrix(y_test, y_pred2)
    print ('With',i/10,'threshold the Confusion Matrix is ','\n\n',cm1,'\n\n',
           'with',cm1[0,0]+cm1[1,1],'correct predictions, ', '\n\n',
           cm1[0,1],'Type I errors( False Positives), ','\n\n',
           cm1[1,0],'Type II errors( False Negatives), ','\n\n',
           'Accuracy score: ', (accuracy_score(y_test, y_pred2)), '\n\n',
           'Sensitivity: ',cm1[1,1]/(float(cm1[1,1]+cm1[1,0])), '\n\n',
           'Specificity: ',cm1[0,0]/(float(cm1[0,0]+cm1[0,1])),'\n\n',
           '====================================================', '\n\n')
# -

# FIX: fpr/tpr/thresholds were used below but never computed — derive the
# ROC curve from the class-1 probabilities first (ravel: roc_curve needs 1-D)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred1.ravel())

def evaluate_threshold(threshold):
    """Print sensitivity and specificity at the given probability threshold."""
    print('Sensitivity:', tpr[thresholds > threshold][-1])
    print('Specificity:', 1 - fpr[thresholds > threshold][-1])

evaluate_threshold(0.5)

evaluate_threshold(0.2)

# IMPORTANT: first argument is true values, second argument is predicted probabilities
print(metrics.roc_auc_score(y_test, y_pred1))

# ### 8. Cross validation score

# calculate cross-validated AUC
from sklearn.model_selection import cross_val_score
cross_val_score(logreg, X, y, cv=10, scoring='roc_auc').mean()
06_Logistic Regression/Logistic Regression Threshold Optimizations/Threshold_v1_trial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Install the package from Test PyPI if it is not available locally
# !pip install --index-url https://test.pypi.org/simple/ lambdata-aklefebvere

from my_lambdata.my_mod import add_col
from my_lambdata.my_mod import is_nan
import my_lambdata.my_mod

# Demo data: state abbreviations and their full names
state_abbrevs = ["CT", "CO", "CA", "TX"]
state_names = ["Connecticut", "Colorado", "California", "Texas"]

df = pd.DataFrame({"State": state_abbrevs})
df.head()

# Attach the full state names as a new 'Name' column
add_col(df, 'Name', state_names)
df.head()

# Report whether the frame contains any missing values
is_nan(df)
module1-python-modules-packages-and-environments/DS-Unit-3-Sprint-1-mod-1-Assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: openvino_env
#     language: python
#     name: openvino_env
# ---

# # Video Super Resolution with OpenVINO
# Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies Single Image Super Resolution (SISR) to frames in a 360p (480×360) video in 360p resolution. We use a model called [single-image-super-resolution-1032](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/intel/single-image-super-resolution-1032) which is available from the Open Model Zoo. It is based on the research paper cited below.
#
# <NAME> et al., ["An Attention-Based Approach for Single Image Super Resolution,"](https://arxiv.org/abs/1807.06779) 2018 24th International Conference on Pattern Recognition (ICPR), 2018, pp. 2777-2784, doi: 10.1109/ICPR.2018.8545760.
#
# **NOTE:** The Single Image Super Resolution (SISR) model used in this demo is not optimized for video. Results may vary depending on the video. We are looking for a more suitable Multi Image Super Resolution (MISR) model, so if you know of a great open source model, please let us know! You can start a [discussion](https://github.com/openvinotoolkit/openvino_notebooks/discussions) or create an [issue](https://github.com/openvinotoolkit/openvino_notebooks/issues) on GitHub.

# ## Preparation

# ### Imports

# + tags=[]
import os
import time
import urllib
from pathlib import Path

import cv2
import numpy as np
from IPython.display import HTML, FileLink, Pretty, ProgressBar, Video, clear_output, display
from openvino.inference_engine import IECore
from pytube import YouTube
# -

# ### Settings

# + tags=[]
# Device to use for inference. For example, "CPU", or "GPU"
DEVICE = "CPU"
# 1032: 4x superresolution, 1033: 3x superresolution
MODEL_FILE = "model/single-image-super-resolution-1032.xml"
model_name = os.path.basename(MODEL_FILE)
model_xml_path = Path(MODEL_FILE).with_suffix(".xml")
# -

# ### Functions

# + tags=[]
def write_text_on_image(image: np.ndarray, text: str) -> np.ndarray:
    """
    Write the specified text in the top left corner of the image
    as white text on a black background rectangle.

    :param image: image as numpy array with HWC shape, RGB or BGR
    :param text: text to write
    :return: image with written text, as numpy array
    """
    font = cv2.FONT_HERSHEY_PLAIN
    org = (20, 20)  # top-left anchor of the text box
    font_scale = 4
    font_color = (255, 255, 255)
    line_type = 1
    font_thickness = 2
    text_color_bg = (0, 0, 0)
    x, y = org

    # UMat lets OpenCV draw on a (possibly GPU-backed) copy of the image
    image = cv2.UMat(image)
    (text_w, text_h), _ = cv2.getTextSize(text, font, font_scale, font_thickness)
    # Filled rectangle behind the text for contrast
    result_im = cv2.rectangle(image, org, (x + text_w, y + text_h), text_color_bg, -1)

    textim = cv2.putText(
        result_im,
        text,
        (x, y + text_h + font_scale - 1),
        font,
        font_scale,
        font_color,
        font_thickness,
        line_type,
    )
    # .get() converts the UMat back to a numpy array
    return textim.get()


def load_image(path: str) -> np.ndarray:
    """
    Loads an image from `path` and returns it as BGR numpy array.

    :param path: path to an image filename or url
    :return: image as numpy array, with BGR channel order
    """
    if path.startswith("http"):
        # Set User-Agent to Mozilla because some websites block requests
        # with User-Agent Python
        request = urllib.request.Request(path, headers={"User-Agent": "Mozilla/5.0"})
        response = urllib.request.urlopen(request)
        array = np.asarray(bytearray(response.read()), dtype="uint8")
        image = cv2.imdecode(array, -1)  # Loads the image as BGR
    else:
        image = cv2.imread(path)
    return image


def convert_result_to_image(result) -> np.ndarray:
    """
    Convert network result of floating point numbers to image with integer
    values from 0-255. Values outside this range are clipped to 0 and 255.

    :param result: a single superresolution network result in N,C,H,W shape
    :return: the result as an H,W,C uint8 image
    """
    # N,C,H,W float -> H,W,C, scaled to 0-255 and clipped
    result = result.squeeze(0).transpose(1, 2, 0)
    result *= 255
    result[result < 0] = 0
    result[result > 255] = 255
    result = result.astype(np.uint8)
    return result
# -

# ## Load the Superresolution Model
# Load the model in Inference Engine with `ie.read_network` and load it to the specified device with `ie.load_network`

# + tags=[]
ie = IECore()
net = ie.read_network(str(model_xml_path), str(model_xml_path.with_suffix(".bin")))
exec_net = ie.load_network(network=net, device_name=DEVICE)
# -

# Get information about network inputs and outputs. The Super Resolution model expects two inputs: 1) the input image, 2) a bicubic interpolation of the input image to the target size 1920x1080. It returns the super resolution version of the image in 1920x1080.

# + tags=[]
# Network inputs and outputs are dictionaries. Get the keys for the
# dictionaries.
original_image_key = list(exec_net.input_info)[0]
bicubic_image_key = list(exec_net.input_info)[1]
output_key = list(exec_net.outputs.keys())[0]

# Get the expected input and target shape. `.dims[2:]` returns the height
# and width. OpenCV's resize function expects the shape as (width, height),
# so we reverse the shape with `[::-1]` and convert it to a tuple
input_height, input_width = tuple(exec_net.input_info[original_image_key].tensor_desc.dims[2:])
target_height, target_width = tuple(exec_net.input_info[bicubic_image_key].tensor_desc.dims[2:])

upsample_factor = int(target_height / input_height)

print(f"The network expects inputs with a width of {input_width}, " f"height of {input_height}")
print(f"The network returns images with a width of {target_width}, " f"height of {target_height}")
print(
    f"The image sides are upsampled by a factor {upsample_factor}. "
    f"The new image is {upsample_factor**2} times as large as the "
    "original image"
)
# -

# ## Superresolution on Video
#
# Download a YouTube\* video with PyTube and enhance the video quality with superresolution.
#
# By default only the first 100 frames of the video are processed. Change NUM_FRAMES in the cell below to modify this.
#
# **Note:**
# - The resulting video does not contain audio.
# - The input video should be a landscape video and have an input resolution of 360p (640x360) for the 1032 model, or 480p (720x480) for the 1033 model.

# ### Settings

# + tags=[] test_replace={"NUM_FRAMES = 100": "NUM_FRAMES = 3"}
VIDEO_DIR = "data"
OUTPUT_DIR = "output"

os.makedirs(str(OUTPUT_DIR), exist_ok=True)

# Number of frames to read from the input video. Set to 0 to read all frames.
NUM_FRAMES = 100

# The format for saving the result videos.
# vp09 is slow, but widely available. If you have FFMPEG installed, you can
# change the FOURCC to `*"THEO"` to improve video writing speed
FOURCC = cv2.VideoWriter_fourcc(*"vp09")
# -

# ### Download and Prepare Video

# + tags=[]
# Use pytube to download a video. It downloads to the videos subdirectory.
# You can also place a local video there and comment out the following lines
VIDEO_URL = "https://www.youtube.com/watch?v=V8yS3WIkOrA"
yt = YouTube(VIDEO_URL)
# Use `yt.streams` to see all available streams. See the PyTube documentation
# https://python-pytube.readthedocs.io/en/latest/api.html for advanced
# filtering options
try:
    os.makedirs(VIDEO_DIR, exist_ok=True)
    stream = yt.streams.filter(resolution="360p").first()
    # Strip non-ASCII characters so the filename is safe on all platforms
    filename = Path(stream.default_filename.encode("ascii", "ignore").decode("ascii")).stem
    stream.download(OUTPUT_DIR, filename=filename)
    # NOTE(review): "(unknown)" looks like a placeholder for the video title —
    # confirm whether this message should interpolate `filename` instead
    print(f"Video (unknown) downloaded to {OUTPUT_DIR}")

    # Create Path objects for the input video and the resulting videos
    video_path = Path(stream.get_file_path(filename, OUTPUT_DIR))
except Exception:
    # If PyTube fails, use a local video stored in the VIDEO_DIR directory
    video_path = Path(rf"{VIDEO_DIR}/CEO Pat Gelsinger on Leading Intel.mp4")

# Path names for the result videos
superres_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_superres.mp4")
bicubic_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_bicubic.mp4")
comparison_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_superres_comparison.mp4")
# -

# + tags=[]
# Open the video and get the dimensions and the FPS
cap = cv2.VideoCapture(str(video_path))
ret, image = cap.read()
if not ret:
    raise ValueError(f"The video at '{video_path}' cannot be read.")
fps = cap.get(cv2.CAP_PROP_FPS)
original_frame_height, original_frame_width = image.shape[:2]

cap.release()
print(
    f"The input video has a frame width of {original_frame_width}, "
    f"frame height of {original_frame_height} and runs at {fps:.2f} fps"
)
# -

# Create superresolution video, bicubic video and comparison video. The superresolution video contains the enhanced video, upsampled with superresolution, the bicubic video is the input video upsampled with bicubic interpolation, the combination video sets the bicubic video and the superresolution side by side.
# + tags=[] superres_video = cv2.VideoWriter( str(superres_video_path), FOURCC, fps, (target_width, target_height), ) bicubic_video = cv2.VideoWriter( str(bicubic_video_path), FOURCC, fps, (target_width, target_height), ) comparison_video = cv2.VideoWriter( str(comparison_video_path), FOURCC, fps, (target_width * 2, target_height), ) # - # ### Do Inference # # Read video frames and enhance them with superresolution. Save the superresolution video, the bicubic video and the comparison video to file. # # The code in this cell reads the video frame by frame. Each frame is resized and reshaped to network input shape and upsampled with bicubic interpolation to target shape. Both the original and the bicubic image are propagated through the network. The network result is a numpy array with floating point values, with a shape of (1,3,1920,1080). This array is converted to an 8-bit image with shape (1080,1920,3) and written to `superres_video`. The bicubic image is written to `bicubic_video` for comparison. Lastly, the bicubic and result frames are combined side by side and written to `comparison_video`. A progress bar shows the progress of the process. Inference time is measured, as well as total time to process each frame, which includes inference time as well as the time it takes to process and write the video. 
# + tags=[] start_time = time.perf_counter() frame_nr = 1 total_inference_duration = 0 total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if NUM_FRAMES == 0 else NUM_FRAMES progress_bar = ProgressBar(total=total_frames) progress_bar.display() cap = cv2.VideoCapture(str(video_path)) try: while cap.isOpened(): ret, image = cap.read() if not ret: cap.release() break if NUM_FRAMES > 0 and frame_nr == NUM_FRAMES: break # Resize the input image to network shape and convert from (H,W,C) to # (N,C,H,W) resized_image = cv2.resize(image, (input_width, input_height)) input_image_original = np.expand_dims(resized_image.transpose(2, 0, 1), axis=0) # Resize and reshape the image to the target shape with bicubic # interpolation bicubic_image = cv2.resize( image, (target_width, target_height), interpolation=cv2.INTER_CUBIC ) input_image_bicubic = np.expand_dims(bicubic_image.transpose(2, 0, 1), axis=0) # Do inference inference_start_time = time.perf_counter() result = exec_net.infer( inputs={ original_image_key: input_image_original, bicubic_image_key: input_image_bicubic, } )[output_key] inference_stop_time = time.perf_counter() inference_duration = inference_stop_time - inference_start_time total_inference_duration += inference_duration # Transform inference result into an image result_frame = convert_result_to_image(result) # Write resulting image and bicubic image to video superres_video.write(result_frame) bicubic_video.write(bicubic_image) stacked_frame = np.hstack((bicubic_image, result_frame)) comparison_video.write(stacked_frame) frame_nr = frame_nr + 1 # Update progress bar and status message progress_bar.progress = frame_nr progress_bar.update() if frame_nr % 10 == 0: clear_output(wait=True) progress_bar.display() display( Pretty( f"Processed frame {frame_nr}. 
Inference time: " f"{inference_duration:.2f} seconds " f"({1/inference_duration:.2f} FPS)" ) ) except KeyboardInterrupt: print("Processing interrupted.") finally: superres_video.release() bicubic_video.release() comparison_video.release() end_time = time.perf_counter() duration = end_time - start_time print(f"Video's saved to {comparison_video_path.parent} directory.") print( f"Processed {frame_nr} frames in {duration:.2f} seconds. Total FPS " f"(including video processing): {frame_nr/duration:.2f}. " f"Inference FPS: {frame_nr/total_inference_duration:.2f}." ) # - # ### Show side-by-side video of bicubic and superresolution version # + tags=[] if not comparison_video_path.exists(): raise ValueError("The comparison video does not exist.") else: video_link = FileLink(comparison_video_path) display( HTML( f"Showing side by side comparison. If you cannot see the video in " "your browser, please click on the following link to download " f"the video<br>{video_link._repr_html_()}" ) ) display(Video(comparison_video_path, width=800, embed=True))
notebooks/202-vision-superresolution/202-vision-superresolution-video.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Time Series Modeling # ## Decomposing time series # ### How to do it... # 1. Run the following code to import the necessary libraries: # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' from statsmodels.tsa.seasonal import seasonal_decompose import matplotlib.pyplot as plt import pandas as pd import quandl plt.style.use('seaborn') plt.rcParams['figure.figsize'] = [16, 9] plt.rcParams['figure.dpi'] = 200 # - # 2. Running the next code block downloads Gold prices for years 2000-2011 from Quandl: # + # authentication quandl_key = '{key}' # replace {key} with your own API key quandl.ApiConfig.api_key = quandl_key # download gold prices from Quandl df = quandl.get(dataset='WGC/GOLD_MONAVG_USD', start_date='2000-01-01', end_date='2011-12-31') print(f'Shape of DataFrame: {df.shape}') # - # 3. In the next code block, we add rolling statistics (mean, standard deviation) to see how they look like over time. # data preprocessing df = df.resample("M").last() df.rename(columns={'Value': 'gold_price'}, inplace=True) df['rolling_mean'] = df.gold_price.rolling(window=12).mean() df['rolling_std'] = df.gold_price.rolling(window=12).std() df.plot(title='Gold Price') # 4. That is why we decide to use the multiplicative model when doing seasonal decomposition. decomposition_results = seasonal_decompose(df.gold_price, model='multiplicative') decomposition_results.plot().suptitle('Multiplicative Decomposition', fontsize=18) # ## Decomposing time series using Facebook's Prophet # ### How to do it... # 1. 
Run the following block to import necessary libraries: # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' from fbprophet import Prophet import matplotlib.pyplot as plt import pandas as pd import quandl import seaborn as sns plt.style.use('seaborn') plt.rcParams['figure.figsize'] = [16, 9] plt.rcParams['figure.dpi'] = 200 # - # 2. In the following block we download daily gold prices from Quandl and divide the series into training and test set: # + # authentication quandl_key = '{key}' # replace {key} with your own API key quandl.ApiConfig.api_key = quandl_key df = quandl.get(dataset='WGC/GOLD_DAILY_USD', start_date='2000-01-01', end_date='2005-12-31') print(f'Shape of DataFrame: {df.shape}') # rename columns df.reset_index(drop=False, inplace=True) df.rename(columns={'Date': 'ds', 'Value': 'y'}, inplace=True) # train-test split df_train = df.loc[df.ds.apply(lambda x: x.year) < 2005].dropna() df_test = df.loc[df.ds.apply(lambda x: x.year) == 2005].reset_index(drop=True) # - # 3. The next block creates the instance of the model and fits it to the data: # set up and fit model model_prophet = Prophet(seasonality_mode='additive') model_prophet.add_seasonality(name='monthly', period=30.5, fourier_order=5) model_prophet = model_prophet.fit(df_train) # 4. Run the following code to forecast 1 year ahead and plot the results: df_future = model_prophet.make_future_dataframe(periods=365) df_pred = model_prophet.predict(df_future) model_prophet.plot(df_pred); # 5. In the next step we inspect the decomposition of the time series: model_prophet.plot_components(df_pred); # 6. Lastly, we want to compare the forecasts to actual data in order to evaluate how the model performed. 
The following code merges the test set with the forecasts: # + # define outside for readability row_filter = df_pred.ds.apply(lambda x: x.year) == 2005 selected_columns = ['ds', 'yhat_lower', 'yhat_upper', 'yhat'] df_pred = df_pred.loc[row_filter, selected_columns].reset_index(drop=True) df_test = df_test.merge(df_pred, on=['ds'], how='left') df_test.ds = pd.to_datetime(df_test.ds) df_test.set_index('ds', inplace=True) # - # 7. Running the following code plots the two series: # + fig, ax = plt.subplots(1, 1) ax = sns.lineplot(data=df_test[['y', 'yhat_lower', 'yhat_upper', 'yhat']]) ax.fill_between(df_test.index, df_test.yhat_lower, df_test.yhat_upper, alpha=0.3) # plot labels plt.xlabel('Date') plt.ylabel('Gold Price ($)') plt.title('Gold Price - actual vs. predicted', fontsize=14) plt.show() # - # ### How it works... # ### There's more... # 1. In the first block we iterate over the list of considered values for the hyperparameter, fit the model and store the predictions in a separate `DataFrame`. # + # selected changepoints to consider changepoint_priors = [0.01, 0.15] # fit model for all changepoints and store predictions for i, prior in enumerate(changepoint_priors): model_prophet = Prophet(changepoint_prior_scale=prior) model_prophet.add_seasonality(name='monthly', period=30.5, fourier_order=5) model_prophet = model_prophet.fit(df_train) # predict 1 year ahead df_future = model_prophet.make_future_dataframe(periods=365) if i == 0: df_pred = df_future.copy() df_future = model_prophet.predict(df_future) df_pred[f'yhat_upper_{prior}'] = df_future['yhat_upper'] df_pred[f'yhat_lower_{prior}'] = df_future['yhat_lower'] df_pred[f'yhat_{prior}'] = df_future['yhat'] # merge back to df to remove weekends df = df.merge(df_pred, on=['ds'], how='left') df.ds = pd.to_datetime(df.ds) df.set_index('ds', inplace=True) # - # 2. 
In this step we plot the results and compare the effects of different values of `changepoint_prior_scale`: # + # selected colors colors = ['b', 'g', 'r', 'c'] fig, ax = plt.subplots(1, 1) # plot actual gold price ax.plot(df.index, df['y'], 'k-', label='actual') # plot results of changepoint analysis for i, prior in enumerate(changepoint_priors): ax.plot(df.index, df[f'yhat_{prior}'], linewidth=1.2, color=colors[i], label=f'{prior}') ax.fill_between(df.index, df[f'yhat_upper_{prior}'], df[f'yhat_lower_{prior}'], facecolor=colors[i], alpha=0.3, edgecolor='k', linewidth=0.6) # plot labels plt.legend(loc=2, prop={'size': 10}) plt.xlabel('Date') plt.ylabel('Gold Price ($)') plt.title('Changepoint Prior Analysis', fontsize=16) plt.show() # - # 3. Performance evaluation: # + def rmse(predictions, targets): return np.sqrt(((predictions - targets) ** 2).mean()) # specify outside for readability train_index = df.index.year < 2005 test_index = df.index.year == 2005 print(f"Training set RMSE of the model with changepoint_prior_scale = 0.01: {rmse(df.loc[train_index, 'yhat_0.01'], df[train_index].y)}") print(f"Training set RMSE of the model with changepoint_prior_scale = 0.15: {rmse(df.loc[train_index, 'yhat_0.15'], df[train_index].y)}") print(f"Test set RMSE of the model with changepoint_prior_scale = 0.01: {rmse(df.loc[test_index, 'yhat_0.01'], df[test_index].y)}") print(f"Test set RMSE of the model with changepoint_prior_scale = 0.15: {rmse(df.loc[test_index, 'yhat_0.15'], df[test_index].y)}") # - # cross validation from fbprophet.diagnostics import cross_validation, performance_metrics from fbprophet.plot import plot_cross_validation_metric df_cv = cross_validation(model_prophet, horizon='365 days') df_metrics = performance_metrics(df_cv) plot_cross_validation_metric(df_cv, metric='mape'); # ## Testing for stationarity in time series # ### How to do it... # 1. 
We need to import the following libraries: # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.stattools import adfuller, kpss import matplotlib.pyplot as plt import pandas as pd plt.style.use('seaborn') plt.rcParams['figure.figsize'] = [16, 9] plt.rcParams['figure.dpi'] = 200 # - # 2. The next code block presents how to define a function for running the ADF test and presenting the results in a human-readable format: # + def adf_test(series): '''Perform Augmented Dickey-Fuller test for stationarity''' indices = ['Test Statistic', 'p-value', '# of Lags Used', '# of Observations Used'] adf_test = adfuller(series, autolag='AIC') adf_results = pd.Series(adf_test[0:4], index=indices) for key, value in adf_test[4].items(): adf_results[f'Critical Value ({key})'] = value print('Results of Augmented Dickey-Fuller Test:') print(adf_results) adf_test(df.gold_price) # - # 3. The next block presents a similar function, this time for running the KPSS test: # + def kpss_test(series, h0_type='c'): '''Perform KPSS test for stationarity''' indices = ['Test Statistic', 'p-value', '# of Lags'] kpss_test = kpss(series, regression=h0_type) kpss_results = pd.Series(kpss_test[0:3], index=indices) for key, value in kpss_test[3].items(): kpss_results[f'Critical Value ({key})'] = value print('Results of KPSS Test:') print(kpss_results) kpss_test(df.gold_price) # - # 4. Lastly, we show how to create the ACF/PACF plots: # ACF/PACF plots fig, ax = plt.subplots(2, figsize=(16, 8)) plot_acf(df.gold_price, ax=ax[0], lags=40, alpha=0.05) plot_pacf(df.gold_price, ax=ax[1], lags=40, alpha=0.05) plt.show() # ### What's more... 
# + from pmdarima.arima import ndiffs, nsdiffs print(f"Suggested number of differences (ADF): {ndiffs(df.gold_price, test='adf')}") print(f"Suggested number of differences (KPSS): {ndiffs(df.gold_price, test='kpss')}") print(f"Suggested number of differences (PP): {ndiffs(df.gold_price, test='pp')}") # - print(f"Suggested number of differences (OSCB): {nsdiffs(df.gold_price, m=12, test='ocsb')}") print(f"Suggested number of differences (CH): {nsdiffs(df.gold_price, m=12, test='ch')}") # ## Correcting for stationarity in time series # ### How to do it... # 1. Run the following code to import the libraries (the rest of the libraries is the same as in Recipe 'Testing for stationarity in time series'): import cpi from datetime import date from chapter_3_utils import test_autocorrelation # 2. The next code block covers deflating the prices (to 2011-12-31 USD values) and plotting the new results: df['dt_index'] = df.index.map(lambda x: x.to_pydatetime().date()) df['gold_price_deflated'] = df.apply(lambda x: cpi.inflate(x.gold_price, x.dt_index, date(2011, 12, 31)), axis=1) df[['gold_price', 'gold_price_deflated']].plot(title='Gold Price (deflated)') # 3. In this block we apply natural logarithm to the deflated price series and plot the new series: df['gold_price_log'] = np.log(df.gold_price_deflated) df['rolling_mean_log'] = df.gold_price_log.rolling(window=12).mean() df['rolling_std_log'] = df.gold_price_log.rolling(window=12).std() df[['gold_price_log', 'rolling_mean_log', 'rolling_std_log']].plot(title='Gold Price (logged)') # 4. We use `test_autocorrelation` function to investigate if the series became stationary after applied transformations. The function is a combination of stationarity test presented in Recipe 'Testing for stationarity in time series'. test_autocorrelation(df.gold_price_log) # 5. 
In this step we apply differencing: df['gold_price_log_diff'] = df.gold_price_log.diff(1) df['rolling_mean_log_diff'] = df.gold_price_log_diff.rolling(window=12).mean() df['rolling_std_log_diff'] = df.gold_price_log_diff.rolling(window=12).std() df[['gold_price_log_diff', 'rolling_mean_log_diff', 'rolling_std_log_diff']].plot( title='Gold Price (1st diff)') # 6. In this step we once again investigate if the differenced series can be considered stationary: test_autocorrelation(df.gold_price_log_diff.dropna()) # ## Modeling time series with exponential smoothing methods # ### How to do it... # 1. Run the first block to import all the necessary libraries: # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt from datetime import date import pandas as pd import numpy as np import matplotlib.pyplot as plt import warnings import yfinance as yf plt.style.use('seaborn') plt.rcParams['figure.figsize'] = [16, 9] plt.rcParams['figure.dpi'] = 200 warnings.simplefilter(action='ignore', category=FutureWarning) # - # 2. Having the downloaded the stock prices into the `df` object, we split the data into a training and testing samples. # + df = yf.download('AMZN', start='2010-01-01', end='2018-06-30', adjusted=True, progress=False) print(f'Downloaded {df.shape[0]} rows of data.') # aggregating to weekly amzn = df.resample('W').last().rename(columns={'Adj Close': 'adj_close'}).adj_close # train-test split amzn_train = amzn[amzn.index.year < 2018] amzn_test = amzn[amzn.index.year == 2018] # define length of test period test_length = len(amzn_test) # plot the stock prices amzn.plot(title='Amazon Stock Price') # - # 3. 
In the next block we run 3 Simple Exponential Smoothing models and plot the results: # + # Simple Exponential Smoothing ---- amzn.plot(color='gray', title='Simple Exponential Smoothing', legend=True, figsize=[16, 9]) fit_1 = SimpleExpSmoothing(amzn_train).fit(smoothing_level=0.2) forecast_1 = fit_1.forecast(test_length).rename(r'$\alpha=0.2$') forecast_1.plot(color='blue', legend=True) fit_1.fittedvalues.plot(color='blue') fit_2 = SimpleExpSmoothing(amzn_train).fit(smoothing_level=0.5) forecast_2 = fit_2.forecast(test_length).rename(r'$\alpha=0.5$') forecast_2.plot(color='red', legend=True) fit_2.fittedvalues.plot(color='red') fit_3 = SimpleExpSmoothing(amzn_train).fit() alpha = fit_3.model.params['smoothing_level'] forecast_3 = fit_3.forecast(test_length).rename(r'$\alpha={0:.4f}$'.format(alpha)) forecast_3.plot(color='green', legend=True) fit_3.fittedvalues.plot(color='green') plt.show() # - # 4. In the next step we run 3 configurations of Holt's Smoothing models and plot the results: # + # Holt's Smoothing models ---- amzn.plot(color='gray', title="Holt's Smoothing models", legend=True, figsize=[16, 9]) # Holt's model with linear trend fit_1 = Holt(amzn_train).fit() forecast_1 = fit_1.forecast(test_length).rename("Linear trend") fit_1.fittedvalues.plot(color='blue') forecast_1.plot(color='blue', legend=True) # Holt's model with exponential trend fit_2 = Holt(amzn_train, exponential=True).fit() # equivalent of ExponentialSmoothing(train, trend='mul').fit() forecast_2 = fit_2.forecast(test_length).rename("Exponential trend") fit_2.fittedvalues.plot(color='red') forecast_2.plot(color='red', legend=True) # Holt's model with exponential trend and damping fit_3 = Holt(amzn_train, exponential=False, damped=True).fit(damping_slope=0.99) forecast_3 = fit_3.forecast(test_length).rename("Exponential trend (damped)") fit_3.fittedvalues.plot(color='green') forecast_3.plot(color='green', legend=True) plt.show() # - # ### There's more... 
# + # Holt-Winter's Seasonal Smoothing ---- amzn.plot(color='gray', title="Holt-Winter's Seasonal Smoothing", legend=True, figsize=[16, 9]) # Holt-Winter's model with exponential trend fit_1 = ExponentialSmoothing(amzn_train, trend="mul", seasonal="add", seasonal_periods=52).fit() forecast_1 = fit_1.forecast(test_length).rename("Seasonal Smoothing") fit_1.fittedvalues.plot(color='blue') forecast_1.plot(color='blue', legend=True) # Holt-Winter's model with exponential trend and damping fit_2 = ExponentialSmoothing(amzn_train, trend="mul", seasonal="add", seasonal_periods=52, damped=True).fit() phi = fit_2.model.params['damping_slope'] forecast_2 = fit_2.forecast(test_length).rename(r'$Seasonal Smoothing (damped with \phi={0:.4f})$'.format(phi)) fit_2.fittedvalues.plot(color='red') forecast_2.plot(color='red', legend=True) plt.show() # - # ## Modeling time series with ARIMA class models # ### How to do it... # 1. Run the following code to import necessary dependencies: # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' from chapter_3_utils import test_autocorrelation import yfinance as yf import pmdarima as pm from datetime import date import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from statsmodels.tsa.arima_model import ARIMA import statsmodels.api as sm from statsmodels.graphics.tsaplots import plot_acf from statsmodels.stats.diagnostic import acorr_ljungbox import scipy.stats as scs plt.style.use('seaborn') plt.rcParams['figure.figsize'] = [16, 9] plt.rcParams['figure.dpi'] = 200 # - # 2. Download the Google stock prices and resample to weekly frequency # + df = yf.download('GOOG', start='2015-01-01', end='2018-12-31', adjusted=True, progress=False) print(f'Downloaded {df.shape[0]} rows of data.') # aggregate to weekly goog = df.resample('W').last().rename(columns={'Adj Close': 'adj_close'}).adj_close # - # 3. 
Apply first differences to prices series and plot them together: # + # apply first differences goog_diff = goog.diff().dropna() # plot both series fig, ax = plt.subplots(2) goog.plot(title = "Google's stock price", ax=ax[0]) goog_diff.plot(ax=ax[1]) plt.show() # - # 4. Test the differenced series for stationarity: test_autocorrelation(goog_diff) # 5. Based on the results of the tests, specify the ARIMA model and fit it to the data: arima = ARIMA(goog, order=(2, 1, 1)).fit(disp=0) arima.summary() # 6. Prepare a function diagnosing the fit of the model based on its residuals: # + def plot_diagnostics(arima, time_index=None): '''Function for diagnosing the fit of an ARIMA model by investigating the residuals ''' # create placeholder subplots fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) # residuals over time time_index = range(len(arima.resid)) if time_index is None else time_index sns.lineplot(x=time_index, y=arima.resid, ax=ax1) ax1.set_title('Residuals', fontsize=14) # distribution of residuals sns.distplot(arima.resid, hist=True, kde=False, norm_hist=True, ax=ax2) ax2.set_title('Distribution of residuals', fontsize=14) r_range = np.linspace(min(arima.resid), max(arima.resid), num=1000) norm_pdf = scs.norm.pdf(r_range, loc=0, scale=1) ax2.plot(r_range, norm_pdf, 'g', lw=2, label='N(0,1)') # QQ plot qq = sm.qqplot(arima.resid, line='s', ax=ax3) ax3.set_title('QQ plot', fontsize=14) # ACF plot plot_acf(arima.resid, ax=ax4, lags=40, alpha=0.05) ax4.set_title('ACF plot', fontsize=14) return fig plot_diagnostics(arima, goog.index[1:]); # - # 7. Apply and visualise Ljung-Box's test for no autocorrelation in the residuals: # + ljung_box_results = acorr_ljungbox(arima.resid) fig, ax = plt.subplots(1, figsize=[16, 5]) sns.scatterplot(x=range(len(ljung_box_results[1])), y=ljung_box_results[1], ax=ax) ax.axhline(0.05, ls='--', c='r') ax.set_title("Ljung-Box test's results", fontsize=14) plt.xlabel('Lag') plt.ylabel('p-value') plt.show() # - # ### There's more # 1. 
We start by importing the library: import pmdarima as pm # 2. We run `auto_arima` with the majority of settings set to default values. We only exclude potential seasonality. auto_arima = pm.auto_arima(goog, error_action='ignore', suppress_warnings=True, seasonal=False) auto_arima.summary() # 3. In the next step we try to tune the search of the optimal parameters: auto_arima = pm.auto_arima(goog, error_action='ignore', suppress_warnings=True, seasonal=False, stepwise=False, approximation=False, n_jobs=-1) auto_arima.summary() # ## Forecasting using ARIMA class models # ### How to do it... # 1. Download additional test data: # + df = yf.download('GOOG', start='2019-01-01', end='2019-03-31', adjusted=True, progress=False) print(f'Downloaded {df.shape[0]} rows of data.') # aggregating to weekly test = df.resample('W').last().rename(columns={'Adj Close': 'adj_close'}).adj_close # - # 2. Obtain forecasts from the first model: # + arima_pred = arima.forecast(len(test)) # reshaping into a dataframe arima_pred = [pd.DataFrame(arima_pred[0], columns=['prediction']), pd.DataFrame(arima_pred[2], columns=['ci_lower', 'ci_upper'])] arima_pred = pd.concat(arima_pred, axis=1).set_index(test.index) # - # 3. Obtain forecasts from the second model: # + auto_arima_pred = auto_arima.predict(n_periods=len(test), return_conf_int=True, alpha=0.05) # reshaping into a dataframe auto_arima_pred = [pd.DataFrame(auto_arima_pred[0], columns=['prediction']), pd.DataFrame(auto_arima_pred[1], columns=['ci_lower', 'ci_upper'])] auto_arima_pred = pd.concat(auto_arima_pred, axis=1).set_index(test.index) # - # 4. 
Plot the results on the same plot: # + fig, ax = plt.subplots(1) # plot the observed stock prices ax = sns.lineplot(data=test, color='k', label = 'Actual') # plot the predictions from ARIMA(2,1,1) ax.plot(arima_pred.prediction, c='g', label = 'ARIMA(2,1,1)') ax.fill_between(arima_pred.index, arima_pred.ci_lower, arima_pred.ci_upper, alpha=0.3, facecolor='g') # plot the predictions from ARIMA(3,1,2) ax.plot(auto_arima_pred.prediction, c='b', label = 'ARIMA(3,1,2)') ax.fill_between(auto_arima_pred.index, auto_arima_pred.ci_lower, auto_arima_pred.ci_upper, alpha=0.3, facecolor='b') # plot labels plt.xlabel('Date') plt.ylabel('Price ($)') plt.title("Google's stock price - actual vs. predicted", fontsize=14) plt.legend() plt.show()
Chapter 03/chapter_3_code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] nbgrader={"grade": false, "grade_id": "cell-5c081c1c48b5095c", "locked": true, "schema_version": 3, "solution": false, "task": false} slideshow={"slide_type": "slide"} # --- # # File I/O # + [markdown] slideshow={"slide_type": "slide"} # # "File I/O" refers to reading (**I**nput) and writing (**O**utput) files # Examples: # - Keep grade data in a comma-separated-value (CSV) file, compute averages and plot histograms in a program that reads data in # - Perform orbital dynamics simulation and write positions and momenta of all particles to a file at each timestep for later analysis # - Gather data from an open-source provider, read it in, and search for patterns and trends # # Files usually store many lines of **text**, so File I/O usually comes hand in hand with lists and string manipulation. # + [markdown] slideshow={"slide_type": "subslide"} # # Files are represented in python as their own objects that can be written to or read from # We create a file object (which represents a file on the computer) with the `open()` function, which is sort of like a constructor for the `file` datatype. The first argument is the name of the file, and the second is the "mode" we want the file to work in. 
There are several modes, but the most useful ones are summarized below
#
# | Mode | Description |
# |:----:|:---------------------------------------------|
# | `r` | **Read** from a file **only**; no writing |
# | `w` | **Write** to blank (or newly erased) file |
# | `a` | **Append** (write, but don't erase) to the end of a file |
# | `r+` | **Read** *or* **write** text from/to file |

# + [markdown] slideshow={"slide_type": "slide"}
# # First example: writing a simple file
# We want to create a new file called `hello.txt` and open it for writing. Then we will write the string `"Hello, world!"` to it. Finally we will close the file (more on closing in a bit).
# -

f = open('hello.txt', 'w')
f.write('Hello, world!')
f.close()

# + [markdown] slideshow={"slide_type": "-"}
# After executing this code look in your files list, and you should find `hello.txt` waiting for you. Open it up and see what it contains.

# + [markdown] slideshow={"slide_type": "subslide"}
# # Second Example: overwriting a file
# We've created `hello.txt`, but what happens if we open that file again and write a different string to it?
# -

f = open('hello.txt', 'w')
f.write('Goodbye, world!')
f.close()

# Look again at the contents of `hello.txt`. What happened?

# + [markdown] slideshow={"slide_type": "subslide"}
# # Aside: Filenames
# While we won't delve into the bowels of the operating system, we should explain a bit about how files are named. File names have two parts separated by a period: the base name and the extension:
# <div style="width: 50%; margin: auto;">
# <img src=filename.svg alt="Filename Anatomy">
# </div>

# + [markdown] slideshow={"slide_type": "-"}
# ## Base name
# What you probably think of as the file. Usually text, numbers, underscores. When possible, best to avoid spaces and other special characters (some operating systems get grumpy).
#
# ## Extension
# Even if you don't see it in your OS, they're there. Indicates to OS or program how to interpret the data inside (e.g.
plain text, comma-separated data, binary data, h.265-encoded video, etc.).

# + [markdown] slideshow={"slide_type": "subslide"}
# # `f.close()`: Why bother closing?
# - Don't want to risk corrupting the file with accidental writes
# - While a file is open, operating system won't let other resources open the file.
# - Even if it's in read mode, what if a program gets stuck in an infinite loop while a file is open?
#
# Best practice: open, do what you need to do, and close as fast as possible!
#

# + [markdown] slideshow={"slide_type": "subslide"}
# # File methods: writing
# If a file has a write-compatible mode (`'w'`, `'a'`, or `'r+'`), the following methods can write (or append) to a file:
# - `write()`: takes argument (a string) and writes to the file or appends to the end of it
# - `writelines()`: takes a list of strings and writes each to file. Does **not** add new lines for you!
# -

f = open('several_lines.txt', 'w')
f.writelines(['is', 'each', 'word', 'a', 'line?'])
f.close()

# + [markdown] slideshow={"slide_type": "skip"}
# We should change the above cell to this code to show how we could use `write` and the `join` method of strings to actually output multiple lines:
# ```python
# f = open('several_lines.txt', 'w')
# f.write('\n'.join(['is', 'each', 'word', 'a', 'line?']))
# f.close()
# ```
# -

# If in append mode (`'a'`) instead of write mode, these methods add text to the very end of a file rather than overwriting the contents of the file.

# + [markdown] slideshow={"slide_type": "subslide"}
# # Writing to files with print
# If a file is opened in a mode that allows writing, we can also redirect the output of print to a file. This is nice since we know how to change some behavior of `print` already. To direct this output to a file, simply set the keyword argument `file` to the file object in question.
# - f = open('from_print.txt', 'w') print('this', 'came', 'from', 'calling', 'print', sep='\t', end='!!!', file=f) f.close() # + [markdown] slideshow={"slide_type": "slide"} # # File methods: reading # If a file has a read-compatible mode (`r`, `r+`), we can extract text from the file (though reading alone cannot change the file). We have three useful methods: # # - `read()`: With no argument, reads entire contents of file into a single string # - `readline()`: Reads current (default: first) line, up to and including the newline character, into a string. A subsequent call to `readline()` will read in the *next* line. # - `readlines()`: Reads in all lines into a list of strings, each including the newline character # + [markdown] slideshow={"slide_type": "subslide"} # # Three ways to read! # - # read: all in one go f = open('several_lines.txt', 'r') read_contents = f.read() f.close() read_contents # readline: just one line at a time f = open('several_lines.txt', 'r') first_line = f.readline() second_line = f.readline() f.close() print('first line is "{}"'.format(first_line)) print('second line is "{}"'.format(second_line)) # readlines: all lines as a list of strings f = open('several_lines.txt', 'r') readlines_contents = f.readlines() f.close() readlines_contents # + [markdown] slideshow={"slide_type": "slide"} # # One more trick: text files are iterable # Their "elements" are lines. So looping over a file effectively pipes the return value of `readline` into the looping variable. # - f = open('several_lines.txt', 'r') for line in f: print(line) f.close() # Recall: each line has its own newline character, and each call to print also adds its own newline. We could get rid of the one from the lines by calling the `strip` method on strings, and we could also change the behavior of `print` by setting the `end` keyword argument to something more appropriate.
05_File_IO/05_File_IO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## pythonใงjpegใƒ•ใ‚กใ‚คใƒซใฎใƒใ‚คใƒŠใƒชใƒ‡ใƒผใ‚ฟใ‹ใ‚‰็”ป็ด ๅ€คๆƒ…ๅ ฑใ‚’ๅ–ใ‚Šๅ‡บใ™ใƒˆใƒฉใ‚คใ‚ขใƒซใ€‚ # # pythonใฎใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐใ‚’ๅญฆใถ่ชฒ้กŒใจใ—ใฆjpegใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆใ‚’ใƒใ‚คใƒŠใƒชใƒ‡ใƒผใ‚ฟใ‹ใ‚‰็”ป็ด ๅ€คใซๅค‰ๆ›ใ™ใ‚‹ใƒˆใƒฉใ‚คใ‚ขใƒซใ‚’ใ—ใฆใฟใ‚‹ใ€‚ # ใ™ใ“ใ—็ฌฆๅˆ็†่ซ–ใ‚„ไฟกๅทๅ‡ฆ็†ใฎๅพฉ็ฟ’ใ‚‚ๅ…ผใญใ‚‹ # # ๅ‚่€ƒใซใ—ใŸใ‚ตใ‚คใƒˆ # * [https://www.setsuki.com/hsp/ext/jpg.htm](https://www.setsuki.com/hsp/ext/jpg.htm) # * [https://hp.vector.co.jp/authors/VA032610/JPEGFormat/StructureOfJPEG.htm](https://hp.vector.co.jp/authors/VA032610/JPEGFormat/StructureOfJPEG.htm) # * [http://www.siisise.net/jpeg.html](http://www.siisise.net/jpeg.html) # * [https://www.w3.org/Graphics/JPEG/jfif3.pdf](https://www.w3.org/Graphics/JPEG/jfif3.pdf) # * [http://www.ijg.org/files/T-REC-T.871-201105-I!!PDF-E.pdf](http://www.ijg.org/files/T-REC-T.871-201105-I!!PDF-E.pdf) # * [https://www.w3.org/Graphics/JPEG/itu-t81.pdf0](https://www.w3.org/Graphics/JPEG/itu-t81.pdf) # * [http://www.ijg.org/files/Wallace.JPEG.pdf](http://www.ijg.org/files/Wallace.JPEG.pdf) # # ใจใ‚Šใ‚ใˆใšใ‚ฟใƒผใ‚ฒใƒƒใƒˆใฎjpegใƒ•ใ‚กใ‚คใƒซใ‚’่กจ็คบใ—ใฆใŠใ(matplotlibไฝฟใˆใฐ็ฐกๅ˜ใซๅค‰ๆ›ใƒปๆ็”ปใงใใ‚‹ใ€ใ€) # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg f_name="target_data\Lenna.jpg" img = mpimg.imread(f_name) #jpegใƒ•ใ‚กใ‚คใƒซใฎ่ชญใฟ่พผใฟ imgplot = plt.imshow(img) #jpegใƒ•ใ‚กใ‚คใƒซใฎๆ็”ป imgplot.axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค # - # ### 1.jpegใƒ•ใ‚กใ‚คใƒซใฎHEXใƒ€ใƒณใƒ— # # 
"rb"ใƒขใƒผใƒ‰ใงใƒ•ใ‚กใ‚คใƒซใ‚’ใ‚ชใƒผใƒ—ใƒณใ—ใ€ใƒใ‚คใƒŠใƒชใƒ‡ใƒผใ‚ฟใจใ—ใฆjpgใƒ•ใ‚กใ‚คใƒซใ‚’bytesๅž‹ใฎ้…ๅˆ—sใจใ—ใฆ่ชญใฟ่พผใฟใ€16ใƒ‡ใƒผใ‚ฟ1่กŒๅ˜ไฝใงhexใƒ€ใƒณใƒ—ๅ‡บๅŠ› # ใƒ‡ใƒผใ‚ฟ้‡ใฎ้ƒฝๅˆ้€”ไธญใพใง(16\*32\*3= 1536bytes)ใฎๅ‡บๅŠ›ใจใ™ใ‚‹ใ€‚ # # + f=open(f_name,"rb") s=f.read() f.close cnt=0 rows=0 for byte in s: if( rows == 32 * 2 and cnt%16==0 ): break if (cnt%16==0): if(rows %32 ==0 ): print("") print(" ",end="") for i in range(16): print(" {:1x} ".format(i),end="") print("") print("{:03x}# : ".format(rows),end="") print("{:02x} ".format(byte),end="") cnt+=1 if (cnt%16==0): print("") rows+=1 print("") print ("......(omittion)") # - # ### 2.pegใƒ•ใ‚กใ‚คใƒซใฎใƒžใƒผใ‚ซใƒผ/ใ‚ปใ‚ฐใƒกใƒณใƒˆ/ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟๆง‹้€ ใฎๆŠฝๅ‡บ # # ใƒใ‚คใƒŠใƒชใƒ‡ใƒผใ‚ฟใ‚’ใƒใ‚คใƒˆๅ˜ไฝใงใƒใ‚งใƒƒใ‚ฏใ—0xFFXX ใฎใƒ‘ใ‚ฟใƒผใƒณ(ใƒžใƒผใ‚ซใƒผ)ใ‚’ๆคœๅ‡บใ—ใŸๅ ดๅˆใ‚ปใ‚ฐใƒกใƒณใƒˆใจใ—ใฆๅ‡ฆ็†ใ— # ใ‚ปใ‚ฐใƒกใƒณใƒˆๅ(่พžๆ›ธๅฎš็พฉใ•ใ‚Œใฆใ„ใ‚Œใฐใใฎๆ–‡ๅญ—ๅˆ—ใ€ใใ‚Œไปฅๅค–ใชใ‚‰HEXๆ–‡ๅญ—ๅˆ—)ใ€ใ‚ปใ‚ฐใƒกใƒณใƒˆ้•ทใ€ใ‚ปใ‚ฐใƒกใƒณใƒˆใƒ‡ใƒผใ‚ฟ(bytesๅž‹)ใ‚’ๅ–ใ‚Šๅ‡บใ—ใ€ใƒชใ‚นใƒˆjpeg_structใซ่ฟฝๅŠ ใ€‚ # # "SOI","EOI"ใซ้–ขใ—ใฆใฏใƒžใƒผใ‚ซใƒผใฎใฟใชใฎใงใ€ใ‚ปใ‚ฐใƒกใƒณใƒˆ้•ทใ€ใ‚ปใ‚ฐใƒกใƒณใƒˆใƒ‡ใƒผใ‚ฟใชใ—ใจใ—ใฆใŒjpeg_structใƒชใ‚นใƒˆไธŠใฏใ‚ปใ‚ฐใƒกใƒณใƒˆใจๅŒๅˆ—ใซๆ‰ฑใ† # # "SOS"ใ‚ปใ‚ฐใƒกใƒณใƒˆไปฅ้™ใฏใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใจใ—ใฆ"EOI"ใ‚’ๆคœ็Ÿฅใ™ใ‚‹ใพใงใฎใƒ‡ใƒผใ‚ฟใ‚’ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใจใ—ใฆๆ–‡ๅญ—ๅˆ—"IMG"ๅŠใณใใฎใ‚ตใ‚คใ‚บใ€ใƒ‡ใƒผใ‚ฟ(bytesๅž‹)ใ‚’ใƒชใ‚นใƒˆjpeg_structใƒชใ‚นใƒˆใซ่ฟฝๅŠ (ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใฏใ‚ปใ‚ฐใƒกใƒณใƒˆใงใฏใชใ„ใŒjpeg_structใƒชใ‚นใƒˆไธŠใฏๅ็งฐ"IMG"ใ‚’ใคใ‘ใฆใ‚ปใ‚ฐใƒกใƒณใƒˆใจๅŒๅˆ—ใซๆ‰ฑใ†ใ“ใจใซใ™ใ‚‹) # # ใƒžใƒผใ‚ซใƒผ0xFF00ใซ้–ขใ—ใฆใฏ0x00ใฎใƒใ‚คใƒŠใƒชใƒ‡ใƒผใ‚ฟใจใ—ใฆๅค‰ๆ›ใ™ใ‚‹ใ€‚ # + marker_def={0xd8:"SOI",0xd9:"EOI",0xda:"SOS",0xe0:"APP0",0xdb:"DQT",0xc0:"SOF0",0xc2:"SOF2",0xc4:"DHT"} flag_marker= False flag_seg=False flag_seg_cnt=False flag_seg_data=False flag_SOI= False flag_EOI= False flag_SOS= 
False flag_err=False jpeg_struct=[] seg_buf=[] byte_bufs=b'' seg_count=0 f=open(f_name,"rb") s=f.read() f.close for byte in s: if flag_marker==False and byte==0xff : #ใƒžใƒผใ‚ซใƒผใฎๅˆคๅฎš flag_marker=True else: ####### ใƒžใƒผใ‚ซใƒผๅ‡ฆ็† ######### if flag_marker==True : #FF00ใƒžใƒผใ‚ซๅ‡ฆ็† if byte==0x00 : #print("0xFF00") byte_bufs=byte_bufs+bytes.fromhex("{:02X}".format(0xff)) #่พžๆ›ธๅฎš็พฉๆธˆใฟใƒžใƒผใ‚ซ elif byte in marker_def: #SOIๅˆคๅฎš if flag_SOI==False : if marker_def[byte]=="SOI" : flag_SOI=True jpeg_struct=jpeg_struct+[["SOI"]] else: flag_err=True; #EOIๅˆคๅฎš elif marker_def[byte]=="EOI": #IMAGE DATAๆ ผ็ด #jpeg_struct=jpeg_struct+[["IMG","{:d}".format(len(byte_bufs)),byte_bufs.hex()]] jpeg_struct=jpeg_struct+[["IMG","{:d}".format(len(byte_bufs)),byte_bufs]] jpeg_struct=jpeg_struct+[["EOI"]] flag_EOI=True #ใใฎไป–ๅฎš็พฉๆธˆใƒžใƒผใ‚ซ๏ผˆใ‚ปใ‚ฐใƒกใƒณใƒˆๅ‡ฆ็†๏ผ‰ elif byte in marker_def: seg_buf=[""+marker_def[byte]] flag_seg=True #SOSๅˆคๅฎš if marker_def[byte]=="SOS": flag_SOS=True #ๆœชๅฎš็พฉใƒžใƒผใ‚ซ๏ผˆใ‚ปใ‚ฐใƒกใƒณใƒˆๅ‡ฆ็†๏ผ‰ else: seg_buf=["FF{:X}".format(byte)] flag_seg=True flag_marker=False else: #ใ‚ปใ‚ฐใƒกใƒณใƒˆๅ‡ฆ็† if flag_seg==True: if(flag_seg_cnt==False): seg_count=seg_count+1 seg_size_h=byte flag_seg_cnt=True elif(flag_seg_data==False): seg_size=seg_size_h*256+byte seg_buf=seg_buf+["{:d}".format(seg_size)] seg_size=seg_size-2 byte_bufs=b'' flag_seg_data=True else: byte_bufs=byte_bufs+bytes.fromhex("{:02X}".format(byte)) seg_size=seg_size-1 if seg_size==0: #seg_buf=seg_buf+[byte_bufs.hex()] seg_buf=seg_buf+[byte_bufs] jpeg_struct=jpeg_struct+[seg_buf] byte_bufs=b'' flag_seg=False flag_seg_cnt=False flag_seg_data=False #IMAGE DATAๅ‡ฆ็† (SOSใ‚ปใ‚ฐใƒกใƒณใƒˆๅพŒ) elif flag_SOS==True and flag_seg==False: byte_bufs=byte_bufs+bytes.fromhex("{:02X}".format(byte)) #ไพ‹ๅค–ๅ‡ฆ็† else: flag_err=True if flag_err==True or flag_EOI==True: break; if flag_err==False and flag_EOI==True: print("Succeeded!!") # - # 
ไธ€ๅฟœ็ต‚ไบ†ใ—ใŸใจใใซไธ€ๅฎšใฎใคใ˜ใคใพใŒๅˆใฃใฆใ‚‹ใจ"Succeeded!!"ใจๅ‡บๅŠ›ใ™ใ‚‹ใ‚ˆใ†ใซใ—ใฆใŠใ„ใŸใคใ‚‚ใ‚ŠใชใฎใงใŸใถใ‚“ๆˆๅŠŸ # # ๆฌกใฏๆŠฝๅ‡บใ—ใŸjpegใƒ•ใ‚กใ‚คใƒซใฎๆง‹้€ (ใƒชใ‚นใƒˆๅž‹ jpeg_struct)ใฎๅ‡บๅŠ› # # len()้–ขๆ•ฐใงใ‚ปใ‚ฐใƒกใƒณใƒˆ(ๅซ๏ผšใƒžใƒผใ‚ซใฎใฟ/ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟ)ใฎๆ•ฐใ‚’ๅ‡บๅŠ› len(jpeg_struct) # ๅ„ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎใƒžใƒผใ‚ซใƒผใฎ่กจ็คบ i=0 for seg in jpeg_struct: print("Seg ",i," : ",seg[0],sep="") i+=1 # jpeg_sructใƒชใ‚นใƒˆใฎไธ€้ƒจใ‚’ไธญ่บซใฎใžใ„ใฆใฟใ‚‹ใ€‚ jpeg_struct[0] jpeg_struct[1] jpeg_struct[2] # ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟ(jpeg_struct\[10\])ใฏๅคงใใ„ใฎใงใƒ‡ใƒผใ‚ฟๅ†…ๅฎนjpeg_struct\[10\]\[2\]ใฎๅ‡บๅŠ›ใฏ็œ็•ฅ jpeg_struct[10][0],jpeg_struct[10][1] jpeg_struct[11] # ### 3.ๅ„ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎ่งฃๆž # # jpeg_structใฎๅ„ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎใƒ‡ใƒผใ‚ฟใ‚ˆใ‚Šใ‚ปใ‚ฐใƒกใƒณใƒˆใ‚’ๆง‹ๆˆใ™ใ‚‹ใƒ‘ใƒฉใƒกใƒผใ‚ฟใ‚’ๅ‡บๅŠ›ใ™ใ‚‹ใ€‚ # (ๆ ผ็ดใ™ใ‚‹ๅฝขๅผใ‚’ๆฑบใ‚ใฆใชใ„ใฎใงใจใ‚Šใ‚ใˆใšๅฐๅญ—ๅ‡บๅŠ›ใฎใฟใจใ™ใ‚‹ใ€ใ€) # # ใ‚ตใƒใƒผใƒˆใ™ใ‚‹ใ‚ปใ‚ฐใƒกใƒณใƒˆ(ใƒžใƒผใ‚ซใƒผ)ใ‚‚ใจใ‚Šใ‚ใˆใšไปฅไธ‹ใซ้™ๅฎšใ™ใ‚‹(ๅŸบๆœฌJFIFๅฝขๅผใซ้™ๅฎš) # * SOI # * APP0 JFIFใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆ # * DHT ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซ # * SOF ใƒ•ใƒฌใƒผใƒ ใƒ˜ใƒƒใƒ€(SOF0,2ใฎใฟ) # * DQT ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ # * SOS ใ‚นใ‚ญใƒฃใƒณใƒ˜ใƒƒใƒ€ # * EOI # + flag_SOI= False flag_EOI= False flag_SOS= False flag_err=False for seg in jpeg_struct: print(seg[0]) if(seg[0] == "IMG"): print(" DATA LENGTH : ",seg[1],sep="") else: if(seg[0] == "SOI"): flag_SOI=True elif(seg[0] == "EOI"): flag_EOI=True else: print(" SEG LENGTH : ",seg[1]) data=seg[2] ######## APP0 JFIFใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆ ###### if(seg[0] == "APP0"): print(" ID : ",data[0:4].decode(),sep="") #JFIF่ญ˜ๅˆฅๅญ print(" Ver : ",data[5],".",data[6],sep="") #ใƒใƒผใ‚ธใƒงใƒณ็•ชๅท print(" U : ",data[7],sep="") #ใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆใฎๅ˜ไฝ 0:ไธๅฎš 1:pixels/inch(dpi) 3: pixel/cmใ€€ print(" Xd : ",data[8]*256+data[9],sep="") #็ธฆใฎใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆ print(" Yd : ",data[10]*256+data[11],sep="") 
#ๆจชใฎใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆ print(" Xt : ",data[12],sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธใฎๆจชๅน…(ใ‚ตใƒ ใƒใ‚คใƒซ็„กใชใ‚‰0) print(" Yt : ",data[13],sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธใฎ้ซ˜ใ•(ใ‚ตใƒ ใƒใ‚คใƒซ็„กใชใ‚‰0) for i in range(data[12]*data[13]): print(" RGB",i," : (",data[14+i*3],",",data[15+i*3],",",data[16+i*3],")",sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธRGBๅ€ค ######## DQT ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ ###### elif(seg[0] == "DQT"): length = int(seg[1])-3 base = 0 while(length >0): pqn=data[base]>>4 tqn=data[base]&0x0F; if(pqn==0): qlen=64; else: qlen=128; print(" Pq",tqn," : ",pqn,sep="") #้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็ฒพๅบฆ 0;8bit , 1:16bit print(" Tq",tqn," : ",tqn,sep="") #้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅท 0 to 3 for i in range(qlen): print(" Q",tqn,"-",ascii(i)," : ",data[base+1+i],sep="") #้‡ๅญๅŒ–ๅ› ๅญ(้‡ๅญๅŒ–ไฟ‚ๆ•ฐ) length-=qlen+1 base+=qlen+1 ######## SOF0 ใƒ•ใƒฌใƒผใƒ ใ‚ฟใ‚คใƒ—0้–‹ๅง‹ใ€€(Baseline-DCT & ใƒใƒ•ใƒžใƒณ็ฌฆๅท) ###### elif(seg[0] == "SOF0" or seg[0] == "SOF2"): nf=data[5] print(" P : ",data[1]) #ใ‚ตใƒณใƒ—ใƒซใฎ็ฒพๅบฆ print(" Y : ",data[1]*256+data[2],sep="") #็”ปๅƒ็ธฆใ‚ตใ‚คใ‚บ print(" X : ",data[3]*256+data[4],sep="") #็”ปๅƒๆจชใ‚ตใ‚คใ‚บ print(" Nf : ",data[5]) #ๆง‹ๆˆ่ฆ็ด ๆ•ฐ 1;GreyScacle ,3;YCbCr or YIQ 4;CMYK for i in range(nf): print(" C",i+1," : ",data[6+i*3],sep="") #ๆง‹ๆˆ่ฆ็ด  ่ญ˜ๅˆฅๅญ 1:Y 2:Cb 3:Cr 4:I 5:Q print(" H",i+1," : ",data[7+i*3]>>4,sep="") #ๆง‹ๆˆ่ฆ็ด ใ‚ต ๆฐดๅนณๆ–นๅ‘ใƒณใƒ—ใƒซๅŒ–่ฆๅ› (ๆฏ”็އ) print(" V",i+1," : ",data[7+i*3]&0x0F,sep="") #ๆง‹ๆˆ่ฆ็ด  ๅž‚็›ดๅ ฑๅ‘Šใ‚ตใƒณใƒ—ใƒซๅŒ–่ฆๅ› (ๆฏ”็އ) print(" Tq",i+1," : ",data[8+i*3],sep="") #ๆง‹ๆˆ่ฆ็ด  ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅท ######## DHT ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ ###### elif(seg[0] == "DHT"): thn=data[0]&0x0f tcn=data[0]>>4 print(" Tc",thn," : ",tcn,sep="") #ใ‚ฏใƒฉใ‚น 0;DC , 1:1AC ๏ผŸ๏ผŸ print(" Th",thn," : ",thn,sep="") #ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท vlen=[] for i in range(16): vlen+= [data[1+i]] print(" L",i+1," ; ",data[1+i],sep="") 
#็ฌฆๅท้•ทใ”ใจใฎ็ฌฆๅทๆ•ฐ base = 17 for i in range(16): for j in range(vlen[i]): if(tcn==0): print(" V",i+1,"-",j+1," : ",data[base+j],sep="") #ๅ„็ฌฆๅท้•ทใฎ็ฌฆๅท(tcn=0ใฎใจใ)ใ€€ใƒ‡ใƒผใ‚ฟใƒผใƒ“ใƒƒใƒˆๆ•ฐ else: print(" V",i+1,"-",j+1," : ",data[base+j]>>4,",",data[base+j]&0x0F,sep="") #ๅ„็ฌฆๅท้•ทใฎ็ฌฆๅท(tcn=1ใฎใจใ) ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚นๆ•ฐใ€ใƒ‡ใƒผใ‚ฟใƒผใƒ“ใƒƒใƒˆๆ•ฐ base+=vlen[i] ######## SOS Start Of Scan ###### elif(seg[0] == "SOS"): ns=data[0] print(" Ns : ",ns) #ๆง‹ๆˆ่ฆ็ด ใฎๆ•ฐ for i in range(ns): print(" Cs",i+1," : ",data[1+i*2],sep="") #ๆง‹ๆˆ่ฆ็ด  ่ญ˜ๅˆฅๅญ print(" Td",i+1," : ",data[2+i*2]>>4,sep="") #ๆง‹ๆˆ่ฆ็ด  DCๆˆๅˆ†ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท print(" Ta",i+1," : ",data[2+i*2]&0x0F,sep="") #ๆง‹ๆˆ่ฆ็ด  ACๆˆๅˆ†ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท print(" Ss : ",data[1+ns*2],sep="") #ใ‚นใƒšใ‚ฏใƒˆใƒซ้ธๆŠž้–‹ๅง‹(้‡ๅญๅŒ–ไฟ‚ๆ•ฐ้–‹ๅง‹็•ชๅท) print(" Se : ",data[2+ns*2],sep="") #ใ‚นใƒšใ‚ฏใƒˆใƒซ้ธๆŠž็ต‚ไบ†้‡ๅญๅŒ–ไฟ‚ๆ•ฐ้–‹ๅง‹็ต‚ไบ†) print(" Ah : ",data[3+ns*2]>>4,sep="") #๏ผŸ๏ผŸ print(" Al : ",data[3+ns*2]&0x0f,sep="") #๏ผŸ๏ผŸ # - # ๅ„ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎ่ฆ็ด ใฏๆŠฝๅ‡บใงใใŸใจๆ€ใ‚ใ‚Œใ‚‹ใ€‚ # ๆฌกใซใƒใƒ•ใƒžใƒณ็ฌฆๅทๅŒ–ใ•ใ‚ŒใŸใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใ‹ใ‚‰8x8ใƒ–ใƒญใƒƒใ‚ฏๅ˜ไฝใฎDCTใฎใ‚นใƒšใ‚ฏใƒˆใƒซใƒ‡ใƒผใ‚ฟใซๅพฉๅทใ—ใฆใ„ใใ‚ใ‘ใ ใŒใ€ใ€ใ€ # DHTใจใ‹DQTใจใ‹ใฎไธญ่บซใฎๆ„ๅ‘ณใซใคใ„ใฆใ‚‚ใ†ๅฐ‘ใ—่ชฟๆŸปใŒๅฟ…่ฆ # jpegใงใฏใ‚’ๅŸบๆœฌ8x8ใƒ–ใƒญใƒƒใ‚ฏๅ˜ไฝใฎDCTใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’ใƒ‡ใƒผใ‚ฟใจใ—ใฆๆŒใฃใฆใ„ใ‚‹ใŒใ€ใใฎ8x8ใƒ–ใƒญใƒƒใ‚ฏใฎใƒ‡ใƒผใ‚ฟใฎๆ ผ็ด้ †ใฏไปฅไธ‹ใฎ่กจใฎ้€šใ‚Š(ใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณ)ใงๅพฉๅทๅŒ–ใ—ใŸๅ€คใ‚’ๅฑ•้–‹ใ—ใฆใ„ใใ€‚ # ๅ› ใฟใซไธ‹ใซ่กŒใใปใฉๅž‚็›ดๅ‘จๆณขๆ•ฐใŒ้ซ˜ใใ€ๅณใซ่กŒใใปใฉๆฐดๅนณๅ‘จๆณขๆ•ฐใŒ้ซ˜ใ0็•ช็›ฎใฎใƒ‡ใƒผใ‚ฟ(ๅทฆไธŠ)ใŒDCๆˆๅˆ†ใงใใฎใปใ‹ใŒACๆˆๅˆ†ใจใ„ใ†ใ“ใจใซใชใ‚‹ใ€‚ # # # # | || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | # |- ||:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| # |0 || 0 | 1 | 5 | 6 | 14 | 15 | 27 | 28 | # |1 || 2 | 4 | 7 | 13 | 16 | 26 | 
29 | 42 | # |2 || 3 | 8 | 12 | 17 | 25 | 30 | 41 | 43 | # |3 || 9 | 11 | 18 | 24 | 31 | 40 | 44 | 53 | # |4 || 10 | 19 | 23 | 32 | 39 | 45 | 52 | 54 | # |5 || 20 | 22 | 33 | 38 | 46 | 51 | 55 | 60 | # |6 || 21 | 34 | 37 | 47 | 50 | 56 | 59 | 61 | # |7 || 35 | 36 | 48 | 49 | 57 | 58 | 62 | 63 | # # # ๅพฉๅทๅŒ–ใ•ใ‚ŒใŸ64ๅ€‹ใฎใƒ‡ใƒผใ‚ฟๅˆ—ใฎใซใ“ใฎ่กจใงๅฏพๅฟœใ™ใ‚‹้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซTq0๏ฝž3ใฎๅ†…ๅฎน(Q-0๏ฝž63)ใฎๅ€คใ‚’ใ‹ใ‘ใ‚‹ใจๆ‰€ๆœ›ใฎใ‚นใƒšใ‚ฏใƒˆใƒซๅ€คใŒๆฑ‚ใพใ‚‹ใจใฎใ“ใจ # # ๅฎŸ้š›ใฎ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซTQHใ‚‚QnใฎnใŒๅฐใ•ใ„(DC๏ฝžไฝŽๅ‘จๆณข)ใ‚ใŸใ‚Šใฏๅฐใ•ใชๆ•ฐๅญ—ใงใ€nใŒๅคงใใ„(้ซ˜ๅ‘จๆณข)ใฎใจใ“ใ‚ใฏๅคงใใ‚ใฎๅ€คใซใชใฃใฆใ„ใฆใ€jpegใงใฏไฝŽๅ‘จๆณขๅŸŸใฏๅฏ†ใช้‡ๅญๅŒ–(DCTๅค‰ๆ›ใ—ใŸ็ตๆžœใ‚’ๅฐ‘ใชใ„ๅ€คใงๅ‰ฒใฃใฆไธธใ‚ใ‚‹)ใ€้ซ˜ๅ‘จๆณขๅŸŸ็จ‹ใ€็–Žใช้‡ๅญๅŒ–(ๅคงใใ„ๅ€คใงๅ‰ฒใ‚‹)ใ—ใฆใ„ใ‚‹ใ“ใจใŒใ‚ใ‹ใ‚‹ใ€‚(ใใ†ใ™ใ‚‹ใ“ใจใง้ซ˜ๅ‘จๆณขๅŸŸใƒ‡ใƒผใ‚ฟใฎๆƒ…ๅ ฑ้‡ใ‚’ๆธ›ใ‚‰ใ—ใฆใ„ใ‚‹ๆจกๆง˜) # ๅฎŸ้š›้ซ˜ๅ‘จๆณขๅŸŸใฎใƒ‡ใƒผใ‚ฟใฏ่’ใใฆใ‚‚ๅฎŸ้š›็›ฎ็ซ‹ใŸใชใ„ใ€‚ # # ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซใฏไปฅไธŠใฎใ‚ˆใ†ใชไป•็ต„ใฟใชใ‚ˆใ†ใงใ€ใƒ–ใƒญใƒƒใ‚ฏๅ˜ไฝใฎๅพฉๅทใ•ใˆใงใใฆใ—ใฆใ—ใพใˆใฐๅพฉๅทใƒ‡ใƒผใ‚ฟๅˆ—ใจ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซใฎใƒ‡ใƒผใ‚ฟๅˆ—ใฎ่ฆ็ด ใ”ใจใฎๆŽ›ใ‘็ฎ—ใ‚’ใ™ใ‚‹ใ“ใจใงDCTใ‚นใƒšใ‚ฏใƒˆใƒซๅ€คใ‚’ๅ†็พใ™ใ‚‹ใฎใฏ็ฐกๅ˜ใซใงใใใ†ใ€‚ # # ใƒžใƒผใ‚ซAPP0ใงID="JFIF"ๆŒ‡ๅฎšใ•ใ‚Œใ‚‹JFIFใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆใฎๅ ดๅˆใ‚ซใƒฉใƒผ็”ปๅƒใฏYCrCbใฎ3ๆˆๅˆ†ใฎๅ€ค(1ๆˆๅˆ†1byte)ใ‚’ใคใ‹ใ†ใ€‚ # (่‰ฒๅทฎๆˆๅˆ†ใฏCB,Crใฏ่ฒ ๆ•ฐใ‚‚ใจใ‚Šใˆใ‚‹ใŸใ‚ๅฎŸใƒ‡ใƒผใ‚ฟใฏ128(0x80)ใ‚ชใƒ•ใ‚ปใƒƒใƒˆใ•ใ‚Œใฆ1ใƒใ‚คใƒˆใฎunsigned intใจใ—ใฆใ‚ใคใ‹ใฃใฆใ„ใ‚‹ใ€‚) # SOF0ใงๅ„ๆˆๅˆ†ใซๅฏพๅฟœใ—ใŸ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅทใŒๆŒ‡ๅฎšใ•ใ‚Œใฆใ„ใ‚‹ใ€‚ # ไปŠๅ›žใฎไพ‹ใงใฏYใฏใƒ†ใƒผใƒ–ใƒซ0ใงCb,Crใฏใƒ†ใƒผใƒ–ใƒซ # # ๆˆๅˆ†1 # C1=1 (Y) # Tq1=0(้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ0) # H1=2 # V1=2 # # ๆˆๅˆ†2 # C2=2(Cr) # Tq2=1 # H2=1 # V2=1 # # ๆˆๅˆ†2 # C3=3(Cb) # H3=1 # V3=1 # # 
Hn,Vnใฎๆฐดๅนณๅž‚็›ดใฎใ‚ตใƒณใƒ—ใƒชใƒณใ‚ฐใƒ•ใ‚กใ‚ฏใ‚ฟใƒผใฏใŠใใ‚‰ใ็”ป็ด ใ‚’DCTใงใ‚นใƒšใ‚ฏใƒˆใƒซๅค‰ๆ›ใ™ใ‚‹ใจใใฎๅ…ƒใฎ็”ป็ด ใ‚’้–“ๅผ•ใ„ใฆDCTๅค‰ๆ›ใ—ใฆใ„ใ‚‹ใ‹ใ€ใใ†ใงใชใ„ใ‹ใซใ‚ˆใ‚‹้•ใ„ใ ใจๆ€ใ†ใŒใ€ใ„ใพใ„ใกใฉใ†ๅ–ใ‚Šๆ‰ฑใˆใฐใ„ใ„ใฎใ‹็ขบไฟกใฏใ‚‚ใฆใชใ„ใ€‚ # ่‰ฒๅทฎๆˆๅˆ†ใฎใปใ†ใŒไบบ้–“ใฎ็›ฎใงใ‚ใ‹ใ‚Šใซใใ„ใŸใ‚Cr,Cbใ‚’้–“ๅผ•ใ„ใฆใ„ใ‚‹ใฏใšใชใฎใ ใŒใ€ใใกใ‚‰ใฎHn,VnใŒ1ใงYๆˆใฎHn,VnใŒ2ใจใชใฃใฆใ„ใ‚‹ใฎใฏ้‡ˆ็„ถใจใ—ใชใ„ใ€ใ€ใ€2ใŒๅŸบๆบ–ใง1ใŒ่’ใ„ใฃใฆใ“ใจใชใฎใ‹๏ผŸ๏ผŸ # # ใจใ‚Šใ‚ใˆใšๆœฌไปถใฏ8x8ใƒ–ใƒญใƒƒใ‚ฏๅ˜ไฝใฎใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’ๅพฉๅ…ƒใ™ใ‚‹ใŸใ‚ใซใฏๅฝ“้ขใฏ้–ขไฟ‚ใชใ•ใใ†ใชใฎใงไฟ็•™ใจใ—ใฆใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใซใคใ„ใฆ่€ƒใˆใ‚‹ใ“ใจใซใ™ใ‚‹ใ€‚ # # ### 4.ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใ‚’ไฝฟใฃใŸๅพฉๅทๅŒ–๏ผˆ8x8ใƒ–ใƒญใƒƒใ‚ฏใฎDCTใ‚นใƒšใ‚ฏใƒˆใƒซใฎๅ†็พ๏ผ‰ # # ๅ‰่ฟฐใฎใจใŠใ‚ŠๅพฉๅทๅŒ–ใ—ใฆ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซใ‚’ใ‹ใ‘ใ‚‹ๅ‰ใฎ8x8ใฎใƒ–ใƒญใƒƒใ‚ฏใพใงๅ†็พใ—ใฆใ—ใพใˆใฐ็ตๆง‹ใ‚ใจใฏ็ฐกๅ˜ใชๆฐ—ใŒใ™ใ‚‹ใ€‚ # ใใ—ใฆSOSใƒ•ใƒฌใƒผใƒ ใงYCbCrใฎๅ„ๆˆๅˆ†ใ”ใจใฎใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใฎ็•ชๅท(DC,AC)ใŒTdn,TanใงๆŒ‡ๅฎšใ•ใ‚Œใฆใ„ใ‚‹ใฎใฏใ‚ใ‹ใ‚‹ใฎใ ใŒใ€ใ€ # # ๅ•้กŒใฏๅพฉๅทใซไฝฟใ†ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใฎๆ‰ฑใ„ใงใ‚ใ‚‹ใ€‚ # ่‰ฒใ€…่ชฌๆ˜ŽใŒใ‚ใ‚‹ใฎใ ใŒใ‚ขใƒซใ‚ดใƒชใ‚บใƒ ใซใ™ใ‚‹ใ‚ขใ‚คใƒ‡ใ‚ขใŒๅ‡บใฆใ“ใชใ„ใฎใงใจใ‚Šใ‚ใˆใšๆ‰‹ไฝœๆฅญใงใ™ใ“ใ—ใ ใ‘ใ‚„ใฃใฆใฟใ‚‹ใ€‚ # # ใพใšไธ€็•ชๆœ€ๅˆใซไฝฟใ†DCใฎใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซ0(Ycn=0,Thn=0)ใงๅฐ‘ใ—่€ƒใˆใฆใฟใ‚‹ใ€‚ # ไปฅไธ‹ใŒๅ…ˆใปใฉๆŠฝๅ‡บใ—ใŸใƒ†ใƒผใƒ–ใƒซใฎใƒ‡ใƒผใ‚ฟ # # ``` # DHT # SEG LENGTH : 31 # Tc0 : 0 # Th0 : 0 # L1 ; 0 # L2 ; 1 # L3 ; 5 # L4 ; 1 # L5 ; 1 # L6 ; 1 # L7 ; 1 # L8 ; 1 # L9 ; 1 # L10 ; 0 # L11 ; 0 # L12 ; 0 # L13 ; 0 # L14 ; 0 # L15 ; 0 # L16 ; 0 # V2-1 : 0 # V3-1 : 1 # V3-2 : 2 # V3-3 : 3 # V3-4 : 4 # V3-5 : 5 # V4-1 : 6 # V5-1 : 7 # V6-1 : 8 # V7-1 : 9 # V8-1 : 10 # V9-1 : 11 # ``` # # 
ใฉใ†ใ‚‚็ฌฆๅทใฏ"ใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆๅˆ—"+"ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆๅˆ—"ใงๆง‹ๆˆใ•ใ‚Œใฆใ„ใ‚‹ใ‚ˆใ†ใงLn(n:1๏ฝž16)ใฏnbitใฎใƒใƒ•ใƒžใƒณใ‚ณใƒผใƒ‰(ใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆๅˆ—)ใŒๅนพใคใ‚ใ‚‹ใ‹ใ‚’็คบใ™ๆ•ฐใงใ€ใใ“ใ‹ใ‚‰ๆฉŸๆขฐ็š„ใซใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆๅˆ—ใฎ็ณปๅˆ—(ใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผ)ใŒๆฌกใฎใ‚ˆใ†ใซๆฑ‚ใพใ‚‹ๆจกๆง˜ # # L1=0 ๅฏพๅฟœใ™ใ‚‹ใƒใƒ•ใƒžใƒณใ‚ณใƒผใƒ‰็„กใ— # # L2=1 # 1:"00"ใฎ1ใค # # L3=5 # 1:"010",2:"011",3:"100",4:"101",5:"110"ใฎ3ใค # # L4=1 # 1:"1110"ใฎ1ใค # # L5=1 # 1:"11110"ใฎ1ใค # # L6=1 # 1:"111110"ใฎ1ใค # # L7=1 # 1:"1111110"ใฎ1ใค # # L8=1 # 1:"11111110"ใฎ1ใค # # L9=1 # 1:"111111110"ใฎ1ใค # # Vn-mใฏn้•ทใฎใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆใฎm็•ช็›ฎใฎใ‚‚ใฎใซ็ถšใใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใฎ้•ทใ•ใงใใฎๅ€คใ‹ใ‚‰ๅพฉๅทๅŒ–ใ‚‰ใ‚Œใ‚‹ใƒ‡ใƒผใ‚ฟๅ€คใŒๆฑบใพใ‚‹ใ‚ˆใ†ใซใชใฃใฆใ„ใ‚‹ๆจกๆง˜ใ€‚ # Vใฏใฎๅ€คใซใ‚ˆใฃใฆ2^V้€šใ‚Šใฎใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใŒใ‚ใ‚ŠVใฎๅ€คใŒๅฐใ•ใ„้ †ใซไปฅไธ‹ใฎ้€šใ‚Šใ€‚(n,mใฏ็œ็•ฅ) # (ๅฝ“็„ถๅŒใ˜ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใงVใฎๅ€คใฏ่ขซใ‚‹ใ“ใจใฏใชใ„ใฏใšใ€‚) # # V=0(1้€šใ‚Š) # ("":0) # # V=1(2้€šใ‚Š) # ("0":-1),("1";1) # # V=2(4้€šใ‚Š) # ("00":-3),("01":-2),("10":2).("11":3) # # V=3(8้€šใ‚Š) # ("000":-7),("001":-6),("010":-5),("011":-4),("100":4),("101":5),("110":6),("111":7) # # (ไปฅ้™็œ็•ฅ) # # ใ“ใ‚Œ้ข็™ฝใ„ใ“ใจใซ1ใงๅง‹ใพใ‚‹ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใฏใใฎใพใพๆ•ฐๅ€คๅŒ–ใ—1ใงๅง‹ใพใ‚‹ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใฏๅ่ปขใ—ใฆ่ฒ ใฎๅ€คใซใชใฃใฆใ„ใ‚‹ๆ„Ÿใ˜ใŒใ™ใ‚‹(ๅฐ‘ใชใใจใ‚‚V=3ใพใงใฏใ€ใ€ใ€) # ็†ๅฑˆใ‚’่€ƒใˆใ‚‹ใฎใจใ‚Šใ‚ใˆใšใปใฃใจใ„ใฆใใฎ่ฆๅ‰‡ใŒใŸใ ใ—ใ„ใชใ‚‰ใ‚ขใƒซใ‚ดใƒชใ‚บใƒ ใฏ็ฐกๅ˜ใใ†ใ€ใ€ใ€ # # ๅพฉๅทๅŒ–ใฎๅคงใใชๆ‰‹้ †ใจใ—ใฆใฏใƒใƒ•ใƒžใƒณ็ฌฆๅทใ‚’ๆคœๅ‡บใ—ใฆใ€ๅฏพๅฟœใ™ใ‚‹Vๅ€คๅˆ†ใฎใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใ‚’ๅ–ใ‚Šๅ‡บใ—ใฆใใ‚Œใ‚’ๆ•ฐๅ€คๅŒ–ใ™ใ‚‹ใฃใฆๆ‰‹้ †ใง่กŒใ‘ใใ†ใ€‚ # # ใจใ‚Šใ‚ใˆใšๅฏพ่ฑกใจใชใ‚‹ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใฎๅ…ˆ้ ญ64ใƒใ‚คใƒˆ็จ‹ๅบฆใ‚’ๅ–ใ‚Šๅ‡บใ—ใฆ2้€ฒๅ‡บๅŠ›ใ—ใฆใฟใ‚‹ # + cnt=0 for chr in jpeg_struct[10][2].hex()[0:128]: if chr == "0" 
:print("0000",end="") elif chr == "1" :print("0001",end="") elif chr == "2" :print("0010",end="") elif chr == "3" :print("0011",end="") elif chr == "4" :print("0100",end="") elif chr == "5" :print("0101",end="") elif chr == "6" :print("0110",end="") elif chr == "7" :print("0111",end="") elif chr == "8" :print("1000",end="") elif chr == "9" :print("1001",end="") elif chr == "a" :print("1010",end="") elif chr == "b" :print("1011",end="") elif chr == "c" :print("1100",end="") elif chr == "d" :print("1101",end="") elif chr == "e" :print("1110",end="") elif chr == "f" :print("1111",end="") cnt=cnt+1 if (cnt==16): print("") cnt=0 # - # ใ“ใฎใƒ‡ใƒผใ‚ฟใ‚ˆใ‚Šๅ…ˆ้ ญใฎ"1110"ใฎๆ™‚็‚นใง4bit้•ทใฎใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆ(ใ“ใ‚Œใฏ4bit้•ทใฎใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆใŒ1ใ‚ใ‚‹ใ†ใกใฎ1็•ช็›ฎ)ใŒๆคœๅ‡บใ•ใ‚Œใ‚‹ใ€‚(ใƒใƒ•ใƒžใƒณ็ฌฆๅทใฎใ„ใ„ใจใ“ใ‚ใฏๅฏๅค‰้•ทใงใ‚ใ‚ŠใชใŒใ‚‰้ †็•ชใซๅ‡ฆ็†ใ—ใฆใ„ใ‘ใฐ็ฌฆๅทใฎ้‡่ค‡็„กใๅˆ‡ใ‚Šๅˆ†ใ‘ใงใใ‚‹ใจใ“ใ‚ใ€‚) # V4-1(็ฌฆๅท้•ท4ใฎ1็•ช็›ฎใฎVๅ€ค)ใฏ6ใชใฎใง็ถšใ6bit"101101"ใŒใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใซใชใ‚‹ใ€‚ # ใ“ใ‚Œใฏๅ…ˆใปใฉใฎใƒซใƒผใƒซใงๅ…ˆ้ ญใŒ"1"ใชใฎใงใใฎใพใพๆ•ดๆ•ฐๅŒ–ใ—ใฆ0x2D=2\*16+13=45ใจใชใ‚‹ใ€‚ # ็ฌฌไธ€ๆˆๅˆ†(Yๆˆๅˆ†ใฎๆœ€ๅˆใฎ8x8ใƒ–ใƒญใƒƒใ‚ฏใฎใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณใฎ0็•ช(DCๆˆๅˆ†)ใฎๅพฉๅทๅŒ–ๅ€คใฏ45ใ€ใ“ใ‚Œใซๅฏพๅฟœใ™ใ‚‹้‡ๅญๅŒ–ไฟ‚ๆ•ฐ(้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ0ใฎQ0-0)ใฎๅ€ค5ใ‚’ใ‹ใ‘ใฆๅพฉๅ…ƒใ—ใŸDCTใ‚นใƒšใ‚ฏใƒˆใƒซๅ€คใฏ225(Yๅ€คใชใฎใง็ตๆง‹ใ‚ใ‹ใ‚‹ใ„)ใจใชใ‚‹ใ€‚ # # # ใ“ใ‚Œใซ็ถšใ # "011100101100110000110011100111001101010110101011011100...." 
# ใฎใƒ“ใƒƒใƒˆๅˆ—ใฏACๆˆๅˆ†ใซใชใ‚‹ใฎใง้•ใ†ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซACใฎใƒ†ใƒผใƒ–ใƒซ็•ชๅท0(Tc=1,Th=0)ใ‚’ไฝฟใ†ใ“ใจใซใชใ‚‹ใ€‚ # ACใฎใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใฏLnใซ้–ขใ—ใฆใฏDCใจๆ‰ฑใ„ใฏๅŒใ˜ใ ใŒใ€Vn-mใฎๅ€คใซ้–ขใ—ใฆใฏๅฐ‘ใ—็•ฐใชใ‚Šใ€ไธŠไฝใ€ไธ‹ไฝใใ‚Œใžใ‚Œ4bitใ‚’ๅŒบๅˆฅใ™ใ‚‹ใ‚ˆใ†ใงใ€ใ€ # ไธŠไฝใ‚’ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚นใจ่จ€ใฃใฆใใฎๆ•ฐใ ใ‘ใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณใฎ้ †็•ชใซๅพ“ใฃใฆๅ€ค0ใ‚’ๅŸ‹ใ‚ใฆใใฎใฎใกใใฎไธ‹ไฝ4biใฎ่กจใ™ใƒ“ใƒƒใƒˆๆ•ฐๅˆ†ใฎใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ(่€ƒใˆๆ–นใฏDCใฎใƒ†ใƒผใƒ–ใƒซใจๅŒใ˜)ใ‚’ๅ‰ฒใ‚Šๅ‡บใ—ๅฏพๅฟœใ™ใ‚‹ๆ•ฐๅ€คใซๅค‰ๆ›ใ™ใ‚‹ๆจกๆง˜ใ€‚ # (ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚นใ€ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ)ใซใฏ็‰นๅˆฅใชๅ ดๅˆใŒใ‚ใ‚Š(0,0)ใฎๆ™‚ใฏEOB:End of Blockใจ่จ€ใฃใฆใใ“ใงใƒ–ใƒญใƒƒใ‚ฏใฎ็ต‚ใ‚ใ‚Šใ‚’ๆ„ๅ‘ณใ—ใ€ใใฎใƒ–ใƒญใƒƒใ‚ฏใฎใใ‚Œไปฅ้™ใฎACๆˆๅˆ†ใฏ)ใจใ—ใฆๅ‡ฆ็†ใ™ใ‚‹ใ€‚(15,0)ๅ ดๅˆใฏZRLใจ่จ€ใฃใฆใƒฉใƒณใƒฌใƒณใ‚ฐใ‚น16ใจใ—ใฆ0ใ‚’16ๅ€‹ๅŸ‹ใ‚ใ‚‹ๅ‡ฆ็†ใจใ—ใฆๆ‰ฑใ† # # ๅฝ“็„ถใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ้•ทใฎๆœ€ๅคงๅ€คใฏ15(0xF)ใจDCใƒ†ใƒผใƒ–ใƒซใฎๆ™‚ใ‚ˆใ‚Š้•ทใใชใ‚‹ใ€‚ # # ไปฅไธ‹ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซACใฎใƒ†ใƒผใƒ–ใƒซ็•ชๅท0(Tc=1,Th=0)ใฎๆŠœ็ฒ‹ # # ``` # DHT # SEG LENGTH : 181 # Tc0 : 1 # Th0 : 0 # L1 ; 0 # L2 ; 2 # L3 ; 1 # L4 ; 3 # L5 ; 3 # L6 ; 2 # L7 ; 4 # L8 ; 3 # L9 ; 5 # # (็œ็•ฅ) # # V2-1 : 0,1 # V2-2 : 0,2 # V3-1 : 0,3 # V4-1 : 0,0 # V4-2 : 0,4 # V4-3 : 1,1 # V5-1 : 0,5 # V5-2 : 1,2 # V5-3 : 2,1 # V6-1 : 3,1 # V6-2 : 4,1 # # (็œ็•ฅ) # # ``` # # ใƒใƒ•ใƒžใƒณ็ฌฆๅทใฏDCใฎๆ™‚ใจไธ€็ท’ใง # # L2=2 1:"00",2:"01" # L3=1 1;"100" # L4=3 1:"1010",2:"1011",3:"1100" # L5=3 1:"11010",2:"11011",3:"11100" # L6=2 1:"111010",2:"111011" # L7=4 1:"1111000",2:"1111001"3:"1111010":4:"1111011" # L8=3 1:"11111000",2:"11111001",3:"11111010" # L9=5 1:"111110110",2:"111110111",3:"111111000",4:"111111001",5:"111111010" # # (็œ็•ฅ) # # ใจใชใ‚‹ใฎใง็ฌฆๅทๅˆ— # # "011100101100110000110011100111001101010110101011011100...." 
# # ใ‹ใ‚‰2bitใฎใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆใฎ2็•ช็›ฎ"01"ใ‚’ๆคœๅ‡บใ—ใฆใ€ # ๅฏพๅฟœใ™ใ‚‹V2-2=(0,2)(ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚น,ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆๆ•ฐ)ใ‚ˆใ‚Š2bit"11"(ๅพฉๅทๅ€ค3),ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚นใฏ0ใชใฎใง็ถšใใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณๆฌก็•ชๅท1ใซ3ใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹ใ€‚ # # ใ•ใ‚‰ใซ็ถšใ2bit"00"ใŒ2bitใƒใƒ•ใƒžใƒณ็ฌฆๅทใฎ1็•ช็›ฎใชใฎใงV2-2=(0,1)ใ‚ˆใ‚Šใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณใ‚’้ฃ›ใฐใ•ใชใ„ใง็•ชๅท2ใซใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ"1"(ๅพฉๅ€ค1) # # ไปฅไธ‹ใซใƒ‡ใƒผใ‚ฟใฎ้€”ไธญใพใงใ‚’ใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆ.ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ:ใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆ.ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ:.....ใฎๅฝขๅผใงๅŒบๅˆ‡ใฃใฆใฟใŸ # # "01.11:00.1:01.10:01.10:00.0:1100.1:1100.1:1100.1:1010.:10110101011011100...." # # ใ•ใ‚‰ใซVn-m.databit:ใซVn-m.databit.....:ๅฝขๅผใซใ™ใ‚‹ใจ # # (0,2)."11":(0,1)."1":(0,2)."10":(0,2)."10":(0,1)."0",(1,1)."1",(1,1)."1":(1,1)."1":(EOB,0)."":(0,4)."0101":(0,2)."10",..... # # 3,1,2,2,-1,0,1,0,1,0,1,EOB...,..... # # # ใงEOBใง็ต‚ใ‚ใฃใฆใ„ใ‚‹ใŸใ‚11็•ช็›ฎใฎ่ฆ็ด ใ‚ˆใ‚Šใ‚ใจใฏ0ใ‚’ๅŸ‹ใ‚ใฆใƒ–ใƒญใƒƒใ‚ฏใ‚’็ต‚ไบ†ใ™ใ‚‹ # # | || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | # |- ||:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| # |0 || 45 | 3 | -1 | 0 | 0 | 0 | 0 | 0 | # |1 || 1 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | # |2 || 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | # |3 || 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | # |4 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | # |5 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | # |6 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | # |7 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | # # # ใ“ใฎใƒ–ใƒญใƒƒใ‚ฏใซ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ(Tqn=0ใฎDQT๏ผšไปฅไธ‹)ใ‚’ไฝฟใฃใฆDCTใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’ใ‚‚ใจใ‚ใฆใฟใ‚‹ใจใ€ใ€ใ€ # # ``` # DQT # SEG LENGTH : 67 # Pq0 : 0 # Tq0 : 0 # Q0-0 : 5 # Q0-1 : 3 # Q0-2 : 4 # Q0-3 : 4 # Q0-4 : 4 # Q0-5 : 3 # Q0-6 : 5 # Q0-7 : 4 # Q0-8 : 4 # Q0-9 : 4 # Q0-10 : 5 # Q0-11 : 5 # Q0-12 : 5 # Q0-13 : 6 # Q0-14 : 7 # Q0-15 : 12 # Q0-16 : 8 # Q0-17 : 7 # Q0-18 : 7 # Q0-19 : 7 # Q0-20 : 7 # Q0-21 : 15 # Q0-22 : 11 # Q0-23 : 11 # (็œ็•ฅ) # ``` # # # | || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | # |- 
# ||:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
# |0 ||145 | 9 | -3 | 0 | 0 | 0 | 0 | 0 |
# |1 || 4 | 8 | 4 | 0 | 0 | 0 | 0 | 0 |
# |2 || 8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
# |3 || 4 | 5 | 0 | 0 | 0 | 0 | 0 | 0 |
# |4 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
# |5 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
# |6 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
# |7 || 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
#
#
# Vๅ€คใ‹ใ‚‰ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚น้ฃ›ใฐใ—ใฆใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆใ‚’ๅพฉๅ…ƒใ™ใ‚‹ใฎใฏ็ฐกๅ˜ใใ†ใชใฎใงใƒใƒ•ใƒžใƒณ็ฌฆๅทใ‚’ๆคœๅ‡บใ™ใ‚‹ใ‚ขใƒซใ‚ดใƒชใ‚บใƒ ใŒ่ชฒ้กŒใ€ใ€
# ใใ‚‚ใใ‚‚ใ“ใฎๅพฉๅทๆ‰‹้ †ใงใ‚ใฃใฆใ‚‹ใฎใ‹่‡ช่บซใŒใชใ„ใ€ใ€
# ใจใ‚Šใ‚ใˆใšๆœ€ๅˆใฎ1ใƒ–ใƒญใƒƒใ‚ฏๅพฉๅทใ™ใ‚‹ใจใ“ใ‚ใ‹ใ‚‰ใ‚„ใฃใฆใฟใ‚ˆใ†ใ€
#
# ใพใšใฏใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซๅฎš็พฉใฎใ‚ปใ‚ฐใƒกใƒณใƒˆใƒ‡ใƒผใ‚ฟใ‚’ๆง‹้€ ๅŒ–ใƒ‡ใƒผใ‚ฟใจใ—ใฆไฟๅญ˜ใ™ใ‚‹ใจใ“ใ‚ใ‹ใ‚‰ใ‚„ใฃใฆใฟใ‚‹ใ€‚้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซๅฎš็พฉใ‚‚็ฐกๅ˜ใใ†ใชใฎใงไธ€็ท’ใซใ‚„ใ‚‹ใ“ใจใซใ™ใ‚‹ใ€‚
#
# ใ„ใšใ‚Œใ‚‚ใƒชใ‚นใƒˆๅž‹ใฎๅ…ฅใ‚Œๅญๆง‹้€ ใงๅฎŸ็พใ—ใฆใฟใ‚‹ใ€‚
# ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซใฏๆœ€ๅคง4ใค(0๏ฝž3)ใงใ€ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใฏAC,DCใงใใ‚Œใžใ‚Œๆœ€ๅคง4ใค(0๏ฝž3)ใชใฎใงๅ…ฅใ‚Œ็‰ฉใจใ—ใฆใฏไปฅไธ‹ใ‚’็”จๆ„
# One container per possible table number; each element is an independent
# empty list (comprehensions avoid any accidental sharing).
Q = [[] for _ in range(4)]
Ldc = [[] for _ in range(4)]
Vdc = [[] for _ in range(4)]
Lac = [[] for _ in range(4)]
Vac = [[] for _ in range(4)]

# ใ“ใ‚Œใซใƒ†ใƒผใƒ–ใƒซใฎๅ†…ๅฎนใ‚’่ฟฝๅŠ ใ—ใฆใ„ใใ€‚
#
# ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅทiใฎQi-nใฏ
#
# ```python
# Q[i][n] = ใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณn็•ช็›ฎใฎ้‡ๅญๅŒ–ใƒ•ใ‚กใ‚ฏใ‚ฟใƒผ
# ```
# ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซ็•ชๅทiใฎ LnใจVn-mใฏ
#
# ```python
# Ldc[i][n] = ็ฌฆๅท้•ทn+1ใฎใƒใƒ•ใƒžใƒณ็ฌฆๅทๆ•ฐ
# Vdc[i][n][m] = ็ฌฆๅท้•ทn+1ใฎm็•ช็›ฎใฎใƒใƒ•ใƒžใƒณ็ฌฆๅทใซ็ถšใใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ้•ท
# Lac[i][n] = ็ฌฆๅท้•ทn+1ใฎใƒใƒ•ใƒžใƒณ็ฌฆๅทๆ•ฐ
# Vac[i][n][m] = (็ฌฆๅท้•ทn+1ใฎm็•ช็›ฎใฎใƒใƒ•ใƒžใƒณ็ฌฆๅทใซๅฏพๅฟœใ™ใ‚‹ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚น,ใƒ‡ใƒผใ‚ฟใƒ“ใƒƒใƒˆ้•ท)
# ```
# ใจๅฎš็พฉใฅใ‘ใฆใ€‚
#
#
ๅ…ˆใปใฉใฎใ‚ปใ‚ฐใƒกใƒณใƒˆ่งฃๆžใซไฝฟใฃใŸใ‚ณใƒผใƒ‰ใง้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ(DQT)ใ€ใƒใƒ•ใƒžใƒณ็ฌฆๅทๅŒ–ๅฎš็พฉ(DHT)ใซ้–ขใ™ใ‚‹็ฎ‡ๆ‰€ใฎprintๅ‡บๅŠ›ใ‚’ใ‚ณใƒกใƒณใƒˆใ‚ขใ‚ฆใƒˆใ—ใฆไปฃใ‚ใ‚ŠใซไธŠ่จ˜ใƒชใ‚นใƒˆใฎๆง‹็ฏ‰ใฎใ‚ณใƒผใƒ‰ใ‚’็ต„ใฟ่พผใ‚€ใ€‚ # # + flag_SOI= False flag_EOI= False flag_SOS= False flag_err=False Q=[[],[],[],[]] Ldc=[[],[],[],[]] Vdc=[[],[],[],[]] Lac=[[],[],[],[]] Vac=[[],[],[],[]] for seg in jpeg_struct: print(seg[0]) if(seg[0] == "IMG"): print(" DATA LENGTH : ",seg[1],sep="") else: if(seg[0] == "SOI"): flag_SOI=True elif(seg[0] == "EOI"): flag_EOI=True else: print(" SEG LENGTH : ",seg[1]) data=seg[2] ######## APP0 JFIFใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆ ###### if(seg[0] == "APP0"): print(" ID : ",data[0:4].decode(),sep="") #JFIF่ญ˜ๅˆฅๅญ print(" Ver : ",data[5],".",data[6],sep="") #ใƒใƒผใ‚ธใƒงใƒณ็•ชๅท print(" U : ",data[7],sep="") #ใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆใฎๅ˜ไฝ 0:ไธๅฎš 1:pixels/inch(dpi) 3: pixel/cmใ€€ print(" Xd : ",data[8]*256+data[9],sep="") #็ธฆใฎใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆ print(" Yd : ",data[10]*256+data[11],sep="") #ๆจชใฎใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆ print(" Xt : ",data[12],sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธใฎๆจชๅน…(ใ‚ตใƒ ใƒใ‚คใƒซ็„กใชใ‚‰0) print(" Yt : ",data[13],sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธใฎ้ซ˜ใ•(ใ‚ตใƒ ใƒใ‚คใƒซ็„กใชใ‚‰0) for i in range(data[12]*data[13]): print(" RGB",i," : (",data[14+i*3],",",data[15+i*3],",",data[16+i*3],")",sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธRGBๅ€ค ######## DQT ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ ###### elif(seg[0] == "DQT"): length = int(seg[1])-3 base = 0 while(length >0): pqn=data[base]>>4 tqn=data[base]&0x0F; if(pqn==0): qlen=64; else: qlen=128; print(" Pq",tqn," : ",pqn,sep="") #้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็ฒพๅบฆ 0;8bit , 1:16bit print(" Tq",tqn," : ",tqn,sep="") #้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅท 0 to 3 for i in range(qlen): Q[tqn] += [data[base+1+i]] #print(" Q",tqn,"-",ascii(i)," : ",data[base+1+i],sep="") #้‡ๅญๅŒ–ๅ› ๅญ(้‡ๅญๅŒ–ไฟ‚ๆ•ฐ) length-=qlen+1 base+=qlen+1 ######## SOF0 ใƒ•ใƒฌใƒผใƒ ใ‚ฟใ‚คใƒ—0้–‹ๅง‹ใ€€(Baseline-DCT & 
ใƒใƒ•ใƒžใƒณ็ฌฆๅท) ###### elif(seg[0] == "SOF0" or seg[0] == "SOF2"): nf=data[5] print(" P : ",data[1]) #ใ‚ตใƒณใƒ—ใƒซใฎ็ฒพๅบฆ print(" Y : ",data[1]*256+data[2],sep="") #็”ปๅƒ็ธฆใ‚ตใ‚คใ‚บ print(" X : ",data[3]*256+data[4],sep="") #็”ปๅƒๆจชใ‚ตใ‚คใ‚บ print(" Nf : ",data[5]) #ๆง‹ๆˆ่ฆ็ด ๆ•ฐ 1;GreyScacle ,3;YCbCr or YIQ 4;CMYK for i in range(nf): print(" C",i+1," : ",data[6+i*3],sep="") #ๆง‹ๆˆ่ฆ็ด  ่ญ˜ๅˆฅๅญ 1:Y 2:Cb 3:Cr 4:I 5:Q print(" H",i+1," : ",data[7+i*3]>>4,sep="") #ๆง‹ๆˆ่ฆ็ด ใ‚ต ๆฐดๅนณๆ–นๅ‘ใƒณใƒ—ใƒซๅŒ–่ฆๅ› (ๆฏ”็އ) print(" V",i+1," : ",data[7+i*3]&0x0F,sep="") #ๆง‹ๆˆ่ฆ็ด  ๅž‚็›ดๅ ฑๅ‘Šใ‚ตใƒณใƒ—ใƒซๅŒ–่ฆๅ› (ๆฏ”็އ) print(" Tq",i+1," : ",data[8+i*3],sep="") #ๆง‹ๆˆ่ฆ็ด  ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅท ######## DHT ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ ###### elif(seg[0] == "DHT"): thn=data[0]&0x0f tcn=data[0]>>4 print(" Tc",thn," : ",tcn,sep="") #ใ‚ฏใƒฉใ‚น 0;DC , 1:1AC ๏ผŸ๏ผŸ print(" Th",thn," : ",thn,sep="") #ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท vlen=[] for i in range(16): vlen+= [data[1+i]] if(tcn==0): #DCใƒ†ใƒผใƒ–ใƒซ Ldc[thn]+=[data[1+i]] else: #ACใƒ†ใƒผใƒ–ใƒซ Lac[thn]+=[data[1+i]] #print(" L",i+1," ; ",data[1+i],sep="") #็ฌฆๅท้•ทใ”ใจใฎ็ฌฆๅทๆ•ฐ base = 17 for i in range(16): vlist=[] for j in range(vlen[i]): if(tcn==0): #DCใƒ†ใƒผใƒ–ใƒซ vlist+=[data[base+j]] #print(" V",i+1,"-",j+1," : ",data[base+j],sep="") #ๅ„็ฌฆๅท้•ทใฎ็ฌฆๅท(tcn=0ใฎใจใ)ใ€€ใƒ‡ใƒผใ‚ฟใƒผใƒ“ใƒƒใƒˆๆ•ฐ else: #ACใƒ†ใƒผใƒ–ใƒซ runlen=data[base+j]>>4 detalen=data[base+j]&0x0F if(detalen==0): if(runlen==0): vlist+=[("EOB",0)] elif(runlen==15): vlist+=[("ZRL",0)] else: vlist+=[("N/A",0)] else: vlist+=[(runlen,detalen)] #print(" V",i+1,"-",j+1," : ",data[base+j]>>4,",",data[base+j]&0x0F,sep="") #ๅ„็ฌฆๅท้•ทใฎ็ฌฆๅท(tcn=1ใฎใจใ) ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚นๆ•ฐใ€ใƒ‡ใƒผใ‚ฟใƒผใƒ“ใƒƒใƒˆๆ•ฐ if(tcn==0): #DCใƒ†ใƒผใƒ–ใƒซ Vdc[thn]+=[vlist] else: #ACใƒ†ใƒผใƒ–ใƒซ Vac[thn]+=[vlist] base+=vlen[i] ######## SOS Start Of Scan ###### elif(seg[0] == "SOS"): ns=data[0] print(" Ns : ",ns) #ๆง‹ๆˆ่ฆ็ด ใฎๆ•ฐ for 
i in range(ns): print(" Cs",i+1," : ",data[1+i*2],sep="") #ๆง‹ๆˆ่ฆ็ด  ่ญ˜ๅˆฅๅญ print(" Td",i+1," : ",data[2+i*2]>>4,sep="") #ๆง‹ๆˆ่ฆ็ด  DCๆˆๅˆ†ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท print(" Ta",i+1," : ",data[2+i*2]&0x0F,sep="") #ๆง‹ๆˆ่ฆ็ด  ACๆˆๅˆ†ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท print(" Ss : ",data[1+ns*2],sep="") #ใ‚นใƒšใ‚ฏใƒˆใƒซ้ธๆŠž้–‹ๅง‹(้‡ๅญๅŒ–ไฟ‚ๆ•ฐ้–‹ๅง‹็•ชๅท) print(" Se : ",data[2+ns*2],sep="") #ใ‚นใƒšใ‚ฏใƒˆใƒซ้ธๆŠž็ต‚ไบ†้‡ๅญๅŒ–ไฟ‚ๆ•ฐ้–‹ๅง‹็ต‚ไบ†) print(" Ah : ",data[3+ns*2]>>4,sep="") #๏ผŸ๏ผŸ print(" Al : ",data[3+ns*2]&0x0f,sep="") #๏ผŸ๏ผŸ # - print(Q[0]) print(Ldc[0]) print(Vdc[0]) print(Vdc[1]) print(Lac[0]) print(Vac[0]) print(Ldc[1]) print(Ldc[2]) # ใฉใ†ใ‚„ใ‚‰ใ†ใพใใ„ใฃใŸใฟใŸใ„ใ€‚ # # ๆฌกใซLdc,Lacใ‹ใ‚‰ใƒใƒ•ใƒžใƒณ็ฌฆๅทใ‚’ๆง‹ๆˆใ™ใ‚‹ๆ–นๆณ•ใ‚’่€ƒใˆใ‚‹ใ€ใ€ใ€ # ใ“ใ‚Œใพใงใฎ่ญฐ่ซ–ใ‚ˆใ‚Šใ€ใ€ใ“ใ‚ŒใŒใงใใฆๅพฉๅทๅŒ–ใฎใ‚ขใƒซใ‚ดใƒชใ‚บใƒ ใŒใงใใ‚Œใฐใ‚ใจใฏใ†ใพใใ„ใฃใŸใ‚ˆใ†ใชใ‚‚ใ‚“ใงใฏใชใ„ใ‹ใจๆ€ใ†ใ€‚ # # ใƒใƒ•ใƒžใƒณ็ฌฆๅทใฏใ‚ˆใใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผ(2ๅˆ†ๆœจ)ใงใ‚ใ‚‰ใ‚ใ•ใ‚Œใ‚‹ใฎใงใใ‚Œใซๅ€ฃใ„ใŸใ„ใ€‚ใ€‚ # # Lac,Ldcใฎใฏใ„ใ‚Œใคใ‹ใ‚‰ใ‚ฟใƒ—ใƒซใฎๅ…ฅใ‚Œๅญๆง‹้€ ใจใ—ใฆใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผใ‚’ๆง‹ๆˆใ™ใ‚‹ใ“ใจใ‚’่€ƒใˆใ‚‹ # # ไธ‹ใฟใŸใ„ใชๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใง ๏ผ’่ฆ็ด ใฎใ‚ฟใƒ—ใƒซใซใ‚ˆใ‚‹ใƒ‡ใƒผใ‚ฟๆง‹้€ ๆง‹ๆˆใ™ใ‚‹ใƒซใƒผใƒใƒณใ‚’่€ƒใˆใฆใฟใ‚‹ใ€‚ # # ```` # def huffman_tree(ๅผ•ๆ•ฐ): # if(ๅ†ๅธฐๅ‘ผใณๅ‡บใ•ใ‚ŒใŸๆทฑๅบฆn(็ฌฆๅท้•ท)ใฎ็ฌฆๅทใŒๅ‰ฒใ‚Šๅฝ“ใฆใŒๆฎ‹ใฃใฆใ„ใ‚‹): # nใƒ“ใƒƒใƒˆใ‚ใŒ0ใฎ็ฌฆๅทใจใ—ใฆzeroใซ็ฌฆๅทใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹ใ€ # else: # zeroใฏ huffman()ใฎๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใฎ็ตๆžœใฎใ‚ฟใƒ—ใƒซ(2่ฆ็ด )ใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹ใ€‚ # if(ๅ†ๅธฐๅ‘ผใณๅ‡บใ•ใ‚ŒใŸๆทฑๅบฆ(็ฌฆๅท้•ท)ใฎ็ฌฆๅทใŒๅ‰ฒใ‚Šๅฝ“ใฆใŒๆฎ‹ใฃใฆใ„ใ‚‹): # nใƒ“ใƒƒใƒˆใ‚ใŒ1ใฎ็ฌฆๅทใจใ—ใฆoneใซ็ฌฆๅทใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹ใ€ # else: # one ใฏhuffman()ใฎๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใฎ็ตๆžœใฎใ‚ฟใƒ—ใƒซ(2่ฆ็ด )ใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹ใ€‚ # return (zero,one) # ```` # # 
ๅผ•ๆ•ฐใจใ—ใฆใฏๆœ€ๅˆใฏLac\[i\]ใพใŸใฏใ‚’Ldc\[i\]ๆธกใ—ใฆๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใ™ใ‚‹้š›ใซใฏๅทฆไธ€่ฆ็ด ใฎใžใ„ใŸใƒชใ‚นใƒˆใ‚’ๆธกใ—ใฆใ„ใ‘ๅ‘ผใณๅ‡บใ—ๆทฑๅบฆใซๅฏพๅฟœใ—ใŸ็ฌฆๅทใฎๅ‰ฒใ‚Šๅฝ“ใฆๆ•ฐใŒใ‚ใ‹ใ‚‹ไป•็ต„ใฟใซใชใ‚‰ใชใ„ใ‹๏ผŸ๏ผŸใฃใจใ„ใ†่€ƒใˆใงใ‚ตใƒ–ใƒซใƒผใƒใƒณใ‚’็ต„ใ‚“ใงใฟใ‚‹ใ€‚ # ใ‚ใ‚“ใฉใใ•ใ„ใฎใง็ดฐใ‹ใ„ไพ‹ๅค–ๅ‡ฆ็†ใจใ‹ใฏๅ…ฅใ‚Œใชใ„ใ€‚ใƒ‡ใƒใƒƒใ‚ฐ็”จใฎๅ‡บๅŠ›ๆฉŸ่ƒฝใฏใคใใฃใฆใŠใใ€‚ # # + ## Ldc[0๏ฝž3]ใพใŸใฏLac[0๏ฝž3]ใ‹ใ‚‰ใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผใ‚’ใ‚ฟใƒ—ใƒซใฎๅ…ฅใ‚Œๅญๆง‹้€ ใงๆง‹็ฏ‰ใ™ใ‚‹้–ขๆ•ฐ ## ## ๅผ•ๆ•ฐLength_ListใฏLใฎใƒชใ‚นใƒˆใ‚’ๆธกใ™ใ€‚ ## ๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใฎใŸใณใซๅทฆ1่ฆ็ด ๅ‰ŠใฃใŸใƒชใ‚นใƒˆLength_List[1:0]ใ‚’ๆธกใ—ใฆใ„ใ ## ใ“ใ†ใ™ใ‚‹ใ“ใจใงๅธธใซLength_List[0]ใฎๅ€คใง ## ## Assign_List(ใ‚ณใƒผใƒ‰ๆœฌไฝ“ใงใฏas_listใงๆ‰ฑใ†)ใฏๅ„็ฌฆๅท้•ทใซๅนพใคใƒใƒ•ใƒžใƒณ็ฌฆๅทใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใŸใ‹ใ‚’ ## ่จ˜้Œฒใ—ใฆใŠใใƒชใ‚นใƒˆๅž‹ใงๅˆๅ›žๅ‘ผใณๅ‡บใ—ๆ™‚(Assign_Listใ‚’ๆŒ‡ๅฎšใ›ใšใ€int 0ใŒๆธกใ•ใ‚Œใ‚‹)ใซๅˆๆœŸๅŒ– ## ใ—ใฆๅ†ๅธฐๅ‘ผใณๅ‡บใ—ๆฏŽใซๅทฆ1่ฆ็ด ๅ‰ŠใฃใŸas_list[1:]ใ‚’ๆธกใ—ใฆใ„ใใ€‚ ## ## def huffman_tree(Length_List,Assign_List=0,debug=False): ## topๅ‘ผใณๅ‡บใ—ๆ™‚(Assign_Listใ‚’ๆŒ‡ๅฎšใ›ใšint 0ใฎๅ ดๅˆ)ใฏas_listใ‚’ๅˆๆœŸๅŒ–(Length_ListใŠใชใ˜่ฆ็ด ๆ•ฐใฎ0ใฎใƒชใ‚นใƒˆ) if((type(Assign_List)==int)&(Assign_List==0)): as_list=[] for i in range(len(Length_List)): as_list+=[0] ## ๅ†ๅธฐๅ‘ผใณๅ‡บใ—ๆ™‚(Assign_ListใŒใƒชใ‚นใƒˆใงใ‚ใŸใ•ใ‚ŒใŸๅ ดๅˆ)ใฏas_listใฏAssign_Listใจใ™ใ‚‹ใ€‚ elif(type(Assign_List)==list): as_list=Assign_List ## ๅ†ๅธฐๅ‘ผใณๅ‡บใ—ๆ™‚(Assign_ListใŒใƒชใ‚นใƒˆใฎๅ ดๅˆใฏ) else: return "err",[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] ## Length_ListใŒใƒชใ‚นใƒˆใงใชใ„ใ‹ใ€้•ทใ•ใŒ0ใฎๅ ดๅˆใฏไพ‹ๅค–ๅ‡ฆ็†ใจใ—ใฆใ‚จใƒฉใƒผใ‚’่ฟ”ใ™ใ€‚ if((len(Length_List)==0) | (type(Length_List)!=list)): print("err") return "err",as_list ##็ฌฆๅท0ใฎๅ‡ฆ็†(ใ‚ฟใƒ—ใƒซๅทฆๅดใฎๅ‡ฆ็†) ##ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹็ฌฆๅทๆ•ฐใŒไฝ™ใฃใฆใ„ใ‚‹ใจใใฏ็ฌฆๅทใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆ 
if(as_list[0]<Length_List[0]): if(debug):print("sig 0 len",17-len(as_list),"num",as_list[0]) zero=as_list[0] as_list[0]+=1 ## ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹็ฌฆๅทใŒไฝ™ใฃใฆใชใ„ใชใ‚‰ๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใงไธ‹ไฝๆง‹้€ ใฎใ‚ฟใƒ—ใƒซใ‚’ๆง‹็ฏ‰ ## (ใŸใ ใ—ๆœซ็ซฏๆกไปถใง็ฉบๆ–‡ๅญ—ๅˆ—""ใ‚’่จญๅฎš) else: if((len(as_list) > 1)& (as_list!=Length_List)): zero,as_list[1:]=huffman_tree(Length_List[1:],as_list[1:],debug) else: zero="" ## ็ฌฆๅท1ใฎๅ‡ฆ็†(ใ‚ฟใƒ—ใƒซๅณๅดใฎๅ‡ฆ็†) ##ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹็ฌฆๅทๆ•ฐใŒไฝ™ใฃใฆใ„ใ‚‹ใจใใฏ็ฌฆๅทใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆ if(as_list[0]<Length_List[0]): if(debug):print("sig 1 len",17-len(as_list),"num",as_list[0]) one=as_list[0] as_list[0]+=1 ## ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‹็ฌฆๅทใŒไฝ™ใฃใฆใชใ„ใชใ‚‰ๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใงไธ‹ไฝๆง‹้€ ใฎใ‚ฟใƒ—ใƒซใ‚’ๆง‹็ฏ‰ ## (ใŸใ ใ—ๆœซ็ซฏๆกไปถใง็ฉบๆ–‡ๅญ—ๅˆ—""ใ‚’่จญๅฎš) else: if((len(as_list) > 1) & (as_list!=Length_List)): one,as_list[1:]=huffman_tree(Length_List[1:],as_list[1:],debug) else: one = "" return (zero,one),as_list # - print(Ldc[0]) huffman_tree(Ldc[0]) # ็ตๆžœใฎๆœ€ๅค–ใฎใ‚ฟใƒ—ใƒซใฎๅทฆๅดใŒใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผใงๅณๅดใฏ็ฌฆๅท้•ทใฎๅ‰ฒใ‚Šไป˜ใ‘ใ‚ซใ‚ฆใƒณใƒˆ็ตๆžœใจใชใ‚‹ใ€‚ # ๅฝ“็„ถใ€ๅณๅดใฏๅ…ฅๅŠ›ใฎใƒชใ‚นใƒˆใจๅŒใ˜ใซใชใฃใฆ็ต‚ใ‚ใ‚‹(็„ก้ง„ใซๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใ‚’ใ—ใชใ„ๆกไปถใจใ—ใฆใ‚‚ใ“ใ‚Œใ‚’ไฝฟใฃใฆใ„ใ‚‹) # ๅ†ๅธฐๅ‘ผใณๅ‡บใ—ใ—ใชใ„ๅ ดๅˆใฏ็ฉบๆ–‡ๅญ—ๅˆ—""ใ‚’็ต‚ไบ†ใ•ใ›ใฆใ„ใ‚‹ใ€‚ๆญฃใ—ใ„ใƒใƒ•ใƒžใƒณ็ฌฆๅทใงใ‚ใ‚‹้™ใ‚ŠๅพฉๅทๅŒ–ใฎ้š›ใฎใƒ„ใƒชใƒผๆŽข็ดขใงใฏใ“ใ“ใพใงใŸใฉใ‚Š็€ใใ“ใจใฏใชใ„ใฏใšใ€‚ # # ใจใ‚Šใ‚ใˆใš่‚ๅฟƒใฎใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผใฏ # # ``` # ((0, (0, 1)), ((2, 3), (4, (0, (0, (0, (0, (0, (0, '')))))))) # ``` # # ใงใ“ใ‚Œใงใฏ่ฆ‹ใซใใ„ใฎใงๅฐ‘ใ—ใ ใ‘่ฆ‹ใ‚„ใ™ใใ—ใฆ # # ``` # "00" #"01" #"10" #"11" โ†ๅ…ˆ้ ญ2bitๅˆ†ใง#ใงๅŒบๅˆ‡ใฃใฆใ„ใ‚‹ # L1:( # # L2:(0, # # # # L3: (0, 1)) , ((2, 3) , (4, # L4: (0, โ†ๅ…ˆL4ใ‚ˆใ‚Š้•ทใ„็ฌฆๅทใฏ"111"ใซ็ถšใ„ใฆ0ใŒๆฅใŸใ‚‰ๅ‰ฒใ‚Šๅฝ“ใฆใ‚‰ใ‚Œใ‚‹ใ€‚ # L5: (0, # L6: (0, # L7: (0, # L8: (0, # L9: (0, '') # :))))))) # ``` # # 
# ใƒ“ใƒƒใƒˆ้•ท1(L1)ใฏ็ฌฆๅทใŒใชใใฆ
# L2ใฏ"00"ใŒL2ใฎ0็•ช็›ฎ(ๅ‡ฆ็†ใฎ้ƒฝๅˆไธŠ1ใ‹ใ‚‰ใงใชใใฆ1ใ‹ใ‚‰็•ชๅทไป˜ใ‘ใ™ใ‚‹ใ“ใจใซใ—ใŸใ€‚)ใฎ็ฌฆๅท
# L3hใฏ0"010",1"011",2"100",3"101",4"110"
# L4ใฏ 0"1110"
# ใฆใ„ใ†ๆ„Ÿใ˜ใง1bitใ”ใจ่ชญใฟใ ใ—ใฆ0ใชใ‚‰ๅทฆๅดใ€1ใชใ‚‰ๅณๅด
# ใ‚’ๆŽขใฃใฆใ„ใใ€ใใ‚ŒใŒๅ€คใฎๅ ดๅˆใฏใใฎๆŽข็ดขๆทฑๅบฆ(็ฌฆๅท้•ท)ใงไฝ•็•ช็›ฎใฎใƒใƒ•ใƒžใƒณ็ฌฆๅทๅŒ–ใ‚’็คบใ—ใ€ใ‚ฟใƒ—ใƒซใชใ‚‰ใ•ใ‚‰ใซbitใ‚’่ชญใฟๅ‡บใ—ๆŽขใฃใฆใ„ใใ“ใจใงใƒใƒ•ใƒžใƒณใƒ“ใƒƒใƒˆใฎๆคœๅ‡บใŒใงใใ‚‹ใ€‚ใ€‚
#
# ใƒ†ใƒผใƒ–ใƒซ0ใฎacๆˆๅˆ†ใฏ็ฌฆๅทใŒๅคšใ„ใฎใงใ“ใ‚“ใชๆ„Ÿใ˜ใซใชใ‚‹ใƒใƒ•ใƒžใƒณใƒ†ใƒผใƒ–ใƒซใŒๆง‹ๆˆใงใใ‚‹

# Inspect AC code-length counts for table 0, then build its Huffman tree.
Lac[0]

huffman_tree(Lac[0])

# ใ•ใ™ใŒใซใ“ใ‚Œใฏ็ฌฆๅทๆ•ฐ(็‰นใซ็ฌฆๅท้•ทใŒ้•ทใ„ใ‚‚ใฎ)ใŒๅคšใ„ใฎใงๆ‰‹ไฝœๆฅญใงๅ‡ฆ็†ใฏใ‚€ใ‚Šใ€ใ€
# ใŸใถใ‚“ใงใใฆใ‚‹ใฏใšใ€‚
#
#
# ใ“ใ“ใงใกใ‚‡ใฃใจ่ฆšๆ›ธ
#
# ๆœ€ๅˆใฏhuffman_tree้–ขๆ•ฐใฎๅฎš็พฉใงไปฅไธ‹ใฎใ‚ˆใ†ใซAssign_Listใซๅฎš็พฉๆ–‡ใงใƒชใ‚นใƒˆใฎไปฃๅ…ฅๆ–‡ใ‚’่จ˜่ฟฐใ—ใฆใ„ใŸใ‚‰
# ้–ขๆ•ฐๅฎš็พฉๅพŒไธ€ๅ›žใ—ใ‹ใ†ใพใๅฎŸ่กŒใงใใชใ‹ใฃใŸใ€‚ใ€‚ใ€‚
#
# ````
# def huffman_tree(Length_List,Assign_List=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]):
# ````
#
# ใ“ใ‚Œใฏ้–ขๆ•ฐๅฎš็พฉใ—ใŸใจใใซไฝœใ‚‰ใ‚ŒใŸใƒชใ‚นใƒˆใƒ‡ใƒผใ‚ฟ\[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\]ใฎid(cใงใ„ใ†ใƒใ‚คใƒณใ‚ฟใฟใŸใ„ใชใ‚‚ใฎ)ใŒใ“ใฎไปฃๅ…ฅๆ–‡ใงใฏ่ญ˜ๅˆฅๅญ"Assign_List"ใซๅฏพๅฟœใฅใ‘ใ‚‰ใ‚Œใ‚‹ใ‹ใ‚‰ใงใ€ใ€ใ€ไธ€ๅบฆๅฎŸ่กŒใ•ใ‚Œใใฎไธญ่บซใŒๆ›ธใๆ›ใˆใ‚‰ใ‚Œใฆใ—ใพใฃใฆไบŒๅบฆ็›ฎๅฎŸ่กŒใ™ใ‚‹ใจใใ‚‚ๅŒใ˜idใŒๆธกใ•ใ‚Œใ‚‹ใŸใ‚ใ€ๅˆๆœŸๅ€คใจใ—ใฆAssign_ListใŒ\[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\]ใชใฃใฆใชใ„ใŸใ‚ใ“ใฎ้–ขๆ•ฐใฎๅฎš็พฉใ‚’่ฆ‹็›ดใ—ใฆๆŒ‡ๅฎšใ—ใชใ„ๅ ดๅˆๆ•ดๆ•ฐใฎ0ใ‚’่จญๅฎšใ—ใฆ
# ้–ขๆ•ฐใฎๅ‡ฆ็†ใฎไธญใง่ฆ็ด 0ใฎๅˆ—ใ‚’ไฝœใฃใฆๅˆๆœŸๅŒ–ใ™ใ‚‹ใ‚ˆใ†ใซๅค‰ๆ›ดใ—ใŸใ€‚ ใ“ใ†ใ™ใ‚Œใฐใ€่ฟ”ใ‚Šๅ€คใจใ—ใฆas_listใ‚’่ฟ”ใ™ๅฟ…่ฆใ‚‚ใชใใชใ‚‹ใฎใ ใ‚ใ†ใŒใ€ใ€ใƒ‡ใƒใƒƒใ‚ฐ็”จใซ่ฟ”ใ™ใพใพใซใ—ใฆ็ฝฎใ„ใŸใ€‚
#
#
# ใ“ใ“ใฎใ‚ใŸใ‚Šใฏpythonใฎๅค‰ๆ•ฐใจใ‹ไปฃๅ…ฅใฎใƒกใ‚ซใƒ‹ใ‚บใƒ ใ‚’็†่งฃใ—ใฆใŠใ‹ใชใ„ใจใ€ใชใ‹ใชใ‹ใ‚ใ‹ใ‚Šใซใใ„ใ€
#
#
# ใ•ใฆใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผใŒๆง‹ๆˆใงใใŸ(ใŸใถใ‚“)ใฎใงใ€ใ“ใ‚ŒใจVๅ€คใฎใƒ†ใƒผใƒ–ใƒซใ‚’ไฝฟใฃใฆใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใ‹ใ‚‰ๆœ€ๅˆใฎ64ใƒ‡ใƒผใ‚ฟ(8x8ใƒ–ใƒญใƒƒใ‚ฏ)ใ‚’ๅพฉๅทๅŒ–ใ—ใฆใฟใ‚‹ใ€‚
#
# ๅค‰ๆ•ฐๅzzใฏZigzagใ‹ใ‚‰ใจใฃใŸใกใ‚‡ใฃใจใ—ใŸ้Šใณๅฟƒ๏ผˆๅพŒใงใ‚ใ‹ใฃใŸใ“ใจใ ใ‘ใฉITU-T81ใงใ‚‚ZZใŒไฝฟใ‚ใ‚ŒใฆใŸ๏ผ‰
#

# +
# Decode the first 8x8 block from the entropy-coded scan data.
# Walks the scan bytes bit by bit (MSB first), descending the Huffman tree
# built above; on a leaf, reads the announced number of data bits, sign-extends
# them per the JPEG magnitude convention, and appends coefficients to `decoded`
# in zigzag order (zz = zigzag index 0..63).
H_tree_dc_org,alist=huffman_tree(Ldc[0])
H_tree_ac_org,alist=huffman_tree(Lac[0])

##ๆˆๅˆ†
comps=0
zz=0
flag_data=False
flag_code=False
d_bits=0
h_bits=0
data=0
edata=0
blocks=0
decoded=[]
##ๆœ€ๅˆใฏDC
H_tree=H_tree_dc_org
V=Vdc[comps]

for byte in jpeg_struct[10][2]:
    mask=int(0x80)
    for i in range(8):
        bit=(byte&mask!=0)
        print("1" if bit else "0",end="")
        #Huffman็ฌฆๅทๅ–ใ‚Šๅ‡บใ—
        if(flag_data==False):
            if(bit==False):
                #็ฌฆๅท0
                element=H_tree[0]
            else:
                #็ฌฆๅท1
                element=H_tree[1]
            h_bits+=1
            if(type(element)==tuple):
                H_tree=element
            #Huffmanใ‚ณใƒผใƒ‰ๆคœๅ‡บ
            else:
                if(zz==0):
                    ##print("deb dc_huf:",zz,h_bits,element)
                    data_len=V[h_bits-1][element]
                else:
                    ##print("debug ac_huf:",zz,h_bits,element)
                    data_len=V[h_bits-1][element][1]
                if(data_len==0):#databit้•ท0ใชใ‚‰ใใฎใพใพใ‚จใƒณใ‚ณใƒผใƒ‰
                    flag_code=True
                else:
                    flag_data=True
        #databitๅ–ใ‚Šๅ‡บใ—
        elif(data_len > 0):
            data*=2
            data+= 1if(bit) else 0
            d_bits+=1
            #databit(code)็ต‚ไบ†
            if(d_bits==data_len):
                ##print("deb databit",zz,d_bits,data)
                flag_code=True
        #decodeๅ‡ฆ็†
        if(flag_code==True):
            print("")
            print("V:",V[h_bits-1][element])
            #ใ‚จใƒณใ‚ณใƒผใƒ‰
            # JPEG magnitude decoding: leading 1-bit means positive as-is,
            # otherwise the value is the one's-complement negative.
            if(data_len==0):
                ddata=0;
            else:
                if(data & (1 << (data_len-1))!=0):
                    ddata=data
                else:
                    ddata=-(data^((1<<data_len)-1))
            #ๆ ผ็ด
            if(zz==0):
                ##print("debug Vdc",zz,V[h_bits-1][element])
                print("decode",zz,ddata)
                decoded=[ddata];
                zz+=1
            else:
                ##print("debug Vac",zz,V[h_bits-1][element])
                if(type(V[h_bits-1][element][0])==int):
                    # (run, size) pair: emit `run` zeros, then the coefficient.
                    for j in range(V[h_bits-1][element][0]):
                        if(zz<64):
                            print("decode",zz,0)
                            decoded+=[0]
                            zz+=1
                    if(zz<64):
                        print("decode",zz,ddata)
                        decoded+=[ddata]
                        zz+=1
                elif(V[h_bits-1][element][0]=="EOB"):
                    # End-of-block: remaining coefficients are all zero.
                    while(zz<64):
                        #print("decode",zz,0)
                        decoded+=[0]
                        zz+=1
                elif(V[h_bits-1][element][0]=="ZRL"):
                    # Zero-run-length: sixteen zeros, no coefficient.
                    for j in range(16):
                        if(zz<64):
                            print("decode",zz,0)
                            decoded+=[0]
                            zz+=1
            flag_code=False
            flag_data=False
            d_bits=0
            h_bits=0
            data=0
            ##ใƒ–ใƒญใƒƒใ‚ฏ็ต‚ไบ†
            if(zz==64):
                blocks+=1
                H_tree=H_tree_dc_org
                V=Vdc[comps]
                # NOTE(review): 'zz==0' is a comparison, not an assignment —
                # almost certainly 'zz=0' was intended. Harmless here only
                # because the loop breaks after the first block (blocks==1).
                zz==0
            else:
                H_tree=H_tree_ac_org
                V=Vac[comps]
        mask= mask >>1
        if(blocks==1):
            break
    if(blocks==1):
        break
# -

# ๅ…ˆใฎๆ‰‹่จˆ็ฎ—(ใ ใ„ใถใƒŸใ‚นใŒใ‚ใฃใŸใฎใงใชใŠใ—ใŸใŒใ€ใ€)45,3,1,2,2,-1,0,1,0,1,0,1,0,0,.....ใจใฏ็ฌฆๅˆใ™ใ‚‹ใ€ใ€ใ€
# ใจใ‚Šใ‚ใˆใš็ฎ—ๅ‡บใ—ใŸใƒ‡ใƒผใ‚ฟใซ้‡ๅญๅŒ–ใƒ•ใ‚กใ‚ฏใ‚ฟใ‚’ใ‹ใ‘ใ‚‹
# ใ“ใ‚Œใฏ็ฐกๅ˜

# Dequantize: multiply each decoded coefficient by quantization table 0
# (still in zigzag order at this point).
dequan=[0]*64
for i in range(64):
    dequan[i]=decoded[i]*Q[0][i]
print(dequan)

# ### 5.1ๆฌกๅ…ƒใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณ้…ๅˆ—ใ‚’2ๆฌกๅ…ƒ้…ๅˆ—ใซๅค‰ๆ›ด
#
#
# ใ“ใ“ใพใงใงใˆใ‚‰ใ‚ŒใŸใƒ‡ใƒผใ‚ฟใฏใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณ(ไปฅไธ‹่กจใฎ้ †)ใ•ใ‚ŒใŸ1ๆฌกๅ…ƒ้…ๅˆ—ใชใฎใงใ€ใ“ใ‚Œใ‚’2ๆฌกๅ…ƒ้…ๅˆ—ใซใ™ใ‚‹ใ€‚
#
#
# | || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
# |- ||:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
# |0 || 0 | 1 | 5 | 6 | 14 | 15 | 27 | 28 |
# |1 || 2 | 4 | 7 | 13 | 16 | 26 | 29 | 42 |
# |2 || 3 | 8 | 12 | 17 | 25 | 30 | 41 | 43 |
# |3 || 9 | 11 | 18 | 24 | 31 | 40 | 44 | 53 |
# |4 || 10 | 19 | 23 | 32 | 39 | 45 | 52 | 54 |
# |5 || 20 | 22 | 33 | 38 | 46 | 51 | 55 | 60 |
# |6 || 21 | 34 | 37 | 47 | 50 | 56 | 59 | 61 |
# |7 || 35 | 36 | 48 | 49 | 57 | 58 | 62 | 63 |
#
#
# ๅพฉๅ…ƒใฎใ‚„ใ‚Šๆ–นใฏๅœฐ้“ใชใฎใชใ‚‰็ฐกๅ˜ใ ใŒใ€ใ€0ใ‹ใ‚‰63ใฎๆ•ฐๅญ—ใ‚’ไบŒๆฌกๅ…ƒใฎx,yใซๅค‰ๆ›ใ™ใ‚‹ใ‚จใƒฌใ‚ฌใƒณใƒˆใชๆ–นๆณ•ใฏๆ€ใ„ใคใ‹ใชใ„ใ€ใ€ใ€
# ๅฏพๅฟœ่กจใ‚’่พžๆ›ธๅž‹ใงไฝœใ‚‹ใ“ใจใซใ™ใ‚‹ใ€ใ€ใ€ๆ‰‹ๆ›ธใไฝœใฃใฆใ‚‚ใ„ใ„ใ‘ใฉไธ€ๅฟœๆ‰‹็ถšใๅ‡ฆ็†ใซๆŒ‘ๆˆฆใ—ใฆใฟใ‚‹ใ€‚(ๅคงใ—ใŸใ“ใจใฏใชใ„)
#

# +
# Build dic_zigzag[(x, y)] -> zigzag index by simulating the diagonal walk:
# (xd, yd) is the current diagonal direction, flipped at each border bounce.
dic_zigzag={}
x=0
y=0
xd=1
yd=-1
for i in range(64):
    if(y < 0):
        y=0
        xd*=-1
        yd*=-1
    if(x < 0):
        # i == 36 is the lower-left corner case where the bounce differs.
        if(i!=36):
            x=0
            xd*=-1
            yd*=-1
    if(y >=8):
        x+=2
        y=7
        xd*=-1
        yd*=-1
    if(x >=8):
        y+=2
        x=7
        xd*=-1
        yd*=-1
    dic_zigzag[(x,y)]=i
    x+=xd
    y+=yd
# -

# ๆœฌๅฝ“ใซใ‚ธใ‚ฐใ‚ถใ‚ฐใƒ‘ใ‚ฟใƒณใซใชใฃใฆใ„ใ‚‹ใ‹ใ‚’็ขบ่ชใ—ใฆใฟใ‚‹ใ€‚
#

zigzag=[]
for y in range(8):
    xlist=[]
    for x in range(8):
        xlist+=[dic_zigzag[(x,y)]]
    zigzag+=[xlist]
zigzag

# ใ†ใพใใงใใฆใ„ใ‚‹ใ“ใจใŒใ‚ใ‹ใ‚‹ใŒใ€ๅฎŸใฏใ“ใ“ใซ่‡ณใ‚‹ใพใง็ตๆง‹่‹ฆๆˆฆใ—ใŸใ€‚
#
# ใชใ‚“ใงใ‹ไธ‹ใฎใ‚ณใƒผใƒ‰ใ ใจใ†ใพใใ„ใ‹ใชใ‹ใฃใŸใ€‚

# NOTE: [[0]*8]*8 replicates ONE inner list eight times (aliasing) —
# this is exactly the pitfall demonstrated below.
zigzag_miss=[[0]*8]*8
for y in range(8):
    for x in range(8):
        zigzag_miss[y][x]=dic_zigzag[(x,y)]
zigzag_miss

# ใฉใ†ใ‚‚ไปฅไธ‹ใฎๅˆๆœŸๅŒ–ใŒใพใšใ„ใ‚ˆใ†ใงใ‚ใ‚‹ใ€‚
#
# ```python
# zigzag_miss=[[0]*8]*8
#
# ```
# ไปฅไธ‹ใฎใ‚ˆใ†ใซไธ€่ฆ็ด ใฎไปฃๅ…ฅใŒ่ค‡ๆ•ฐ่ฆ็ด ใฎไปฃๅ…ฅใซใชใฃใฆใ„ใ‚‹๏ผ๏ผŸ
# ใใ‚Œใ ใจใ†ใพใใ„ใ‹ใชใ„ใฎใฏๅฝ“็„ถใ€ใ€

zigzag_miss[0][0]=0
zigzag_miss

zigzag_miss=[[0]*8]*8
zigzag_miss[0][0]=4
zigzag_miss

# ไปฅไธ‹ใฎใ‚ˆใ†ใซไธŠ่จ˜ใฎใ‚„ใ‚Šๆ–นใ ใจ
# ๅ†…ๅดใฎ8ๅ€‹ใฎ๏ผ‘ๆฌกๅ…ƒใƒชใ‚นใƒˆใŒใƒชใ‚นใƒˆๅŒใ˜idใซใชใฃใฆใ—ใพใ†ใ€‚
# ใใฎใŸใ‚ใซไธ€่ฆ็ด ใ„ใ˜ใฃใŸใคใ‚‚ใ‚Šใงใ‚‚ใปใ‹ใฎๅ†…ๅดใƒชใ‚นใƒˆใซใ‚‚ๅๆ˜ ใ•ใ‚ŒใŸใ‚ˆใ†ใชๅฝขใซใชใ‚‹ใ€‚

zigzag_miss=[[0]*8]*8
print("2dim list type(zigzag_miss):",type(zigzag_miss))
print("2dim list type(zigzag_miss[0]):",type(zigzag_miss[0]))
print("2dim list type(zigzag_miss[0][0]):",type(zigzag_miss[0][0]))
print("2dim list id(zigzag_miss):",id(zigzag_miss))
print("2dim list id(zigzag_miss[0]):",id(zigzag_miss[0]),"โ†ๅŒใ˜")
print("2dim list id(zigzag_miss[1]):",id(zigzag_miss[1]),"โ†ๅŒใ˜")
print("2dim list id(zigzag_miss[0][0]):",id(zigzag_miss[0][0]))
print("2dim list id(zigzag_miss[1][0]):",id(zigzag_miss[1][0]))
print("")
print("2dim list type(zigzag):",type(zigzag))
print("2dim list type(zigzag[0]):",type(zigzag[0]))
print("2dim list type(zigzag[0][0]):",type(zigzag[0][0]))
print("2dim list id(zigzag):",id(zigzag))
print("2dim list id(zigzag[0]):",id(zigzag[0]),"โ†้•ใ†")
print("2dim list id(zigzag[1]):",id(zigzag[1]),"โ†้•ใ†")
print("2dim list id(zigzag[0][0]):",id(zigzag[0][0]))
print("2dim list id(zigzag[1][0]):",id(zigzag[1][0]))

# ๏ผ‘ๆฌกๅ…ƒ้…ๅˆ—ใชใ‚‰ๅˆๆœŸๅ€คใงใ™ในใฆๅŒใ˜id(ๆ•ดๆ•ฐ0ใ‚’็คบใ™)ใซใชใ‚‹ใŒใ€่ฆ็ด ใ”ใจใซๆ“ไฝœๅฏ่ƒฝ
# ใ“ใ‚Œใฏidใฎใ—ใ‚ใ™ๅ…ˆใฎๅž‹(ใ“ใฎใฐใ‚ใ„ใฏๆ•ดๆ•ฐๅž‹)ใ ใ‹ใ‚‰ใงใฏใชใ„ใ‹ใจๆŽจๅฏŸใ€‚

zigzag_miss=[0]*8
print(zigzag_miss)
print("type(zigzag_miss);",type(zigzag_miss))
print("type(zigzag_miss[0]);",type(zigzag_miss[0]))
print(id(zigzag_miss[0]),id(zigzag_miss[1]),id(zigzag_miss[2]),id(zigzag_miss[2]))
zigzag_miss[0]=1
zigzag_miss[1]=2
print(zigzag_miss)
print(id(zigzag_miss[0]),id(zigzag_miss[1]),id(zigzag_miss[2]),id(zigzag_miss[2]))

id(zigzag_miss[0]),id(zigzag_miss[1]),id(zigzag_miss[2]),id(zigzag_miss[2])

# ๅฝ“็„ถใ†ใพใใ„ใฃใŸใปใ†ใฎ2ๆฌกๅ…ƒ้…ๅˆ—ใฏๅ†…ๅดใฎ8ๅ€‹ใฎใƒชใ‚นใƒˆใฎidใŒ็•ฐใชใ‚‹ใฎใงไปฅไธ‹ใฎใ‚ˆใ†ใซ1่ฆ็ด ๅ˜ไฝใฎไปฃๅ…ฅๆ“ไฝœๅฏ่ƒฝ

zigzag[0][0]=90
zigzag

zigzag[0][0]=0
zigzag

# ใ ใ„ใถ่ฉฑใŒใใ‚ŒใŸใŒใ€ใ€ใ€๏ผˆใจใใซๅคšๆฌกๅ…ƒใฎ๏ผ‰ใƒชใ‚นใƒˆใฎๅˆๆœŸๅŒ–ใฏๆฐ—ใ‚’ไป˜ใ‘ใ‚ˆใ†๏ผ๏ผใจใ„ใ†ๆ•™่จ“
#
# ใจใ‚Šใ‚ใˆใšไธŠ่จ˜ใฎๆ–นๆณ•ใงๅพฉๅทๅŒ–ใ—ใŸใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณ้…ๅˆ—ใ‚’2ๆฌกๅ…ƒใซใฏใงใใใ†

# De-zigzag the dequantized coefficients into an 8x8 row-major list of lists.
DCT_spectrum8x8=[]
for y in range(8):
    xlist=[]
    for x in range(8):
        xlist+=[dequan[dic_zigzag[(x,y)]]]
    DCT_spectrum8x8+=[xlist]
DCT_spectrum8x8

# ใ“ใ‚Œใงไฝ•ใจใ‹ๆœ€ๅˆใฎ8x8ใƒ–ใƒญใƒƒใ‚ฏDCTใ‚นใƒšใ‚ฏใƒˆใƒซ2ๆฌกๅ…ƒ้…ๅˆ—ๅŒ–ใฏใงใใŸใฎใงใฏ็„กใ„ใ‹ใจใ€ใ€ใ€ใ€ใ€ใ€ใ€(ๆญฃ็›ด่‡ชไฟกใŒใชใ„)

# ### 6.DCTใ‚นใƒšใ‚ฏใƒˆใƒซใฎๆ็”ปใจ้€†DCTๅค‰ๆ›ใซใ‚ˆใ‚‹่ผๅบฆๅ€คYๆˆๅˆ†ใฎ็”ป็ด ๅพฉๅ…ƒ
#
# ใพใšใƒชใ‚นใƒˆๅž‹ใ‚’matplotlibใง่กจ็คบใ™ใ‚‹ใŸใ‚ใซใฏไฝœใฃใŸ2ๆฌกๅ…ƒ้…ๅˆ—ใ‚’[numpy](https://numpy.org/doc/stable/reference/)ใฎ[numpy.ndarry](https://numpy.org/doc/stable/reference/arrays.ndarray.html)ใซใ™ใ‚‹(้€†DCTๅค‰ๆ›ใ‚’ใ™ใ‚‹ๆบ–ๅ‚™ใ‚‚ใตใใ‚ใฆ)
#

DCT_spectrum8x8np=np.array(DCT_spectrum8x8, float)
DCT_spectrum8x8np

# ใ“ใ†ใ—ใฆไฝœใฃใŸnumpy.narray DCT_spectrum8x8np(DCTใ‚นใƒšใ‚ฏใƒˆใƒซ)ใ‚’ๆ็”ปใ—ใฆใฟใ‚‹ใ€‚
imgplot = plt.imshow(DCT_spectrum8x8np,cmap="bwr",vmin=-128,vmax=128) #jpegใƒ•ใ‚กใ‚คใƒซใฎๆ็”ป imgplot.axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_title("8x8 DCT Spectrum?") # ACๆˆๅˆ†ใŒๅฐใ•ใ„ใŸใ‚ใ‚ณใƒณใƒˆใƒฉใ‚นใƒˆใŒใ‚ใ‹ใ‚‹ใ‚ˆใ†ใซ็ฏ„ๅ›ฒใ‚‚-128 to +128ใซใ—ใฆใ‚ใ‚‹(ใใฎใŸใ‚DCๆˆๅˆ†ใฏ่กจ็คบไธŠใฏ้ ญๆ‰“ใก) # # ใ•ใฆใ“ใ“ใงใ€DCT้€†ๅค‰ๆ›ใซใ‚ˆใ‚Šใ‚‚ใจใฎ่ผๅบฆๅ€คYใ‚’ๅพฉๅ…ƒใ™ใ‚‹ใ€‚ # ใ“ใ‚Œ[scipy](https://docs.scipy.org/doc/scipy/reference/)ใฎ[fftpack](http://scipy.github.io/devdocs/fftpack.html)ใƒขใ‚ธใƒฅใƒผใƒซใฎ[idct()](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html#scipy.fftpack.idct)ใ‚’ไฝฟใ†ใ€‚ # JPEGใงใฏใ‚จใƒณใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐๆ™‚ใฎDCTๅค‰ๆ›ๅ‰ใซ0๏ฝž255ใฎ่ผๅบฆๅ€คใ‚’-128๏ฝž+127ใซใชใ‚‹ใ‚ˆใ†ใซใ—ใฆใ„ใ‚‹ใฎใงๅค‰ๅŒ–ๅพŒใฎๅ€คใซใฏ+128ใ‚’ใ—ใฆ # + import scipy as sp import scipy.fftpack as fft IDCT_Y=fft.idct(fft.idct(DCT_spectrum8x8np,n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho')+128 # - (IDCT_Y).astype(int) # ใ“ใ‚Œใ‚’ๆ็”ปใ—ใฆใฟใ‚‹ใ€‚ imgplot = plt.imshow(255-IDCT_Y,cmap="Greys",vmin=0,vmax=255) #jpegใƒ•ใ‚กใ‚คใƒซใฎๆ็”ป imgplot.axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_title("first 8x8 block Y conp") # ็ขบ่ชใฎใŸใ‚ๆœ€ๅˆใซmatplotlibใฎimread()ใงๅ–ใ‚Šๅ‡บใ—ใŸใ‚ฟใƒผใ‚ฒใƒƒใƒˆ็”ปๅƒ(img)ใ‚ˆใ‚Š่จˆ็ฎ—ใ—ใŸ่ผๅบฆๅ€คYใ‚’ๆฑ‚ใ‚ๆœ€ๅˆใฎ็”ปๅƒใ‚’่ฆ‹ใฆใฟใ‚‹ใ€‚ Y_img = 0.29900 * img[:,:,0] + 0.58700 * img[:,:,1] + 0.11400 * img[:,:,2] Cb_img = -0.16870 * img[:,:,0] - 0.33120 * img[:,:,1] + 0.50000 * img[:,:,2] Cr_img = 0.50000 * img[:,:,0] - 0.41870 * img[:,:,1] - 0.08130 * img[:,:,2] imgplot = plt.imshow(255-Y_img[0:8,0:8],cmap="Greys",vmin=0,vmax=255) #jpegใƒ•ใ‚กใ‚คใƒซใฎๆ็”ป imgplot.axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_title("first 8x8 Y block of target") 
(Y_img[0:8,0:8]).astype(int)

# Cross-check: forward 2-D DCT of the level-shifted reference Y block
# should reproduce the dequantized spectrum decoded from the file.
DCT_Y=fft.dct(fft.dct(Y_img[0:8,0:8]-128,n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho')
DCT_Y.astype(int)

# ไธธ็›ฎ่ชคๅทฎใจใ‹ใใ†ใ„ใฃใŸ็จ‹ๅบฆใฎ่ชคๅทฎใฏใ‚ใ‚‹ใŒๅ†็พใงใใฆใ‚‹ใ‚ˆใ†ใงใ‚ใ‚‹ใ€‚

# ### 7.่ค‡ๆ•ฐใƒ–ใƒญใƒƒใ‚ฏใฎๅพฉๅ…ƒ
#
# Yๆˆๅˆ†ใฎๅ…ˆ้ ญใฎใƒ–ใƒญใƒƒใ‚ฏใฎๅพฉๅทใŒๅฎŒๆˆใ—ใŸใฎๆฎ‹ใ‚Šใฎใƒ–ใƒญใƒƒใ‚ฏใ€ๆˆๅˆ†ใฎ็”ป็ด ๅพฉๅ…ƒใ‚’ใ™ใ‚‹ใ€‚
#
# ใ“ใ“ใ‹ใ‚‰่ฉณใ—ใ„ๆƒ…ๅ ฑใŒใปใ—ใ„ใฎใง[JPEG (Joint Photographic Experts Group)](https://www.w3.org/Graphics/JPEG/)ใฎใ‚ตใ‚คใƒˆใ‹ใ‚‰ITUๅ‹งๅ‘Šๆ–‡ๆ›ธ[ITU-T.81](https://www.w3.org/Graphics/JPEG/itu-t81.pdf)ใ‚‚็ขบ่ชใ—ใŸใ€‚ใ€‚(ๅค‰ๆ›ใฎใ‚ขใƒซใ‚ดใƒชใ‚บใƒ ใ‚‚่จ˜่ผ‰ใŒใ‚ใฃใŸใŒใ€ใ“ใ“ใฏpyhonใฎใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใจใ„ใ†ใ“ใจใ‚‚ใ‚ใ‚‹ใ—ใ€ไปŠๆ›ดใชใฎใงๅ‚็…งใ—ใชใ„ใ“ใจใซใ™ใ‚‹ใ€‚)
#
# ็ตๆžœใ‚ใ‹ใฃใŸใ“ใจใŒใ‚ใ‚Šใ€ใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใฏๅฟ…ใšใ—ใ‚‚ๅ…จYๆˆๅˆ†ใƒ‡ใƒผใ‚ฟใ™ในใฆโ†’ๅ…จCbๆˆๅˆ†ใƒ‡ใƒผใ‚ฟใ™ในใฆโ†’ๅ…จCrๆˆๅˆ†ใจใ„ใ†้ †ใงๆง‹ๆˆใ•ใ‚Œใฆใชใ„ใจใ„ใ†ใ“ใจใ€ใ€ใ€ใ€
# ใใ—ใฆใ“ใ“ใซใใฆ่ฌŽใ ใฃใŸSOF0ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎHn,VnใŒๆ„ๅ‘ณใŒใ‚ใ‹ใฃใฆใใŸใ€ใ€
#
# ็ฐกๅ˜ใช่€ƒใˆใ‹ใŸใ‚’ใ™ใ‚‹ใจHn,Ynใฏๆˆๅˆ†ใ”ใจใฎๆฐดๅนณๆ–นๅ‘ใ€ๅž‚็›ดๆ–นๅ‘ใฎๆˆๅˆ†ใƒ–ใƒญใƒƒใ‚ฏใฎๅ–ใ‚Šๅ‡บใ—ๅ˜ไฝใ‚’่กจใ™ใ‚ˆใ†ใงใ‚ใ‚‹ใ€‚ใ€ใ€
#
# JFIFใฎใ‚ซใƒฉใƒผ็”ปๅƒ(ๆˆๅˆ†ใฏ1:Y,2:Cb,3:Crใซ้™ๅฎšใ•ใ‚Œใ‚‹)ใฎๅ ดๅˆใ€ๅ…ƒใฎ็”ปๅƒใƒ‡ใƒผใ‚ฟใฎๆˆๅˆ†1(Y)ใ‚’H1ร—V1ใƒ–ใƒญใƒƒใ‚ฏใ€ๆˆๅˆ†2(Cb)ใ‚’H2ร—V2ใƒ–ใƒญใƒƒใ‚ฏใ€ๆˆๅˆ†3(Cr)ใ‚’H3ร—V3ใƒ–ใƒญใƒƒใ‚ฏใ‚’ไธ€ๅ˜ไฝ(ใ“ใ‚Œใ‚’MCU(Minimum Coded Unit)ใจใ„ใ†ใ‚‰ใ—ใ„)ใจใ—ใฆMCUใฎ็นฐใ‚Š่ฟ”ใ—ใงใ‚คใƒกใƒผใ‚ธใƒ‡ใƒผใ‚ฟใŒไฟๅญ˜ใ•ใ‚Œใฆใ„ใ‚‹ใจใฎใ“ใจใ€‚
#
# ไปŠๅ›žใฎใ‚ฟใƒผใ‚ฒใƒƒใƒˆใฏ
#
#
# ```
# SOF0
# SEG LENGTH : 17
# P : 0
# Y : 150
# X : 150
# Nf : 3
# C1 : 1
# H1 : 2
# V1 : 2
# Tq1 : 0
# C2 : 2
# H2 : 1
# V2 : 1
# Tq2 : 1
# C3 : 3
# H3 : 1
# V3 : 1
# Tq3 : 1
# ```
#
# ใชใฎใง
# Yๆˆๅˆ†2x2=4ใƒ–ใƒญใƒƒใ‚ฏ
# Cb,Crๆˆๅˆ†ๅ„1x1=1ใƒ–ใƒญใƒƒใ‚ฏ
# ใใ‚Œใ‚‰ใ‚’ใพใจใ‚ใŸใ‚‚ใฎใŒ1MCUใจใชใ‚‹ใ‚ˆใ†ใงใ‚ใ‚‹ใ€‚
# # ใใ—ใฆใ€Cr,CbใŒใƒ–ใƒญใƒƒใ‚ฏใŒๅฐ‘ใชใ„ๅˆ†ใฏYๆˆๅˆ†ใฎH,V(ๆœ€ๅคงๅ€ค)ใจใฎH,Vใฎๆฏ”1/2ใซ้–“ๅผ•ใ‹ใ‚Œใ‚Œใฆ็”ป็ด ใŒใ‚ตใƒณใƒ—ใƒชใƒณใ‚ฐใ•ใ‚Œใฆใ„ใ‚‹ใ‚ˆใ†ใงใ‚ใ‚‹ใ€‚ # # MCUใฎไธญใฎใƒ–ใƒญใƒƒใ‚ฏใฎไธฆใณๆ–นใฏใ‚ธใ‚ฐใ‚ถใ‚ฐใงใชใๅทฆไธŠใ‹ใ‚‰ๅณไธ‹ใธ่กŒใฎๆŠ˜ใ‚Š่ฟ”ใ—ใงใ•ใ‚Œใฆใ„ใ‚‹(ๆจชๆ›ธใใฎๆ–‡ๆ›ธใจไธ€็ท’)ใ€‚ # # ใพใŸๅ…จ็”ปๅƒไธญใฎMCUใฎไธฆใณๆ–นใ‚‚ๅทฆไธŠใ‹ใ‚‰ๅณไธ‹ใธ่กŒใฎๆŠ˜ใ‚Š่ฟ”ใ—ใงใ•ใ‚Œใฆใ„ใ‚‹ใจใฎใ“ใจใ€‚ # # ไปŠๅ›žใฎๅ ดๅˆใฏMCUใฎ็”ป็ด ใฏVn,Hnใฎๆœ€ๅคง(Yๆˆๅˆ†)ใซๅˆใ‚ใ›ใฆ2x2ใƒ–ใƒญใƒƒใ‚ฏๅˆ†ใคใพใ‚Š1MCU 16x16ๅ˜ไฝใงๅ…จ็”ป็ด XxYใฏ150x150ใชใฎใง10x10=100MCUใ‚ใ‚‹ใ“ใจใซใชใ‚‹ใ€‚ # # ใจใ‚Šใ‚ใˆใšMCUใซๅˆ†ๅ‰ฒใ—ใฆใ‹ใ‚‰1MCUใ‚’ๅ–ใ‚Šๅ‡บใ—ใฆๅˆ†ๆžใ—ใฆใฟใ‚‹ใ€‚ # # ใพใšSOFใƒ•ใƒฌใƒผใƒ ,SOSใƒ•ใƒฌใƒผใƒ ใ‹ใ‚‰ใ‚‚ๆƒ…ๅ ฑใ‚’ๅ–ใ‚Šๅ‡บใ™ใ€‚(JFIFใƒ•ใƒผใƒžใƒƒใƒˆใฎใ‚ซใƒฉใƒผ็”ปๅƒใซ้™ๅฎšใซใ—ใฆใ‚‹ใฎใงSOSใƒ•ใƒฌใƒผใƒ ใฎCn็ญ‰JFIFใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆใงๆฑบใพใฃใฆใ„ใ‚‹็ฎ‡ๆ‰€ใฏ็„ก่ฆ–ใ™ใ‚‹ใ€‚) # # + Q=[[],[],[],[]] Ldc=[[],[],[],[]] Vdc=[[],[],[],[]] Lac=[[],[],[],[]] Vac=[[],[],[],[]] ##ใƒ†ใƒผใƒ–ใƒซๆ•ฐ nTdc=0 nTac=0 for seg in jpeg_struct: print(seg[0]) if(seg[0] == "IMG"): print(" DATA LENGTH : ",seg[1],sep="") else: if(seg[0] == "SOI"): flag_SOI=True elif(seg[0] == "EOI"): flag_EOI=True else: print(" SEG LENGTH : ",seg[1]) data=seg[2] ######## APP0 JFIFใƒ•ใ‚ฉใƒผใƒžใƒƒใƒˆ ###### if(seg[0] == "APP0"): print(" ID : ",data[0:4].decode(),sep="") #JFIF่ญ˜ๅˆฅๅญ print(" Ver : ",data[5],".",data[6],sep="") #ใƒใƒผใ‚ธใƒงใƒณ็•ชๅท print(" U : ",data[7],sep="") #ใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆใฎๅ˜ไฝ 0:ไธๅฎš 1:pixels/inch(dpi) 3: pixel/cmใ€€ print(" Xd : ",data[8]*256+data[9],sep="") #็ธฆใฎใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆ print(" Yd : ",data[10]*256+data[11],sep="") #ๆจชใฎใƒ”ใ‚ฏใ‚ปใƒซๆฟƒๅบฆ print(" Xt : ",data[12],sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธใฎๆจชๅน…(ใ‚ตใƒ ใƒใ‚คใƒซ็„กใชใ‚‰0) print(" Yt : ",data[13],sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธใฎ้ซ˜ใ•(ใ‚ตใƒ ใƒใ‚คใƒซ็„กใชใ‚‰0) for i in range(data[12]*data[13]): print(" RGB",i," : 
(",data[14+i*3],",",data[15+i*3],",",data[16+i*3],")",sep="") #ใ‚ตใƒ ใƒใ‚คใƒซใ‚คใƒกใƒผใ‚ธRGBๅ€ค ######## DQT ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ ###### elif(seg[0] == "DQT"): length = int(seg[1])-3 base = 0 while(length >0): pqn=data[base]>>4 tqn=data[base]&0x0F; if(pqn==0): qlen=64; else: qlen=128; print(" Pq",tqn," : ",pqn,sep="") #้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็ฒพๅบฆ 0;8bit , 1:16bit print(" Tq",tqn," : ",tqn,sep="") #้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅท 0 to 3 for i in range(qlen): Q[tqn] += [data[base+1+i]] #print(" Q",tqn,"-",ascii(i)," : ",data[base+1+i],sep="") #้‡ๅญๅŒ–ๅ› ๅญ(้‡ๅญๅŒ–ไฟ‚ๆ•ฐ) length-=qlen+1 base+=qlen+1 ######## SOF0 ใƒ•ใƒฌใƒผใƒ ใ‚ฟใ‚คใƒ—0้–‹ๅง‹ใ€€(Baseline-DCT & ใƒใƒ•ใƒžใƒณ็ฌฆๅท) ###### elif(seg[0] == "SOF0" or seg[0] == "SOF2"): Nf=data[5] ##print(" P : ",data[1]) #ใ‚ตใƒณใƒ—ใƒซใฎ็ฒพๅบฆ ##print(" Y : ",data[1]*256+data[2],sep="") #็”ปๅƒ็ธฆใ‚ตใ‚คใ‚บ Y=data[1]*256+data[2] ##print(" X : ",data[3]*256+data[4],sep="") #็”ปๅƒๆจชใ‚ตใ‚คใ‚บ X=data[3]*256+data[4] Hmax=0 Vmax=0 h=[] v=[] Tq=[] ##print(" Nf : ",data[5]) #ๆง‹ๆˆ่ฆ็ด ๆ•ฐ 1;GreyScacle ,3;YCbCr or YIQ 4;CMYK] for i in range(Nf): #print(" C",i+1," : ",data[6+i*3],sep="") #ๆง‹ๆˆ่ฆ็ด  ่ญ˜ๅˆฅๅญ 1:Y 2:Cb 3:Cr 4:I 5:Q #print(" H",i+1," : ",data[7+i*3]>>4,sep="") #ๆง‹ๆˆ่ฆ็ด ใ‚ต ๆฐดๅนณๆ–นๅ‘ใƒณใƒ—ใƒซๅŒ–่ฆๅ› (ๆฏ”็އ) h+=[data[7+i*3]>>4] Hmax=h[i]if(h[i]>Hmax)else Hmax #print(" V",i+1," : ",data[7+i*3]&0x0F,sep="") #ๆง‹ๆˆ่ฆ็ด  ๅž‚็›ดๅ ฑๅ‘Šใ‚ตใƒณใƒ—ใƒซๅŒ–่ฆๅ› (ๆฏ”็އ) v+=[data[7+i*3]&0x0F] Vmax=v[i]if(v[i]>Vmax)else Vmax #print(" Tq",i+1," : ",data[8+i*3],sep="") #ๆง‹ๆˆ่ฆ็ด  ้‡ๅญๅŒ–ใƒ†ใƒผใƒ–ใƒซ็•ชๅท Tq+=[data[8+i*3]] ######## DHT ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซๅฎš็พฉ ###### elif(seg[0] == "DHT"): thn=data[0]&0x0f tcn=data[0]>>4 print(" Tc",thn," : ",tcn,sep="") #ใ‚ฏใƒฉใ‚น 0;DC , 1:1AC ๏ผŸ๏ผŸ print(" Th",thn," : ",thn,sep="") #ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท #ใƒ†ใƒผใƒ–ใƒซๆ•ฐใ‚ซใ‚ฆใƒณใƒˆ if(thn==0): nTdc+=1 else: nTac+=1 vlen=[] for i in range(16): vlen+= [data[1+i]] if(tcn==0): #DCใƒ†ใƒผใƒ–ใƒซ 
Ldc[thn]+=[data[1+i]] else: #ACใƒ†ใƒผใƒ–ใƒซ Lac[thn]+=[data[1+i]] #print(" L",i+1," ; ",data[1+i],sep="") #็ฌฆๅท้•ทใ”ใจใฎ็ฌฆๅทๆ•ฐ base = 17 for i in range(16): vlist=[] for j in range(vlen[i]): if(tcn==0): #DCใƒ†ใƒผใƒ–ใƒซ vlist+=[data[base+j]] #print(" V",i+1,"-",j+1," : ",data[base+j],sep="") #ๅ„็ฌฆๅท้•ทใฎ็ฌฆๅท(tcn=0ใฎใจใ)ใ€€ใƒ‡ใƒผใ‚ฟใƒผใƒ“ใƒƒใƒˆๆ•ฐ else: #ACใƒ†ใƒผใƒ–ใƒซ runlen=data[base+j]>>4 detalen=data[base+j]&0x0F if(detalen==0): if(runlen==0): vlist+=[("EOB",0)] elif(runlen==15): vlist+=[("ZRL",0)] else: vlist+=[("N/A",0)] else: vlist+=[(runlen,detalen)] #print(" V",i+1,"-",j+1," : ",data[base+j]>>4,",",data[base+j]&0x0F,sep="") #ๅ„็ฌฆๅท้•ทใฎ็ฌฆๅท(tcn=1ใฎใจใ) ใƒฉใƒณใƒฌใƒณใ‚ฐใ‚นๆ•ฐใ€ใƒ‡ใƒผใ‚ฟใƒผใƒ“ใƒƒใƒˆๆ•ฐ if(tcn==0): #DCใƒ†ใƒผใƒ–ใƒซ Vdc[thn]+=[vlist] else: #ACใƒ†ใƒผใƒ–ใƒซ Vac[thn]+=[vlist] base+=vlen[i] ######## SOS Start Of Scan ###### elif(seg[0] == "SOS"): Ns=data[0] ##print(" Ns : ",ns) #ๆง‹ๆˆ่ฆ็ด ใฎๆ•ฐ Td=[] Ta=[] for i in range(Ns): ##print(" Cs",i+1," : ",data[1+i*2],sep="") #ๆง‹ๆˆ่ฆ็ด  ่ญ˜ๅˆฅๅญ ##print(" Td",i+1," : ",data[2+i*2]>>4,sep="") #ๆง‹ๆˆ่ฆ็ด  DCๆˆๅˆ†ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท Td+=[data[2+i*2]>>4] ##print(" Ta",i+1," : ",data[2+i*2]&0x0F,sep="") #ๆง‹ๆˆ่ฆ็ด  ACๆˆๅˆ†ใƒใƒ•ใƒžใƒณ็ฌฆๅทใƒ†ใƒผใƒ–ใƒซ็•ชๅท Ta+=[data[2+i*2]&0x0F] ##print(" Ss : ",data[1+ns*2],sep="") #ใ‚นใƒšใ‚ฏใƒˆใƒซ้ธๆŠž้–‹ๅง‹(้‡ๅญๅŒ–ไฟ‚ๆ•ฐ้–‹ๅง‹็•ชๅท) ##print(" Se : ",data[2+ns*2],sep="") #ใ‚นใƒšใ‚ฏใƒˆใƒซ้ธๆŠž็ต‚ไบ†้‡ๅญๅŒ–ไฟ‚ๆ•ฐ้–‹ๅง‹็ต‚ไบ†) ##print(" Ah : ",data[3+ns*2]>>4,sep="") #๏ผŸ๏ผŸ ##print(" Al : ",data[3+ns*2]&0x0f,sep="") #๏ผŸ๏ผŸ # - # floatใ‹ใ‚‰intใฎใ‚ญใƒฃใ‚นใƒˆใฏๅˆ‡ใ‚Šๆจใฆใชใฎใงx่ปธ,y่ปธๆ–นๅ‘ใฎmcuๆ•ฐใฎ็ฎ—ๅ‡บใฏ็”ป็ด ๆ•ฐ/MCU็”ป็ด ๆ•ฐใŒๅ‰ฒใ‚Šๅˆ‡ใ‚Œใชใ„ๅ ดๅˆใฏ+1ใ™ใ‚‹ใ€‚ # 3้ …ๆผ”็ฎ—ๅญใจใ—ใฆใฎ (true) if (condition) else (false)ใ‚’ไฝฟใฃใฆใฟใ‚‹ใ€‚ # # (condition) ? 
(True) : (False) ๅฝขๅผใซๆ…ฃใ‚Œใฆใ‚‹ใฎใงใ‚„ใ‚„ใ“ใ—ใ„ใ€ใ€ # # ไปฅไธ‹ใ‚ณใƒผใƒ‰ใง10x10=100ใซใชใ‚‹ใฏใš # + mcu_size_x=8*Hmax mcu_size_y=8*Vmax X_mcus=X/mcu_size_x if(X%mcu_size_x==0)else int(X/mcu_size_x)+1 ##Xๆ–นๅ‘ใฎMCUๆ•ฐ Y_mcus=Y/mcu_size_y if(Y%mcu_size_y==0)else int(Y/mcu_size_y)+1 ##Yๆ–นๅ‘ใฎMCUๆ•ฐ print(X_mcus,Y_mcus) # - # MCUๅ†…ใฎๆˆๅˆ†ใ”ใจใฎใƒ–ใƒญใƒƒใ‚ฏๆ•ฐๆง‹ๆˆใ‚’ๅฎš็พฉ mcu_struct=[] for i in range(Ns): mcu_struct+=[[h[i]*v[i],(v[i],h[i])]] print(mcu_struct) # + ##ใƒใƒ•ใƒžใƒณใƒ„ใƒชใƒผๆง‹ๆˆ H_tree_dc_org=[] H_tree_ac_org=[] for i in range(nTac): Tree,alist=huffman_tree(Ldc[i]) H_tree_dc_org+=[Tree] for i in range(nTac): Tree,alist=huffman_tree(Lac[i]) H_tree_ac_org+=[Tree] #MCUใฎๆ ผ็ดใƒชใ‚นใƒˆ MCUs=[] ##MCUใฎใ‚ซใ‚ฆใƒณใƒˆ mcus=0 ##1MCUๅˆๆœŸๅŒ– MCU=[] ##ๆˆๅˆ†(ๅˆๆœŸๅ€ค0) comp=0 ##ๅ„ๆˆๅˆ†ใฎDCๆˆๅˆ†ใฎๅˆๆœŸๅŒ– data_dc_latest=[0,0,0,0] #MCUใฎๅ„ๆˆๅˆ†ๅ†…ใฎblocksๆ•ฐใฎใ‚ซใ‚ฆใƒณใƒˆๅˆๆœŸๅŒ– blocks=0 ##ๆˆๅˆ†ใฎใƒ–ใƒญใƒƒใ‚ฏๆ•ฐ/MCU blocks_max=mcu_struct[comp][0] #ใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณ็•ชๅทๅˆๆœŸๅŒ– zz=0 flag_data=False flag_code=False d_bits=0 h_bits=0 data=0 edata=0 ##8x8 ใƒ–ใƒญใƒƒใ‚ฏใƒ‡ใƒผใ‚ฟ decoded=[] ##ๆœ€ๅˆใฏDC H_tree=H_tree_dc_org[Td[comp]] V=Vdc[Td[comp]] for byte in jpeg_struct[10][2]: #print("(","{:2X}".format(byte),")",end="",sep="") mask=int(0x80) for i in range(8): bit=(byte&mask!=0) if(mcus>=X_mcus*Y_mcus):print("1" if bit else "0",end="") #print("1" if bit else "0",end="") #Huffman็ฌฆๅทๅ–ใ‚Šๅ‡บใ— if(flag_data==False): if(bit==False): #็ฌฆๅท0 element=H_tree[0] else: #็ฌฆๅท1 element=H_tree[1] h_bits+=1 if(type(element)==tuple): H_tree=element #Huffmanใ‚ณใƒผใƒ‰ๆคœๅ‡บ else: if(zz==0): #print("") #print("zz:",zz," , dc_hufbits:",h_bits," , elm=",element,sep="") #print("Vdc",h_bits,"-",element+1,":",V[h_bits-1][element],sep="") data_len=V[h_bits-1][element] else: #print("") #print("zz:",zz," , ac_hufbits:",h_bits," , elm=",element,sep="") #print("Vac",h_bits,"-",element+1,":",V[h_bits-1][element],sep="") 
data_len=V[h_bits-1][element][1] if(data_len==0):#databit้•ท0ใชใ‚‰ใใฎใพใพใ‚จใƒณใ‚ณใƒผใƒ‰ flag_code=True else: flag_data=True #databitๅ–ใ‚Šๅ‡บใ— elif(data_len > 0): data*=2 data+= 1if(bit) else 0 d_bits+=1 #databit(code)็ต‚ไบ† if(d_bits==data_len): #print("") #print("zz:",zz," , databits:" ,d_bits," , data=",data,sep="") flag_code=True #decodeๅ‡ฆ็† if(flag_code==True): #print("") #print("V",h_bits,"-",element+1,":",V[h_bits-1][element],sep="") #czgzbitๅพฉๅท if(data_len==0): ddata=0; else: if(data & (1 << (data_len-1))!=0): ddata=data else: ddata=-(data^((1<<data_len)-1)) #ๆ ผ็ด if(zz==0): ##print("debug Vdc",zz,V[h_bits-1][element])] #print("(DC)decode[",zz,"]=", data_dc_latest[comp],"+",ddata,"=",ddata+data_dc_latest[comp],sep="") data_dc_latest[comp]+=ddata decoded=[data_dc_latest[comp]]; zz+=1 else: ##print("debug Vac",zz,V[h_bits-1][element]) if(type(V[h_bits-1][element][0])==int): for j in range(V[h_bits-1][element][0]): if(zz<64): #print("decode[",zz,"]=",0,sep="") decoded+=[0] zz+=1 if(zz<64): #print("decode[",zz,"]=",ddata,sep="") decoded+=[ddata] zz+=1 elif(V[h_bits-1][element][0]=="EOB"): while(zz<64): #print("decode[",zz,"]=",0,sep="") decoded+=[0] zz+=1 elif(V[h_bits-1][element][0]=="ZRL"): for j in range(16): if(zz<64): #print("decode[",zz,"]=",zz,0,sep="") decoded+=[0] zz+=1 flag_code=False flag_data=False d_bits=0 h_bits=0 data=0 ##ใƒ–ใƒญใƒƒใ‚ฏ็ต‚ไบ† if(zz==64): #print("********* comp=",comp," brock=",blocks," >fin",sep="") MCU+=[decoded] decoded=[] blocks+=1 ##ๆˆๅˆ†็ต‚ไบ† if(blocks==blocks_max): #print("******************** comp:",comp," >fin",sep="") blocks=0 comp+=1 ##MCU็ต‚ไบ† if(comp==Ns): #print("**************************** MCU=",mcus," >fin",sep="") #print("") MCUs+=[MCU] MCU=[] mcus+=1 comp=0 blocks_max=mcu_struct[comp][0] #print("Td[",comp,"]:",Td[comp],sep="") H_tree=H_tree_dc_org[Td[comp]] V=Vdc[Td[comp]] zz=0 else: #print("Ta[",comp,"]:",Ta[comp],sep="") H_tree=H_tree_ac_org[Ta[comp]] V=Vac[Ta[comp]] #if(mcus==4):break mask= 
mask >>1 #if(mcus==4):break # - len(MCUs) len(MCUs[0]) # MCUๆ•ฐใฏใกใ‚ƒใ‚“ใจ100ใซใชใฃใŸใ—ใ€ๆœ€ๅˆใฎMCUใฎ1MCUใ‚ใŸใ‚Šใฎ่ฆ็ด ๆ•ฐใ‚‚6ใชใฎใงใŸใถใ‚“ใงใใŸใ€‚ # # ้€†้‡ๅญๅŒ–ใจใ‚ธใ‚ฐใ‚ถใ‚ฐใ‚นใ‚ญใƒฃใƒณใฎไบŒๆฌกๅ…ƒ้…ๅˆ—ๅŒ– # + dqMCUs=[] for mcu in MCUs: dqMCU=[] blocks=0 comp=0 for data64 in mcu: dequantized=[] for y in range(8): buf=[] for x in range(8): buf+=[data64[dic_zigzag[(x,y)]]*Q[Tq[comp]][dic_zigzag[(x,y)]]] dequantized+=[buf] #print(blocks,comp) dqMCU+=[dequantized] blocks+=1 if(blocks==mcu_struct[comp][0]): blocks=0 comp+=1 dqMCUs+=[dqMCU] # - dqMCUs[0][4] # 16x16ใฎ๏ผ“ๆˆๅˆ†ใ”ใฎใฎ็”ป็ด ใฎ้…ๅˆ—ใซไธฆใณๆ›ฟใˆใฆใฟใ‚‹ใ€‚ใจใ‚Šใ‚ใˆใšใฏๅŠ›ๆŠ€ใงใ€ใ€ใ€ # ใพใšใ‚นใƒšใ‚ฏใƒˆใƒซใฎใพใพ้…็ฝฎCr,Cbใฎ8x8ใฏๅทฆไธŠใซใคใ‚ใฆใ€ใ€ mcu_id=0 Y_spectrum=[] Cb_spectrum=[] Cr_spectrum=[] for y in range(mcu_size_y): buf_Y=[] buf_Cb=[] buf_Cr=[] for x in range(mcu_size_x): blk_num=int(y/8)*2+int(x/8) buf_Y+=[dqMCUs[mcu_id][blk_num][y%8][x%8]] if((x<8 )& (y<8)): buf_Cb+=[dqMCUs[mcu_id][4][y][x]] buf_Cr+=[dqMCUs[mcu_id][5][y][x]] else: buf_Cb+=[0] buf_Cr+=[0] Y_spectrum+=[buf_Y] Cb_spectrum+=[buf_Cb] Cr_spectrum+=[buf_Cr] # ใจใ‚Šใ‚ใˆใšๅ„ๆˆๅˆ†ใฎใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’ๆใ„ใฆใฟใ‚‹ # + Y_spectrum_np=np.array(Y_spectrum,float) Cb_spectrum_np=np.array(Cb_spectrum,float) Cr_spectrum_np=np.array(Cr_spectrum,float) fig, axs = plt.subplots(1, 3,figsize=[10.5,3.5]) #1x3ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(Y_spectrum_np,cmap="bwr",vmin=-128,vmax=128) axs[0].set_xticks([]) axs[0].set_yticks([]) axs[0].set_title("Y") axs[1].imshow(Cb_spectrum_np,cmap="bwr",vmin=-128,vmax=128) axs[1].set_xticks([]) axs[1].set_yticks([]) axs[1].set_title("Cb") axs[2].imshow(Cr_spectrum_np,cmap="bwr",vmin=-128,vmax=128) axs[2].set_xticks([]) axs[2].set_yticks([]) axs[2].set_title("Cr") # - # ใ•ใฆDCT้€†ๅค‰ๆ›ใ—ใฆใ€ๅค‰ๆ›็ตๆžœใ‚’ๆใ„ใฆใฟใ‚‹ใ€‚ # # 
ไฝ•ๅบฆใ‹ๆ‚ฉใ‚“ใงDCๆˆๅˆ†ใฏๅ‰ใฎใƒ–ใƒญใƒƒใ‚ฏใจใฎๅทฎ(ๆœ€ๅˆใฎใƒ–ใƒญใƒƒใ‚ฏใฎใฟ็ตถๅฏพๅ€ค)ใงใ‚ใ‚‹ใจใ„ใ†ใ“ใจใ‚’ใ—ใฃใŸใ€‚ # + idct_Y=np.zeros((16,16),float) idct_Cr=np.zeros((16,16),float) idct_Cb=np.zeros((16,16),float) idct_Y[0:8,0:8]=fft.idct(fft.idct(Y_spectrum_np[0:8,0:8],n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho')+128 idct_Y[0:8,8:16]=fft.idct(fft.idct(Y_spectrum_np[0:8,8:16],n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho')+128 idct_Y[8:16,0:8]=fft.idct(fft.idct(Y_spectrum_np[8:16,0:8],n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho')+128 idct_Y[8:16,8:16]=fft.idct(fft.idct(Y_spectrum_np[8:16,8:16],n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho')+128 idct_Cb[0:8,0:8]=fft.idct(fft.idct(Cb_spectrum_np[0:8,0:8],n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho') idct_Cr[0:8,0:8]=fft.idct(fft.idct(Cr_spectrum_np[0:8,0:8],n=8,axis=0,norm='ortho'),n=8,axis=1,norm='ortho') fig, axs = plt.subplots(1, 3,figsize=[10.5,3.5]) #1x3ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(255-idct_Y,cmap="Greys",vmin=0,vmax=256) axs[0].set_xticks([]) axs[0].set_yticks([]) axs[0].set_title("Y") axs[1].imshow(idct_Cb,cmap="bwr",vmin=-128,vmax=128) axs[1].set_xticks([]) axs[1].set_yticks([]) axs[1].set_title("Cb") axs[2].imshow(idct_Cr,cmap="bwr",vmin=-128,vmax=128) axs[2].set_xticks([]) axs[2].set_yticks([]) axs[2].set_title("Cr") # - # ใ„ใ„ๆ„Ÿใ˜ # # Cbใฏ8x8ใซ้–“ๅผ•ใ„ใฆใ‚‹ใฎใงใ€ๅฝ“็„ถๅพฉๅทๅŒ–ใ—ใŸใ‚‚ใฎใฏๅทฆไธŠใฎ8x8ใฎ้ƒจๅˆ†ใ—ใ‹ๆˆๅˆ†ใŒใชใ„ใ€ # # ใ•ใฆใ€ๅฎŸ้š›ใ‚ฟใƒผใ‚ฒใƒƒใƒˆ็”ปๅƒใฎๅŒใ˜4ใƒ–ใƒญใƒƒใ‚ฏ(ๅทฆไธŠ)ใจ่ผๅบฆๅ€ค(Yๆˆๅˆ†)ใ‚’ไปŠๅ›žๅพฉๅทๅŒ–ใ—ใŸYๆˆๅˆ†ใจๆฏ”ในใฆใฟใ‚‹ใ€‚ # + fig, axs = plt.subplots(1, 2,figsize=[7.,3.5]) #1x2ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(255-(Y_img[0:16,0:16]).astype(int),cmap="Greys",vmin=0,vmax=256) axs[0].axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[0].axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[0].axes.set_title("Original image Ycomp zoomup" ) 
axs[1].imshow(255-idct_Y.astype(int),cmap="Greys",vmin=0,vmax=256) axs[1].axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[1].axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[1].axes.set_title("decoded Ycomp16x16" ) # + active="" # ใ“ใ‚“ใชๆ„Ÿใ˜ใงๅพฉๅ…ƒใงใใŸใ€‚ # # ใ•ใฆCr,Cbใ‚’16x16ใซใ™ใ‚‹ๅ•้กŒใ ใŒใ€ใ“ใฎใพใพ1็”ป็ด ใ‚’2x2ใซๅคงใใใ—ใŸใ‚Šใ—ใŸใ‚Šใ€1็”ป็ด ใŠใใซ้…็ฝฎใ—ใฆไฝ•ใ‚‰ใ‹ใฎๆ–นๆณ•ใง่ฃœๅฎŒใ—ใฆใ‚‚ใ„ใ„ใจใฏๆ€ใ†ใฎใ ใŒใ€ใ€ๅฐ‘ใ—ใ‚ขใ‚คใƒ‡ใ‚ขใŒใ‚ใฃใฆใ€ใ€ใใ‚Œใฏๅพฉๅทใ—ใŸ8x8ใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’ๅทฆไธŠใซใคใ‚ใŸ16x16ใฎใƒ‡ใƒผใ‚ฟ(ๅฝ“็„ถๅทฆไธŠ8x8ไปฅๅค–ใฏใ‚ผใƒญ๏ผ‰ใซๅฏพใ—ใฆ16x16ใฎDCT้€†ๅค‰ๆ›ใ‚’ๆ–ฝใ—ใฆใฟใŠใ‚ˆใ†ใฃใฆ่€ƒใˆใ€ๅŽŸ็†็š„ใซใงใใ‚‹ใฏใšใ€‚(ๅ› ใฟใซไป•ๆง˜ใจใ—ใฆๆญฃใ—ใ„ใ‚„ใ‚Šๆ–นใ‚’็Ÿฅใ‚‰ใชใ„ใจใ„ใ†ใ‹ใพใ ่ชฟในใฆใ‚‚ใชใ„) # # - idct_Cb=fft.idct(fft.idct(Cb_spectrum_np,n=16,axis=0,norm='ortho'),n=16,axis=1,norm='ortho')*2 idct_Cr=fft.idct(fft.idct(Cr_spectrum_np,n=16,axis=0,norm='ortho'),n=16,axis=1,norm='ortho')*2 # 16x16ใซ้–“ๅผ•ใ้ƒฝๅˆไฟ‚ๆ•ฐใŒๅค‰ใ‚ใ‚‹ใฎใงใใฎๅˆ†2ใ‚’ใ‹ใ‘ใ‚‹ใ€‚ # # ไธ€ๅฟœ่ฃœ่ถณใ™ใ‚‹ใจ[scipy](https://docs.scipy.org/doc/scipy/reference/)ใฎ[idct()](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html#scipy.fftpack.idct)ใ‚’ใฎใƒชใƒ•ใ‚กใƒฉใƒณใ‚นใซใ‚ˆใ‚‹ใจ ไธ€ๆฌกๅ…ƒใฎDCT้€†ๅค‰ๆ›ใฏ็‰นใซtypeๆŒ‡ๅฎšใชใ—ใ ใจTYPE3ใฎDCTๅค‰ๆ›ใจๅŒใ˜(type2ใฎ้€†ๅค‰ๆ›)ใซใชใฃใฆใ€norm='ortho'ๆŒ‡ๅฎšใ—ใŸๅ ดๅˆไปฅไธ‹ใซใชใ‚‹ใ€‚($F(n)$ใŒใ‚นใƒšใ‚ฏใƒˆใƒซ$f(k)$ใฏๅ…ƒใƒ‡ใƒผใ‚ฟNใฏใ‚นใƒšใ‚ฏใƒˆใƒซๆ•ฐไปŠๅ›žใฏ8ใจใ‹16ใจใ‹) # # $$f(k) = F(0)/\sqrt{N} + \sqrt{2/N} \sum_{n=1}^{N-1}{F(n)\cos(\pi(k+0.5)n/N)}$$ # # ใง2ๆฌกๅ…ƒใฎๅ ดๅˆใฏ # # $$f(k,j) = F(0,0)/N + 2/N \sum_{n=1}^{N-1} \sum_{m=1}^{N-1}{F(n,m)\cos(\pi(k+0.5)n/N)}\cos(\pi(j+0.5)m/N)$$ # # ใงใ€ไฟ‚ๆ•ฐใฏDCๆˆๅˆ†ใง$1/N$ใ€ACๆˆๅˆ†ใง$2/N$ใจใชใ‚‹ใฎใงใ€$N=8$ใงๅค‰ๆ›ใ—ใŸใ„ใ‚‚ใฎใ‚’$N=16$ใงๅค‰ๆ›ใ™ใ‚‹ใจๅŠๅˆ†ใซใชใ‚‹ใฎใงใใฎ่ฃœๆญฃใง2ใ‚’ใ‹ใ‘ใฆใ‚‹ใ€‚ # 
ใใ‚ŒใงCb,Crใ‚’16x16ใฎ็”ป็ด ๅ€คใ‚’ๅ–ใ‚Šๅ‡บใ—ใฆRGBใซๅค‰ๆ›ใ—ใฆใ‚ซใƒฉใƒผ็”ปๅƒใงๅ†็พใ—ใฆใฟใ‚‹ใ€‚ # + red_img = idct_Y + 1.4020 *idct_Cr green_img = idct_Y - 0.3440 *idct_Cb - 0.7141 *idct_Cr blue_img = idct_Y + 1.7720 *idct_Cb img16x16=np.ndarray((16,16,3),dtype=int) img16x16[:,:,0]=red_img img16x16[:,:,1]=green_img img16x16[:,:,2]=blue_img imgplot = plt.imshow(img16x16) imgplot.axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_title("decodedl data RGB image" ) # - # ใ‚ชใƒชใ‚ธใƒŠใƒซใ‚คใƒกใƒผใ‚ธใจๆฏ”ในใฆใฟใ‚‹ใ€‚ # + fig, axs = plt.subplots(1, 2,figsize=[7.,3.5]) #1x2ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(img[0:16,0:16,:]) axs[0].axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[0].axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[0].axes.set_title("Original image zoomup" ) axs[1].imshow(img16x16) axs[1].axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[1].axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[1].axes.set_title("decoded collor img 16x16" ) # - # ใชใ‚“ใจใ‹1MCUๅˆ†(16x16็”ป็ด )ใฎใ‚ซใƒฉใƒผ็”ปๅƒใฎๅพฉๅ…ƒใŒใงใใŸใ€‚ # # ใ“ใ‚Œใ‹ใ‚‰ๅ…จMCUๅพฉๅ…ƒใ—ใฆใ„ใใ€‚ # # # # # ### 8.ๅ…จ็”ปๅƒใฎๅพฉๅ…ƒ # # ๅ…จMCUใ‚’10ร—10ใคใพใ‚Š160x160pixelใซไธฆในใฆใƒ‡ใƒผใ‚ฟใ‚’ๅพฉๅ…ƒใ™ใ‚‹ใ€‚ # ็”ปๅƒใ‚ตใ‚คใ‚บใฏ็”ปๅƒใฏ150x150ใ ใ‘ใฉ16x16ๅ˜ไฝใ ใจ็ซฏๆ•ฐใชใฎใงๅณ10pixelใ€ๅทฆ10pixelใฏ็„ก้ง„ใƒ‡ใƒผใ‚ฟใŒไฝœใ‚‰ใ‚Œใ‚‹ใŒใ€ใ€ใ€ไธ€ๅฟœใ“ใ‚Œใ‚‚ๅ†็พใ—ใฆใฟใ‚‹ใ€‚ # # ใพใšๅ…ฅใ‚Œ็‰ฉใ‚’dct_spectrum_npใจใ„ใ†ๅ็งฐใง็”จๆ„ # # ๅ…ˆใปใฉ่จˆ็ฎ—ใ—ใŸmcu_size_x,mcu_size_y(1MCUใฎ็”ป็ด ใ‚ตใ‚คใ‚บ)ใจX_mcus,Y_mcus(็”ปๅƒๅ…จไฝ“ใฎMCUใฎxๆ–นๅ‘ๅˆ—ๆ•ฐใจyๆ–นๅ‘่กŒๆ•ฐ)ใ‚’ใคใ‹ใ†(ๅ†—้•ทใ ใ‘ใฉใ‚ใ‹ใ‚Šใ‚„ใ™ใ„ใ‚ˆใ†ใซใ‚‚ใ†ไธ€ๅบฆ่จˆ็ฎ—ใฎcodeใ‚’่จ˜่ฟฐใ—ใฆใŠใ) # # ๆฑŽ็”จๆ€งใ‚’ๆŒใŸใ›ใ‚‹ใŸใ‚ๆˆๅˆ†ใฏๆˆๅˆ†็•ชๅทใจใ—ใฆใ“ใ“ใงใฏ็‰นๆฎตYCbCrใฏๅˆ†ใ‘ใชใ„ใ‚ˆใ†ใซใ—ใฆใŠใ # + mcu_size_x=8*Hmax mcu_size_y=8*Vmax X_mcus=X/mcu_size_x if(X%mcu_size_x==0)else 
int(X/mcu_size_x)+1 ##Xๆ–นๅ‘ใฎMCUๆ•ฐ Y_mcus=Y/mcu_size_y if(Y%mcu_size_y==0)else int(Y/mcu_size_y)+1 ##Yๆ–นๅ‘ใฎMCUๆ•ฐ dct_spectrum_np=np.zeros((mcu_size_y*Y_mcus,mcu_size_x*X_mcus,Nf),float) print(dct_spectrum_np.shape) # - # ๅทฆไธŠใ‹ใ‚‰ๅณไธ‹ใซใ‹ใ‘ใฆ่กŒใ€ๅˆ—ใฎ้ †็•ชใงๅ˜ไฝใงๅ‡ฆ็†ใ—ใฆใ„ใใฎใง100ใ‚ใ‚‹MCU(0็•ชto99็•ช)ใพใงใฎๅทฆไธŠใฎไฝ็ฝฎใฏ้…ๅˆ—ไธŠ # # ๅ„MCUใฎ็”ป็ด ใฎๅทฆไธŠใฎไฝ็ฝฎใฏไปฅไธ‹ใฎใ‚ˆใ†ใซใชใ‚‹ใใ“ใ‚’ๅŸบๆบ–(ๅทฆ)ใซใ—ใŸ16x16็”ป็ด MCU(ๅŽณๅฏ†ใซใฏmcu_size_y x mcu_size_x)ใ‚’ไธฆในใฆใ„ใ for i in range(len(MCUs)): print("MCU[","{:2d}".format(i),"]:(y,x)=",(int(i/Y_mcus)*mcu_size_y,i%X_mcus*mcu_size_x)) # ใ“ใ‚Œใ‚‰ใฎไฝ็ฝฎใ‚’ๅŸบๆบ–ใจใ—ใฆๅ…ˆใปใฉๅŠ›ๆŠ€ใงใ‚„ใฃใŸMCUโ†’ๅ„ๆˆๅˆ†ใซๅค‰ๆ›ใ™ใ‚‹ใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’้…็ฝฎใ—ใฆใฟใ‚‹ใ€‚ # ใพใšใ€ๅ…ˆใปใฉใฎๅŠ›ๆŠ€codeใ‚’ใ‚นใƒžใƒผใƒˆใซใƒขใƒ‡ใ‚ฃใƒ•ใ‚กใ‚คใ—ใคใคไปปๆ„ใฎ1MCUใ‚’ๆ ผ็ดใ™ใ‚‹ๆฉŸ่ƒฝใ‚’้–ขๆ•ฐๅฎš็พฉใ™ใ‚‹ # (ๅ…ˆใปใฉใฏlistๅž‹ใ ใฃใŸใฎใ‚’ไปŠๅบฆใฏndarrayๅž‹ใซใชใฃใฆใ„ใ‚‹ใ“ใจใ‚‚ๅๆ˜ ) # # ๅ…ˆใปใฉใฎmcuๅ†…ใฎๆง‹ๆˆใ‚’่กจใ™mcu_structใ‚‚ๅผ•ๆ•ฐใจใ™ใ‚‹ใ€‚(ๅผ•ๆ•ฐใ ใ‘ใงๅ‡ฆ็†ใ™ใ‚‹ใ“ใจใ‚’ๆ„่ญ˜ใ—ใฆmcu_size_x,mcu_size_yใ‚„X_mcus,Y_mcusใฏ้–ขๆ•ฐๅ†…ใงไฝฟใ‚ใชใ„ใงmcu_structใ‹ใ‚‰่จˆ็ฎ—ใ™ใ‚‹ใ“ใจใซใ™ใ‚‹ใ€‚ mcu_struct=[] for i in range(Ns): mcu_struct+=[[v[i]*h[i],(v[i],h[i])]] print(mcu_struct) # + def MCU_place (mcu_spectrum,nf,MCU,mcu_struct): i=0 for comp in range(ns): for Y_block in range(mcu_struct[comp][1][0]): for X_block in range(mcu_struct[comp][1][1]): for y in range(8): for x in range(8): mcu_spectrum[Y_block*8+y,X_block*8+x,comp]=MCU[i][y][x] i+=1 # - # pythonใฎใ‚คใƒณใƒ‡ใƒณใƒˆใงๅ‡ฆ็†ใฎใƒฌใƒ™ใƒซใŒๆฑบใพใ‚‹ใจใ“ใ‚ใฏ่‹ฆๆ‰‹ใ€ใ€ใ€ใ“ใ†ใ„ใ†ใ‚ณใƒผใƒ‰ใงi+=1ใฎใ‚คใƒณใƒ‡ใƒณใƒˆไฝ็ฝฎใฎ้–“้•ใ„ใซๆฐ—ใฅใใซใใ„๏ผˆๆ…ฃใ‚Œใฎๅ•้กŒ๏ผŸ๏ผŸ๏ผ‰ใ€ใ€ใ€ # # ใ“ใฎ้–ขๆ•ฐใ‚’ไฝฟใ„ใฃใฆMCUใ‹ใ‚‰ใ‚นใƒšใ‚ฏใƒˆใƒซใ‚’ๅ–ใ‚Šๅ‡บใ—ใฆๅ‰่ฟฐใฎMCUไธฆในๆ–นใง้…ๅˆ—dct_spectrum_npใซ้…็ฝฎใ—ใฆใ„ใ for i in range(len(dqMCUs)): 
y_base=int(i/Y_mcus)*mcu_size_y x_base=(i%X_mcus)*mcu_size_x MCU_place(dct_spectrum_np[y_base:y_base+mcu_size_y,x_base:x_base+mcu_size_x,:],Nf,dqMCUs[i],mcu_struct) # + fig, axs = plt.subplots(1, 3,figsize=[10.5,3.5]) #1x3ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(dct_spectrum_np[:,:,0].astype(int),cmap="bwr",vmin=-128,vmax=128) axs[0].set_xticks([]) axs[0].set_yticks([]) axs[0].set_title("Y") axs[1].imshow(dct_spectrum_np[:,:,1].astype(int),cmap="bwr",vmin=-128,vmax=128) axs[1].set_xticks([]) axs[1].set_yticks([]) axs[1].set_title("Cb") axs[2].imshow(dct_spectrum_np[:,:,2].astype(int),cmap="bwr",vmin=-128,vmax=128) axs[2].set_xticks([]) axs[2].set_yticks([]) axs[2].set_title("Cr") # - dct_spectrum_np[0:16,0:16,0].astype(int) # ใ“ใ‚“ใชๆ„Ÿใ˜ใงใ‚นใƒšใ‚ฏใƒˆใƒซใ ใ‘ใชใ‚‰ในใฆใฟใŸใ€Yๆˆๅˆ†ใฎใ‚นใƒšใ‚ฏใƒˆใƒซใฎใƒ—ใƒญใƒƒใƒˆใซใฏๅ…ƒ็”ปๅƒใฎ้ขๅฝฑใŒใ†ใฃใ™ใ‚‰ใจ่ฆ‹ใˆใ‚‹ใ€‚ # # ๆฌกใซDCT้€†ๅค‰ๆ›ใ€ๅค‰ๆ›ใฏๅ…ˆใปใฉใฎMCU_placeใฎๅ‡ฆ็†ใจไธ€็ท’ใซใ‚„ใ‚‹ใปใ†ใŒใ‚ˆใ•ใใ†ใใ‚Œใ‚’ใƒขใƒ‡ใ‚ฃใƒ•ใ‚กใ‚คใ—ใฆใฟใ‚‹ใ€‚ # + def MCU_IDC_place (mcu_imgs,nf,MCU,mcu_struct): hmax=0 vmax=0 for comp_para in mcu_struct: if(comp_para[1][0]>vmax):vmax=comp_para[1][0] if(comp_para[1][1]>hmax):hmax=comp_para[1][1] i=0 for comp in range(ns): v_n=int(vmax/mcu_struct[comp][1][0]) h_n=int(hmax/mcu_struct[comp][1][1]) mcu_spectrum=np.zeros((vmax*8,hmax*8)) #print(mcu_spectrum.shape) for Y_block in range(mcu_struct[comp][1][0]): for X_block in range(mcu_struct[comp][1][1]): #print("block ",i,"th:comp=",comp,":(Y,X)=(",Y_block,",",X_block,"):(v,h)=(",v_n,",",h_n,")",sep="") for y in range(8): for x in range(8): mcu_spectrum[Y_block*8+y,X_block*8+x]=MCU[i][y][x] mcu_imgs[ Y_block*8 : Y_block*8 + 8*v_n , X_block*8 : X_block*8 + 8*h_n , comp]= \ fft.idct(fft.idct(mcu_spectrum[Y_block*8 : Y_block*8 + 8*v_n , X_block*8 : X_block*8 + 8*h_n],n=8*v_n,axis=0,norm='ortho')\ ,n=8*h_n,axis=1,norm='ortho')*np.sqrt(v_n*h_n) #mcu_imgs[ Y_block*8 : Y_block*8 + 8*v_n , X_block*8 : 
X_block*8 + 8*h_n , comp]=0 i+=1 # - # ๅ…ฅใ‚Œ็‰ฉใ‚‚decoded_YCbCr_npใจใ—ใฆๆ–ฐใ—ใไฝœใฃใฆไธŠ่จ˜้–ขๆ•ฐใ‚’MCUใ”ใจใซIDCTๅค‰ๆ›ใ—ใŸใ‚‚ใฎใ‚’ไธฆในใฆใฟใ‚‹ใ€‚ # + decoded_YCbCr_np=np.zeros((mcu_size_y*Y_mcus,mcu_size_x*X_mcus,Ns),float) for i in range(len(dqMCUs)): y_base=int(i/Y_mcus)*mcu_size_y x_base=(i%X_mcus)*mcu_size_x #print("*************** MCU",i,":(y_base,x_base)=(",y_base,",",x_base,")",sep="") MCU_IDC_place(decoded_YCbCr_np[y_base:y_base+mcu_size_y,x_base:x_base+mcu_size_x,:],Nf,dqMCUs[i],mcu_struct) fig, axs = plt.subplots(1, 3,figsize=[10.5,3.5]) #1x3ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(256-decoded_YCbCr_np[0:,0:,0].astype(int)-128,cmap="Greys",vmin=0,vmax=256) axs[0].set_xticks([]) axs[0].set_yticks([]) axs[0].set_title("Y") axs[1].imshow(decoded_YCbCr_np[:,:,1].astype(int),cmap="bwr",vmin=-128,vmax=128) axs[1].set_xticks([]) axs[1].set_yticks([]) axs[1].set_title("Cb") axs[2].imshow(decoded_YCbCr_np[:,:,2].astype(int),cmap="bwr",vmin=-128,vmax=128) axs[2].set_xticks([]) axs[2].set_yticks([]) axs[2].set_title("Cr") # - # ใฉใ†ใ‚„ใ‚‰ใ†ใพใ่จ€ใฃใŸ้›ฐๅ›ฒๆฐ—ใชใฎใงRGBๅˆๆˆใจ16x160ใ‹ใ‚‰ๅทฆไธŠ150x150ใฎใ‚’ใ—ใฆใƒ—ใƒญใƒƒใƒˆใ—ใฆใฟใ‚‹ # + dec_red_img = decoded_YCbCr_np[0:Y,0:X,0] +128 + 1.4020 *decoded_YCbCr_np[0:Y,0:X,2] dec_green_img = decoded_YCbCr_np[0:Y,0:X,0] +128 - 0.3440 *decoded_YCbCr_np[0:Y,0:X,1] - 0.7141 *decoded_YCbCr_np[0:Y,0:X,2] dec_blue_img = decoded_YCbCr_np[0:Y,0:X,0] +128 + 1.7720 *decoded_YCbCr_np[0:Y,0:X,1] decoded_img=np.ndarray((Y,X,Ns),dtype=int) decoded_img[:,:,0]=dec_red_img.astype(int) decoded_img[:,:,1]=dec_green_img.astype(int) decoded_img[:,:,2]=dec_blue_img.astype(int) imgplot = plt.imshow(decoded_img) imgplot.axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค imgplot.axes.set_title("decodedl data RGB image" ) # - # ใกใ‚‡ใฃใจ็”ป็ด 
็ฏ„ๅ›ฒ(0-255)ใ‹ใ‚‰ใฏใฟๅ‡บใฆใ‚ฏใƒชใƒƒใƒ”ใƒณใ‚ฐใ•ใ‚Œใฆใ‚‹ๅ€คใŒใ‚ใ‚‹ใฟใŸใ„ใงๆ–‡ๅฅใ‚’ใ„ใ‚ใ‚Œใฆใ„ใ‚‹ใŒใ†ใพใ่จ€ใฃใŸๆ„Ÿใ˜ใงใ‚ใ‚‹ใ€‚ # # ๆœ€ๅพŒใซใ‚ฟใƒผใ‚ฒใƒƒใƒˆใฎใ‚ชใƒชใ‚ธใƒŠใƒซ็”ปๅƒใจไธฆในใฆใฟใ‚‹ใ€‚ # + fig, axs = plt.subplots(1, 2,figsize=[7.,3.5]) #1x2ใฎใƒžใƒซใƒใฃใƒ—ใƒญใƒƒใƒˆใฎไฝœๆˆ axs[0].imshow(img) axs[0].axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[0].axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[0].axes.set_title("Original image zoomup" ) axs[1].imshow(decoded_img) axs[1].axes.set_xticks([]) #x่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[1].axes.set_yticks([]) #y่ปธใฎ็›ฎ็››ใ‚’ๅ‰Š้™ค axs[1].axes.set_title("decoded collor img" ) # -
jpeg_binary_data_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ' Zipline environment' # language: python # name: zipline # --- # <img alt="QuantRocket logo" src="https://www.quantrocket.com/assets/img/notebook-header-logo.png"> # # ยฉ Copyright Quantopian Inc.<br> # ยฉ Modifications Copyright QuantRocket LLC<br> # Licensed under the [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/legalcode). # # <a href="https://www.quantrocket.com/disclaimer/">Disclaimer</a> # # The Correlation Coefficient # By <NAME> and <NAME> with example algorithms by <NAME> # The correlation coefficient measures the extent to which the relationship between two variables is linear. Its value is always between -1 and 1. A positive coefficient indicates that the variables are directly related, i.e. when one increases the other one also increases. A negative coefficient indicates that the variables are inversely related, so that when one increases the other decreases. The closer to 0 the correlation coefficient is, the weaker the relationship between the variables. # # The correlation coefficient of two series $X$ and $Y$ is defined as # $$r = \frac{Cov(X,Y)}{std(X)std(Y)}$$ # where $Cov$ is the covariance and $std$ is the standard deviation. # # Two random sets of data will have a correlation coefficient close to 0: # ## Correlation vs. Covariance # # Correlation is simply a normalized form of covariance. They are otherwise the same and are often used semi-interchangeably in everyday conversation. It is obviously important to be precise with language when discussing the two, but conceptually they are almost identical. # # ### Covariance isn't that meaningful by itself # # Let's say we have two variables $X$ and $Y$ and we take the covariance of the two. 
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# + jupyter={"outputs_hidden": false}
# Build a noisy linear relationship and look at the raw covariance.
# np.cov returns the full 2x2 covariance matrix; entry [0, 1] is Cov(X, Y).
X = np.random.rand(50)
Y = 2 * X + np.random.normal(0, 0.1, 50)
np.cov(X, Y)[0, 1]
# -

# So now what? What does this mean? Correlation uses information about the variance of X and Y to normalize this metric. Once we've normalized the metric to the -1 to 1 scale, we can make meaningful statements and compare correlations.
#
# To see how this is done consider the formula.
#
# $$\frac{Cov(X, Y)}{std(X)std(Y)}$$
#
# $$= \frac{Cov(X, Y)}{\sqrt{var(X)}\sqrt{var(Y)}}$$
#
# $$= \frac{Cov(X, Y)}{\sqrt{Cov(X, X)}\sqrt{Cov(Y, Y)}}$$

# To demonstrate this let's compare the correlation and covariance of two series.

# + jupyter={"outputs_hidden": false}
# A noise-free linear relationship: the correlation is exactly 1, while the
# covariance value depends on the (arbitrary) scale of the data.
X = np.random.rand(50)
Y = 2 * X + 4

print('Covariance of X and Y: \n' + str(np.cov(X, Y)))
print('Correlation of X and Y: \n' + str(np.corrcoef(X, Y)))
# -

# ## Why do both `np.cov` and `np.corrcoef` return matrices?
#
# The covariance matrix is an important concept in statistics. Often people will refer to the covariance of two variables $X$ and $Y$, but in reality that is just one entry in the covariance matrix of $X$ and $Y$. For each input variable we have one row and one column. The diagonal is just the variance of that variable, or $Cov(X, X)$, entries off the diagonal are covariances between different variables. The matrix is symmetric across the diagonal. Let's check that this is true.
# + jupyter={"outputs_hidden": false} cov_matrix = np.cov(X, Y) # We need to manually set the degrees of freedom on X to 1, as numpy defaults to 0 for variance # This is usually fine, but will result in a slight mismatch as np.cov defaults to 1 error = cov_matrix[0, 0] - X.var(ddof=1) print('error: ' + str(error)) # + jupyter={"outputs_hidden": false} X = np.random.rand(50) Y = np.random.rand(50) plt.scatter(X,Y) plt.xlabel('X Value') plt.ylabel('Y Value') # taking the relevant value from the matrix returned by np.cov print('Correlation: ' + str(np.cov(X,Y)[0,1]/(np.std(X)*np.std(Y)))) # Let's also use the builtin correlation function print('Built-in Correlation: ' + str(np.corrcoef(X, Y)[0, 1])) # - # Now let's see what two correlated sets of data look like. # + jupyter={"outputs_hidden": false} X = np.random.rand(50) Y = X + np.random.normal(0, 0.1, 50) plt.scatter(X,Y) plt.xlabel('X Value') plt.ylabel('Y Value') print('Correlation: ' + str(np.corrcoef(X, Y)[0, 1])) # - # Let's dial down the relationship by introducing more noise. # + jupyter={"outputs_hidden": false} X = np.random.rand(50) Y = X + np.random.normal(0, .2, 50) plt.scatter(X,Y) plt.xlabel('X Value') plt.ylabel('Y Value') print('Correlation: ' + str(np.corrcoef(X, Y)[0, 1])) # - # Finally, let's see what an inverse relationship looks like. # + jupyter={"outputs_hidden": false} X = np.random.rand(50) Y = -X + np.random.normal(0, .1, 50) plt.scatter(X,Y) plt.xlabel('X Value') plt.ylabel('Y Value') print('Correlation: ' + str(np.corrcoef(X, Y)[0, 1])) # - # We see a little bit of rounding error, but they are clearly the same value. # ## How is this useful in finance? # # ### Determining related assets # # Once we've established that two series are probably related, we can use that in an effort to predict future values of the series. For example, let's look at the price of Apple and Microsoft. 
# + jupyter={"outputs_hidden": false} # Pull the pricing data for our two stocks from quantrocket.master import get_securities from quantrocket import get_prices securities = get_securities(symbols=['AAPL', 'MSFT'], vendors='usstock') start = '2013-01-01' end = '2015-01-01' closes = get_prices('usstock-free-1min', data_frequency='daily', sids=securities.index.tolist(), fields='Close', start_date=start, end_date=end).loc['Close'] sids_to_symbols = securities.Symbol.to_dict() closes = closes.rename(columns=sids_to_symbols) plt.scatter(closes['MSFT'], closes['AAPL']) plt.xlabel('MSFT') plt.ylabel('AAPL') plt.title('Stock prices from ' + start + ' to ' + end) print("Correlation coefficient:", np.corrcoef(closes['MSFT'], closes['AAPL'])[0,1]) # - # ### Constructing a portfolio of uncorrelated assets # # Another reason that correlation is useful in finance is that uncorrelated assets produce the best portfolios. The intuition for this is that if the assets are uncorrelated, a drawdown in one will not correspond with a drawdown in another. This leads to a very stable return stream when many uncorrelated assets are combined. # # Limitations # # ## Significance # # It's hard to rigorously determine whether or not a correlation is significant, especially when, as here, the variables are not normally distributed. Their correlation coefficient is close to 1, so it's pretty safe to say that the two stock prices are correlated over the time period we use, but is this indicative of future correlation? # # One fundamental problem is that it is easy to datamine correlations by picking the right time period. To avoid this, one should compute the correlation of two quantities over many historical time periods and examine the distibution of the correlation coefficient. # # As an example, remember that the correlation of AAPL and MSFT from 2013-01-01 to 2015-01-01 was 0.92. Let's take the rolling 60 day correlation between the two to see how that varies. 
# + jupyter={"outputs_hidden": false} rolling_correlation = closes['MSFT'].rolling(60).corr(closes['AAPL']) ax = rolling_correlation.plot() ax.set_xlabel('Day') ax.set_ylabel('60-day Rolling Correlation') # - # ## Non-Linear Relationships # # The correlation coefficient can be useful for examining the strength of the relationship between two variables. However, it's important to remember that two variables may be associated in different, predictable ways which this analysis would not pick up. For instance, one variable might precisely follow the behavior of a second, but with a delay. There are techniques for dealing with this lagged correlation. Alternatively, a variable may be related to the rate of change of another. Neither of these relationships are linear, but can be very useful if detected. # # Additionally, the correlation coefficient can be very sensitive to outliers. This means that including or excluding even a couple of data points can alter your result, and it is not always clear whether these points contain information or are simply noise. # # As an example, let's make the noise distribution poisson rather than normal and see what happens. # + jupyter={"outputs_hidden": false} X = np.random.rand(100) Y = X + np.random.poisson(size=100) plt.scatter(X, Y) np.corrcoef(X, Y)[0, 1] # - # In conclusion, correlation is a powerful technique, but as always in statistics, one should be careful not to interpret results where there are none. # --- # # **Next Lecture:** [Instability of Estimates](Lecture10-Instability-of-Estimates.ipynb) # # [Back to Introduction](Introduction.ipynb) # --- # # *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian") or QuantRocket LLC ("QuantRocket"). 
Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, neither Quantopian nor QuantRocket has taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information believed to be reliable at the time of publication. Neither Quantopian nor QuantRocket makes any guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
quant_finance_lectures/Lecture09-Linear-Correlation-Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Automated NVC Conversational Mediator # ## PART 2 - how well can we get it to work? # # **Purpose:** The purpose of this notebook is to explore how well we can get the automated NVC feedback machine to work by testing it's results on multiple sample inputs. # # **Instructions:** # Go through read and run all the cells (hit play or Shift+Enter) # # ๐Ÿ‘‰ Load Packages and NVC Data # These are needed for the program to run # + # Verify python and pip are running from your jupyter env (optional) # # !which python3 # # !which pip3 # + # Install packages # # !pip3 install nltk, pandas, matplotlib # # !python3 -m spacy download en # # !pip3 install sklearn # + # Import packages import nvc_toolkit as nvc import pandas as pd import numpy as np from sklearn.metrics import confusion_matrix # - # Load training data for later act_df = pd.read_csv("data/fb_sample.csv", dtype=str) act_df = nvc.clean_df(act_df) act_df.head(3) # # ๐Ÿ‘‰ NVC Feedback with multiple user input sentences # # Run `get_feedback('user_input')` on a variety of inputs and inspect the quality of the results to see if its something that could be useful in application. 
# # Create a dataframe and then populate it with results based on what the system gives as feedback for each user input # + # Create empty dataframe for system feedback col_list = ['user_input', 'observations', 'feelings', 'needs', 'needs_guesses', 'requests', 'fofeelings', 'thoughts', 'evaluations', 'absolutes'] pred_df = pd.DataFrame(act_df.loc[:,'user_input'], columns=col_list, dtype=str) pred_df = pred_df.replace(pd.NA, '', regex=True) print(f'pred_df shape: {pred_df.shape}') pred_df.head() # - # NOTE: Cell below may take a bit to run as it is calculating the feedback for each row of user input above # Get run the get feedback function from the system to populate the columns for r in pred_df.index: res = nvc.get_raw_feedback(pred_df.loc[r,'user_input']) pred_df.loc[r, col_list[1:]] = res.values print(f'pred_df shape: {pred_df.shape}') pred_df.head(10) # Then, we need to compare the training data and the system's output data. Let's do that with a confusion matrix! # + # TEST # cols = ['feelings', 'needs', 'absolutes'] # df = act_df.loc[:,cols] # cleaned_df = df.applymap(lambda x: ','.join(sorted(x.split(','))) if x!='' else x) # display(cleaned_df.T) # + # SINGULAR EXAMPLE OF WHAT WE ARE DOING ENMASSE BELOW y_act = act_df.loc[:,'feelings'].values y_pred = pred_df.loc[:,'feelings'].values summary = pd.DataFrame(data=zip(y_act, y_pred), columns=['y_act', 'y_pred']) display(summary) # - labels = list(set(np.append(y_act, y_pred))) cm = confusion_matrix(y_act, y_pred, labels=labels) cm_plot_fee = nvc.plot_cm(cm, labels, title='Confusion Matrix from FEELINGS', figsize=(12,8)) # + # Confusion Matrix - USING CATEGORIES - possibly more useful # TODO - FIXME # cm_plots = labels = cms = [] # row = 0 # cols = ['feelings', 'needs', 'absolutes'] # for i in range(len(cols)): # y_act = act_df.loc[:,cols[i]].values # y_pred = pred_df.loc[:,cols[i]].values # labels.append(list(set(np.append(y_act, y_pred)))) # cms.append(confusion_matrix(y_act, y_pred, labels=labels[-1])) # 
cm_plots.append(nvc.plot_cm(cms[-1], labels[-1], title=f'Confusion Matrix from {labels[-1]}')) # - # # Concluding Thoughts # + # TODO - think about adding sentiment analysis # from nltk.sentiment import SentimentAnalyzer # sentim_analyzer = SentimentAnalyzer() # - # # ๐Ÿ‘‰ Part 2: NVC Needs Guessing via Machine Learning - TODO # # Using machine learning, we can make needs guesses given an input statement? # # **Objective:** # **Examples** # # If the user says: "When you leave like that it makes me feel scared." Then the system makes needs guesses # * safety # * comfort # * etc # # If the user says: "I feel really annoyed when you talk to me like that." Then the system makes needs guesses # * patience # * etc # + # Load training data # Train ML model # Try it # - # # ๐Ÿ‘‰ Utilities for Testing ONE INPUT AT A TIME # Choose an example sentence then run the cells that follow sent = act_df.loc[0,'user_input'] sent kwp_df = nvc.load_dfs() parsed = nvc.parse_sent(sent) kwp_df.tail() parsed m1 = nvc.find_kwp_matches(parsed, kwp_df) m1 m2 = nvc.find_pos_matches(parsed) m2 tools_res = nvc.compare_tools(sent) tools_res pred_df = nvc.get_raw_feedback(sent) pred_df # Feedback for col in pred_df.columns: data = pred_df.loc[0, col] if data: data_list = data.split(',') for i in data_list: feedback = kwp_df.loc[kwp_df['kwp_lemma'] == i, 'feedback'].values print(f'feeback on {col}: {i} is {feedback}') # + print(f'user_input: {sent}') i_act = act_df.loc[act_df['user_input'] == sent, act_df.columns[1:]] i_pred = pd.DataFrame(pred_df.loc[0,:]).T print('actual_feedback') display(i_act) print('predicted_feedback') display(i_pred) # - # Now, we can create confusion matricies to see our results graphically # TODO - account for multiple values per column??? 
act_data = list(i_act.loc[0,:].values) print(f'act_data: {act_data}') pred_data = list(i_pred.loc[0,:].values) print(f'pred_data: {pred_data}') # Confusion Matrix - USING VALUES - agruably not useful (cm makes more sense for multiple datapoints like done above) labels = list(set(act_data + pred_data)) cm = confusion_matrix(act_data, pred_data, labels=labels) f = nvc.plot_cm(cm, labels, title='Confusion Matrix from VALUES')
Part_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import mrjob as mr # ### Homework 5 # # # Use the data/bible+shakes.nonpunc.txt file as the source of you analysis in this homework # # Homework 5.1 # A bigram is the combination of words. Find the 10 most common bigrams from the text. Order counts in the bigram combination for example "in the" is not the same bigram as "the in" # # # + # %%writefile code\Bigram.py from mrjob.job import MRJob from mrjob.step import MRStep import re WORD_RE = re.compile(r"[\w']+") class Bigram(MRJob): def steps(self): return [ MRStep(mapper=self.mapper, combiner = self.combiner, reducer=self.reducer), MRStep(reducer=self.reducer_top) ] def mapper(self, _, line): # mapper function takes line as input and and other input as null value preword="" for word in WORD_RE.findall(line): if preword !="" and word !="": yield (preword.lower(),word.lower()), 1 preword=word def combiner(self, bigram, counts): # Each time calls the combiner by giving 2 words as bigram and value 1 to the counts yield bigram, sum(counts) def reducer(self, bigram,count): # Each time it have 2 words as bigram and produce none value and combination of bigram and count yield None,(bigram, sum(count)) def reducer_top(self, _ , bigram_count): # It produces the top 10 highest frequency bigrams for i in sorted(bigram_count, key=lambda x:x[1], reverse=True)[:10]: yield i if __name__ == '__main__': Bigram.run() # - # %%capture Bigram # It captures the output from the reducer_top and store in the Bigram # %run code/Bigram.py data/bible+shakes.nopunc.txt with open('data/MRBigramFreqCount.txt', 'w') as f: f.write(Bigram.stdout) Bigram_data = pd.read_csv("data/MRBigramFreqCount.txt", sep="\t", 
header=None) #It reads the data in the text file from above step Bigram_data # # Homework 5.2 # Now do the same analysis but make the word order not count "in the" == "the in". Find the 10 most common ordered bigrams from the alice text. # # + # %%writefile code\Bigram_second.py from mrjob.job import MRJob from mrjob.step import MRStep import re WORD_RE = re.compile(r"[\w']+") class Bigram_second(MRJob): def steps(self): return [ MRStep(mapper=self.mapper, combiner = self.combiner, reducer=self.reducer), MRStep(reducer=self.reducer_top) ] def mapper(self, _, line): # mapper function takes line as input and and other input as null value preword="" for word in WORD_RE.findall(line): if preword !="" and word !="": value = (preword.lower(), word.lower()) yield value, 1 preword=word def combiner(self, bigram, counts): # Each time it have 2 words as bigram and produce none value and combination of bigram and count yield sorted(bigram), sum(counts) #sorted the biagram def reducer(self, bigram,count): yield None,(bigram, sum(count)) def reducer_top(self, _ , bigram_count): # It produces the top 10 highest frequency bigrams for i in sorted(bigram_count, key=lambda x:x[1], reverse=True)[:10]: yield i if __name__ == '__main__': Bigram_second.run() # - # %run code/Bigram_second.py data/bible+shakes.nopunc.txt # # Homework 5.3 # A trigram are three word combintation. Find the 10 most common ordered trigrams from the alice text. Make it so that the order of the words do not count in the trigram combination for example "in the air" is the same trigram as "the air in" or "air in the"... 
# # + # %%writefile code\Trigram.py from mrjob.job import MRJob from mrjob.step import MRStep import re WORD_RE = re.compile(r"[\w']+") class Trigram(MRJob): def steps(self): return [ MRStep(mapper=self.mapper, combiner = self.combiner, reducer=self.reducer), MRStep(reducer=self.reducer_top) ] def mapper(self, _, line): # mapper function takes line as input and and other input as null value words = WORD_RE.findall(line) for i, word in enumerate(words): if i < len(words) - 2: trigram = [words[i].lower(), words[i + 1].lower(), words[i + 2].lower()] yield trigram, 1 def combiner(self, trigram, counts): # Each time it have 2 words as tirgram and produce none value and combination of trigram and count yield trigram, sum(counts) def reducer(self, trigram,count): yield None,(trigram, sum(count)) def reducer_top(self, _ , trigram_count): # It produces the top 10 highest frequency trigrams for i in sorted(trigram_count, key=lambda x:x[1], reverse=True)[:10]: yield i if __name__ == '__main__': Trigram.run() # - # %%capture Trigram # It captures the output from the reducer_top and store in the Trigram # %run code/Trigram.py data/bible+shakes.nopunc.txt with open('data/MRTrigramFreqCount.txt', 'w') as f: f.write(Trigram.stdout) Trigram_data = pd.read_csv("data/MRTrigramFreqCount.txt", sep="\t", header=None) #It reads the data in the text file from above step Trigram_data # # Homework 5.4 # Create graphs to explain the relationship of the frequency of monograms ( words ) to bigrams and trigam frequencies # Creating Mongram which has frequency for each word # + # %%writefile code\Monogram.py from mrjob.job import MRJob from mrjob.step import MRStep import re WORD_RE = re.compile(r"[\w']+") class Monogram(MRJob): def steps(self): return [ MRStep(mapper=self.mapper, combiner = self.combiner, reducer=self.reducer), MRStep(reducer=self.reducer_top) ] def mapper(self, _, line): for word in WORD_RE.findall(line): yield word, 1 def combiner(self, monogram, counts): yield monogram, 
sum(counts) def reducer(self, monogram,count): yield None,(monogram, sum(count)) def reducer_top(self, _ , monogram_count): for i in sorted(monogram_count, key=lambda x:x[1], reverse=True)[:10]: yield i if __name__ == '__main__': Monogram.run() # - # %%capture Monogram # %run code/Monogram.py data/bible+shakes.nopunc.txt with open('data/MRMonogramFreqCount.txt', 'w') as f: f.write(Monogram.stdout) Monogram_data = pd.read_csv("data/MRMonogramFreqCount.txt", sep="\t", header=None) #It reads the data in the text file from above step Monogram_data # Plotting graph for Monogram_data , Bigram_data and Trigram_data which are captured during above questions import matplotlib.pyplot as plt plot1 = Monogram_data.plot(x=0, y=1, kind="bar", figsize=(14, 7), fontsize=13,color="green") plot1.set_title('Monogram Frequencies', fontsize=22) plot1.set_ylabel("Frequency", fontsize=14) plot1.set_xlabel("Words", fontsize=14) plt.show() plot2 = Bigram_data.plot(x=0, y=1, kind="bar", figsize=(15, 7), fontsize=14,color="red") plot2.set_title('Bigram Frequencies', fontsize=22) plot2.set_ylabel("Frequency", fontsize=14) plot2.set_xlabel("Words", fontsize=14) plt.show() plot3 = Trigram_data.plot(x=0, y=1, kind="bar", figsize=(15, 7), fontsize=14,color="blue") plot3.set_title('Trigram Frequencies', fontsize=22) plot3.set_ylabel("Frequency", fontsize=14) plot3.set_xlabel("Words", fontsize=14) plt.show() # ### Analyze the following Sherlock Holmes book from Project Gutenberg text versions of : # The Adventures of Sherlock Holmes- http://www.gutenberg.org/ebooks/1661.txt.utf-8 # # A Study in Scarlet - http://www.gutenberg.org/files/244/244-0.txt # # The Hound of the Baskervilles - http://www.gutenberg.org/files/2852/2852-0.txt # # The Return of Sherlock Holmes - http://www.gutenberg.org/files/108/108-0.txt # # The Sign of the Four - http://www.gutenberg.org/ebooks/2097.txt.utf-8 # # ### Display the scores for the top 20 highest frequencty terms and the relationship to the books # # # + import 
urllib url1 = 'http://www.gutenberg.org/ebooks/1661.txt.utf-8' url2 = 'http://www.gutenberg.org/files/244/244-0.txt' url3 = 'http://www.gutenberg.org/files/2852/2852-0.txt' url4 = 'http://www.gutenberg.org/files/108/108-0.txt' url5 = 'http://www.gutenberg.org/ebooks/2097.txt.utf-8' #writing all the data from url to txt file and saving in the data location matter1 = urllib.urlopen(url1).read() f1 = open('data/The_Adventures_of_Sherlock_Holmes.txt', 'w') f1.write(matter1) f1.close() matter2 = urllib.urlopen(url2).read() f2 = open('data/A_Study_in_Scarlet.txt', 'w') f2.write(matter2) f2.close() matter3 = urllib.urlopen(url3).read() f3 = open('data/The_Hound_of_the_Baskervilles.txt', 'w') f3.write(matter3) f3.close() matter4 = urllib.urlopen(url4).read() f4 = open('data/The_Return_of_Sherlock_Holmes.txt', 'w') f4.write(matter4) f4.close() matter5 = urllib.urlopen(url1).read() f5 = open('data/The_Sign_of_the_Four.txt', 'w') f5.write(matter5) f5.close() # - # %%capture HighestFreqency #Applying Monogram code to all text files # %run code/Monogram.py data/The_Adventures_of_Sherlock_Holmes.txt # %run code/Monogram.py data/A_Study_in_Scarlet.txt # %run code/Monogram.py data/The_Hound_of_the_Baskervilles.txt # %run code/Monogram.py data/The_Return_of_Sherlock_Holmes.txt # %run code/Monogram.py data/The_Sign_of_the_Four.txt with open('Combined_Freq.txt', 'w') as f: ## open the text file f.write(HighestFreqency.stdout) File = pd.read_csv("Combined_Freq.txt", sep="\t", header=None) sort_val = File.sort_values(1, ascending=False) ##sort the values from highest to lowest print "The Adventures of Sherlock Holmes" Combined_data = sort_val[[0,1]].head(20)##display the top 20 words and relation between the four texts Combined_data
Homeworks/Homework5/Rongali_Homework5_MapReduce_V2-2018.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Test svm classifier # # This notebook test a classifier that operates in two layers: # - First we use a SVM classifier to label utterances with high degree of certainty. # - Afterwards we use heuristics to complete the labeling # + import os import sys import pandas as pd import numpy as np import random import pickle import matplotlib.pyplot as plt root_path = os.path.dirname(os.path.abspath(os.getcwd())) sys.path.append(root_path) from sklearn.svm import SVC from sklearn.svm import LinearSVC from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from src import phase_classification as pc data_path = os.path.join(root_path,'data') tables_path = os.path.join(data_path,'tables') results_path = os.path.join(root_path,'results') output_path =os.path.join(results_path,'tables') # - import importlib importlib.reload(pc) WITH_STEMMING = True #REMOVE_STOPWORDS = True SEED = 10 NUM_TOPICS = 60 random.seed(SEED) test_i = '[test1]' file_name = test_i+'IBL_topic_distribution_by_utterance_before_after_{}_{}.xlsx'.format(WITH_STEMMING,NUM_TOPICS) df_data = pd.read_excel(os.path.join(tables_path,'test',file_name)) with open(os.path.join(data_path,'random_training.pickle'),'rb') as f: proportions = pickle.load(f) step_1 = pickle.load(f) step_2 = pickle.load(f) dummy_phase = np.argmax(proportions)+1 the_keys = list(set(df_data['phase'])) total_samples = 0 class_samples = {} for key in the_keys: n = list(df_data.phase.values).count(key) #print("key {}, total {}".format(key,n)) total_samples += n class_samples[key] = n print(total_samples) for key in the_keys: print("key {}, samples: {}, prop: {}".format(key,class_samples[key],round(class_samples[key]*1.0/total_samples,2))) filter_rows = list(range(0,180))+[187,188] 
# NOTE(review): this chunk continues a notebook whose setup (proportions, pc,
# step_2, filter_rows, df_data, WITH_STEMMING, NUM_TOPICS, tables_path) is
# defined above this excerpt -- those names are assumed to be in scope.
filter_labels = [60]  # appears unused below -- TODO confirm before deleting
# Indices of the phase proportions sorted ascending; argsort_prop[-1] is the
# most frequent phase (0-based, hence the +1 when emitted as a label).
argsort_prop = np.argsort(proportions)
all_set = pc.split_df_test(df_data)
X_all_1,y_all_1 = pc.get_data_from_dict(all_set,filter_rows)
print(len(y_all_1))
# Positional baseline predictor: the first `step` fraction of utterances is
# labelled phase 1, everything after that gets the majority phase.
step = step_2
pred_1 = []
for j in range(len(y_all_1)):
    step_j = j*1.0/len(y_all_1)
    if step_j > step:
        pred_1.append(argsort_prop[-1]+1)
    else:
        pred_1.append(1)
output_first_layer_1 = pred_1

labels = ["Phase {}".format(i) for i in range(1,6)]
df = pd.DataFrame(confusion_matrix(y_all_1, output_first_layer_1),columns=["Predicted {}".format(i) for i in labels])
df.index = labels
print(classification_report(y_all_1, output_first_layer_1))
df

# Accuracy = sum of the confusion-matrix diagonal over the sample count.
print("Accuracy {0:.3f}".format(np.sum(confusion_matrix(y_all_1, output_first_layer_1).diagonal())/len(y_all_1)))

# RMSE between one-hot encodings of truth and prediction (the /2 accounts for
# two distinct one-hot vectors differing in exactly two positions).
bs = [pc.unit_vector(x) for x in y_all_1]
y_pred = [pc.unit_vector(x) for x in output_first_layer_1]
np.sqrt(np.sum([np.square(y_pred[i]-bs[i]) for i in range(len(y_all_1))])/(len(y_all_1)*2))

# ### Test 2

# Load the second test workbook and report per-phase sample proportions.
test_i = '[test2]'
file_name = test_i+'IBL_topic_distribution_by_utterance_before_after_{}_{}.xlsx'.format(WITH_STEMMING,NUM_TOPICS)
df_data = pd.read_excel(os.path.join(tables_path,'test','before_after',file_name))
the_keys = list(set(df_data['phase']))
total_samples = 0
class_samples = {}
for key in the_keys:
    n = list(df_data.phase.values).count(key)
    #print("key {}, total {}".format(key,n))
    total_samples += n
    class_samples[key] = n
print(total_samples)
for key in the_keys:
    print("key {}, samples: {}, prop: {}".format(key,class_samples[key],round(class_samples[key]*1.0/total_samples,2)))

all_set = pc.split_df_test(df_data)
X_all_2,y_all_2 = pc.get_data_from_dict(all_set,filter_rows)
print(len(y_all_2))

# Same positional baseline as Test 1 (reuses argsort_prop from above).
step = step_2
pred_2 = []
for j in range(len(y_all_2)):
    step_j = j*1.0/len(y_all_2)
    if step_j > step:
        pred_2.append(argsort_prop[-1]+1)
    else:
        pred_2.append(1)
output_first_layer_2 = pred_2

y_all_2

labels = ["Phase {}".format(i) for i in range(1,6)]
df = pd.DataFrame(confusion_matrix(y_all_2, output_first_layer_2),columns=["Predicted {}".format(i) for i in labels])
df.index = labels
print(classification_report(y_all_2, output_first_layer_2))
df

print("Accuracy {0:.3f}".format(np.sum(confusion_matrix(y_all_2, output_first_layer_2).diagonal())/len(y_all_2)))

bs = [pc.unit_vector(x) for x in y_all_2]
y_pred = [pc.unit_vector(x) for x in output_first_layer_2]
np.sqrt(np.sum([np.square(y_pred[i]-bs[i]) for i in range(len(y_all_2))])/(len(y_all_2)*2))

output_first_layer_1

# Combined evaluation over both test sets.
y_all = y_all_1+y_all_2
pred = output_first_layer_1 + output_first_layer_2
df = pd.DataFrame(confusion_matrix(y_all, pred),columns=["Predicted {}".format(i) for i in labels])
df.index = labels
print(classification_report(y_all, pred))
df

print("Accuracy {0:.3f}".format(np.sum(confusion_matrix(y_all, pred).diagonal())/len(y_all)))

bs = [pc.unit_vector(x) for x in y_all]
y_pred = [pc.unit_vector(x) for x in pred]
np.sqrt(np.sum([np.square(y_pred[i]-bs[i]) for i in range(len(y_all))])/(len(y_all)*2))

# NOTE(review): 72 and 98 look like hand-copied diagonal counts from the two
# per-test confusion matrices above -- recompute instead of hard-coding if
# this cell is ever reused with different data.
(72+98)/len(y_all)
notebooks/3-- Check random optimal between phases 1 and 5 .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import math

import numpy as np
import pandas as pd

# Load the residential load profiles: one 'Time' column plus one column per
# household.
df = pd.read_csv('../data/Residential-Profiles.csv')
# FIX: .astype('datetime64') (unit-less dtype) is rejected by pandas >= 2.0;
# pd.to_datetime is the portable conversion.
df['Time'] = pd.to_datetime(df['Time'])
df.plot(x='Time', y='Household 1')

# FIX: `houses` was used below but never defined (NameError).  Every non-Time
# column is a household series -- TODO confirm this matches the 200-household
# set assumed by the normalisation divisor further down.
houses = [col for col in df.columns if col != 'Time']

epsilons = [0.1, 0.25, 0.5, 0.75, 1]
trials = [1, 2, 3, 4, 5]

print("********************************")
print("Grid Search on progress")
print("5 trials with epsilon values [0.1, 0.25, 0.5, 0.75, 1]")
print("Take average of 5 iterations")
print("********************************")
print("Printing relative errors...")  # FIX: typo "Prining"

# Grid search: for each privacy budget epsilon, run 5 independent trials and
# report the average relative error introduced by the gamma noise.
for epsilon in epsilons:
    for trial in trials:
        PEAK_VALUE = 8000  # cap on the reported (noisy) reading
        delta = 10e-3      # NOTE(review): unused in the visible code -- confirm intent
        EX = 0             # excess clipped at the peak, carried into later slots
        total_relative_error = 0
        for house in houses:
            for timestamps in range(df.shape[0]):
                energy = df.at[timestamps, house]
                # Calibrate the noise so that ~99% (z = 2.33) of draws stay
                # within 10% of the true reading.
                maxAllowedError = energy * 10 / 100
                sgd = maxAllowedError / 2.33
                sensitivity = math.sqrt((sgd * sgd) / 2)
                # Difference of two i.i.d. gamma draws gives symmetric,
                # zero-mean noise scaled by sensitivity/epsilon.
                gamma1 = np.random.gamma(shape=(1 / 200), scale=sensitivity / epsilon)
                gamma2 = np.random.gamma(shape=(1 / 200), scale=sensitivity / epsilon)
                noisy_energy = energy + (gamma1 - gamma2) + EX
                if noisy_energy > PEAK_VALUE:
                    # Clip at the peak and carry the excess forward.
                    # NOTE(review): EX is never reset after being applied, so
                    # the same excess is re-added every following slot until
                    # the next clip -- confirm this accumulation is intended.
                    EX = noisy_energy - PEAK_VALUE
                    noisy_energy = PEAK_VALUE
                # NOTE(review): divides by the true reading -- assumes energy
                # is never zero; add a guard if zero readings can occur.
                relative_error = abs(energy - noisy_energy) / energy
                total_relative_error += relative_error
        # Average over all timestamps and the assumed 200 households, as %.
        avg_relative_error = total_relative_error * 100 / (df.shape[0] * 200)
        print("epsilon ", epsilon, "iteration", trial, "error", avg_relative_error)
random_distribution_based/Gamma.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import numpy as np  # needed for the argmax-based class prediction below

# +
# Load MNIST: 60000 training and 10000 test grayscale 28x28 digit images.
(x_treino, y_treino), (x_teste, y_teste) = mnist.load_data()

# How many images in the training set?
print(len(x_treino))

# How many images in the test set?
print(len(x_teste))

# What is the shape of one image?
print(x_treino[0].shape)

# What do one image's raw pixel values look like?
print(x_treino[0])

# +
import matplotlib.pyplot as plt
# %matplotlib inline

# Show one training sample together with its label.
indice = 10000
print('label', y_treino[indice])
plt.imshow(x_treino[indice], cmap=plt.cm.binary)

# +
# Data preparation: flatten each 28x28 image into a 784-long vector so it can
# feed a Dense input layer.
qtde_elementos_treino = len(x_treino)  # 60000
qtde_elementos_teste = len(x_teste)    # 10000

shape = x_treino[0].shape              # (28, 28)
tamanho_total = shape[0] * shape[1]    # 28 * 28 = 784

x_treino = x_treino.reshape(qtde_elementos_treino, tamanho_total)
x_teste = x_teste.reshape(qtde_elementos_teste, tamanho_total)

# How many items are in x_treino[0] now?
print(len(x_treino[0]))
# What does x_treino[0] contain?
print(x_treino[0])

# +
# Normalisation: map pixel values from [0, 255] to [0, 1].
# Cast to float32 first so the division happens in floating point
# (e.g. 255 -> 1.0, 127 -> 0.498).
x_treino = x_treino.astype('float32')
x_teste = x_teste.astype('float32')

x_treino /= 255
x_teste /= 255

print(len(x_treino[0]))
print(x_treino[0])

# +
# Sanity check: still 60000/10000 samples, each with 784 features.
print('Treino:', x_treino.shape)
print('Teste:', x_teste.shape)

# +
# One-hot encode the labels, e.g. 5 -> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0].
print(y_treino[0])
print(y_treino)
print(len(y_treino))
print(set(y_treino))

qtde_itens_unicos = len(set(y_treino))  # 10 digit classes
print(qtde_itens_unicos)

y_treino = keras.utils.to_categorical(y_treino, qtde_itens_unicos)
y_teste = keras.utils.to_categorical(y_teste, qtde_itens_unicos)

# What does y_treino[0] look like after encoding?
print(y_treino[0])

# +
# Build the deep neural network: two hidden ReLU layers with dropout,
# softmax output over the 10 digit classes.
model = Sequential()
model.add(Dense(30, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(20, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(qtde_itens_unicos, activation='softmax'))
model.summary()

# +
# Compile the model: categorical cross-entropy matches the one-hot targets.
model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])

# +
# Train the model, validating against the test split each epoch.
batch_size = 128
epochs = 10
history = model.fit(x_treino, y_treino,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_teste, y_teste))

# +
# Try an arbitrary test sample.
indice = 998
print(y_teste[indice])

imagem = x_teste[indice].reshape((1,784))

prediction = model.predict(imagem)
print(prediction)

# FIX: Sequential.predict_classes() was removed in TensorFlow/Keras 2.6;
# taking the argmax of the softmax probabilities is the documented
# replacement and yields the same class index.
prediction_class = np.argmax(model.predict(imagem), axis=-1)
print(prediction_class)

# Reload the un-flattened images just to display the sample we predicted.
(x_treino_img, y_treino_img), (x_teste_img, y_teste_img) = mnist.load_data()
plt.imshow(x_teste_img[indice], cmap=plt.cm.binary)
IA_Deep_MNIST_06_06_19.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
from PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets
from lxml import html as htmlRenderer
import requests
import json
from datetime import date

def render(source_url):
    """Fully render HTML, JavaScript and all.

    Spins up a throwaway Qt application with a QWebEngineView, loads
    `source_url`, blocks on the Qt event loop until the page (including its
    JavaScript) finishes loading, and returns the rendered HTML as a string.

    NOTE(review): creating one QApplication per call works for a one-shot
    script but will misbehave if called repeatedly in the same process --
    Qt allows only one QApplication instance per process.
    """
    import sys
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtCore import QUrl
    from PyQt5.QtWebEngineWidgets import QWebEngineView

    class Render(QWebEngineView):
        def __init__(self, url):
            self.html = None  # filled in by _callable once rendering is done
            self.app = QApplication(sys.argv)
            QWebEngineView.__init__(self)
            self.loadFinished.connect(self._loadFinished)
            #self.setHtml(html)
            self.load(QUrl(url))
            self.app.exec_()  # blocks here until _callable quits the app

        def _loadFinished(self, result):
            # This is an async call, you need to wait for this
            # to be called before closing the app
            self.page().toHtml(self._callable)

        def _callable(self, data):
            self.html = data
            # Data has been stored, it's safe to quit the app
            self.app.quit()

    return Render(source_url).html

url="https://www.abc.es/"
renderUrl = render(url)
renderedPage = htmlRenderer.fromstring(renderUrl)
# Collect every anchor href; careful -- some already start with http...
auxLinks = renderedPage.xpath("//a/@href")
# De-duplicate while preserving order (dict.fromkeys keeps first occurrence).
auxFinalLinks = list(dict.fromkeys([ link for link in auxLinks ]))
a = auxFinalLinks[0]
# NOTE(review): bare attribute access below is a no-op (the call is missing);
# kept as-is since this is an exploratory cell inspecting the method object.
a.startswith

# +
# Drop trailing-slash links and in-page fragment links (#... and /#...).
auxFinalLinks2 = [ link for link in auxFinalLinks if not link.endswith("/") and not link.startswith("#") and not link.startswith("/#") ]
# -

# Absolutise every remaining link: keep http(s) as-is, prefix
# protocol-relative (//) links with https:, and resolve site-relative
# paths against https://www.abc.es.
linksFinals = []
for l in auxFinalLinks2:
    if l.startswith("http"):
        linksFinals.append(l)
    elif l.startswith("//"):
        linksFinals.append("https:{}".format(l))
    else:
        linksFinals.append("https://www.abc.es{}".format(l))

# NOTE(review): finalLinks ignores the absolutised linksFinals built above --
# presumably a leftover from an earlier iteration; confirm which list the
# downstream scraper should actually consume.
finalLinks = auxFinalLinks# [ link.replace("https://www.abc.eshttps://www.abc.es", "https://www.abc.es") for link in auxFinalLinks]
len(finalLinks)
auxFinalLinks2
auxFinalLinks
len(linksFinals)
linksFinals

# +
# div_id = comments-container
resources/notebooks/OldScrapper/getUrlsFromHomePage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Network metrics and analysis

import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

# ## Centrality metrics
#
# The following metrics are available: Degree, Closeness, Betweenness, Eigenvector

# let's start with a random graph (200 nodes, 2000 edges):
G=nx.gnm_random_graph(200,2000)
GD=nx.gnm_random_graph(200,2000,directed=True)

nx.draw(GD)

# ## Degree centrality

# it works for directed and undirected graphs the same way, returning
# a dictionary whose elements are the nodes and their centralities
G_deg_cent=nx.degree_centrality(G)

# For directed graphs we also get in_degree and out_degree,
# producing as output the same dictionary
GD_in_deg=nx.in_degree_centrality(GD)
GD_out_deg=nx.out_degree_centrality(GD)

# ## Betweenness centrality
#
# betweenness_centrality(G, k=None, normalized=True, weight=None, endpoints=False, seed=None)
#
# Betweenness centrality of a node v is the sum of the fraction of all-pairs shortest paths that pass through v:
#
# $c_B(v) = \sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}$
#
# where $V$ is the set of nodes, $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of those paths passing through some node $v$ other than $s, t$. If $s = t$, $\sigma(s, t) = 1$, and if $v \in \{s, t\}$, $\sigma(s, t|v) = 0$
#
# k, integer: if given use k node samples to estimate the betweenness, the higher, the better (and slower :) )
# normalized: if True normalize values
# weight: use for weighted graphs
# endpoints: include or not endpoints in the shortest path computation

# +
G_bet=nx.betweenness_centrality(G)
# this holds for nodes, you can also compute betweenness for edges with:
G_e_bet=nx.edge_betweenness_centrality(G)
# Both algorithms return a dictionary of nodes and edges
# -

# ## Eigenvector centrality
#
# Eigenvector centrality computes the centrality for a node based on the centrality of its neighbors. The eigenvector centrality for node i is
#
# $\mathbf{Ax} = \lambda \mathbf{x}$
#
# where $A$ is the adjacency matrix of the graph $G$ with eigenvalue $\lambda$. By virtue of the Perron–Frobenius theorem, there is a unique and positive solution if $\lambda$ is the largest eigenvalue associated with the eigenvector of the adjacency matrix $A$.
#
# Parameters:
#
# **G** (graph) – A networkx graph
# **max_iter** (integer, optional) – Maximum number of iterations in power method.
# **tol** (float, optional) – Error tolerance used to check convergence in power method iteration.
# **nstart** (dictionary, optional) – Starting value of eigenvector iteration for each node.
# **weight** (None or string, optional) – If None, all edge weights are considered equal. Otherwise holds the name of the edge attribute used as weight.

G_eig=nx.eigenvector_centrality(G)

GD_eig=nx.eigenvector_centrality(GD)

# ## Katz centrality (1953...yes, we go vintage!)
#
# katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1e-06, nstart=None, normalized=True, weight='weight')
#
# Katz centrality computes the centrality for a node based on the centrality of its neighbors. It is a generalization of the eigenvector centrality. The Katz centrality for node i is
#
# $x_i = \alpha \sum_{j} A_{ij} x_j + \beta$,
#
# where $A$ is the adjacency matrix of the graph G with eigenvalues $\lambda$.
#
# The parameter $\beta$ controls the initial centrality and
#
# $\alpha < \frac{1}{\lambda_{max}}$.
#
# Katz centrality computes the relative influence of a node within a network by measuring the number of the immediate neighbors (first degree nodes) and also all other nodes in the network that connect to the node under consideration through these immediate neighbors.
#
# When $\alpha = 1/\lambda_{max}$ and $\beta=0$, Katz centrality is the same as eigenvector centrality.
#
# Leo Katz: A New Status Index Derived from Sociometric Index. Psychometrika 18(1):39–43, 1953 http://phya.snu.ac.kr/~dkim/PRL87278701.pdf

G_kc=nx.katz_centrality(G)

# ## Exercise
#
# Generate a Barabasi-Albert Graph with 200 nodes and compare the centrality metrics, plotting the node's metrics distributions.
#
# Remark the differences (if any) among the random graph $G$ generated in this lecture.

# ## Link Analysis of Directed networks
#
# NetworkX also contains specific algorithms for ranking nodes in directed networks, we focus on PageRank and Hits.
#
# ### PageRank
#
# pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-06, nstart=None, weight='weight', dangling=None)
#
# PageRank computes a ranking of the nodes in the graph G based on the structure of the incoming links. It was originally designed as an algorithm to rank web pages (i.e. you are famous because other think you are).
#
# The eigenvector calculation is done by the power iteration method and has no guarantee of convergence. The iteration will stop after *max_iter* iterations or an error tolerance of *number_of_nodes(G) x tol* has been reached.
#
# The PageRank algorithm was designed for directed graphs but this algorithm does not check if the input graph is directed and will execute on undirected graphs by converting each edge in the directed graph to two edges.
#
# **G** (graph) – A NetworkX graph. Undirected graphs will be converted to a directed graph with two directed edges for each undirected edge.
# **alpha** (float, optional) – Damping parameter for PageRank, default=0.85.
# **personalization** (dict, optional) – The “personalization vector” consisting of a dictionary with a key for every graph node and nonzero personalization value for each node. By default, a uniform distribution is used.
# **max_iter** (integer, optional) – Maximum number of iterations in power method eigenvalue solver.
# **tol** (float, optional) – Error tolerance used to check convergence in power method solver.
# **nstart** (dictionary, optional) – Starting value of PageRank iteration for each node.
# **weight** (key, optional) – Edge data key to use as weight. If None weights are set to 1.
# **dangling** (dict, optional) – The outedges to be assigned to any “dangling” nodes, i.e., nodes without any outedges.
#
#
# The dict key is the node the outedge points to and the dict value is the weight of that outedge. By default, dangling nodes are given outedges according to the personalization vector (uniform if not specified). This must be selected to result in an irreducible transition matrix (see notes under google_matrix). It may be common to have the dangling dict to be the same as the personalization dict.

# +
GD_pr=nx.pagerank(GD)
# As before it returns a dictionary:
GD_pr
# -

# ## Hits - finding Hubs and Authorities
#
# hits(G, max_iter=100, tol=1e-08, nstart=None, normalized=True)
#
# The HITS algorithm computes two numbers for a node. Authorities estimates the node value based on the incoming links. Hubs estimates the node value based on outgoing links.
#
# **G** (graph) – A NetworkX graph
# **max_iter** (integer, optional) – Maximum number of iterations in power method
# **tol** (float, optional) – Error tolerance used to check convergence in power method iteration
# **nstart** (dictionary, optional) – Starting value of each node for power method iteration
# **normalized** (bool (default=True)) – Normalize results by the sum of all of the values
#
# Returns:
# **(hubs,authorities)** – Two dictionaries keyed by node containing the hub and authority values.

GD_ha=nx.hits(GD)

#
Lecture_5_2_Networkx_Graph_Analysis_and_metrics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Unity3D Game with Amazon SageMaker RL # # --- # ## Introduction # # [Unity](https://unity.com/) is currently the most popular gaming engine used by game developers around the world. Unity engine can be used to create 3D, 2D, virtual reality, and augmented reality games, as well as simulations and other experiences. [ML-Agents](https://github.com/Unity-Technologies/ml-agents) is an open-sourced toolkit developed by Unity to enable games and simulations to serve as environments for training intelligent agents. It provides capabilities on how to interact with Unity executables as well as how to train a RL agent. For how to use ML-Agents to train a Unity game agent on SageMaker, please refer to this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/unity_ml_agents/unity_mlagents_learn.ipynb). # # In this notebook, we are using capabilities from ML-Agent to interact with Unity executables, but leveraging Amazon SageMaker RL to train the game agent. We will show how you can apply reinforcement learning to train an agent to play against a Unity game with Amazon SageMaker RL. # ## Pre-requisites # # ### Imports # # To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations. 
# + import sagemaker import boto3 import sys import os import glob import re import subprocess import numpy as np from IPython.display import HTML import time from time import gmtime, strftime sys.path.append("common") from misc import get_execution_role, wait_for_s3_object from docker_utils import build_and_push_docker_image from sagemaker.rl import RLEstimator, RLToolkit, RLFramework # - # ### Setup S3 bucket # # Set up the linkage and authentication to the S3 bucket that you want to use for checkpoint and the metadata. sage_session = sagemaker.session.Session() s3_bucket = sage_session.default_bucket() s3_output_path = "s3://{}/".format(s3_bucket) print("S3 bucket path: {}".format(s3_output_path)) # ### Define Variables # # We define variables such as the job prefix for the training jobs *and the image path for the container (only when this is BYOC).* # create a descriptive job name job_name_prefix = "rl-unity-ray" # ### Configure where training happens # # You can train your RL training jobs using the SageMaker notebook instance or local notebook instance. In both of these scenarios, you can run the following in either local or SageMaker modes. The local mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`. # + # run in local_mode on this machine, or as a SageMaker TrainingJob? local_mode = False if local_mode: instance_type = "local" else: # If on SageMaker, pick the instance type instance_type = "ml.c5.2xlarge" # - # ### Create an IAM role # # Either get the execution role when running from a SageMaker notebook instance `role = sagemaker.get_execution_role()` or, when running from local notebook instance, use utils method `role = get_execution_role()` to create an execution role. 
# + try: role = sagemaker.get_execution_role() except: role = get_execution_role() print("Using IAM role arn: {}".format(role)) # - # ### Install docker for `local` mode # # In order to work in `local` mode, you need to have docker installed. When running from you local machine, please make sure that you have docker and docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script to install dependenceis. # # Note, you can only run a single local notebook at one time. # only run from SageMaker notebook instance if local_mode: # !/bin/bash ./common/setup.sh # ## Build docker container # # We must build a custom docker container with Roboschool installed. This takes care of everything: # # 1. Fetching base container image # 2. Installing Roboschool and its dependencies # 3. Uploading the new container image to ECR # # This step can take a long time if you are running on a machine with a slow internet connection. If your notebook instance is in SageMaker or EC2 it should take 3-10 minutes depending on the instance type. # # + # %%time cpu_or_gpu = "gpu" if instance_type.startswith("ml.p") else "cpu" repository_short_name = "sagemaker-unity-ray-%s" % cpu_or_gpu docker_build_args = { "CPU_OR_GPU": cpu_or_gpu, "AWS_REGION": boto3.Session().region_name, } custom_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using ECR image %s" % custom_image_name) # - # ## Use Unity Example Environment # The Unity ML-Agents Toolkit provides an expanding set of [example environments](https://github.com/Unity-Technologies/ml-agents/blob/742c2fbf01188fbf27e82d5a7d9b5fd42f0de67a/docs/Learning-Environment-Examples.md). You can specify one of the environments name in the config and SageMaker RL will start to train a RL agent against that environment. 
# The training code loads an example environment (`Basic` by default) from the [default registry](https://github.com/Unity-Technologies/ml-agents/blob/742c2fbf01188fbf27e82d5a7d9b5fd42f0de67a/docs/Unity-Environment-Registry.md) and start the training. Currently SageMaker RL can only support example environment with a single agent. # ### Write the Training Code # # The training code is written in the file โ€œtrain-unity.pyโ€ which is uploaded in the /src directory. # First import the environment files and the preset files, and then define the main() function. # # !pygmentize src/train-unity.py # ### Train the RL model using the Python SDK Script mode # # If you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs. # # 1. Specify the source directory where the environment, presets and training code is uploaded. # 2. Specify the entry point as the training code # 3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container. # 4. Define the training parameters such as the instance count, job name, S3 path for output and job name. # 5. Specify the hyperparameters for the RL agent algorithm. # 6. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks. # + # %%time metric_definitions = RLEstimator.default_metric_definitions(RLToolkit.RAY) estimator = RLEstimator( entry_point="train-unity.py", source_dir="src", dependencies=["common/sagemaker_rl"], image_uri=custom_image_name, role=role, instance_type=instance_type, instance_count=1, output_path=s3_output_path, base_job_name=job_name_prefix, metric_definitions=metric_definitions, debugger_hook_config=False, hyperparameters={ # Attention scientists! 
You can override any Ray algorithm parameter here: # "rl.training.config.env_config.env_name": "Basic", # "rl.training.stop.timesteps_total": 10000, # "rl.training.config.num_sgd_iter": 10, }, ) estimator.fit(wait=local_mode) job_name = estimator.latest_training_job.job_name print("Training job: %s" % job_name) # - # ### Plot metrics for training job # We can see the reward metric of the training as it's running, using algorithm metrics that are recorded in CloudWatch metrics. We can plot this to see the performance of the model over time. # # `TrainingJobAnalytics` could not parse the CloudWatch log immediately after running the training procedure, please wait for the training is done and stablized before running the following cell again! # + # %matplotlib inline from sagemaker.analytics import TrainingJobAnalytics import time sm_client = boto3.client(service_name="sagemaker") state = sm_client.describe_training_job(TrainingJobName=job_name).get("SecondaryStatus") if not local_mode: while state == "Starting": state = sm_client.describe_training_job(TrainingJobName=job_name).get("SecondaryStatus") print("Training job starting...") time.sleep(30) print("Training job started. Waiting for algorithm metric...") df = TrainingJobAnalytics(job_name, ["episode_reward_mean"]).dataframe() num_metrics = len(df) if num_metrics == 0: print("No algorithm metrics found in CloudWatch, please check later.") else: plt = df.plot(x="timestamp", y="value", figsize=(12, 5), legend=True, style="b-") plt.set_ylabel("Mean reward per episode") plt.set_xlabel("Training time (s)") else: print("Can't plot metrics in local mode.") # - # ## (Optional) Bring customized Unity environment # We have shown how to load a sample environment from the Unity Environment Registry and train a RL agent. However, you can use your custom Unity executable as the environment to start the training. # 1. 
[Create a Unity executable](https://github.com/Unity-Technologies/ml-agents/blob/742c2fbf01188fbf27e82d5a7d9b5fd42f0de67a/docs/Learning-Environment-Executable.md) of your Unity environment for Linux platform. Please make sure your environment only contains one single agent. # 2. Upload the executable file, dependency data files and library files to s3. # 3. Specify the s3 path as a train channel of the training job. # ### Train the RL model using the Python SDK Script mode with provided Unity executables # + # # %%time # metric_definitions = RLEstimator.default_metric_definitions(RLToolkit.RAY) # estimator = RLEstimator(entry_point="train-unity.py", # source_dir='src', # dependencies=["common/sagemaker_rl"], # image_uri=custom_image_name, # role=role, # instance_type=instance_type, # instance_count=1, # output_path=s3_output_path, # base_job_name=job_name_prefix, # metric_definitions=metric_definitions, # hyperparameters={ # # Attention scientists! You can override any Ray algorithm parameter here: # # "rl.training.config.env_config.env_name": "<file name of your Unity executables> e.g. basic_env_linux.x86_64", # # "rl.training.stop.timesteps_total": 1000, # # "rl.training.config.num_sgd_iter": 10, # } # ) # s3_binary_path = '<s3 path for your Unity files> e.g. s3://bucket/unity-data' # estimator.fit({'train': s3_binary_path}, wait=local_mode) # job_name = estimator.latest_training_job.job_name # print("Training job: %s" % job_name) # - # ## Evaluation of RL models # # We use the last checkpointed model to run evaluation for the RL Agent. # # ### Load checkpointed model # # Checkpointed data from the previously trained models will be passed on for evaluation / inference in the checkpoint channel. In local mode, we can simply use the local directory, whereas in the SageMaker mode, it needs to be moved to S3 first. 
# + tmp_dir = "/tmp/{}".format(job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder {}".format(tmp_dir)) if local_mode: model_tar_key = "{}/model.tar.gz".format(job_name) else: model_tar_key = "{}/output/model.tar.gz".format(job_name) local_checkpoint_dir = "{}/model".format(tmp_dir) wait_for_s3_object(s3_bucket, model_tar_key, tmp_dir, training_job_name=job_name) if not os.path.isfile("{}/model.tar.gz".format(tmp_dir)): raise FileNotFoundError("File model.tar.gz not found") os.system("mkdir -p {}".format(local_checkpoint_dir)) os.system("tar -xvzf {}/model.tar.gz -C {}".format(tmp_dir, local_checkpoint_dir)) print("Checkpoint directory {}".format(local_checkpoint_dir)) # - if local_mode: checkpoint_path = "file://{}".format(local_checkpoint_dir) print("Local checkpoint file path: {}".format(local_checkpoint_dir)) else: checkpoint_path = "s3://{}/{}/checkpoint/".format(s3_bucket, job_name) if not os.listdir(local_checkpoint_dir): raise FileNotFoundError("Checkpoint files not found under the path") os.system("aws s3 cp --recursive {} {}".format(local_checkpoint_dir, checkpoint_path)) print("S3 checkpoint file path: {}".format(checkpoint_path)) # + # %%time estimator_eval = RLEstimator( entry_point="evaluate-unity.py", source_dir="src", dependencies=["common/sagemaker_rl"], image_uri=custom_image_name, role=role, instance_type=instance_type, instance_count=1, base_job_name=job_name_prefix + "-evaluation", hyperparameters={ "evaluate_episodes": 5, "algorithm": "PPO", # change the env name here acoording to the traine environment "env": "Basic", }, ) estimator_eval.fit( {"model": checkpoint_path}, wait=local_mode ) # specify a train channel for the custom Unity executable option job_name = estimator_eval.latest_training_job.job_name # - # # Model deployment # # Now let us deploy the RL policy so that we can get the optimal action, given an environment observation. 
# + from sagemaker.tensorflow.model import TensorFlowModel model = TensorFlowModel(model_data=estimator.model_data, framework_version="2.1.0", role=role) predictor = model.deploy(initial_instance_count=1, instance_type=instance_type) # + # ray 0.8.5 requires all the following inputs # 'prev_action', 'is_training', 'prev_reward' and 'seq_lens' are placeholders for this example # they won't affect prediction results # observation shpae passed here must match with the environment specs input = { "inputs": { "observations": np.ones(shape=(1, 20)).tolist(), "prev_action": [0, 0], "is_training": False, "prev_reward": -1, "seq_lens": -1, } } # + result = predictor.predict(input) result["outputs"] # - # ### Clean up endpoint predictor.delete_endpoint()
reinforcement_learning/rl_unity_ray/rl_unity_ray.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Importing Libraries
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Importing Dataset
data = pd.read_csv("Social_Network_Ads.csv")

data.head()
data.tail()
data.describe()
data.info()
data.columns

# Features are columns 2-3; target is column 4.
X = data.iloc[:, [2,3]].values
# FIX: use a scalar column index so y is 1-D.  The original iloc[:, [4]]
# produced an (n, 1) column vector, which sklearn estimators only accept
# with a DataConversionWarning (silenced above by the warnings filters).
y = data.iloc[:, 4].values

# +
# Splitting the dataset into Train and Test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25)
# -

# Feature scaling: fit on the training split only, then apply the same
# transform to the test split (avoids information leakage).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_scale = sc.fit_transform(X_train)
X_test_scale = sc.transform(X_test)

# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
rf.fit(X_train_scale, y_train)

# Predicting the Test set results
y_pred = rf.predict(X_test_scale)
y_pred

# Evaluating the results using the classification metrics
from sklearn.metrics import accuracy_score, auc , confusion_matrix, classification_report, roc_auc_score, roc_curve

# Confusion matrix
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot= True, fmt="d")

# Classification report
print(classification_report(y_test, y_pred))

# Accuracy score
print(accuracy_score(y_test, y_pred))

# ROC AUC from hard class predictions (coarser than using probabilities).
print(roc_auc_score(y_test, y_pred))

# +
# FIX: score the *scaled* test features -- the forest was trained on scaled
# data, so the original rf.predict_proba(X_test) evaluated raw features.
probs = rf.predict_proba(X_test_scale)
# keep probabilities for the positive outcome only
probs = probs[:, 1]
# FIX: renamed from `auc` -- the original assignment shadowed the
# sklearn.metrics.auc function imported above.
auc_score = roc_auc_score(y_test, probs)
print('AUC: %.3f' % auc_score)
# calculate roc curve
fpr, tpr, thresholds = roc_curve(y_test, probs)
# plot no skill
plt.plot([0, 1], [0, 1], linestyle='--')
# plot the roc curve for the model
plt.plot(fpr, tpr, marker='.')
# show the plot
plt.show()

# + jupyter={"outputs_hidden": true}
# Full (n, 2) class-probability matrix for the scaled test set.
y_probas = rf.predict_proba(X_test_scale)
y_probas

# + jupyter={"outputs_hidden": true}
# FIX: roc_curve needs a 1-D score array; the original passed the 2-D
# probability matrix, which raises a ValueError.  With pos_label=0 the
# matching score is the probability of class 0 (column 0).
fpr, tpr, thresholds = roc_curve(y_test, y_probas[:, 0], pos_label=0)

# Print ROC curve
plt.plot(fpr,tpr)
plt.show()

# AUC by trapezoidal integration of the curve
# (renamed so it does not shadow sklearn.metrics.auc).
auc_trap = np.trapz(tpr,fpr)
print('AUC:', auc_trap)
# -
Machine Learning/2) Classification/Section 20 - Random Forest Classification/RF_mypractice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys import os sys.path.append(os.path.abspath('..')) import mapclassify as mc y = mc.load_example() mc.Maximum_Breaks(y, k=4) mc.Maximum_Breaks(y, k=7) mb7 = mc.Maximum_Breaks(y, k=7) mb7.bins mb7.counts mb7.yb mb7.adcm
notebooks/maximum_breaks.ipynb