code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Basic Topic Modeling with Gensim # Imports # + import logging import warnings # Turn off annoying warning messages logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR) warnings.filterwarnings("ignore", category=DeprecationWarning) import gensim import gensim.corpora as corpora import matplotlib.pyplot as PLT import numpy as NP import pandas as PD import pyLDAvis import pyLDAvis.gensim import re import spacy # Turn off SpaCy's parser and named-entity-recognition since we only need its POS tagger nlp = spacy.load('en', disable=['parser', 'ner']) from gensim.utils import simple_preprocess from gensim.models import CoherenceModel from nltk.corpus import stopwords from pprint import pprint # - # Setup # + stop_words = stopwords.words('english') stop_words.extend(['from', 'subject', 're', 'edu', 'use']) df = PD.read_json('https://raw.githubusercontent.com/selva86/datasets/master/newsgroups.json') print(df.target_names.unique()) # - # Data Preparation # + # Convert to list data = df.content.values.tolist() # Remove email addresses data = [re.sub('\S*@\S*\s?', '', sent) for sent in data] # Remove '\n' and '\r' data = [re.sub('\s+', ' ', sent) for sent in data] # Remove single quotes data = [re.sub("\'", "", sent) for sent in data] pprint(data[:1]) # - # Tokenization # + def sentToTokens(sents): for sent in sents: # `deacc=True` strips punctuation yield(gensim.utils.simple_preprocess(str(sent), deacc=True)) data_tokens = list(sentToTokens(data)) print(data_tokens[:1]) # - # Data Cleaning # + def removeStopwords(texts): return [ [w for w in simple_preprocess(str(doc)) if w not in stop_words] for doc in texts ] data_tokens_clean = removeStopwords(data_tokens) # - # N-Gram Modeling # + # A higher threshold results in 
fewer phrases bigrammer_init = gensim.models.Phrases(data_tokens, min_count=5, threshold=100) trigrammer_init = gensim.models.Phrases(bigrammer_init[data_tokens], threshold=100) bigrammer = gensim.models.phrases.Phraser(bigrammer_init) trigrammer = gensim.models.phrases.Phraser(trigrammer_init) def makeBigrams(texts): return [bigrammer[doc] for doc in texts] def makeTrigrams(texts): return [trigrammer[bigrammer[doc]] for doc in texts] data_bigrams = makeBigrams(data_tokens_clean) print(data_bigrams[:1]) # - # More Data Cleaning! # + def lemmatize(texts, allowed_pos=['NOUN', 'ADJ', 'VERB', 'ADV']): texts_out = list() for sent in texts: doc = nlp(" ".join(sent)) texts_out.append([tok.lemma_ for tok in doc if tok.pos_ in allowed_pos]) return texts_out data_lemmas = lemmatize(data_bigrams) print(data_lemmas[:1]) # -
topic_modeling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7 # language: python # name: python3 # --- # ### 01. Linear Regression # # #### Table of Contents # # - [1. Definition](#definition) # - [2. Cost function](#costfunction) # - [3. Gradient descent](#gradientdescent) # <a id="definition"></a> # # #### 1. Definition # # $\hat{y}=wx + b$ # # $w$ - weights, $b$ - bias. # <a id="costfunction"></a> # # #### 2. Cost function # # $MSE=J(w,b)=\frac{1}{N} \sum_{i=1}^{n}(y_{i}-(wx_{i}+b))^{2}$ # # Gradient of MSE with respect to $w$ and $b$ is: # # $J^{'}(w,b)=\begin{bmatrix} \frac{dJ}{dw}\\ \frac{dJ}{db} \end{bmatrix}=\begin{bmatrix} \frac{1}{N}\sum -2x_{i}[y_{i}-(wx_{i}+b)]\\ \frac{1}{N}\sum -2[y_{i}-(wx_{i}+b)]\end{bmatrix}$ # <a id="gradientdescent"></a> # # #### 3. Gradient descent # # <img src="./images/gradient_descent.png" alt="gradient_descent" width="600"/> # # Update rules for $w$ and $b$: # # $w=w - \alpha \frac{\partial J(w,b)}{\partial w}$ # # $b=b - \alpha \frac{\partial J(w,b)}{\partial b}$ # # $\alpha$ is the learning rate. # # If $\alpha$ is too small, gradient descent can be slow; if $\alpha$ is too large, gradient descent can overshoot the minimum. It may fail to converge, or even diverge. # # <img src="./images/learning_rates.png" alt="learning_rates" width="200"/> # # With low learning rates the improvements will be linear. With high learning rates they will start to look more exponential. Higher learning rates will decay the loss faster, but they get stuck at worse values of loss (green line). This is because there is too much "Energy" in the optimization and the parameters are bouncing around chaotically, unable to settle in a nice spot in the optimization landscape.
02-linear_regression/01-linearregression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="LYpLTJxrT39e" # # Workbook 1 - Types # # In programming, data, held in variables, have *types* associated with them. Here we shall explore a few of these and what this means for everyday programming. # # # - # ## Assigning variables values # # Before going over variable *types* let's go over what a variable is and how to assign a value to one: variable = 'value' # In Python assigning a value to a variable is as simple as writing the above. In this case the variable we have created has been assigned the value of 'value', we can retrieve this value either by entering this into the interpreter variable # or by letting a Python method (or command) act upon the variable. Here we will use the *print()* command to print out the value of the variable to the *console* print(variable) # for now we shall omit as to why a programmer may pick the latter method to display a variable's value when running a program, however, the use of *print()* is likely to be the mechanism you commonly adopt. # ## Types # # Now that we know how to assign variables values, let us look at the *type()* method. Simply put, entering in *type(variable)* will let you know what type of data is being held by the variable. 
# # There are quite a few different types of data that can be stored within a variable, however, the most common are: # ### The boolean # # True or False, one or zero the boolean can only be in one of two states boolean = True type(boolean) # ### The integer # # A whole number # + colab={"base_uri": "https://localhost:8080/"} id="onAKcGluT2dw" outputId="86bf1394-ce6d-4a9c-c200-78e1a908526f" integer = 3 type(integer) # - # ### The floating-point number # # Any number containing a decimal point # + colab={"base_uri": "https://localhost:8080/"} id="20yM7S2gUW4c" outputId="f50d3f8f-d79f-4ce2-978f-5a8830b43383" floating_point_number = 3.141 type(floating_point_number) # - # ### The string # # Individual letters are referred to as characters, collections of characters are 'strung' together to form strings of text # + colab={"base_uri": "https://localhost:8080/"} id="gPwxsFFJUgle" outputId="445e9330-1094-463c-950c-601c7e6f649d" text_string = 'text' type(text_string) # - # ### The list # # Just like a shopping list is a list of items, a list in Python is a list of variables. Each variable has an *index* value associated with it, making recall of an individual, specific, variables possible. # + colab={"base_uri": "https://localhost:8080/"} id="Osl2r2xmUkgf" outputId="1b61605e-4c14-4fcb-d51e-b1211a2960f3" list_of_things = ['text', 'more text', 'even more text'] type(list_of_things) # - # Once a list, such as the one above has been created it is possible to retrieve an indivdual variable using the syntax *list[n]* where you replace the letter *n* with the index of the variable. Indices start at zero where the first variable in the list (the value on the left hand side, closest to the equals sign) has the zero value; *e.g.*: list_of_things[0] # ### The dictionary # # The final type that we will cover here is the dictionary, it is very similar to the list but has a neat nuance. 
When using lists you have to know the index of the variable you're trying to recall, usually as you create the list you will know this, however, it is possible to append and remove entries from a list which can start to cause headaches when tracking where everything is. # # In a dictionary you call variables not by index but by their *key* and when you call a key you are returned it's value. This approach of variable storage has been given the imaginative name of *key, value* pairs. # + colab={"base_uri": "https://localhost:8080/"} id="airRxJIUUufJ" outputId="1e774d52-7508-49d9-bb06-6c228f2666da" key_value_pair_dictionary = {'key' : 'value', 'key_two' : 3.141} type(key_value_pair_dictionary) # - # Above I have created a simple dictionary containing two variables, *key* and *key_two*, to retrieve one I would simply have to enter key_value_pair_dictionary['key'] # This retrieves the value from the dictionary, interestingly although the variable *key_value_pair_dictionary* has a type of dictionary the values retain their own types type(key_value_pair_dictionary['key_two']) # As can be seen above, once key_two if we retrieve *key_two* it's type remains *float* # # As already mentioned, there are quite a few different types of variable and each one has a different role to play. This will give us a good grounding for now as we turn our attention to *flow control*. # ## Further reading # # More information on types can be found on the Python documentation here: [Built-in Types](https://docs.python.org/3.9/library/stdtypes.html)
01 - The Basics/01 - Types.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: "Python 3.7 (Intel\xAE oneAPI)" # language: python # name: c009-intel_distribution_of_python_3_oneapi-beta05-python # --- # + # %%writefile lab/main.cpp //============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <chrono> #include <iomanip> #include <iostream> #include <cstdio> #include <cmath> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" #include "mandel.hpp" using namespace std; using namespace sycl; void ShowDevice(queue &q) { // Output platform and device information. auto device = q.get_device(); auto p_name = device.get_platform().get_info<info::platform::name>(); cout << std::setw(20) << "Platform Name: " << p_name << "\n"; auto p_version = device.get_platform().get_info<info::platform::version>(); cout << std::setw(20) << "Platform Version: " << p_version << "\n"; auto d_name = device.get_info<info::device::name>(); cout << std::setw(20) << "Device Name: " << d_name << "\n"; auto max_work_group = device.get_info<info::device::max_work_group_size>(); cout << std::setw(20) << "Max Work Group: " << max_work_group << "\n"; auto max_compute_units = device.get_info<info::device::max_compute_units>(); cout << std::setw(20) << "Max Compute Units: " << max_compute_units << "\n\n"; } void Execute(queue &q, int choice) { // Demonstrate the Mandelbrot calculation serial and parallel. 
#ifdef MANDELBROT_USM cout << "Parallel Mandelbrot set using USM.\n"; MandelParallelUsm m_par(row_size, col_size, max_iterations, &q); #else cout << "Parallel Mandelbrot set using buffers.\n"; MandelParallel m_par(row_size, col_size, max_iterations); #endif MandelSerial m_ser(row_size, col_size, max_iterations); // Run the code once to trigger JIT. m_par.Evaluate(q,choice); // Run the parallel version and time it. dpc_common::TimeInterval t_par; for (int i = 0; i < repetitions; ++i) m_par.Evaluate(q, choice); double parallel_time = t_par.Elapsed(); // Print the results. m_par.Print(); m_par.WriteImage(); // Run the serial version. dpc_common::TimeInterval t_ser; m_ser.Evaluate(choice); double serial_time = t_ser.Elapsed(); // Report the results. cout << std::setw(20) << "Serial time: " << serial_time << "s\n"; cout << std::setw(20) << "Parallel time: " << (parallel_time / repetitions) << "s\n"; // Validate. m_par.Verify(m_ser); } int main(int argc, char *argv[]) { try { // Create a queue on the default device. Set SYCL_DEVICE_TYPE environment // variable to (CPU|GPU|FPGA|HOST) to change the device. // queue q(default_selector{}, dpc_common::exception_handler); queue q(gpu_selector{}, dpc_common::exception_handler); // Display the device info. ShowDevice(q); freopen("input1.txt", "r", stdin); freopen("output1.txt", "w", stdout); cout << "Enter the option to see fractals" << std::endl; cout << "1. Sin " << std::endl; cout << "2. Cos " << std::endl; cout << "3. Tan " << std::endl; cout << "4. Mandel" << std::endl; cout << "5. Lograrithmic" << std::endl; cout << "6. conjugate" << std::endl; cout << "7. conjugate sin" << std::endl; cout << "8. conjugate sin" << std::endl; cout << "9. exponent " << std::endl; cout << "10. logarithmic " << std::endl; cout << "11. log tan " << std::endl; cout << "12. log tan and sin " << std::endl; cout << "13. log tan and sin " << std::endl; cout << "14. arc of sin and cos " << std::endl; cout << "15. 
sin and cos mandel " << std::endl; cout << "16. tan and sin mandel " << std::endl; cout << "17. tan and cos mandel " << std::endl; cout << "18. tan complex and cos mandel " << std::endl; cout << "19. sin complex and cos complex " << std::endl; cout << "20. exponential and cos " << std::endl; cout << "21. Normal z and square z " << std::endl; cout << "22. z cubed and square z " << std::endl; cout << "23. z cubed square z and z " << std::endl; int choice; cin >> choice; // Compute Mandelbrot set. Execute(q, choice); } catch (...) { // Some other exception detected. cout << "Failed to compute Mandelbrot set.\n"; std::terminate(); } cout << "Successfully computed Mandelbrot set.\n"; return 0; } # - # ### Build and Run # Select the cell below and click Run ▶ to compile and execute the code above: # ! chmod 755 q; chmod 755 run-fractal.sh; if [ -x "$(command -v qsub)" ]; then ./q run-fractal.sh; else ./run-fractal.sh; fi
Fractal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Real-Estate Price Prediction # By <NAME> # # In this Linear Regression based project (my first Machine Learning project) I will work on the following case study using NumPy, Pandas, Matplotlib, Seaborn, and SciKitLearn. # # Case Study: # Your neighbor is a real estate agent and wants some help predicting housing prices for regions in the USA. It would be great if you could somehow create a model for her that allows her to put in a few features of a house and returns back an estimate of what the house would sell for. She has asked you if you could help her out with your new data science skills. You say yes, and decide that Linear Regression might be a good path to solve this problem! # Your neighbor then gives you some information about a bunch of houses in regions of the United States,it is all in a cvs format. # # The data in the cvs contains the following columns: # * 'Avg. Area Income': Avg. Income of residents of the city house is located in. # * 'Avg. Area House Age': Avg Age of Houses in same city # * 'Avg. Area Number of Rooms': Avg Number of Rooms for Houses in same city # * 'Avg. 
Area Number of Bedrooms': Avg Number of Bedrooms for Houses in same city # * 'Area Population': Population of city house is located in # * 'Price': Price that the house sold at # * 'Address': Address for the house import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.set_style('whitegrid') # **Understanding the data** # + # Importing the data from csv file USAhousing = pd.read_csv("USA_Housing.csv") USAhousing.head() # + # Info of the data USAhousing.info() # + # Statistical description of the data USAhousing.describe() # + # Checking the columns of the data USAhousing.columns # - # **Exploratory Data Analysis** # + # Pairplot of the data sns.pairplot(USAhousing) # + # Distplot of prices sns.displot(USAhousing['Price'], kde=True) # + # Heatmap of the correlation of data sns.heatmap(USAhousing.corr(), annot=True) # - # **Linear Regression Model using ML** # + # Splitting the data into an X array that contains the features to train on, # and a y array with the target variable (Price column) X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms', 'Avg. Area Number of Bedrooms', 'Area Population']] y = USAhousing['Price'] # + # Separating the training set and testing set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101) # + # training the model on the training set from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(X_train, y_train) # + # Evaluating the model print('Intercept: ', lr.intercept_) print('Coefficient: ', lr.coef_) # + # Saving the coefficient in a new dataframe coeff_df = pd.DataFrame(lr.coef_, X.columns, columns=['Coefficient']) coeff_df # - # Interpreting the coefficients: # # - Holding all other features fixed, a 1 unit increase in **Avg. Area Income** is associated with an **increase of \$21.528276**. 
# - Holding all other features fixed, a 1 unit increase in **Avg. Area House Age** is associated with an **increase of \$164883.282027**. # - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Rooms** is associated with an **increase of \$122368.678027**. # - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Bedrooms** is associated with an **increase of \$2233.801864**. # - Holding all other features fixed, a 1 unit increase in **Area Population** is associated with an **increase of \$15.150420**. # + # Predicting from the model prediction = lr.predict(X_test) # + # Scatterplot of predicted vs actual values plt.scatter(y_test, prediction) # + # Residual histogram sns.displot((y_test-prediction), bins=50, kde=True) # + # Regression evaluation metrics from sklearn import metrics print('MAE (Mean Absolute Error):', metrics.mean_absolute_error(y_test, prediction)) print('MSE (Mean Squared Error):', metrics.mean_squared_error(y_test, prediction)) print('RMSE (Root Mean Squared Error):', np.sqrt(metrics.mean_squared_error(y_test, prediction))) # + # R^2 score and explained variance score print('R^2 score: ', metrics.r2_score(y_test, prediction)) print('Explained variance score: ', metrics.explained_variance_score(y_test, prediction))
RealEstate-Price-Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl print(plt.style.available) print(mpl.get_configdir()) with plt.style.context(('dark_background')): plt.plot(np.sin(np.linspace(0, 2 * np.pi)), 'r-o') plt.show() mpl.rcdefaults()
python/learn/matplotlib/style.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # WINE CLASSIFIER # + # Imports from io import StringIO import pandas as pd import spacy from cytoolz import * import numpy as np from IPython.display import display import seaborn as sns from sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import chi2 from sklearn.svm import LinearSVC from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import * from sklearn.linear_model import * from sklearn.dummy import * from sklearn.pipeline import make_pipeline from sklearn.feature_extraction.text import * from sklearn.metrics import * from sklearn.decomposition import * from sklearn import metrics # %precision 4 # %matplotlib inline # - nlp = spacy.load('en', disable=['tagger', 'ner', 'parser']) # + #1. Prepare Data df = pd.read_msgpack('http://bulba.sdsu.edu/wine.dat') #df.head() #about 40,000 rows in full msgpack #sample created to increase speed, but remove sample definition for increased accuracy! 
df = df.sample(4000) df = df[pd.notnull(df['review_text'])] df = df[pd.notnull(df['wine_variant'])] df.info() # + #Create 'category_id' column for LinearSVC use df['category_id'] = df['wine_variant'].factorize()[0] category_id_df = df[['wine_variant', 'category_id']].drop_duplicates().sort_values('category_id') category_to_id = dict(category_id_df.values) id_to_category = dict(category_id_df[['category_id', 'wine_variant']].values) #Create a tokenized column for Logistical Regression use def tokenize(text): return [tok.orth_ for tok in nlp.tokenizer(text)] df['tokens'] = df['review_text'].apply(tokenize) # - df.head() # + #Check to see sample sizes for each variant, ensurring result accuracy import matplotlib.pyplot as plt fig = plt.figure(figsize=(10,8)) df.groupby('wine_variant').review_text.count().plot.bar(ylim=0) plt.show() # + #2. BASELINE folds = StratifiedKFold(shuffle = True, n_splits = 10, random_state = 10) sum(df['wine_variant'] == True), len(df) # + baseline = make_pipeline(CountVectorizer(analyzer = identity), DummyClassifier('most_frequent')) base_score = cross_val_score(baseline, df['tokens'], df['wine_variant'], cv=folds, n_jobs = -1) base_score.mean(), base_score.std() # + #3. SIMPLE LOGISTIC REGRESSION CLASSIFIER lr = make_pipeline(CountVectorizer(analyzer = identity), LogisticRegression()) params = {'logisticregression__C': [0.01, 0.1, 1.0], 'countvectorizer__min_df': [1, 2], 'countvectorizer__max_df': [0.25, 0.5]} grid_search = GridSearchCV(lr, params, n_jobs = -1, verbose = 1, return_train_score = True) grid_search.fit(df['tokens'], df['wine_variant']) # - grid_search.best_params_ # + lr.set_params(**grid_search.best_params_) lr_score = cross_val_score(lr, df['tokens'], df['wine_variant'], cv = folds, n_jobs = -1) lr_score.mean(), lr_score.std() # + grid = pd.DataFrame(grid_search.cv_results_, dtype = float) grid.plot.line('param_countvectorizer__max_df', 'mean_test_score') # + #4. 
BEST CLASSIFIER -- found through n_gram correlation best = make_pipeline(CountVectorizer(analyzer = identity), TfidfTransformer(), LinearSVC()) params_best = {'tfidftransformer__norm': ['l2', None], 'tfidftransformer__use_idf': [True, False], 'tfidftransformer__sublinear_tf': [True, False], 'linearsvc__penalty': ['l2'], 'linearsvc__C': [0.01, 0.1, 1.0], 'countvectorizer__min_df': [1, 2, 3], 'countvectorizer__max_df': [0.1, 0.5, 1.0]} best_grid_search = GridSearchCV(best, params_best, n_jobs = -1, verbose = 1, return_train_score = True) best_grid_search.fit(df['tokens'], df['wine_variant']) # - best_grid_search.best_params_ # + #Set hyperparameters for best model best.set_params(**best_grid_search.best_params_) best_score = cross_val_score(best, df['tokens'], df['wine_variant'], cv = folds, n_jobs = -1) best_score.mean(), best_score.std() #Result score is slightly higher than using LR model, and std is slightly less # + best_grid = pd.DataFrame(best_grid_search.cv_results_, dtype = float) best_grid.plot.line('param_countvectorizer__max_df', 'mean_test_score') # + #5. Error Analysis & Discussion #Inspect feautues tfidf = TfidfVectorizer(sublinear_tf = True, min_df = 1, norm = 'l2', encoding = 'latin-1', ngram_range = (1, 3), stop_words = 'english') features = tfidf.fit_transform(df.review_text).toarray() labels = df.category_id features.shape # + # Display the n_grams with highest correlation for each variant N = 5 for wine_variant, category_id in sorted(category_to_id.items()): # chi squared determines the correlation of each ngram to each variant, taking into account sample size features_chi2 = chi2(features, labels == category_id) indices = np.argsort(features_chi2[0]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in feature_names if len(v.split(' ')) == 1] bigrams = [v for v in feature_names if len(v.split(' ')) == 2] trigrams = [v for v in feature_names if len(v.split(' ')) == 3] print("# '{}':".format(wine_variant)) print(" . 
Most correlated unigrams:\n . {}".format('\n . '.join(unigrams[-N:]))) print(" . Most correlated bigrams:\n . {}".format('\n . '.join(bigrams[-N:]))) print(" . Most correlated trigrams:\n . {}".format('\n . '.join(trigrams[-N:]))) #The ngrams below appear more accurate and unique to each of the different variants. # + # Heatmap & Confusion Matrix to display accuracies of predictions with LinearSVC model = LinearSVC() X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size = 0.33, random_state = 10) model.fit(X_train, y_train) y_pred = model.predict(X_test) conf_mat = confusion_matrix(y_test, y_pred) fig, ax = plt.subplots(figsize = (10,8)) sns.heatmap(conf_mat, annot = True, fmt = 'd', xticklabels = category_id_df.wine_variant.values, yticklabels = category_id_df.wine_variant.values) plt.ylabel('Actual') plt.xlabel('Predicted') plt.show() # + #WRONG RESULT EXAMPLES FOR LINEAR SVC CLASSIFIER for predicted in category_id_df.category_id: for actual in category_id_df.category_id: if predicted != actual and conf_mat[actual, predicted] >= 6: print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted])) display(df.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['wine_variant', 'review_text']]) print('') # - model.fit(features, labels) N = 5 for wine_variant, category_id in sorted(category_to_id.items()): indices = np.argsort(model.coef_[category_id]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N] bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N] trigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 3][:N] print("# '{}':".format(wine_variant)) print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams))) print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams))) print(" . 
Top trigrams:\n . {}".format('\n . '.join(trigrams))) # + # Scores for each variant using a LinearSVC classifier with Tfid print(metrics.classification_report(y_test, y_pred, target_names=df['wine_variant'].unique())) # - # My best classifier (Linear SVC + tfid) struggles with classifying reviews in which the reviewer states key words that the actual wine variant lacks, whether the reviewer is uneducated or they are saying the identifying phrase in a negative (or lacking) manner. Also, generic reviews that do not include significantly unique characteristics will struggle, mainly due to the fact that wine shares a lot of characteristics between variants, but certain of those characteristics are more heavily present (on average) with specific variants. # # I have learned that classification can be done from string comparisions, statistics (logs), indexes, and various other measurable variables/attributes. The best way to ensure that classification is most successful is to combine the various models, depending from situation to situation. This wine prediction strongly supports the idea of vector and phrase classifications for multi-classes, leading to a need for identifying the distinguishing qualities of each class. Even though there are many variables and similarities between the variants, I found the beginning predictions to be quite easy and efficient. The main errors came from trying to distingusih merlot from cabernet, which does make sense since they are the closest related wines as far as shared features. As more and more data is collected, the dictionary of unique words grows in number and certainty in making a correct prediction. I do believe that a score better than 90% is achievable after subpar and uneducated reveiws are removed, tokenization further cleans reamining punctuation errors, and continued training data is supplied to the program to increase its correlation certainties. 
Also, to increase the accuracy, it would be advantageous to have the phrases with a negative or quantifying variable treated separate from the usage count. By recognizing the negative and quantifying variables as an un-splittable part of its modifying phrase, we ensure that certain features, mainly in the unigram category, aren't wrongly weighting that feauture for that variant. An example of this dilemma can be seen when comparing things like 'red' and 'not red,' which obviously mean a distinct difference, but if 'not' is allowed to separate from 'red,' non-red wine variants could get too much weight on the word 'red,' creating a higher chance for inaccuracies.
notebooks/wine_classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # + # #!{sys.executable} -m pip install s3fs -U #import sys # #!{sys.executable} -m pip install sagemaker -U # #!{sys.executable} -m pip install sagemaker-experiments -U # #!{sys.executable} -m pip install sagemaker==1.72.0 -U # #!pip install -U sagemaker --user # - # # Predict Paramters of Nucler Plant Operations # ### (Multi-Output Regression) # # Contents # 1. [Background](#Background) # 2. [Data](#Data) # 3. [Exploratory Data Analysis](#Exploratory-Data-Analysis) # 4. [Feature Engineering](#Feature-Engineering) # - [Fourier Transform](#Fourier-Transform) # - [Aggregate Features](#Aggregate-Features) # 5. [Trial 1](#Trial-1) # 6. [Model Deployment and Testing](#Model-Deployment-and-Testing) # 7. [Clean up](#Clean-up) # 8. [Test Cases](#Test-Cases) # 9. [Lambda Functions](#Lambda-Functions) # 10. [End to End Architcture and Next Steps](#End-to-End-Architcture-and-Next-Steps) # # ## Background # Nuclear Powerplants are one of the significant energy generators for world-leading nations. It is some times considered as a 'clean energy' source. Atomic power plants heat water to produce steam. The steam is used to spin large turbines that generate electricity. Nuclear power plants use the heat produced during nuclear fission to heat water. Nuclear energy generation of the US itself is 99.6 million kilowatts as of April 2020. Active monitoring of the powerplant systems is required to maintain balance in the systems. Any symptoms of abnormal behavior will result in catastrophic reactions. The objective of the current project is to predict x-coordinate,y-coordinate, mass, and velocity from the vibration data. 
This model is useful for identifying and detecting early signs of faults. import warnings; warnings.simplefilter('ignore') # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import io import os import sys import time import json from IPython.display import display from time import strftime, gmtime import boto3 import re import seaborn as sns import sklearn as sl import scipy as sp from numpy import save from tqdm import tqdm import sagemaker from sklearn.model_selection import train_test_split from sagemaker import get_execution_role from sagemaker.predictor import csv_serializer from sagemaker.debugger import (rule_configs, Rule, DebuggerHookConfig) from sagemaker.model_monitor import (DataCaptureConfig, DatasetFormat, DefaultModelMonitor) from sagemaker.s3 import (S3Uploader, S3Downloader) from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent from smexperiments.tracker import Tracker # - from sagemaker.amazon.amazon_estimator import get_image_uri docker_image_name = get_image_uri(boto3.Session().region_name, 'xgboost', repo_version='0.90-2') from utils import (create_train_test_split, generate_agg_feats, fft_features, plot_vibration_data) # ## Data # The train data contains five attributes. The attribute id and time are self-explanatory. The acceleration parameters in the collider are labeled as S1, S2, S3, and S4. In this data, each id is corresponding to one training instance. The timestamp difference between each observation in id is four seconds, and it can be considered an equispaced time series data-set. For each id, there is a corresponding entry in the training targets data. There are 1050000 in the training data and 2800 entries. The training target contains 2800 entries for X, Y, M, and V. These are the prediction target, the collider parameters. 
# # Unlike the traditional data-sets in Machine learning exercises, we can't jump into modeling immediately. The data should be further converted to an appropriate scientific format before approaching the problem. One of the widely adopted methods is to apply Fourier Transform before using any modeling techniques. Let's explore the data further to understand the same. s3_bucket_name = 'nukeall' s3_prefix = 'nuclear' # + data_file = "train_features.csv" data_target_file = "train_target.csv" features = pd.read_csv(f"s3://{s3_bucket_name}/{s3_prefix}/{data_file}") targets = pd.read_csv(f"s3://{s3_bucket_name}/{s3_prefix}/{data_target_file}") # - features.head() targets.head() # ## Exploratory Data Analysis # # Unlike the traditional Machine Learning dataset, the vibration data is arranged by the observation id's. Each id contains approximately 375 observations, which is of time series in nature. Exploration and pre-processing of the data require specific requirements than the traditional space. Let's explore a set of observations from the data. features[features.id == 10] data_id = 10 plot_vibration_data(features[features.id == 10], "Vibration Readings for the ID {0}".format(data_id)) data_id = 1250 plot_vibration_data(features[features.id == 1250], "Vibration Readings for the ID {0}".format(data_id)) data_id = 2000 plot_vibration_data(features[features.id == 2000], "Vibration Readings for the ID {0}".format(data_id)) # ## Feature Engineering # Each observation in the data consists of multiple records. We have to transform the data into a convenient format to apply any Machine Learning algorithm here. There are two prominent ways to create features from such data: # # - Apply Fourier Transform # # - Aggregate the data # # Let's try out these approaches in this example. # # ### Fourier Transform # One of the prominent methods to approach signal data is to apply Fourier transformation in the data. The Fourier transformed data can be used for training a model. 
# ## Aggregate Features # # An alternative approach in feature engineering is to aggregate the features and compute key statistics such as mean, median, standard deviation, minimum value, and skew. # ## Prapre Training Test and Validation Data # # Due to the very nature of the training data splitting the training test and validation is tricky. We adopted a technique by dividing the training target data frame to train, test, validation set. The ids of each respective set are used to filter the training data to the three splits. ttv = create_train_test_split(targets) test_target = ttv['test'] test_feat = features[features.id.isin(list(test_target.id))] test_feat_fft = fft_features(test_feat) test_feat_agg = generate_agg_feats(test_feat) test_target.to_csv("data/test_tgt.csv", index=False) save("data/test_fft.npy",test_feat_fft) test_feat_agg.to_csv("data/test_feat.csv", index=False) train_target = ttv['train'] train_feat = features[features.id.isin(list(train_target.id))] train_feat_fft = fft_features(train_feat) train_feat_agg = generate_agg_feats(train_feat) train_target.to_csv("data/train_tgt.csv", index=False) save("data/train_fft.npy",train_feat_fft) train_feat_agg.to_csv("data/train_feat.csv", index=False) val_traget = ttv['validate'] val_feat = features[features.id.isin(list(val_traget.id))] val_feat_fft = fft_features(val_feat) val_feat_agg = generate_agg_feats(val_feat) val_traget.to_csv("data/val_tgt.csv", index=False) save("data/val_fft.npy",val_feat_fft) val_feat_agg.to_csv("data/val_feat.csv", index=False) S3Uploader.upload('data/train_tgt.csv', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'train')) S3Uploader.upload('data/train_fft.npy', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'train')) S3Uploader.upload('data/train_feat.csv', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'train')) S3Uploader.upload('data/test_tgt.csv', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'test')) S3Uploader.upload('data/test_fft.npy', 
's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'test')) S3Uploader.upload('data/test_feat.csv', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'test')) S3Uploader.upload('data/val_tgt.csv', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'val')) S3Uploader.upload('data/val_fft.npy', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'val')) S3Uploader.upload('data/val_feat.csv', 's3://{}/{}/{}'.format(s3_bucket_name, s3_prefix,'val')) # ## Trial 1 # In this trial, we will try to apply multi-output regression with GradientBoostingRegressor. # + sess = sagemaker.session.Session() create_date = strftime("%Y-%m-%d-%H-%M-%S", gmtime()) nuclear_experiment = Experiment.create(experiment_name="nuclear-reactor-1{}".format(create_date), description="Using xgboost to predict reactor params", sagemaker_boto_client=boto3.client('sagemaker')) trial = Trial.create(trial_name="algorithm-mode-trial-{}".format(strftime("%Y-%m-%d-%H-%M-%S", gmtime())), experiment_name=nuclear_experiment.experiment_name, sagemaker_boto_client=boto3.client('sagemaker')) # - debug_rules = [Rule.sagemaker(rule_configs.loss_not_decreasing()), Rule.sagemaker(rule_configs.overtraining()), Rule.sagemaker(rule_configs.overfit()) ] from sagemaker.sklearn.estimator import SKLearn role = sagemaker.get_execution_role() sagemaker_session = sagemaker.Session() # + script_path = 'train_script.py' sklearn = SKLearn( entry_point=script_path, train_instance_type="ml.c4.xlarge", role=role, sagemaker_session=sagemaker_session, rules=debug_rules, hyperparameters={'ftype': 'fft'}, framework_version="0.23-1") # - train_s3 = "s3://nukeall/nuclear/train/" sklearn.fit({'train': train_s3}, wait=True, experiment_config={ "ExperimentName": nuclear_experiment.experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "Training", }) # ## Model Deployment and Testing # # We will use the test FFT features to generate some payload to the model endpoint to test. 
# +
data_capture_prefix = '{}/datacapture_1'.format(s3_prefix)
endpoint_name = "demo-nuke-reactor-new" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("EndpointName = {}".format(endpoint_name))
# -

# Deploy the trained model behind a real-time endpoint; 100% of request/response
# payloads are captured to S3 for later model-quality monitoring.
sklp = sklearn.deploy(initial_instance_count=1,
                      instance_type='ml.m4.xlarge',
                      endpoint_name=endpoint_name,
                      data_capture_config=DataCaptureConfig(enable_capture=True,
                                                            sampling_percentage=100,
                                                            destination_s3_uri='s3://{}/{}'.format(s3_bucket_name, data_capture_prefix)))

from sagemaker.predictor import csv_serializer
from sagemaker.predictor import (numpy_deserializer, npy_serializer)

# The endpoint exchanges numpy arrays (application/x-npy), matching the FFT
# feature matrix we send below.
sklp.content_type = 'application/x-npy'
sklp.serializer = npy_serializer
sklp.deserializer = numpy_deserializer

# Smoke-test the endpoint with the first 10 FFT feature rows; the short sleep
# spaces out requests so the data-capture sampling records each one.
for idx, sample in enumerate(test_feat_fft[:10]):
    pred = sklp.predict(np.asarray([sample]))
    print(pred)
    time.sleep(0.5)

# ## Clean up

# +
def cleanup(experiment):
    '''Clean up everything in the given experiment object.

    Deletes every trial component, then every trial, and finally the
    experiment itself.  Components must be disassociated from their trial
    before deletion, which is why a SageMaker boto3 client is needed here.
    '''
    # FIX: `sm` was referenced but never defined in this notebook, so the
    # function raised NameError.  Create the client locally instead.
    sm = boto3.client('sagemaker')
    for trial_summary in experiment.list_trials():
        trial = Trial.load(trial_name=trial_summary.trial_name)
        for trial_comp_summary in trial.list_trial_components():
            trial_step = TrialComponent.load(trial_component_name=trial_comp_summary.trial_component_name)
            print('Starting to delete TrialComponent..' + trial_step.trial_component_name)
            sm.disassociate_trial_component(TrialComponentName=trial_step.trial_component_name,
                                            TrialName=trial.trial_name)
            trial_step.delete()
            # Brief pause to avoid throttling by the SageMaker API.
            time.sleep(1)
        trial.delete()
    experiment.delete()

#cleanup(nuclear_experiment)
# -

# ## Test Cases
#
# Test cases for the utils script are located in the tests folder. The notebook Run_testCases.ipynb can be used to run the test cases.
#
# ## Lambda Functions
# I created a sample lambda to invoke an endpoint. The lambda name is "nukelambda" (not fully tested).
#
# ## End to End Architcture and Next Steps
# To operationalize the model in production scenarios, we have to leverage multiple AWS technology components.
High-level considerations are listed here: # # - Data Ingestion to AWS (From Reactor) - AWS IoT Services # # - Data Storage - S3 and RDS. S3 will be the landing zone and RDS (PostgreSQL) will hold structured data. # # - Model REST APIs - AWS Lambda # - Dashboard - QuickSight #
AWS_Training_competition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # # This dataset includes the list of games with sales recorded over 100,000 copies over different platforms and continents. It contains data for over 16,500 games from 1980 to 2016. This dataset was generated by scrape from [VGChartz](https://www.vgchartz.com/). # # # # Question # # To narrow down to answering my main question, I have to explore what other factors in my dataset affects sale records of a game? # # - Factors such as: # 1. Platform # 2. Genre # 3. Publisher # # Find out which factors has done better overall in terms of sale records will help me answer the main questions: # # **Does publishing more games in any given genre leads to a high sale records?** # # Dataset # # This dataset has 11 unique columns. # # - Rank : Rank the games according to best overall sales records # - Name : Name of the game # - Platform : Name of the platform (i.e. PS2, PS3, PC,..) on which the game was released. # - Year : The year in which the games was released # - Genre : Genre of the game # - Publisher : Publisher of the game # - NA_Sales : Game's sale record in North America (in millions) # - EU_Sales : Game's sale record in Europe (in millions) # - JP_Sales : Game's sale record in Japan (in millions) # - Other_Sales : Game's sale record in Other parts of the world (in millions) # - Global_Sales : Game's sale record worldwide (in millions) ## why are we using boxplot and countplot? 
from Scripts import project_functions as pf from Scripts import groupby_function as gf import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # This will load data, remove the missing data and reset the index df = pf.load_and_process("../data/raw/vgsales.csv") # exporting the process data to a folder df.to_csv("../data/processed/processed_vgsales.csv") df # # Exploratory Data Analysis with observations # **Which platform has the best total global sales?** col_list = ['Platform', 'Global_Sales'] df2 = ( df.groupby(['Platform']).sum() .sort_values('Global_Sales', ascending = False) .reset_index(col_level = 1) ) df2 = df2[col_list] fig, ax = plt.subplots(figsize=(6,8)) sns.barplot(x='Global_Sales', y= 'Platform', data = df, estimator = np.sum) plt.xlabel('Global_Sales (in millions)') df2.head(11) # Observations: # # # - As we can see the PS2 has highest total game sales globally # **Let's also see which platform has highest sale record in other part of the worlds?** df3 = df.groupby(['Platform']).sum() columns = ['Rank', 'Year', 'Global_Sales'] df3.drop(columns, inplace = True, axis=1) ax = df3.plot(kind = "barh", figsize=(15,20), width = 1, stacked = True, colormap = "tab20b") ax.set_xlabel("Sales (in million)") df3 # Observation: # # - Xbox has the highest total game sales in North America. # - PS3 has the highest total game sales in Europe. # - DS has the highest total game sales in Japan. # - PS2 has the highest total game sales in other parts of the world. # # PC has been used worldwide but yet it is not the best perfroming platform. One of the reason I think is that PC games have been facing problem with pirating. Other platform like PS, GB, NES and Xbox it is tough to pirate games.Therefore, many games company sales record are not too high for PC. # **Which publisher has good sales record throughout years globally? (As there many publisher! 
let's take top 15)** df3 = (df.groupby(['Publisher']).sum() .sort_values('Global_Sales', ascending = False) .reset_index(col_level = 1) ) top15 = df3.head(15) fig, ax = plt.subplots(figsize=(10,20)) sns.barplot(x='Global_Sales', y='Publisher', data = top15) plt.xlabel('Global_Sales (in millions)') col = ['Publisher', 'Global_Sales'] top15[col] # Observation: # # - As we can see that Nintendo has the highest global sales # **Which Genre has been dominating the gaming industry globally?** fig, ax = plt.subplots(figsize=(10,20)) sns.barplot(x='Global_Sales', y='Genre', data = df, estimator = np.sum) plt.xlabel('Global_Sales (in millions)') # Observation: # # - Action has been taking over the gaming industry. # # # Analysis # # As of now we have seen which platform, publisher and Genre has the most game sales record. Now, we are going to focus on the main question Does pubishing more games in a dominating genre leads to higher sale of games? # # **Let's count the number of games published by each publisher** df5 = df.groupby(['Publisher']).count() df5 = df5.sort_values('Global_Sales', ascending = False) df5 = df5.rename(columns={"Rank": "No. of Games Published"}) col = ['No. of Games Published'] df5[col].head(15) fig, ax = plt.subplots(figsize=(18,12)) sns.countplot(y='Publisher', data=df,order=df.Publisher.value_counts().iloc[:15].index) # As we can see that Electronic Arts has the most number of game published. # **Let's see which publisher has released the most amount of action games?** # # As we saw before, Action genre has the highest sales recorded. Therefore, we are trying to find if which publisher has published the most action genre games. Does that publisher has the highest sale records? 
col = ['Name', 'Publisher', 'Genre'] df6 = df[col] df6 = df6[df6['Genre'].str.match('Action')] df6 = ( df6.reset_index(col_level = 1) .drop(['index'], axis = 1) ) df6 fig, ax = plt.subplots(figsize=(18,15)) sns.countplot(y='Publisher', data=df6,order=df6.Publisher.value_counts().iloc[:15].index) # As from the countplot above, we can observe that **Activision** has the most amount of action games published. However, they are third on the list of highest global sales. # # As of now, we know that in terms of sales record: # - **PS2** is best platform # - Best publisher is **Nintendo** # - Best Genre is **Action** # - Most game is published by **EA** # - Most action games published by **Activision** # # One of the fun fact which will help us answer the main question is that almost all of the games made by Nintendo was on avaliable to only consoles by Nintendo themselves (i.e. Wii U, Wii, SNES, ...). # # Therefore, we lead to one more question: # # **How many Action based games were published by Activision on PS2?** # # As our main question is **Does publishing more games in a given genre leads to a high sale records?** # # As Activison as published the most action games, we will check what is the sale records of Activision publishing action based games on PS2 which is the best performing platform. df7 = df[df['Genre'].str.match('Action')] df7 = df7[df7['Platform'].str.match('PS2')] df7 = df7[df7['Publisher'].str.match('Activision')] df7 = df7.groupby(['Platform']).sum() col = ['Global_Sales'] df7[col] df8 = df[df['Genre'].str.match('Action')] df8 = df8[df8['Publisher'].str.match('Activision')] df8 = df8[df8['Platform'].str.match('PS2')] df8 = df8.groupby(['Platform']).count() df8 = df8.rename(columns={"Name": "No. of Action Games"}) col = ['No. 
of Action Games'] df8[col] df10 = df[df['Genre'].str.match('Action')] df10 = df10[df10['Publisher'].str.match('Activision')] df10 = df10.groupby(['Publisher']).sum() col = ['Global_Sales'] df10[col].head(1) # Activision has global sales of 141.82 (in millions) of action based games across all platforms. # # # Activision has published 310 action games. Out of 310, 28 were released on PS2. (28/310)*100 # That's 9%. That 28 games on PS2 has made 22.34 (in million). (22.34/141.82)*100 # That's 15.75%. # # We have to find how many action games have Activision has published on other platforms and their global sale record and compare it with PS2 sales record. df11 = df[df['Genre'].str.match('Action')] df11 = df11[df11['Publisher'].str.match('Activision')] df11 = df11.groupby(['Platform']).count() col = ['Rank'] df11 = df11[col] df11 = df11.rename(columns={'Rank':'No. of the game published'}) df11 fig, ax = plt.subplots(figsize=(10,10)) line = df11.plot(kind = "bar", ax=ax, color=(0.2, 0.4, 0.6, 0.6)) df14 = df[df['Genre'].str.match('Action')] df14 = df14[df14['Publisher'].str.match('Activision')] df14 = df14.groupby(['Platform']).sum() df14 = df14.rename(columns={'Global_Sales':'Global_Sales (in millions)'}) col = ['Global_Sales (in millions)'] df14 = df14[col] df14 fig, ax = plt.subplots(figsize=(12,10)) line1 = df14.plot(kind = "bar",ax =ax,) line = df11.plot(kind = "bar", ax=ax,color=(0.2, 0.4, 0.6, 0.6)) # # Conclusion # # As we can see from the last graph in analysis we found that: # # - PS2 being the best platform have sale record of 22.34 millions from 28 action based games. # - X360 has the sale record of 23.76 millions from 43 action based games. # - Wii has the sale record of 18.65 millions from 33 action based games. # # As you see all 3 platform mentioned in the list above represent different company. # # - PS2 represents PlayStation. # - X360 represents Microsoft. # - Wii represents Nintendo. 
# # The answer to my question **Does publishing more games in any given genre lead to high sale records?** # Well, it does not. Publishing more games in the action genre does not lead to a higher sale record. The reason is that factors like platform matter. As we saw, PS2 has sale records of 22.34 million from just 28 action games, while PS3, the next-generation platform of the same company PlayStation, has sale records of 15.82 million from 36 action games! So does the game itself! Publishers like Activision have to make sure that their games are entertaining in the genre they are dominating. Therefore, all of these factors affect the outcome of the main question!
analysis/.ipynb_checkpoints/milestone3-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="UbJeRRk5B9za" colab_type="code" colab={} # !wget https://www.dropbox.com/s/5nr4q08af06gmfv/ShipClassification.zip?dl=0 # + id="0Rqav1vvN12d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9e3025d6-3ef6-4cb8-e909-3f5aa960c1f3" # !ls # + id="aJeB9C_ICGn-" colab_type="code" colab={} from zipfile import ZipFile file_name="ShipClassification.zip?dl=0" with ZipFile(file_name,'r') as zip: zip.extractall() print("done") # + id="l8uKJYrROL1R" colab_type="code" colab={} # + id="kcFKO92dCRJh" colab_type="code" colab={} # !ls # + id="tjsKu8EgC92s" colab_type="code" colab={} # %reload_ext autoreload # %autoreload 2 # %matplotlib inline from fastai import * from fastai.vision import * import fastai.vision as fa from fastai.metrics import error_rate # + id="9fio9CiAOOiC" colab_type="code" colab={} path = Path('/content/ShipClassification/') # + id="iK_yP_vGOxcW" colab_type="code" colab={} # !pip install patool # + id="4tKxeZbIOWNH" colab_type="code" colab={} import patoolib patoolib.extract_archive("/content/ShipClassification/train.zip",outdir=path) # + id="eiBqGjycDheV" colab_type="code" colab={} path.ls() # + id="yhSXuFkdPoG3" colab_type="code" colab={} import os os.rename('/content/ShipClassification/train.csv','/content/ShipClassification/labels.csv') # + id="4oTIIjs9Dvux" colab_type="code" colab={} labels = pd.read_csv('/content/ShipClassification/labels.csv') test = pd.read_csv('/content/ShipClassification/test_ApKoW4T.csv') submit = pd.read_csv('/content/ShipClassification/sample_submission_ns2btKE.csv') # + id="h5ihEo88EH2j" colab_type="code" colab={} labels.shape, test.shape # + id="wSmdQNH3ENxW" colab_type="code" colab={} labels.dtypes # + id="xBsT9e8-FAbp" colab_type="code" colab={"base_uri": "https://localhost:8080/", 
"height": 298} outputId="3b1b80b8-c4cd-417b-bc9b-b8de1de6e90c" os.makedirs("/content/ShipClassification/test") # + id="UspLHanLGBeK" colab_type="code" colab={} torch.cuda.is_available() # + id="VjJAKogLEd32" colab_type="code" colab={} import os import shutil src = '/content/ShipClassification/images/' dst = '/content/ShipClassification/test/' for i in range(test.shape[0]): srcpath = os.path.join(src, test['image'][i]) dstpath = os.path.join(dst, test['image'][i]) shutil.copyfile(srcpath, dstpath) # + id="94FezJqzVOEu" colab_type="code" colab={} # + id="htLBon8LVAIC" colab_type="code" colab={} def get_ex(): return open_image(path/'images/626289.jpg') def plots_f(rows, cols, width, height, **kwargs): [get_ex().apply_tfms(tfms[0], **kwargs).show(ax=ax) for i,ax in enumerate(plt.subplots( rows,cols,figsize=(width,height))[1].flatten())] # + id="0kRLQiCyWOpj" colab_type="code" colab={} tfm = symmetric_warp(magnitude=(-0.2,0.2)) _, axs = plt.subplots(2,4,figsize=(12,6)) for ax in axs.flatten(): img = get_ex().apply_tfms(tfm, padding_mode='zeros') img.show(ax=ax) # + id="TCaTIpwCFQj8" colab_type="code" colab={} np.random.seed(42) bs=16 tfms = get_transforms() data = ImageDataBunch.from_csv(path,folder='images',csv_labels='labels.csv',label_delim=',',valid_pct=0.2, ds_tfms=tfms,test='test',size=224,bs=bs) stats=data.batch_stats() data.normalize(stats) # + id="irklUeA6OHev" colab_type="code" colab={} bs=16 def get_data(sz): tfms = get_transforms() data=ImageDataBunch.from_csv(path,folder='images',csv_labels='labels.csv',label_delim=',',valid_pct=0.2, ds_tfms=tfms,test='test',bs=bs,size=sz) return data # + id="Ffad37e0P8kW" colab_type="code" colab={} data_64 = get_data(64) # + id="l1_PsrEmQ6go" colab_type="code" colab={} data_64.classes # + id="sWTj2HlyRA3R" colab_type="code" colab={} data_64.show_batch(rows=2, figsize=(15,10)) # + id="NsXVuvdeRYYc" colab_type="code" colab={} acc_02 = partial(accuracy_thresh, thresh=0.2) f_score = partial(fbeta, thresh=0.2) learn 
=cnn_learner(data_64, models.densenet169,metrics=[acc_02,f_score]) # + id="g3A4KWStR6ln" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="18614cb3-2df5-4ab9-9576-f88a1922a96d" learn.fit_one_cycle(4) # + id="GI2TFSgbUmEQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="73ff6808-084f-4fda-ee60-29958dc57bec" learn.save('stage-1_64') learn.unfreeze() learn.lr_find() # + id="49hACgGCVX6S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="7a228e80-b737-4688-9fb8-eb4c0870e0e7" learn.recorder.plot(suggestion=True) # + id="Cto921RDW-ti" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="d9cced2a-0a75-42aa-e42e-df69b19bc043" learn.fit_one_cycle(4, slice(1E-05,1E-04)) # + id="dt6aUO1HXKbm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="b569cdc9-5a6c-4a4c-9ec2-b6506a122f24" learn.recorder.plot_losses() # + id="XWZojuisXUSX" colab_type="code" colab={} learn.save('stage-2_64') # + id="-yxKJPsqkmoS" colab_type="code" colab={} # + id="-8EEF1AuXnh-" colab_type="code" colab={} learn.data=get_data(128) # + id="WEO9YE4Of6f9" colab_type="code" colab={} data.show_batch(rows=2, figsize=(15,10)) # + id="AAvFUoyNaMtQ" colab_type="code" colab={} learn.load('stage-2_64') learn.freeze() # + id="0eRRCuUbmCS0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="461f9db5-9289-4b45-e595-d1a6cee34596" learn.lr_find() learn.recorder.plot() # + id="udU618V7pI3K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="7c839eb2-be77-490b-de0b-5aa9415ffc17" learn.fit_one_cycle(4, slice(1E-03,1E-02)) # + id="Mo_kVii2pnWg" colab_type="code" colab={} learn.save('stage-1-128p') # + id="MoYNbDh8qrK1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e447df8f-5abb-4578-ad69-e544df2e7f5e" learn.unfreeze() 
learn.lr_find() # + id="-FIIyXUvq2oI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="1b6e5094-a041-411c-9582-ce41ffa14ee0" learn.recorder.plot() # + id="Av8RtImwgiy4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="8ae07980-dc6b-4e8e-adec-40aa53473d12" learn.fit_one_cycle(4,slice(1E-04,1E-03)) # + id="KnVNVp90rk5W" colab_type="code" colab={} learn.save('stage-2-128p') learn.data=get_data(196) learn.load('stage-2-128p') learn.freeze() # + id="EFOpYfXxsO6q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="8a62bcf5-b7a5-4408-be3e-98a982a2dd7e" learn.lr_find() learn.recorder.plot(suggestion=True) # + id="-t_t1CmGrsdl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="0104cc19-4173-4ac7-8e7b-2850162ef609" lr=1e-4/2 learn.fit_one_cycle(5, slice(lr)) # + id="lD5rs38Ws9IE" colab_type="code" colab={} learn.save('stage-1-196p') # + id="MM2-hLvftAfl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="d341a50a-c376-44b6-afd2-41659555df85" learn.unfreeze() learn.lr_find() learn.recorder.plot(suggestion=True) # + id="BX-O3wnUuNiz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="4cce9253-6900-4c55-b205-7cac11bbb25d" learn.fit_one_cycle(5, slice(1e-4, 1e-3)) # + id="uZ45McdzuaTM" colab_type="code" colab={} learn.save('stage-2-196p') # + id="8-itSsfEubLy" colab_type="code" colab={} learn.data=get_data(224) learn.load('stage-2-196p') learn.freeze() # + id="M_7i9hpBWC94" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="b6806e55-041f-4bdf-93fe-d14af6d31d2a" learn.lr_find() learn.recorder.plot(suggestion=True) # + id="H0zCe8Qeajo2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="1b0b96fa-b1fc-4a81-b0c1-fcfd4e905cae" learn.fit_one_cycle(5,slice(1e-4,1e-3)) # + 
id="wcgwQKEDwQVH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="3622adef-ed9c-4cde-d85e-347139d0fe6e" learn.save('stage-1-224p') learn.unfreeze() learn.lr_find() learn.recorder.plot(suggestion=True) # + id="JzmJIYCywbwu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 288} outputId="d430747a-5b28-48a6-d03d-e14332645905" learn.fit_one_cycle(8,slice(1e-4,1e-3)) # + id="Ip37u17wGDLi" colab_type="code" colab={} learn.save('stage-2-224p') # + id="1p5Wv48BGo87" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="0100cd37-a3d0-41f4-87e8-9d2d6ce702ae" learn.recorder.plot_losses() # + id="GJ1kLbUYGs2e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 22765} outputId="be36f0dd-2963-42b0-d02d-37c5e463b48d" learn.load('stage-2-224p') # + id="c1Cd2Ww1yRSl" colab_type="code" colab={} interp = ClassificationInterpretation.from_learner(learn) # + id="OudaMhCpyYoK" colab_type="code" colab={} preds,_ = learn.get_preds(ds_type=DatasetType.Test) labels = np.argmax(preds, 1) test_predictions_direct = [data.classes[int(x)] for x in labels] # + id="zL3268axw8O2" colab_type="code" colab={} test_predictions_direct # + id="gQ3GO6LXAbdy" colab_type="code" colab={} fnames = [f.name for f in learn.data.test_ds.items] # + id="wk4y6LyPAfo8" colab_type="code" colab={} df = pd.DataFrame({'image':fnames, 'category':test_predictions_direct}, columns=['image', 'category']) # + id="B3bHA2uNA7TX" colab_type="code" colab={} df.head() # + id="2krfOqScBICs" colab_type="code" colab={} df.to_csv("data_fastai_30.csv",index=False) from google.colab import files files.download('data_fastai_30.csv')
Ship_Jun9_FastAI_30.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     # NOTE: header previously declared Python 2, but the code below uses
#     # f-strings and variable annotations, which require Python 3.
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import settings

# +
# Load features
features: pd.DataFrame = pd.read_csv(settings.DATA_PATH_RADIOMIC_PROCESSED)
# print(features.mean(axis=1))
# print(features.std(axis=1))

# +
def feature_plot(features, name):
    """Draw a per-feature boxplot (outliers suppressed) and save it as a PNG.

    Parameters
    ----------
    features : array-like / DataFrame
        Rows are features; one box is drawn per row.
    name : str
        Suffix for the output file ``outputs/features_<name>.png``.
    """
    # FIX: savefig raised FileNotFoundError when outputs/ did not exist yet.
    os.makedirs("outputs", exist_ok=True)
    plt.boxplot(features, sym='')
    plt.xlabel("Feature number")
    plt.ylabel("Values")
    plt.gca().yaxis.set_label_coords(-0.1, .5)
    # fig = plt.gcf()
    # fig.set_size_inches(8, 5)
    # plt.gca().get_yaxis().get_offset_text().set_position((0, 0))
    # Hide the scientific-notation offset text so the axis stays clean.
    plt.gca().get_yaxis().get_offset_text().set_visible(False)
    plt.savefig(f"outputs/features_{name}.png", dpi=300, bbox_inches="tight")
    plt.show()

# Plot the first 15 features raw, then z-score normalized (per feature row).
plt_features = features.iloc[:15]
mean = plt_features.mean(axis=1)
std = plt_features.std(axis=1)
features2 = plt_features.sub(mean, axis=0)
features2 = features2.div(std, axis=0)

feature_plot(plt_features, "original")
feature_plot(features2, "normalized")
# -

# Compare the raw feature file against its all-zero-column-free subset to see
# how many columns carry no information at all.
features_original: pd.DataFrame = pd.read_csv(settings.DATA_PATH_RADIOMIC, index_col=0)
print(len(features_original.columns))
df = features_original
test = df.loc[:, (df != 0).any()]
print(len(test.columns))
Jupyter/radiomic_features.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Read in the character dialogue file and look at its columns.
# Adapted from:
#https://towardsdatascience.com/multi-class-text-classification-with-scikit-learn-12f1e60e0a9f
import pandas as pd

df = pd.read_csv('zootopia_dialogue.csv', encoding="ISO-8859-1")
df.head()

# Keep only the two columns we need: speaker and line.
from io import StringIO

col = ['current_character', 'current_dialogue']
df = df[col]
df.head()

# Drop null rows and strip stray leading/trailing whitespace.
df = df[pd.notnull(df["current_character"])]
df = df[pd.notnull(df["current_dialogue"])]
df["current_character"] = df["current_character"].str.strip()
df["current_dialogue"] = df["current_dialogue"].str.strip()
df.head()

# Factorize the target column: each character gets an integer category id
# in order of first appearance.
df['category_id'] = df['current_character'].factorize()[0]
df.head(10)

# Lookup table mapping each character to its category id.
category_id_df = df[['current_character', 'category_id']].drop_duplicates().sort_values('category_id')
category_id_df.head()

# Dictionaries for translating in both directions.
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'current_character']].values)
category_to_id
id_to_category

# Bar chart: number of dialogue lines per character.
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(8, 6))
df.groupby('current_character').current_dialogue.count().plot.bar(ylim=0)
plt.show()

# Tf-idf features over unigrams and bigrams of the dialogue text.
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2',
                        encoding='latin-1', ngram_range=(1, 2),
                        stop_words='english')
features = tfidf.fit_transform(df.current_dialogue).toarray()
labels = df.category_id
features.shape

# Most correlated unigrams/bigrams per character via a chi-squared test.
# NOTE(review): `get_feature_names()` was removed in sklearn >= 1.2; newer
# versions need `get_feature_names_out()` — confirm the target sklearn version.
from sklearn.feature_selection import chi2
import numpy as np

N = 2
for current_character, category_id in sorted(category_to_id.items()):
    features_chi2 = chi2(features, labels == category_id)
    indices = np.argsort(features_chi2[0])
    feature_names = np.array(tfidf.get_feature_names())[indices]
    unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
    bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
    print("# '{}':".format(current_character))
    print(" . Most correlated unigrams:\n. {}".format('\n. '.join(unigrams[-N:])))
    print(" . Most correlated bigrams:\n. {}".format('\n. '.join(bigrams[-N:])))

# A quick naive-Bayes baseline: raw counts -> tf-idf -> MultinomialNB.
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB

X_train, X_test, y_train, y_test = train_test_split(df['current_dialogue'], df['current_character'], random_state=0)
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(X_train)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
clf = MultinomialNB().fit(X_train_tfidf, y_train)

# Compare several classifiers with 5-fold cross-validation on the tf-idf features.
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score

models = [
    RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
    LinearSVC(),
    MultinomialNB(),
    LogisticRegression(random_state=0),
]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
    model_name = model.__class__.__name__
    accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
    for fold_idx, accuracy in enumerate(accuracies):
        entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])

# Visualize per-model accuracy across folds.
import seaborn as sns

sns.boxplot(x='model_name', y='accuracy', data=cv_df)
sns.stripplot(x='model_name', y='accuracy', data=cv_df,
              size=8, jitter=True, edgecolor="gray", linewidth=2)
plt.show()

cv_df.groupby('model_name').accuracy.mean()

# Fit the best model (LinearSVC) on a train/test split and inspect its
# confusion matrix.
model = LinearSVC()
X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size=0.33, random_state=0)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

from sklearn.metrics import confusion_matrix

conf_mat = confusion_matrix(y_test, y_pred)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(conf_mat, annot=True, fmt='d',
            xticklabels=category_id_df.current_character.values,
            yticklabels=category_id_df.current_character.values)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()

# Show character pairs that are frequently confused (>= 10 misclassifications).
from IPython.display import display

for predicted in category_id_df.category_id:
    for actual in category_id_df.category_id:
        if predicted != actual and conf_mat[actual, predicted] >= 10:
            print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
            display(df.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['current_character', 'current_dialogue']])
            print('')

from sklearn import metrics

print(metrics.classification_report(y_test, y_pred, target_names=df['current_character'].unique()))
Python/Zootopia character predictor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import torch
import torch.nn as nn
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from IPython.display import clear_output

# ## MNIST dataset + dataloader

# +
root = './data'
trans = transforms.Compose([transforms.ToTensor()])
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)

train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=32, shuffle=False)
# -

# ## plot some random examples:

# +
fig, ax = plt.subplots(3, 3, figsize=(6, 6))
for axlist in ax:
    for ax_i in axlist:
        idx = np.random.randint(len(train_set))
        img = train_set[idx][0]
        ax_i.imshow(img.data.numpy().reshape(28, 28), cmap='gray')
plt.tight_layout()
plt.show()
# -

# ## first lets create a regular autoencoder (not variational)

# +
latent_dim = 2


class Encoder(nn.Module):
    """Maps a flattened 28x28 image (784 values) down to `latent_dim` values."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc3 = nn.Linear(400, latent_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.fc1(x))
        out = self.relu(self.fc2(out))
        return self.fc3(out)


class Decoder(nn.Module):
    """Maps a latent vector back up to a flattened 784-pixel image."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.fc1 = nn.Linear(latent_dim, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc3 = nn.Linear(400, 784)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.fc1(x))
        out = self.relu(self.fc2(out))
        out = self.fc3(out)
        return out


class AutoEncoder(nn.Module):
    """Plain (non-variational) autoencoder: encode, then decode."""

    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.enc = Encoder()
        self.dec = Decoder()

    def forward(self, x):
        z = self.enc(x.view(-1, 784))
        out = self.dec(z)
        return out
# -

# +
net = AutoEncoder()
loss_func = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
if torch.cuda.is_available():
    net.cuda()
net.train()
for epoch in range(10):
    print(epoch)
    for x, _ in tqdm(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            x = x.cuda()
        prediction = net(x)
        mse_loss = loss_func(prediction, x.view(-1, 784))  # <-- note that i'm using the input as the target
        mse_loss.backward()
        optimizer.step()
    clear_output(wait=True)
# -

# +
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
net.cpu()
net.eval()
img = train_set[485][0]
decoded = net(img).data.numpy().reshape(28, 28)
ax[0].imshow(img.data.numpy().reshape(28, 28), cmap='gray', vmin=0, vmax=1)
ax[1].imshow(decoded, cmap='gray', vmin=0, vmax=1)
ax[0].set_title('input')
ax[1].set_title('reconstructed')
plt.show()
# -

# ## the image is kind of blurry, which is a common feature of autoencoders
# ## so lets introduce something call perceptual loss
#
# the basic idea - use another neural network as a loss function
#
# given network that is trained on the data to do some task (like classification)
# the first layers are "feature extractors"
# pass both the original image, and the reconstructed image, through the same layers,
# and ask that the autoencoder minize the distance between the activations
#
# https://arxiv.org/pdf/1610.00291.pdf
# https://arxiv.org/abs/1603.08155

# ## step 1, train a classifier (if you don't have one ready)


class Classifier(nn.Module):
    """Small conv net for MNIST digits; its conv activations feed the perceptual loss."""

    def __init__(self):
        super(Classifier, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, 3)
        self.conv2 = nn.Conv2d(64, 30, 3)
        self.conv3 = nn.Conv2d(30, 20, 3)
        self.conv4 = nn.Conv2d(20, 10, 3)
        self.maxpool = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(10 * 4 * 4, 50)
        self.fc2 = nn.Linear(50, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.relu(self.conv3(out))
        out = self.maxpool(out)
        out = self.relu(self.conv4(out))
        out = self.maxpool(out)
        out = self.relu(self.fc1(out.view(-1, 10 * 4 * 4)))
        out = self.fc2(out)
        return out


classifier = Classifier()

# +
# use the training loop below, or load the model weights
# classifier.load_state_dict(torch.load('classifier.pt',map_location=torch.device('cpu')))
# -

# +
if torch.cuda.is_available():
    classifier.cuda()
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier.parameters(), lr=1e-3)
for epoch in range(5):
    for x, y in tqdm(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            x = x.cuda()
            y = y.cuda()
        prediction = classifier(x)
        loss = loss_func(prediction, y)  # <-- now we are training against our digit label as usual
        loss.backward()
        optimizer.step()
    clear_output(wait=True)
# -

# +
def compute_accuracy(data_loader, net):
    """Fraction of correctly classified examples over a whole data loader (CPU)."""
    net.eval()
    net.cpu()
    total_number = 0
    total_correct = 0
    for x, y in data_loader:
        prediction = net(x).data.numpy()
        prediction = np.argmax(prediction, axis=1)
        correct = len(np.where(prediction == y.data.numpy())[0])
        total_correct += correct
        total_number += x.shape[0]
    return total_correct / float(total_number)


compute_accuracy(test_loader, classifier)
# -

# ## step 2, build a nn.Module that extracts the activations from our original image
# ## and reconstructed image, and compares them with L1 loss


class PerceptualLoss(nn.Module):
    """L1 distance between the classifier's conv activations for target vs reconstruction."""

    def __init__(self):
        super(PerceptualLoss, self).__init__()
        # Reuse the (already trained) classifier's conv layers as feature extractors.
        self.activ1 = classifier.conv1
        self.activ2 = classifier.conv2
        self.activ3 = classifier.conv3
        self.activ4 = classifier.conv4
        self.maxpool = nn.MaxPool2d(2)
        self.relu = nn.ReLU()

    def get_activ(self, x):
        activ1 = self.activ1(x)
        out = self.relu(activ1)
        activ2 = self.activ2(out)
        out = self.relu(activ2)
        activ3 = self.activ3(out)
        out = self.relu(activ3)
        out = self.maxpool(out)
        activ4 = self.activ4(out)
        return activ1, activ2, activ3, activ4

    def forward(self, y, yhat):
        a1, a2, a3, a4 = self.get_activ(y)
        ap1, ap2, ap3, ap4 = self.get_activ(yhat)
        return torch.nn.functional.l1_loss(a1, ap1) + torch.nn.functional.l1_loss(a2, ap2) +\
            torch.nn.functional.l1_loss(a3, ap3) + torch.nn.functional.l1_loss(a4, ap4)


net2 = AutoEncoder()

# +
# net2.load_state_dict(torch.load('AE_with_perceptual_loss.pt',map_location=torch.device('cpu')))
# -

# +
net2 = AutoEncoder()
loss_func = nn.MSELoss()
perp_loss = PerceptualLoss()
optimizer = optim.Adam(net2.parameters(), lr=1e-4)
if torch.cuda.is_available():
    net2.cuda()
    perp_loss.cuda()
net2.train()
for epoch in range(10):
    print(epoch)
    for x, _ in tqdm(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            x = x.cuda()
        prediction = net2(x)
        mse_loss = loss_func(prediction, x.view(-1, 784))  # <-- note that i'm using the input as the target
        p_loss = perp_loss(prediction.view(-1, 1, 28, 28), x)
        loss = mse_loss + p_loss
        loss.backward()
        optimizer.step()
    clear_output(wait=True)
# -

# +
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
net.cpu()
net.eval()
net2.cpu()
net2.eval()
img = train_set[123][0]
decoded = net(img).data.numpy().reshape(28, 28)
decoded2 = net2(img).data.numpy().reshape(28, 28)
ax[0].imshow(img.data.numpy().reshape(28, 28), cmap='gray', vmin=0, vmax=1)
ax[1].imshow(decoded, cmap='gray', vmin=0, vmax=1)
ax[2].imshow(decoded2, cmap='gray', vmin=0, vmax=1)
ax[0].set_title('input')
ax[1].set_title('MSE')
ax[2].set_title('MSE+Percep')
plt.show()
# -

# ## Now we will add randomness to the AutoEncoder - making it "variational"
#
# ## how do we create "randomness" that we can take the derivative of?
#
# have the model predict the mean and std of a normal distribution - then sample from it
# by generating random numbers from
#
# eps ~ N(0,1),
#
# and using
#
# z = $\mu$ + $\sigma$ * eps
#
# more info here:
# https://pytorch.org/docs/stable/distributions.html

# +
## this is the function that generates random numbers from N(0,1)
## it matches the size of the input you give it
torch.randn_like(torch.rand(5, 3))
# -

# +
latent_dim = 2


class Encoder(nn.Module):
    """Encoder for the VAE: outputs (mu, logvar) of the latent Gaussian."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc21 = nn.Linear(400, latent_dim)
        self.fc22 = nn.Linear(400, latent_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.fc1(x))
        out = self.relu(self.fc2(out))
        return self.fc21(out), self.fc22(out)


class Decoder(nn.Module):
    """Decoder for the VAE: outputs (mu, logvar) of the reconstructed pixels."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.fc1 = nn.Linear(latent_dim, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc3 = nn.Linear(400, 784)
        self.fc4 = nn.Linear(400, 784)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.fc1(x))
        out = self.relu(self.fc2(out))
        return self.fc3(out), self.fc4(out)


class VAE(nn.Module):
    """Variational autoencoder using the reparameterization trick."""

    def __init__(self):
        super(VAE, self).__init__()
        self.enc = Encoder()
        self.dec = Decoder()

    def pick_random(self, mu, logvar):
        # Reparameterization: z = mu + sigma * eps, eps ~ N(0, 1).
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(self, x):
        mu, logvar = self.enc(x.view(-1, 784))
        z = self.pick_random(mu, logvar)
        mu_x, logvar_x = self.dec(z)
        x_reco = self.pick_random(mu_x, logvar_x)
        return x_reco, mu, logvar
# -

vae = VAE()

# ## let's look at the distribution of encoding values for the untrained model
# ## on the test dataset

# +
encoding = {}
encoding_std = {}
n_test = len(test_set)
for test_i in tqdm(range(n_test)):
    x, y = test_set[test_i]
    if y not in encoding:
        encoding[y] = []
        encoding_std[y] = []
    enc_mean, enc_std = vae.enc(x.view(-1, 784))
    encoding[y].append(enc_mean.data.numpy()[0])
    encoding_std[y].append(np.exp(0.5 * enc_std.data.numpy()[0]))
for key in encoding:
    encoding[key] = np.array(encoding[key])
    encoding_std[key] = np.array(encoding_std[key])
# -

# +
from matplotlib.patches import Ellipse

fig, ax = plt.subplots(figsize=(8, 8))
for key in range(10):
    sct = ax.scatter(encoding[key][:, 0], encoding[key][:, 1], label=str(key), s=1.5)
    for p in zip(encoding[key], encoding_std[key]):
        el = Ellipse(xy=p[0], width=p[1][0], height=p[1][1], angle=0)
        el.set_facecolor(sct.get_facecolor()[0])
        el.set_alpha(0.2)
        ax.add_artist(el)
lgnd = ax.legend(fontsize=20)
for i in range(10):
    lgnd.legendHandles[i]._sizes = [250]
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
plt.show()
# -

# ## Now we are adding the KL divergence to our loss function,
# ## because we want the encoding Z to be close to N(0,1)

# +
mse_l = nn.MSELoss(reduction='sum')


def loss_function(recon_x, x, mu, logvar):
    """Return (reconstruction MSE, KL divergence to the unit Gaussian)."""
    MSE = mse_l(recon_x, x.view(-1, 784))
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return MSE, KLD
# -

vae.load_state_dict(torch.load('VAE.pt', map_location=torch.device('cpu')))

# +
vae = VAE()
perp_loss = PerceptualLoss()
optimizer = optim.Adam(vae.parameters(), lr=1e-3)
if torch.cuda.is_available():
    vae.cuda()
    perp_loss.cuda()
vae.train()
for epoch in range(10):
    print(epoch)
    for x, y in tqdm(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            x = x.cuda()
        prediction, mu, logvar = vae(x)
        mse, kld = loss_function(prediction, x, mu, logvar)
        p_loss = perp_loss(prediction.view(-1, 1, 28, 28), x)
        loss = mse + 3 * kld + 300 * p_loss
        loss.backward()
        optimizer.step()
    clear_output(wait=True)
# -

# +
vae.cpu()
vae.eval()
encoding = {}
encoding_std = {}
n_test = len(test_set)
for test_i in tqdm(range(n_test)):
    x, y = test_set[test_i]
    if y not in encoding:
        encoding[y] = []
        encoding_std[y] = []
    enc_mean, enc_std = vae.enc(x.view(-1, 784))
    encoding[y].append(enc_mean.data.numpy()[0])
    encoding_std[y].append(np.exp(0.5 * enc_std.data.numpy()[0]))
for key in encoding:
    encoding[key] = np.array(encoding[key])
    encoding_std[key] = np.array(encoding_std[key])
# -

# +
from matplotlib.patches import Ellipse

fig, ax = plt.subplots(figsize=(6, 6), dpi=150)
for key in range(10):
    sct = ax.scatter(encoding[key][:, 0], encoding[key][:, 1], label=str(key), s=1.5)
    for p in zip(encoding[key], encoding_std[key]):
        el = Ellipse(xy=p[0], width=p[1][0], height=p[1][1], angle=0)
        el.set_facecolor(sct.get_facecolor()[0])
        el.set_alpha(0.2)
        ax.add_artist(el)
lgnd = ax.legend(fontsize=12)
for i in range(10):
    lgnd.legendHandles[i]._sizes = [100]
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
plt.show()
# -

# ## we don't need an input image anymore, we can use any point in the latent space
# ## to generate an image

from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from ipywidgets import FloatSlider

# +
def generate_img(x, y):
    """Decode latent point (x, y) and show mean image, std image, and a sample."""
    fig, ax = plt.subplots(1, 3, figsize=(8, 4))
    img_mean, img_std = vae.dec(torch.FloatTensor([x, y]))
    img_std = torch.exp(0.5 * img_std)
    reco_img = img_mean + img_std * torch.randn_like(img_std)
    ax[0].imshow(img_mean.data.numpy().reshape(28, 28), cmap='gray', vmin=0, vmax=1)
    ax[1].imshow(img_std.data.numpy().reshape(28, 28), cmap='gray')
    ax[2].imshow(reco_img.data.numpy().reshape(28, 28), cmap='gray', vmin=0, vmax=1)
    plt.show()


interactive_plot = interactive(generate_img,
                               x=FloatSlider(min=-4.0, max=4.0, step=0.01, continuous_update=False),
                               y=FloatSlider(min=-4.0, max=4.0, step=0.01, continuous_update=False))
interactive_plot
# -
Tutorial6/VAE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# !ls ../

from pandas import DataFrame, read_csv
import pandas as pd

df = read_csv(r'/Users/seanreed1/AnacondaProjects/scrapy-projects/movie-project/box-office-data/data/final-box-office-data.csv')
df.head()

# Peek at the first rows. `.ix` was deprecated and later removed from pandas;
# `.loc[:10, :]` gives the same label-based, end-inclusive slice on the
# default RangeIndex produced by read_csv.
df.loc[:10, :]

# extract title and release year to feed into OBMD process chain
title_plus_year = df[['Title', 'Release_Year']]
title_plus_year.head()

title_plus_year.to_csv(r'/Users/seanreed1/AnacondaProjects/scrapy-projects/movie-project/box-office-data/data/title_plus_year.csv', index=False)

# check to make sure the file actually saved correctly
title_plus_year2 = read_csv(r'/Users/seanreed1/AnacondaProjects/scrapy-projects/movie-project/box-office-data/data/title_plus_year.csv')

# +
title_plus_year2.head()
# -

title_plus_year2.tail()

# +
# ok, data has been saved. Now, on to the OMDB data downloader
# -
notebooks/The-Numbers-Data-Exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:dev] *
#     language: python
#     name: conda-env-dev-py
# ---

# # Sparse Matrix data structures

# We consider the following simple matrix.

# +
import numpy as np

A = np.array([
    [1, 0, 0, 2, 0],
    [3, 4, 0, 5, 0],
    [6, 0, 7, 8, 9],
    [0, 0, 10, 11, 0],
    [0, 0, 0, 0, 12]
])

print(A)
# -

# In the following we want to give a simple overview of sparse data formats to store
# this matrix. In this example, although we have a number of zero entries, sparse
# matrix formats are not worthwhile, and we use this example mainly for didactical
# purposes.

# ## The COO (Coordinate) format

# We start with the COO format. It is the most simple format. Let us convert the
# matrix into it.

# +
from scipy.sparse import coo_matrix

A_coo = coo_matrix(A)
# -

# The coo format is a very simple format that explicitly stores the row entries. It
# consists of three arrays, the row indices, the column indices and the data entries.
# Let us print those arrays.

print(A_coo.row)
print(A_coo.col)
print(A_coo.data)

# We can easily print out the triplets of row index, column index and associated
# data entry.

list(zip(A_coo.row, A_coo.col, A_coo.data))

# The coo format in Scipy is most frequently used for the generation of sparse
# matrices. The format is very simple and we can use it to easily create sparse
# matrices. We only need to provide the row, column and data arrays to create the
# coo matrix. A major advantage is also that we can repeat indices. In the matrix
# creation all data entries associated with the same matrix entry is just summed up.
# This is a very natural operation and simplifies a number of a situations, where we
# need to create sparse matrices.
#
# However, coo is not a suitable format for typical matrix operations. Also, it is
# not yet optimal in terms of storage requirements.

# ## The CSR (Compressed Sparse Row) Format

# If we look at the printout of the indices above in the coo format we can see that
# there is a lot of repetition in the row indices. We store for each nonzero entry
# the row index even though all row indices within the same row are identical. This
# motivates the idea of the CSR (Compressed Sparse Row) format. Instead of the row
# array we store an array of index pointers that give the starting position of the
# row within the column array. Let us demonstrate how this works.

# We first convert the COO matrix format into the CSR format.

A_csr = A_coo.tocsr()

# Let us now print out the arrays that define the CSR format. We have three arrays.
#
# * A_csr.data - The data array containing the nonzero entries
# * A_csr.indices - The column indices for the nonzero entries
# * A_csr.indptr - Pointers into the column indices to store which indices belong to which row.
#
# The first two are the same as in the COO format. The last one requires explanation.
# For this let us print out the three arrays.

print(A_csr.data)
print(A_csr.indices)
print(A_csr.indptr)

# Comparing the arrays shows that the first two are indeed identical to the
# corresponding arrays for the COO format. The third array tells us where in the
# `indices` array the column indices for the ith row are located, namely we have
# that the column indices for the ith row are located in
#
# ```
# indices[indptr[i] : indptr[i + 1]]
# ```
# Correspondingly the associated data entries are in
#
# ```
# data[indptr[i] : indptr[i + 1]]
# ```
#
# The `indptr` array is always 1 element larger than the number of rows in the
# matrix. The last entry of the `indptr` array is the total number of nonzero
# elements.
#
# There is also a variant of the CSR format that stores elements along columns and
# compresses the column pointers. This is called CSC (Compressed Sparse Column)
# Format. Both CSC and CSR are widely used in software for large sparse matrices.

# ## CSR Matrix-vector products

# The CSR format has a very simple implementation for the matrix-vector product
# that naturally parallelises on multithreaded CPUs. The following code shows an
# example implementation.

# +
import numba


@numba.jit(nopython=True, parallel=True)
def csr_matvec(data, indices, indptr, shape, x):
    """Evaluates the matrix-vector product with a CSR matrix."""
    # Get the rows and columns
    m, n = shape

    y = np.zeros(m, dtype=np.float64)

    # Each row is independent, so the outer loop parallelises safely.
    for row_index in numba.prange(m):
        col_start = indptr[row_index]
        col_end = indptr[row_index + 1]
        for col_index in range(col_start, col_end):
            y[row_index] += data[col_index] * x[indices[col_index]]

    return y
# -

# Let's test this against the Scipy provided implementation of sparse matrix
# multiplications. As test matrix we use the matrix generated with the
# `discretise_poisson` routine.

# +
from scipy.sparse import coo_matrix


def discretise_poisson(N):
    """Generate the matrix and rhs associated with the discrete Poisson operator."""
    nelements = 5 * N**2 - 16 * N + 16

    # Index arrays must hold integers. (The original used np.float64 here,
    # which newer Scipy versions reject for sparse index arrays.)
    row_ind = np.empty(nelements, dtype=np.int64)
    col_ind = np.empty(nelements, dtype=np.int64)
    data = np.empty(nelements, dtype=np.float64)
    f = np.empty(N * N, dtype=np.float64)

    count = 0
    for j in range(N):
        for i in range(N):
            if i == 0 or i == N - 1 or j == 0 or j == N - 1:
                # Boundary node: identity row, zero right-hand side.
                row_ind[count] = col_ind[count] = j * N + i
                data[count] = 1
                f[j * N + i] = 0
                count += 1
            else:
                # Interior node: 5-point stencil.
                row_ind[count : count + 5] = j * N + i
                col_ind[count] = j * N + i
                col_ind[count + 1] = j * N + i + 1
                col_ind[count + 2] = j * N + i - 1
                col_ind[count + 3] = (j + 1) * N + i
                col_ind[count + 4] = (j - 1) * N + i
                data[count] = 4 * (N - 1)**2
                data[count + 1 : count + 5] = - (N - 1)**2
                f[j * N + i] = 1
                count += 5

    return coo_matrix((data, (row_ind, col_ind)), shape=(N**2, N**2)).tocsr(), f
# -

# +
N = 1000

A, _ = discretise_poisson(N)

# Generate a random vector
rand = np.random.RandomState(0)
x = rand.randn(N * N)

y = csr_matvec(A.data, A.indices, A.indptr, A.shape, x)

# Compare with the Scipy sparse matrix multiplication
y_exact = A @ x
rel_error = np.linalg.norm(y - y_exact, np.inf) / np.linalg.norm(y_exact, np.inf)
print(f"Error: {round(rel_error, 2)}.")
# -

# This demonstrates that our implementation is correct. Not only it is correct. It
# also uses multithreading for parallelism. The default Scipy implementation is only
# single-threaded. For many sizes this does not matter. But for very large problems
# this can become a performance bottleneck.
#
# Let us time our implementation against the Scipy one. We have chosen a matrix
# dimension of one million to have a sufficient size for the multithreading to be
# useful.

# Our implementation
# %timeit y = csr_matvec(A.data, A.indices, A.indptr, A.shape, x)

# The default Scipy implementation
# %timeit y = A @ x

# We can see a small improvement against the default Scipy implementation. The
# improvement will be significantly more if we have many more elements per row as is
# the case for most three-dimensional problems or higher-order discretisation
# methods.

# ## Other sparse formats.

# There are a number of sparse matrix formats and Scipy is supporting several of
# them. More information on sparse matrix classes and operations for handling sparse
# matrices can be found at
# [https://docs.scipy.org/doc/scipy/reference/sparse.html](https://docs.scipy.org/doc/scipy/reference/sparse.html).
hpc_lecture_notes/sparse_data_structures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (opencl)
#     language: python
#     name: opencl
# ---

# +
import timeit
import numpy as np
from functools import partial

import matplotlib.pyplot as plt
from skimage.exposure import histogram

import pyclesperanto_prototype as prototype
from pyclesperanto import cle

# Make GPU calls synchronous so the timings measure actual kernel work.
cle.set_wait_for_kernel_to_finish(True)

prototype.select_device()
prototype.set_wait_for_kernel_finish(True)

sizes = [128, 256, 512, 1024, 2048, 4096, 8192]
warmup_iter = 3
eval_iter = 5
# -


def benchmark_function(target_function):
    """
    Time `target_function` on random images of each size in `sizes`.

    The function is warmed up for `warmup_iter` runs (to exclude compilation
    and transfer set-up costs), then timed over `eval_iter` runs. Returns an
    array of [size, total_time_in_seconds] rows.
    """
    timings = []
    for size in sizes:
        # Fresh random input for this size.
        image = np.random.rand(1, size, size)
        print("data size:", image.shape)

        # Freeze the image argument so timeit can call it without arguments.
        bound_call = partial(target_function, image)

        # Warm-up runs; the elapsed time is intentionally discarded.
        _ = timeit.timeit(bound_call, number=warmup_iter)

        # The measured runs.
        elapsed = timeit.timeit(bound_call, number=eval_iter)

        timings.append([size, elapsed])
    return np.asarray(timings)


# # Histogram

# ### Pyclesperanto

def pyclic_function(image):
    cle.histogram(input_image=image)


pyclic_benchmark_data = benchmark_function(pyclic_function)


# ### Prototype

def prototype_function(image):
    prototype.histogram(source=image)


prototype_benchmark_data = benchmark_function(prototype_function)


# ### Skimage

def skimage_function(image):
    h, bc = histogram(image)


skimage_benchmark_data = benchmark_function(skimage_function)

# # Results

# +
plt.scatter(pyclic_benchmark_data[:, 0] ** 2, pyclic_benchmark_data[:, 1])
plt.scatter(prototype_benchmark_data[:, 0] ** 2, prototype_benchmark_data[:, 1])
plt.scatter(skimage_benchmark_data[:, 0] ** 2, skimage_benchmark_data[:, 1])
plt.legend(["pyclic", "prototype", "skimage"])
plt.xlabel("Image size in pixels")
plt.ylabel("Compute time in s")
plt.show()
# -
benchmarking/benchmark_histogram.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.7 64-bit (''xomibm'': conda)'
#     name: python_defaultSpec_1599597913137
# ---

# + [markdown]
# # Quantum Integer Programming (QuIP) 47-779. Fall 2020, CMU
# ## Quiz 2

# + [markdown]
# ### Problem statement
# #### Integer linear program
# Solve the following problem
# $$
# \min_{\mathbf{x}} 2x_0+4x_1+4x_2+4x_3+4x_4+4x_5+5x_6+4x_7+5x_8+6x_9+5x_{10} \\
# s.t. \begin{bmatrix}
# 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 1 & 1\\
# 0 & 1 & 0 & 1 & 0 & 1 & 1 & 0 & 1 & 1 & 1\\
# 0 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 1 & 1
# \end{bmatrix}\mathbf{x}=
# \begin{bmatrix}
# 1\\
# 1\\
# 1
# \end{bmatrix} \\
# \mathbf{x} \in \{0,1 \}^{11}
# $$
# Equivalently written as
#
# $$
# \min_{\mathbf{x}} \mathbf{c}^\top \mathbf{x}\\
# s.t. \mathbf{A}\mathbf{x}=\mathbf{b} \\
# \mathbf{x} \in \{0,1 \}^{11}
# $$

# +
# Import Matplotlib to generate plots
import matplotlib.pyplot as plt

# Import numpy and scipy for certain numerical calculations below
import numpy as np
from scipy.special import gamma
import math

# +
from sympy import *
import networkx as nx

# + [markdown]
# ## BPT Method

# +
# Encode the ILP as a polynomial system: the objective (shifted by z), the
# three equality constraints, and x*(x-1)=0 to force binary variables.
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, z = symbols('x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 z')

eqs = [2*x0+4*x1+4*x2+4*x3+4*x4+4*x5+5*x6+4*x7+5*x8+6*x9+5*x10-z,
       x0+x3+x4+x5+x7+x8+x9+x10-1,
       x1+x3+x5+x6+x8+x9+x10-1,
       x2+x4+x6+x7+x8+x9+x10-1]
eqs = eqs + [x*(x-1) for x in [x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]]

# Lex Groebner basis eliminates the x variables; the last polynomial is in z only.
result = groebner(eqs, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, z, order='lex')
list(result)

# +
# Roots of the elimination polynomial are the achievable objective values;
# the optimum is the smallest one.
zs = solve(result[-1], z)
zstar = min(zs)
print("zs:", zs)
print("zstar:", zstar)

# + [markdown]
# ## CT Method

# +
z1, z2, z3, w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10 = symbols('z1 z2 z3 w0 w1 w2 w3 w4 w5 w6 w7 w8 w9 w10')

# Each column of A becomes a monomial relation z^(A_j) - w_j.
eqs = [z1-w0, z2-w1, z3-w2, z1*z2-w3, z1*z3-w4, z1*z2-w5, z2*z3-w6,
       z1*z3-w7, z1*z2*z3-w8, z1*z2*z3-w9, z1*z2*z3-w10]

# NOTE: the first basis (w0..w10 ordering) is computed and then immediately
# overwritten by the second (w10..w0 ordering) — kept as in the original run.
result = groebner(eqs, z1, z2, z3, w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, order='lex')
result = groebner(eqs, z1, z2, z3, w10, w9, w8, w7, w6, w5, w4, w3, w2, w1, w0, order='lex')
list(result)

# +
# Walk z^b = z1*z2*z3 down through the test-set substitutions to enumerate
# feasible solutions.
r = z1**1*z2**1*z3**1
r = r.subs({(z1, w0), (z2, w1), (z3, w2)})
print(r)
r1 = r.subs({(w0*w1*w2, w10)})
print(r1)
r2 = r.subs({(w0*w1*w2, w9)})
print(r2)
r3 = r.subs({(w0*w1*w2, w8)})
print(r3)
r4 = r.subs({(w0*w2, w7)})
print(r4)
r5 = r.subs({(w1*w2, w6)})
print(r5)
r6 = r.subs({(w0*w1, w5)})
print(r6)
r7 = r.subs({(w0*w2, w4)})
print(r7)
r8 = r.subs({(w0*w1, w3)})
print(r8)

# + [markdown]
# This means that
# - $x_{0}=1,x_{1}=1,x_{2}=1$ is a feasible solution whose objective function is 10;
#
# - $x_{10}=1$ is a feasible solution whose objective function is 5;
#
# - $x_{9}=1$ is a feasible solution whose objective function is 5;
#
# - $x_{8}=1$ is a feasible solution whose objective function is 5;
#
# - $x_{1}=1,x_{7}=1$ is a feasible solution whose objective function is 8;
#
# - $x_{0}=1,x_{6}=1$ is a feasible solution whose objective function is 7;
#
# - $x_{2}=1,x_{5}=1$ is a feasible solution whose objective function is 8;
#
# - $x_{1}=1,x_{4}=1$ is a feasible solution whose objective function is 8;
#
# - $x_{2}=1,x_{3}=1$ is a feasible solution whose objective function is 8.
#
# In conclusion, the best solution is 5.

# +
notebooks/Notebook Quiz 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 (tensorflow) # language: python # name: tensorflow # --- # + # import prereq. from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import pandas as pd import tensorflow as tf from sklearn import preprocessing from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler # used for callback # + X = pd.read_csv('X.csv') Y = pd.read_csv('Y.csv') X_c = pd.read_csv('X_c.csv') Y_c = pd.read_csv('Y_c.csv') X_t = pd.read_csv('X_t.csv') Y_t = pd.read_csv('Y_t.csv') # + batch_size = 128 num_classes = 10 epochs = 12 # input image dimensions img_rows, img_cols = 28, 28 # + # convert to numpy np.append(a, [[7,8,9]],axis = 0) data_type = np.dtype(np.float64) # define datatype x = np.asarray(X.iloc[:,1:].to_numpy(), dtype=data_type) y = np.asarray(Y.iloc[:,1].to_numpy(), dtype=data_type) x_t = np.asarray(X_t.iloc[:,1:].to_numpy(), dtype=data_type) y_t = np.asarray(Y_t.iloc[:,1].to_numpy(), dtype=data_type) x_c = np.asarray(X_c.iloc[:,1:].to_numpy(), dtype=data_type) y_c = np.asarray(Y_c.iloc[:,1].to_numpy(), dtype=data_type) # clean some memory X = X_c = X_t = Y = Y_c = Y_t = None # - x_train = x.reshape(x.shape[0], img_rows, img_cols, 1) x_cross = x_c.reshape(x_c.shape[0], img_rows, img_cols, 1) x_test = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols,1) x_train = x_train.astype('float32') x_cross = x_cross.astype('float32') x_test = x_test.astype('float32') y_train = keras.utils.to_categorical(y, num_classes) y_cross = keras.utils.to_categorical(y_c, num_classes) y_test = keras.utils.to_categorical(y_t, num_classes) y_test.shape # + # arhitect the model # CGG - LITE 28x28 MAS model = tf.keras.Sequential([ # layers.Conv2D(8, kernel_size=(7, 
7), # padding='same', # activation='relu', # input_shape=input_shape), # layers.Dropout(0.7), # # layers.Dropout(0.5), # # layers.MaxPooling2D(pool_size=(2, 2)), # # layers.Conv2D(16, padding='same', kernel_size=(14, 14), activation='relu'), # # layers.Dropout(0.7), # # layers.MaxPooling2D(pool_size=(2, 2)), # # layers.MaxPooling2D(pool_size=(2, 2)), # # layers.Conv2D(16, padding='same',kernel_size=(1, 3), activation='relu'), # # layers.Dropout(0.45), # layers.Flatten(), # # layers.Dense(1024, activation='relu'),#1024 # # layers.Dropout(0.5), # layers.Dense(3072, activation='relu'),#1024 # layers.Dropout(0.6), # layers.Dense(3072, activation='relu'),#1024 # layers.Dropout(0.6), # layers.Dense(400, activation='relu'),#1024 # layers.Dropout(0.55), # layers.Dense(100, activation='relu'),#1024 # layers.Dropout(0.5), # layers.Dense(10, activation='softmax')]) -- best layers.Conv2D(10, kernel_size=(7, 7), padding='same', activation='relu', input_shape=input_shape), layers.Dropout(0.65), # layers.Dropout(0.5), # layers.MaxPooling2D(pool_size=(2, 2)), # layers.MaxPooling2D(pool_size=(2, 2)), # layers.MaxPooling2D(pool_size=(2, 2)), # layers.Conv2D(16, padding='same',kernel_size=(1, 3), activation='relu'), # layers.Dropout(0.45), layers.Flatten(), # layers.Dense(1024, activation='relu'),#1024 # layers.Dropout(0.5), layers.Dense(3072, activation='relu'),#1024 layers.Dropout(0.62), layers.Dense(3072, activation='relu'),#1024 layers.Dropout(0.62), layers.Dense(3072, activation='relu'),#1024 layers.Dropout(0.62), layers.Dense(400, activation='relu'),#1024 layers.Dropout(0.52), layers.Dense(100, activation='relu'),#1024 layers.Dropout(0.52), layers.Dense(10, activation='softmax')]) # max 90 acc # 69300/69300 [==============================] - 13s 181us/sample - loss: 0.0626 - accuracy: 0.9767 - val_loss: 0.4450 - val_accuracy: 0.9143 # Out[48]: # <tensorflow.python.keras.callbacks.History at 0x1349aba3a48> # + def step_decay(epoch): initial_lrate = 0.001 k = 1 return k / 
np.sqrt(epoch+1) * initial_lrate lrate = LearningRateScheduler(step_decay) # + model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy']) callbacks = [EarlyStopping(monitor='val_accuracy',verbose=1,min_delta=0.00000001, patience=90), lrate] # - model.fit(x_train,y_train, epochs=100, batch_size=100, shuffle=True, verbose=1, validation_data=(x_cross, y_cross), callbacks=callbacks) model.evaluate(x_test, y_test) model.save('97_935_93.h5')
interative_model_dent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np import asteroid_family # # Asteroid Family Package -- Functions: # # - asteroid_family.homogeneus_family: create a homogeneus asteroid family # # - asteroid_family.differentiated_family: create a homogeneus asteroid family # # - asteroid_family.mass_distribution: create a mass distribution normalized by the total mass # # - asteroid_family.min_Vej: minimum ejection velocity # # - asteroid_family.mean_vej: maximum ejection velocity # # - asteroid_family.mean_vej_distribution: # # - asteroid_family.velocity_field # # - asteroid_family.catastrophic_energy: obtain the catactrophic energy based on a model from Stewart and Leinhardt(2009). # # - asteroid_family.yarkovsky_dadt: obntain the semi-axis varition in time from the Yarksovskt Effect in function of the physical parameters of the asteroid # # - asteroid_family.yakovsky_change_unit: function to change the units of dat/dt from asteroid_family.yarkovsky_dadt # # - asteroid_family.mag_absoluta: obtain the absolute magnitude, H, for a set of asteroids, given a geometric albedo and diameter. Based on Parker et al.(2008). # ## Including information for the parental body }and its location on asteroid min belt # + #Parental data rpb = 20 #km rho_mantle = 3. rho_core = 7. 
# Geometric albedos, impact velocity and inelasticity for the parent body.
pv_mantle = 0.4
pv_core = 0.15
Vi = 5  # km/s impact velocity
fke = 0.01  # inelastic
# -

# location in the Main Belt --> same as 298 Baptistina
M = 280.1642437983165  # degrees
peri = 135.1543076468920  # degrees
node = 8.253490030039535  # degrees
i = 6.28752726137564  # degrees
e = 0.09612545504215386
a = 2.26362004764974  # AU
period = 3.41  # year
ano = 2013
mes = 11
dia = 4
da0 = 0

# ## Creating the synthetic differentiated asteroid family

help(asteroid_family.differentiated_family)

mass, vej, rho, radius, mag = asteroid_family.differentiated_family('H', rpb, rho_mantle, rho_core,
                                                                    Vi, fke, 0.5, pv_core, pv_mantle, 'YES')

# ## Obtain the Initial velocity field

VT, VR, VW, A, E, I, dA, dE, dI = asteroid_family.gauss_equations(vej, a, e, i, period, 'YES')

# ## Obtain the semi-axis variation in time from the Yarkovsky effect

# yarkovsky_dadt takes diameters (2 * radius) and random obliquities in degrees.
dadt = asteroid_family.yarkovsky_dadt(2 * radius, np.random.uniform(0, np.pi, len(mass)) * 180. / np.pi)

# ## Saving in a dataframe and .csv file using Pandas Package

# BUG FIX: the original used the undefined names `massa` and `raio`
# (Portuguese for mass/radius) and raised a NameError; the variables
# returned by differentiated_family above are `mass` and `radius`.
data = pd.DataFrame([mass, vej, rho, radius, mag, VT, VR, VW, A, E, I, dA, dE, dI, dadt]).T
data.columns = ['mass', 'vej', 'density', 'radius', 'H', 'VT', 'VR', 'VW', 'A', 'E',
                'I', 'dA', 'dE', 'dI', 'dadt']

data

# Scatter da/dt vs radius, split into mantle (density 3) and core (density 7)
# fragments; legend shows the fragment counts.
plt.figure(figsize=(9, 6))
# plt.grid(color='white')
plt.scatter(data.radius[data.density == 3], data.dadt[data.density == 3],
            color='blue', label='mantle = ' + str(len(data.E[data.density == 3])))
plt.scatter(data.radius[data.density == 7], data.dadt[data.density == 7],
            color='red', label='core = ' + str(len(data.E[data.density == 7])))
plt.legend()
plt.xlabel('radius [km]', fontsize=20)
plt.ylabel('da/dt [AU/Myr]', fontsize=20)
plt.ylim(min(data.dadt), max(data.dadt))

data.to_csv('synthetic_differentiated_family.csv')
example/Asteroid Family Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: pytesseract_venv
#     language: python
#     name: pytesseract_venv
# ---

import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
from PIL import Image

# ***Note that the first few cells have processing of image required for the algorithm***
# The algorithm is designed after referring to a few research papers and open source contributions

image = cv2.imread('images/90.JPEG')
image_copy = np.copy(image)
i=0

# Image
plt.imshow(image)

# Convert into grayscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray_copy = np.copy(gray)

# Processing of image: blur, vertical Sobel edges, Otsu binarization, then
# a morphological close with a wide rectangle to merge plate-like regions.
gray = cv2.GaussianBlur(gray, (3,3), 0)
edge_im = cv2.Sobel(gray, -1, 1, 0)
h,sobel = cv2.threshold(edge_im,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
se = cv2.getStructuringElement(cv2.MORPH_RECT, (11,3)) #Choice of rectangle size can affect the result
final_gray = cv2.morphologyEx(sobel, cv2.MORPH_CLOSE, se)

# Detecting vertical edges
plt.imshow(edge_im,cmap='gray')

# After erosion and dilation (convolution algorithm) all the rectangles are isolated
plt.imshow(final_gray,cmap='gray')

# Contour selection
# NOTE(review): 3-value unpacking matches the OpenCV 3.x API; OpenCV 4.x
# findContours returns only (contours, hierarchy) -- confirm the installed version.
cnt_image,cnts,_=cv2.findContours(final_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Below is the implementation of the algorithm. The image after being processed as above is fed to the algorithm.

def radian_to_degree(angle):
    # Convert an angle from radians to degrees.
    return angle*180/np.pi

def order_points(pts):
    '''
    Order a set of points into the four corners of the rectangle they form.

    Returns a (4, 2) float32 array ordered top-left, top-right, bottom-right,
    bottom-left: the top-left corner has the smallest x+y sum, the
    bottom-right the largest; the top-right has the smallest y-x difference,
    the bottom-left the largest.  The two right-hand corners are then shifted
    10 px further right so the later crop keeps data near the right edge.
    (An earlier version of this docstring referred to `crop` and `main`
    parameters that no longer exist.)
    '''
    rect = np.zeros((4, 2), dtype = "float32")
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    # Cropping a little bit more towards right
    rect[1][0]+=10
    rect[2][0]+=10
    return rect

def four_point_warp(image, pts):
    '''
    The function finds the corner four points of the image and warps the image accordingly.
    image : The image to be warped.
    pts : The set of points from which corner points are to be selected to warp.
    Returns (rect, warped): the ordered corner points and the
    perspective-corrected crop.
    '''
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # Finding width and height of the image based on corner points
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # The destination where the image needs to be pasted.
    # This is the size of the image where the warped image will be put.
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # Warping the image
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    return np.array(rect),warped

# +
# The below four variables affect the rectangles that are chosen.
# Choosing the right values of these variables determine
# the result of the algorithm
area_range = [200,5000]
aspect_ratio_range = [2.4,9]
angle_thresh = 5
edge_density_threshold = 0.5

for contour in cnts:
    rect = cv2.minAreaRect(contour)
    img_width = gray.shape[1]
    img_height = gray.shape[0]
    area = img_width*img_height
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    X = rect[0][0]
    Y = rect[0][1]
    angle = rect[2]
    width = rect[1][0]
    height = rect[1][1]
    # Normalize the minAreaRect angle depending on box orientation
    angle = (angle + 180) if width < height else (angle + 90)

    # Filter 1: non-degenerate box no wider/taller than half the image width
    if (width > 0 and height > 0) and ((width < img_width/2.0) and (height < img_width/2.0)):
        aspect_ratio = float(width)/height if width > height else float(height)/width
        # Filter 2: plate-like aspect ratio
        if (aspect_ratio >= aspect_ratio_range[0] and aspect_ratio <= aspect_ratio_range[1]):
            # Filter 3: plate-like area
            if((height*width > area_range[0]) and (height*width < area_range[1])):
                # Estimate the tilt from one corner to its nearer adjacent corner
                box_list = list(box)
                random_point = box_list[0]
                del(box_list[0])
                distances = [((point[0]-random_point[0])**2 + (point[1]-random_point[1])**2) for point in box_list]
                sorted_distances = sorted(distances)
                adjacent_far_point = box_list[distances.index(sorted_distances[1])]
                tmp_angle = 90
                if abs(random_point[0]-adjacent_far_point[0]) > 0:
                    tmp_angle = abs(float(random_point[1]-adjacent_far_point[1]))/ \
                                abs(random_point[0]-adjacent_far_point[0])
                    tmp_angle = radian_to_degree(math.atan(tmp_angle))
                # Filter 4: nearly horizontal boxes only
                if tmp_angle <= angle_thresh:
                    rect,warped = four_point_warp(image, box)
                    warped_gray = cv2.cvtColor(warped,cv2.COLOR_BGR2GRAY)
                    h,thresh = cv2.threshold(warped_gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
                    #thresh = cv2.bitwise_not(thresh)
                    # NOTE(review): `white_pixels` feeds only the commented-out
                    # edge-density check below, so this O(H*W) Python loop is
                    # currently dead work (np.count_nonzero would be equivalent);
                    # it also clobbers the outer `i` counter defined at the top.
                    white_pixels=0
                    for i in range(thresh.shape[0]):
                        for j in range(thresh.shape[1]):
                            if thresh[i][j] == 255:
                                white_pixels += 1
                    """ Edge density approach can also be used. Below code represents the algorithm"""
                    # edge_density = float(white_pixels)/(thresh.shape[0]*thresh.shape[1])
                    # print(edge_density)
                    # if edge_density > edge_density_threshold :
                    #     cv2.drawContours(image, [box], 0, (255,0,0),2)
                    #     #cv2.imwrite('plate'+str(i)+'.jpg',warped)
                    #     i=i+1
                    #     plt.imshow(warped,cmap='gray')
                    #     plt.show()

                    """Below algorithm was tried to consider the factor that license plate has characters"""
                    # NOTE(review): the inner `contour` shadows the outer loop
                    # variable; harmless here since the outer value is re-bound
                    # on the next iteration, but easy to trip on when editing.
                    need,contrs,hier=cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                    count = 0
                    for contour in contrs:
                        a = cv2.contourArea(contour)
                        t = warped.shape[0]*warped.shape[1]
                        if a > t/30 and a<t/5:
                            # Choosing the contours with right area here is the vital and changing
                            # the values gives different results
                            count= count+1
                    if count > 0:
                        cv2.drawContours(image, [box], 0, (255,0,0),2)
                        plt.imshow(warped,cmap='gray')
                        plt.show()
# -

plt.imshow(image,cmap='gray')
detect_a_plate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + nbsphinx="hidden"
import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
import os
import sys

# only needed for tutorial, monkey patches visualization
sys.path.append('..')
import open3d_tutorial as o3dtut
# change to True if you want to interact with the visualization windows
o3dtut.interactive = not "CI" in os.environ
# -

# # Surface Reconstruction
#
# In many scenarios we want to generate a dense 3D geometry, i.e., a triangle mesh. However, from a multi-view stereo method, or a depth sensor we only obtain an unstructured point cloud. To get a triangle mesh from this unstructured input we need a surface reconstruction method. In the literature there exist a couple of methods and Open3D currently implements the following:
#
# - Alpha shapes [\[Edelsbrunner1983\]](../reference.html#Edelsbrunner1983)
# - Ball pivoting [\[Bernardini1999\]](../reference.html#Bernardini1999)
# - Poisson [\[Kazhdan2006\]](../reference.html#Kazhdan2006)

# ## Alpha shapes
# The alpha shape [\[Edelsbrunner1983\]](../reference.html#Edelsbrunner1983) is a generalization of a convex hull. As described [here](https://graphics.stanford.edu/courses/cs268-11-spring/handouts/AlphaShapes/as_fisher.pdf) one can intuitively
# think of an alpha shape as the following: Imagine a huge mass of ice-cream containing the points $S$ as hard chocolate pieces. Using one of these sphere-formed ice-cream spoons we carve out all parts of the icecream block we can reach without bumping into chocolate pieces, thereby even carving out holes in the inside (e.g., parts not reachable by simply moving the
# spoon from the outside). We will eventually end up with a (not necessarily convex) object bounded by caps, arcs and points. If we now straighten all round faces to triangles and line segments, we have an intuitive description of what is called the alpha shape of $S$.
#
# Open3D implements the method `create_from_point_cloud_alpha_shape` that involves the tradeoff parameter `alpha`.

mesh = o3dtut.get_bunny_mesh()
pcd = mesh.sample_points_poisson_disk(750)
o3d.visualization.draw_geometries([pcd])
alpha = 0.03
print(f"alpha={alpha:.3f}")
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(
    pcd, alpha)
mesh.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)

# The implementation is based on the convex hull of the point cloud. If we want to compute multiple alpha shapes from a given point cloud, then we can save some computation by only computing the convex hull once and pass it to `create_from_point_cloud_alpha_shape`.

tetra_mesh, pt_map = o3d.geometry.TetraMesh.create_from_point_cloud(pcd)
for alpha in np.logspace(np.log10(0.5), np.log10(0.01), num=4):
    print(f"alpha={alpha:.3f}")
    mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(
        pcd, alpha, tetra_mesh, pt_map)
    mesh.compute_vertex_normals()
    o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)

# ## Ball pivoting
# A method related to alpha shapes for surface reconstruction is the ball pivoting algorithm (BPA) [\[Bernardini1999\]](../reference.html#Bernardini1999). Intuitively, think of a 3D ball with a given radius that we drop on the point cloud. If it hits any 3 points (and it does not fall through those 3 points) it creates a triangle. Then, the algorithm starts pivoting from the edges of the existing triangles and every time it hits 3 points where the ball does not fall through we create another triangle.
#
# Open3D implements this method in `create_from_point_cloud_ball_pivoting`. The method accepts a list of `radii` as parameter that corresponds to the radii of the individual balls that are pivoted on the point cloud.
#
#
# <div class="alert alert-info">
#
# **Note:**
#
# This algorithm assumes that the `PointCloud` has `normals`.
#
# </div>

# +
gt_mesh = o3dtut.get_bunny_mesh()
gt_mesh.compute_vertex_normals()

pcd = gt_mesh.sample_points_poisson_disk(3000)
o3d.visualization.draw_geometries([pcd], point_show_normal=True)

radii = [0.005, 0.01, 0.02, 0.04]
rec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
    pcd, o3d.utility.DoubleVector(radii))
o3d.visualization.draw_geometries([pcd, rec_mesh])
# -

# ## Poisson surface reconstruction
# The surface reconstruction methods produce non-smooth results as the points of the `PointCloud` are also the `vertices` of the triangle mesh without any modification. The Poisson surface reconstruction method [\[Kazhdan2006\]](../reference.html#Kazhdan2006) solves a regularized optimization problem to obtain a smooth surface.
#
# Open3D implements the method `create_from_point_cloud_poisson` that is basically a wrapper of the code of [Kazhdan](https://github.com/mkazhdan/PoissonRecon). An important parameter of the function is `depth` that defines the depth of the octree used for the surface reconstruction and hence, implies the resolution of the resulting triangle mesh. A higher `depth` value means a mesh with more details.
#
# <div class="alert alert-info">
#
# **Note:**
#
# This algorithm assumes that the `PointCloud` has `normals`.
#
# </div>

# +
pcd = o3dtut.get_eagle_pcd()
print(pcd)
o3d.visualization.draw_geometries([pcd],
                                  zoom=0.664,
                                  front=[-0.4761, -0.4698, -0.7434],
                                  lookat=[1.8900, 3.2596, 0.9284],
                                  up=[0.2304, -0.8825, 0.4101])

print('run Poisson surface reconstruction')
with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
    mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)
print(mesh)
o3d.visualization.draw_geometries([mesh],
                                  zoom=0.664,
                                  front=[-0.4761, -0.4698, -0.7434],
                                  lookat=[1.8900, 3.2596, 0.9284],
                                  up=[0.2304, -0.8825, 0.4101])
# -

# Poisson surface reconstruction will also create triangles in areas of low point density, and even extrapolates into some areas (see bottom of the eagle output above). The `create_from_point_cloud_poisson` function has a second `densities` return value that indicates for each vertex the density. A low density value means that the vertex is only supported by a low number of points from the input point cloud.
#
# In the code below we visualize the density in 3D using pseudo color. Violet indicates low density and yellow indicates a high density.

print('visualize densities')
densities = np.asarray(densities)
# Map normalized densities through the 'plasma' colormap (drop the alpha channel)
density_colors = plt.get_cmap('plasma')(
    (densities - densities.min()) / (densities.max() - densities.min()))
density_colors = density_colors[:, :3]
density_mesh = o3d.geometry.TriangleMesh()
density_mesh.vertices = mesh.vertices
density_mesh.triangles = mesh.triangles
density_mesh.triangle_normals = mesh.triangle_normals
density_mesh.vertex_colors = o3d.utility.Vector3dVector(density_colors)
o3d.visualization.draw_geometries([density_mesh],
                                  zoom=0.664,
                                  front=[-0.4761, -0.4698, -0.7434],
                                  lookat=[1.8900, 3.2596, 0.9284],
                                  up=[0.2304, -0.8825, 0.4101])

# We can further use the density values to remove vertices and triangles that have a low support. In the code below we remove all vertices (and connected triangles) that have a lower density value than the $0.01$ quantile of all density values.

print('remove low density vertices')
vertices_to_remove = densities < np.quantile(densities, 0.01)
mesh.remove_vertices_by_mask(vertices_to_remove)
print(mesh)
o3d.visualization.draw_geometries([mesh],
                                  zoom=0.664,
                                  front=[-0.4761, -0.4698, -0.7434],
                                  lookat=[1.8900, 3.2596, 0.9284],
                                  up=[0.2304, -0.8825, 0.4101])

# ## Normal Estimation
# In the examples above we assumed that the point cloud has normals that point outwards. However, not all point clouds already come with associated normals. Open3D can be used to estimate point cloud normals. `estimate_normals` locally fits a plane per 3D point to derive the normal. The problem can be that the normals are not consistently oriented. `orient_normals_consistent_tangent_plane` propagates the normal orientation using a minimum spanning tree.

# +
gt_mesh = o3dtut.get_bunny_mesh()
pcd = gt_mesh.sample_points_poisson_disk(5000)
pcd.normals = o3d.utility.Vector3dVector(np.zeros((1,3)))  # invalidate existing normals

pcd.estimate_normals()
o3d.visualization.draw_geometries([pcd], point_show_normal=True)

pcd.orient_normals_consistent_tangent_plane(100)
o3d.visualization.draw_geometries([pcd], point_show_normal=True)
examples/Python/Advanced/surface_reconstruction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pyGuru123/Data-Analysis-and-Visualization/blob/main/Global%20Mean%20Precipitation%20IMERG%20Analysis/global_prec.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="uI1cNAyP0dwA"
# # Global Precipitation Visualization with NASA IMERG Data

# + [markdown] id="9S3PKm9x14tO"
# IMERG, the Integrated Multi-satellitE Retrievals for GPM, is a unified satellite precipitation product produced by NASA to estimate surface precipitation over most of the globe. With IMERG, precipitation estimates from the GPM core satellite are used to calibrate precipitation estimates from microwave and IR sensors on other satellites. By then merging the estimates from multiple satellites, surface precipitation maps can be produced half-hourly at 0.1o horizontal resolution.

# + [markdown] id="jwRbojxN1y6v"
# # Aim & Requirements

# + [markdown] id="9ovCLxCVz0Yq"
# **Aim** : Reading and visualizing Integrated Multi-satellitE Retrievals for Global Precipitation Measurement (GPM) missions dataset using Python
#
# **Dataset** : [official_datasets](https://disc.gsfc.nasa.gov/datasets/GPM_3IMERGM_06/summary?keywords=GPM_3IMERGM_06) \
# used dataset – [GPM Level 3 IMERG Monthly 0.1 x 0.1 degree Precipitation (GPM_3IMERGM) for July 2020](https://gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGM.06/2020/3B-MO.MS.MRG.3IMERG.20200701-S000000-E235959.07.V06B.HDF5)
#
# **Dependencies** : H5py, Matplotlib, Numpy, Cartopy

# + [markdown] id="NCz6NdX62Rs6"
# The hdf5 file stands for hierarchical data format version 5 and is an open source file format that allows storing of large and complex heterogeneous data. hdf5 uses a directory based system to store multiple datasets and related attributes.
#
# In python, a hdf5 file can be opened using the h5py or pytables package. A dataset inside a hdf5 file can be considered as a numpy ndarray
#
# This study is divided in 4 parts:
# 1. Downloading & Reading hdf5 data with h5py package
# 2. Processing datasets
# 2. Visualizing global mean precipitation for Globe
# 3. Visualizing global mean precipitation for India

# + [markdown] id="Hf71TtY6aO0u"
# Importing required libraries

# + id="JgAqI0_pysgg"
import h5py
import numpy as np
import matplotlib.pyplot as plt

# + [markdown] id="bRVJld3Kaz51"
# Installing Cartopy in colab

# + id="OhnWvbh5a4M3"
# !apt-get install libproj-dev proj-data proj-bin
# !apt-get install libgeos-dev
# !pip install cython
# !pip install cartopy

# + [markdown] id="6W46ipzXbykV"
# Installing shapely in colab

# + id="Ho1Hns7bbwGE"
# !pip uninstall shapely
# !pip install shapely --no-binary shapely

# + [markdown] id="wCJ8ZWfy57QD"
# # Downloading and reading dataset

# + [markdown] id="g5XVB4bG60GF"
# **Downloading Dataset**
#
# There are two ways to download the dataset
#
# First way : The Hard way
#
# 1. Downloading from NASA Earth Data website for which an account is required at Earth Data website. One can register for a free account from here : [EarthData](https://uui-test.gesdisc.eosdis.nasa.gov/uui/data-access)
# 2. Once registered, follow the steps at [this page](https://urs.earthdata.nasa.gov/approve_app?client_id=e2WVk8Pw6weeLUKZYOxvTQ) to authorize NASA GESDISC DATA ARCHIVE
# 3. Click this link to reach official dataset page : [GPM IMERG DATASET](https://gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGM.06/)
# 4. Select year for which you want the dataset for.
# 5. Download the dataset by clicking the appropriate month hdf5 file link.
# 6. The file used here has following name : *3B-MO.MS.MRG.3IMERG.20200701-S000000-E235959.07.V06B.HDF5*. Here 07.V06B states that its the file for July month
#
# Second way : Easy way \
# I have already provided the dataset used in this notebook in my github repo. [Download it from here](https://github.com/pyGuru123/Data-Analysis-and-Visualization/tree/main/Global%20Mean%20Precipitation%20IMERG%20Analysis)

# + [markdown] id="dvk73zj064b0"
# **Reading Dataset**

# + id="bmMWuuzW5_w3"
# NOTE(review): the misspellings in this Drive path ('Visulization',
# 'Gloabl') are preserved on purpose -- they must match the actual
# folder names on the mounted Drive.
filepath = '/content/drive/MyDrive/Colab Notebooks/Data and Visulization/Gloabl Precipitation IMERG/3B-MO.MS.MRG.3IMERG.20200701-S000000-E235959.07.V06B.HDF5'
f = h5py.File(filepath, 'r')

# + colab={"base_uri": "https://localhost:8080/"} id="Fv1KFqqS_3z9" outputId="d12869e8-acee-4c25-d374-012a29317a03"
print(f)

# + [markdown] id="92Xp0_YVAIbQ"
# Reading all hdf5 group keys
#
# A key in hdf5 is like a directory under which datasets with their attributes are kept

# + colab={"base_uri": "https://localhost:8080/"} id="h9tG5QbD_-5q" outputId="06ec96ab-d116-476c-d1ee-83cc0ba46a40"
f.keys()

# + [markdown] id="SU9DbKa4Ayqp"
# below ones are the subkeys inside the main 'Grid' key, they contain our datasets

# + colab={"base_uri": "https://localhost:8080/"} id="Myq53FBAAQcs" outputId="46ac53db-a993-471d-f9ea-78782272920d"
for key in f['Grid'].keys():
    print(key)

# + [markdown] id="8r2baPUiA-YI"
# lets check our first dataset, it will simply be a numpy ndarray

# + colab={"base_uri": "https://localhost:8080/"} id="XZ-JaWBPAtUm" outputId="b298f566-c6cf-4207-e27e-7aa97abc2308"
dset = f['Grid/precipitationQualityIndex'][...]
print(dset)
print(dset.dtype)
print(dset.shape)
print(len(dset))

# + [markdown] id="AAQdQWH_CndQ"
# # Processing Datasets

# + [markdown] id="wI0dZ4AWCUXu"
# **Reading precipitation, latitude and longitude**

# + id="bx-lVGGdBKoa"
precip = f['Grid/precipitation'][0][:][:]
lats = f['Grid/lat'][...]
lons = f['Grid/lon'][...]
# + colab={"base_uri": "https://localhost:8080/"} id="1LElUptEC-oJ" outputId="23a3ce5a-9d8a-4892-8b33-9858de48e6c4"
print(precip)
print(precip.dtype)
print(precip.shape)

# + [markdown] id="vYjfFHrNDkEM"
# transposing precip numpy ndarray

# + colab={"base_uri": "https://localhost:8080/"} id="K3T6qCdJDAZX" outputId="2a2033f4-e502-4bfb-b0e7-10801262da7b"
precip = np.transpose(precip)
print(precip)
print(precip.dtype)
print(precip.shape)

# + colab={"base_uri": "https://localhost:8080/"} id="cQPr0WIiDR7f" outputId="089e066c-f1ce-4251-abe2-a73ede9afbb1"
print(lats)
print(lons)

# + [markdown] id="pSJZ1PchD6BR"
# converting lats and lons array into a numpy meshgrid

# + id="Ep7v3G33D1eB"
x, y = np.float32(np.meshgrid(lons, lats))

# + colab={"base_uri": "https://localhost:8080/"} id="DybcCEMOEEPQ" outputId="d092af18-f1fb-4d47-ceac-cf67c2df1174"
print(x)
print(y)

# + [markdown] id="W2hnNGwsEPFs"
# # Visualizing global mean precipitation for Globe

# + id="ahQj1uDoorfp"
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.ticker as mticker

# + colab={"base_uri": "https://localhost:8080/", "height": 456} id="PYLjDVRKoZfH" outputId="331cdfbf-444e-43fb-cf6e-436c7480263f"
# Set the figure size, projection, and extent
fig = plt.figure(figsize=(21,7))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([-180.0,180.0,-90.0,90.0])

# Add coastlines and formatted gridlines
ax.coastlines(resolution="110m",linewidth=1)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                  linewidth=1, color='gray', linestyle='--')
gl.top_labels = False
gl.right_labels = False
gl.xlines = True
gl.xlocator = mticker.FixedLocator([-180, -90, 0, 90, 180])
gl.ylocator = mticker.FixedLocator([-60, -50, -25, 0, 25, 50, 60])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size':16, 'color':'black'}
gl.ylabel_style = {'size':16, 'color':'black'}

# Set contour levels and draw the plot
clevs = np.arange(0,1.26,0.05)
plt.contourf(x, y, precip, clevs, cmap=plt.cm.rainbow)
plt.title('GPM IMERG Monthly Mean Rain Rate for July 2020', size=24)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label('mm / hr',size=20)
cb.ax.tick_params(labelsize=16)
plt.show()

# + id="Li9Rc-NawTFP"
fig.savefig('GPM_3IMERGP_WORLD.png', bbox_inches='tight', pad_inches = 0.1)

# + [markdown] id="xqHhjksMqcMc"
# # Visualizing global mean precipitation for India

# + colab={"base_uri": "https://localhost:8080/", "height": 449} id="xoaUypp7qkGy" outputId="b7179f9e-981d-448e-a876-2811a86226af"
# Set the figure size, projection, and extent
fig = plt.figure(figsize=(21,7))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linestyle=':')
ax.set_extent([65.0,100.0,0.0,40.0])

# Add coastlines and formatted gridlines
ax.coastlines(resolution="10m",linewidth=1)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                  linewidth=1, color='gray', linestyle='--')
gl.top_labels = False
gl.right_labels = False
gl.xlines = True
gl.xlocator = mticker.FixedLocator([70, 80, 90])
gl.ylocator = mticker.FixedLocator([0, 5, 15, 25, 35, 60])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size':16, 'color':'black'}
gl.ylabel_style = {'size':16, 'color':'black'}

# Set contour levels and draw the plot
clevs = np.arange(0,1.26,0.05)
plt.contourf(x, y, precip, clevs, cmap=plt.cm.Blues)
plt.title('GPM IMERG Monthly Mean Rain Rate for July, 2020', size=16, color='white')
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label('mm / hr',size=20)
cb.ax.tick_params(labelsize=16)
plt.show()

# + id="Le5d9iEtqvBz"
fig.savefig('GPM_3IMERGP_INDIA.png', bbox_inches='tight', pad_inches = 0.1)

# + [markdown] id="0uAkp4xXwOyp"
# That's all
Global Mean Precipitation IMERG Analysis/global_prec.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="Zg7xzP4T9kYU" year = 2018 # + [markdown] colab_type="text" id="SewNEuvE9kYT" # This notebook predicts for the relevant year using the 2018 model, for both urban and rural areas. # - # ## Imports and Setup # + colab={} colab_type="code" id="5X7_TSil9kYa" outputId="911e8317-3f30-40bf-eb7e-d6ddf0e09be1" import re import numpy as np import pandas as pd from math import sqrt import geopandas as gpd import rasterio as rio from shapely.wkt import loads from tqdm import tqdm from datetime import datetime, timedelta from joblib import Parallel, delayed from itertools import repeat import os # ignore warnings import logging import warnings logging.getLogger().setLevel(logging.ERROR) warnings.filterwarnings("ignore") import sys sys.path.insert(0, '../utils') from settings import * import geoutils import modelutils # - # ## Set directories dept_dir = data_dir + 'by_dept/' grid250_dir = dept_dir + 'grid_250x250m/' feats250_dir = dept_dir + 'features/' preds250_dir = dept_dir + 'predictions/' # ## Download data from Cloud Storage # + colab={} colab_type="code" id="jBfgOP909kYk" # !gsutil -m rsync -r gs://immap-wash-training/grid/grid_250x250m/ {grid250_dir} # !gsutil cp gs://immap-wash-training/grid/grid_250x250m_wadmin.csv {data_dir} # !gsutil cp gs://immap-wash-training/grid/grids_in_urban_and_rural_areas.csv {data_dir} # !gsutil cp gs://immap-wash-training/features/2020_*.tif {feats_dir} # !gsutil cp gs://immap-wash-training/features/2019_*.tif {feats_dir} # !gsutil cp gs://immap-wash-training/features/2018_colombia_aridity_cgiarv2.tif {feats_dir}2020_colombia_aridity_cgiarv2.tif # !gsutil cp gs://immap-wash-training/features/2018_colombia_nearest_highway.tif {feats_dir}2020_colombia_nearest_highway.tif # - # ## Get list of 
# Department (adm1) names to iterate over; two are excluded below.
adm1s = ['amazonas', 'antioquia', 'arauca', 'atlntico', 'bogot_dc', 'bolvar',
         'boyac', 'caldas', 'caquet', 'casanare', 'cauca', 'cesar', 'choc',
         'crdoba', 'cundinamarca', 'guaina', 'guaviare', 'huila', 'la_guajira',
         'magdalena', 'meta', 'nario', 'norte_de_santander', 'putumayo',
         'quindo', 'risaralda', 'san_andrs_y_providencia', 'santander',
         'sucre', 'tolima', 'valle_del_cauca', 'vaups', 'vichada']
adm1s = list(set(adm1s) - {'amazonas', 'bogot_dc'})
adm1s.sort()

# ## Load 2018 data

# + colab={} colab_type="code" id="bTb6ZSmX9kYz" outputId="6fe6617e-5211-4a6b-f065-6e0ec2a59305"
df = pd.read_csv(data_dir + '20200916_dataset.csv')
train_df = df.copy()
print(train_df.shape)
# -

# ## Rollout by department chunk (takes 24 hours)

# For each department, predict on 30K rows (chunk) at a time
for adm1 in tqdm(adm1s):
    try:
        modelutils.predict_by_chunk(adm1)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt and made the rollout impossible to interrupt.
        # Record the failed department as an empty marker file and continue.
        with open(preds250_dir + f'failed-{adm1}.txt', 'w'):
            pass

# ## Combine chunks to one raster (takes 1h30m)

# For All departments Amazonas took 7mins

# +
for adm1 in tqdm(adm1s):
    modelutils.gpkgs_to_raster(adm1, verbose = False)

outnames = [fname for fname in os.listdir(preds250_dir) if '.tif' in fname]
outnames.sort()

# # copy to gcs folder
for outname in tqdm(outnames):
    # !gsutil cp {preds250_dir}{outname} gs://immap-output/20201005/
    pass  # loop body is the notebook shell magic above; `pass` keeps the suite non-empty
# -

# + colab={} colab_type="code" id="MZpQzkk59kZZ"

# + colab={} colab_type="code" id="3_SZ3pdT9kZc"
notebooks/03_Rollout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/cindyhfls/NMA_DL_2021_project/blob/main/IndividualScriptTests/runModelScriptAreaComparison.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="idt_5qF9ogSJ" # _____________ # # Preprocessing # + id="SHRR0PV20BqZ" colab={"base_uri": "https://localhost:8080/"} outputId="f6b1755a-de99-4216-a2d8-d5149f1a7b0a" #@title Import matplotlib, class functions, and set up variables. from matplotlib import rcParams from matplotlib import pyplot as plt import torch import copy from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import numpy as np import math import torch.nn as nn import torch.nn.functional as F from matplotlib import pyplot as plt from torch.utils.data.dataloader import default_collate device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') rcParams['figure.figsize'] = [20, 4] rcParams['font.size'] =15 rcParams['axes.spines.top'] = False rcParams['axes.spines.right'] = False rcParams['figure.autolayout'] = True # Data Loading #@title Data retrieval import os, requests fname = [] for j in range(3): fname.append('steinmetz_part%d.npz'%j) url = ["https://osf.io/agvxh/download"] url.append("https://osf.io/uv3mw/download") url.append("https://osf.io/ehmw2/download") for j in range(len(url)): if not os.path.isfile(fname[j]): try: r = requests.get(url[j]) except requests.ConnectionError: print("!!! Failed to download data !!!") else: if r.status_code != requests.codes.ok: print("!!! 
Failed to download data !!!") else: with open(fname[j], "wb") as fid: fid.write(r.content) alldat = np.array([]) for j in range(len(fname)): alldat = np.hstack((alldat, np.load('steinmetz_part%d.npz'%j, allow_pickle=True)['dat'])) #@title Print Keys print(alldat[0].keys()) # @title Set random seed # @markdown Executing `set_seed(seed=seed)` you are setting the seed # for DL its critical to set the random seed so that students can have a # baseline to compare their results to expected results. # Read more here: https://pytorch.org/docs/stable/notes/randomness.html # Call `set_seed` function in the exercises to ensure reproducibility. import random import torch def set_seed(seed=None, seed_torch=True): if seed is None: seed = np.random.choice(2 ** 32) random.seed(seed) np.random.seed(seed) if seed_torch: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True print(f'Random seed {seed} has been set.') # In case that `DataLoader` is used def seed_worker(worker_id): worker_seed = torch.initial_seed() % 2**32 np.random.seed(worker_seed) random.seed(worker_seed) #@title Define Steinmetz Class class SteinmetzSession: data = [] binSize = 10 nTrials = [] nNeurons = [] trialLen = 0 trimStart = "trialStart" trimEnd = "trialEnd" def __init__(self, dataIn): self.data = copy.deepcopy(dataIn) dims1 = np.shape(dataIn['spks']) self.nTrials = dims1[1] self.nNeurons = dims1[0] self.trialLen = dims1[2] def binData(self, binSizeIn): # Inputs: data, scalar for binning. Combines binSizeIn bins together to bin data smaller Ex. binSizeIn of 5 on the original dataset combines every 5 10 ms bins into one 50 ms bin across all trials. 
varsToRebinSum = ['spks'] varsToRebinMean = ['wheel', 'pupil'] spikes = self.data['spks'] histVec = range(0,self.trialLen+1, binSizeIn) spikesBin = np.zeros((self.nNeurons, self.nTrials, len(histVec))) print(histVec) for trial in range(self.nTrials): spikes1 = np.squeeze(spikes[:,trial,:]) for time1 in range(len(histVec)-1): spikesBin[:,trial, time1] = np.sum(spikes1[:, histVec[time1]:histVec[time1+1]-1], axis=1) spikesBin = spikesBin[:,:,:-1] self.data['spks'] = spikesBin self.trialLen = len(histVec) -1 self.binSize = self.binSize*binSizeIn s = "Binned spikes, turning a " + repr(np.shape(spikes)) + " matrix into a " + repr(np.shape(spikesBin)) + " matrix" print(s) def plotTrial(self, trialNum): # Basic function to plot the firing rate during a single trial. Used for debugging trimming and binning plt.imshow(np.squeeze(self.data['spks'][:,trialNum,:]), cmap='gray_r', aspect = 'auto') plt.colorbar() plt.xlabel("Time (bins)") plt.ylabel("Neuron #") def realign_data_to_movement(self,length_time_in_ms): # input has to be n * nTrials * nbins align_time_in_bins = np.round(self.data['response_time']/self.binSize*1000)+ int(500/self.binSize) # has to add 0.5 s because the first 0.5 s is pre-stimulus length_time_in_bins = int(length_time_in_ms/self.binSize) validtrials = self.data['response']!=0 maxtime = self.trialLen newshape = (self.nNeurons,self.nTrials) newshape+=(length_time_in_bins,) newdata = np.empty(newshape) for count,align_time_curr_trial in enumerate(align_time_in_bins): if (validtrials[count]==0)|(align_time_curr_trial+length_time_in_bins>maxtime) : validtrials[count] = 0 else: newdata[:,count,:]= self.data['spks'][:,count,int(align_time_curr_trial):int(align_time_curr_trial)+length_time_in_bins] # newdata = newdata[:,validtrials,:] self.data['spks'] = newdata # self.validtrials = validtrials print('spikes aligned to movement, returning validtrials') return validtrials def get_areas(self): print(set(list(self.data['brain_area']))) def extractROI(self, region): 
#### extract neurons from single region rmrt=list(np.where(self.data['brain_area']!=region))[0] print(f' removing data from {len(rmrt)} neurons not contained in {region} ') self.data['spks']=np.delete(self.data['spks'],rmrt,axis=0) neur=len(self.data['spks']) print(f'neurons remaining in trial {neur}') self.data['brain_area']=np.delete(self.data['brain_area'],rmrt,axis=0) self.data['ccf']=np.delete(self.data['ccf'],rmrt,axis=0) def FlattenTs(self): self.data['spks']=np.hstack(self.data['spks'][:]) def removeTrialAvgFR(self): mFR = self.data['spks'].mean(1) mFR = np.expand_dims(mFR, 1) print(np.shape(self.data['spks'])) print(np.shape(mFR)) self.data['spks'] = self.data['spks'].astype(float) self.data['spks'] -= mFR def sqrt_norm(self): self.data['spks'] = np.sqrt(self.data['spks']) def permdims(self): return torch.permute(torch.tensor(self.data['spks']),(2,1,0)) def smoothFR(self, smoothingWidth):# TODO: Smooth the data and save it back to the data structure return 0 #@title get input for network from session 31 s31=SteinmetzSession(alldat[30]) s31.sqrt_norm() s31.removeTrialAvgFR() validtrials = s31.realign_data_to_movement(500) # get 500 ms from movement time, # cannot get realign and binning to work the same time =[ # Model class Net(nn.Module): # our model def __init__(self, ncomp, NN1, NN2, bidi=True, dropout = 0): super(Net, self).__init__() # play with some of the options in the RNN! 
self.rnn1 = nn.RNN(NN1, ncomp, num_layers = 1, dropout = 0, # MO bidirectional = bidi, nonlinearity = 'tanh') self.rnn2 = nn.RNN(NN2,ncomp,num_layers = 1, dropout = 0, bidirectional = bidi, nonlinearity = 'tanh') #TH if bidi == True: self.fclatent = nn.Linear(ncomp*2,ncomp*2) else: self.fclatent = nn.Linear(ncomp,ncomp) self.fc = nn.Linear(ncomp,NN1) def forward(self, x0,x1): y2 = self.rnn2(x0)[0] # ncomp TH y = self.rnn1(x1)[0] # ncomp MOs y = y + self.fclatent(y2) # ncomp MOs with projection of latent TH components if self.rnn1.bidirectional: # if the rnn is bidirectional, it concatenates the activations from the forward and backward pass # we want to add them instead, so as to enforce the latents to match between the forward and backward pass q = (y[:, :, :ncomp] + y[:, :, ncomp:])/2 else: q = y # the softplus function is just like a relu but it's smoothed out so we can't predict 0 # if we predict 0 and there was a spike, that's an instant Inf in the Poisson log-likelihood which leads to failure z = F.softplus(self.fc(q), 10) return z, q def pearson_corr_tensor(input, output): rpred = output.detach().cpu().numpy() rreal = input.detach().cpu().numpy() rpred_flat = np.ndarray.flatten(rpred) rreal_flat = np.ndarray.flatten(rreal) corrcoeff = np.corrcoef(rpred_flat, rreal_flat) return corrcoeff[0,1] # + id="7vQXTiKoFHd_" def runAreaModels(s31, lr, firstArea, secondArea, latentSize, nIter, validtrials, dropout, bidi, plotExamples = False, verboseFlag = False): ### print areas s31.get_areas() # s31.FlattenTs() nTr = np.argwhere(validtrials) # since the other trials were defaulted to a zero value, only plot the valid trials MO = copy.deepcopy(s31) ###remove all neurons not in motor cortex MO.extractROI(firstArea) ### plot a trial from motor neuron if plotExamples: plt.figure() MO.plotTrial(nTr[1]) plt.title(firstArea) ### permute the trials MOdata = MO.permdims().float().to(device) MOdata = MOdata[:,validtrials,:] print(MOdata.shape) TH = copy.deepcopy(s31) ###remove 
all neurons not in motor cortex TH.extractROI(secondArea) ### plot a trial from motor neuron if plotExamples: plt.figure() TH.plotTrial(nTr[1]) plt.title(secondArea) THdata = TH.permdims().float().to(device) THdata = THdata[:,validtrials,:] NN1 = MOdata.shape[2] NN2 = THdata.shape[2] N = MOdata.shape[1] np.random.seed(42) ii = torch.randperm(N).tolist() idx_train = ii[:math.floor(0.6*N)] idx_val = ii[math.floor(0.6*N):math.floor(0.9*N)] idx_test = ii[math.floor(0.9*N):] x0_train = THdata[:,idx_train,:] x0_val = THdata[:,idx_val,:] x0_test = THdata[:,idx_test,:] x1_train = MOdata[:,idx_train,:] x1_val = MOdata[:,idx_val,:] x1_test = MOdata[:,idx_test,:] ncomp = latentSize learning_rate_start = lr net_baseline = Net(ncomp, NN1, NN2, bidi = bidi, dropout= dropout).to(device) net_baseline.fclatent.weight.data[:] = 0 # fixed weights =0 so the TH input is not considered net_baseline.fclatent.bias.data[:] = 0 net_baseline.fclatent.weight.requires_grad = False net_baseline.fclatent.bias.requires_grad = False # special thing: we initialize the biases of the last layer in the neural network # we set them as the mean firing rates of the neurons. 
# this should make the initial predictions close to the mean, because the latents don't contribute much # net_baseline.fc.bias.data[:] = MOdata.mean((0,1)) # we set up the optimizer later in the training loop if verboseFlag: print(net_baseline) loss = nn.MSELoss() optimizer = torch.optim.Adam(net_baseline.parameters(), lr=learning_rate_start) training_cost = [] val_cost = [] for k in range(nIter): ### training optimizer.zero_grad() # the network outputs the single-neuron prediction and the latents z, y = net_baseline(x0_train,x1_train) cost = loss(z,x1_train).mean() # # our log-likelihood cost # cost = Poisson_loss(z, x1_train).mean() # train the network as usual cost.backward() optimizer.step() training_cost.append(cost.item()) ### test on validation data z_val,_ = net_baseline(x0_val,x1_val) cost = loss(z_val,x1_val).mean() # cost = Poisson_loss(z_val, x1_val).mean() val_cost.append(cost.item()) if k % 100 == 0: if verboseFlag: print(f'iteration {k}, cost {cost.item():.4f}') if plotExamples: plt.plot(training_cost,'b') plt.plot(val_cost,'r') plt.hlines(np.min(training_cost),0,nIter,'b',linestyles = '--') plt.hlines(np.min(val_cost),0,nIter,'r',linestyles = '--') plt.legend(['training cost','validation cost','min training cost','min validation cost']) plt.title('Training cost over epochs') plt.ylabel('cost') plt.xlabel('epochs') corr = pearson_corr_tensor(x1_val, z_val) rpred = z.detach().cpu().numpy() rates = x1_train.cpu() if plotExamples: nTr = 5 nNeuron = 0 plt.figure(figsize=(10, 6)) plt.plot(rates[:,nTr, nNeuron]) plt.plot(rpred[:,nTr, nNeuron]) plt.legend(['spikes', 'rates (predicted)']) plt.title(f'training set Trial {nTr}, Neuron {nNeuron}') plt.show() plt.figure(figsize = (12, 8)) plt.subplot(121) plt.imshow(rates[:, nTr, :].T, cmap='gray_r') plt.xlabel('Time (ms)') plt.ylabel('Cell #') plt.title(f'True rates (training set trial {nTr})') plt.subplot(122) plt.imshow(rpred[:, nTr, :].T, cmap='gray_r') plt.xlabel('Time (ms)') plt.ylabel('Cell #') 
plt.title(f'Inferred rates (training set trial {nTr})') plt.show() PATH = 'steinmetz_model_baseline.pt' torch.save(net_baseline.state_dict(), PATH) del net_baseline # load saved model net_baseline = Net(ncomp, NN1, NN2, bidi = bidi, dropout= dropout).to(device) net_baseline.load_state_dict(torch.load('steinmetz_model_baseline.pt')) # after training the baseline network, get the weights of rnn1 and freeze it net_withinput = copy.deepcopy(net_baseline) net_withinput.fclatent.weight.requires_grad = True net_withinput.fclatent.bias.requires_grad = True # # set weight initalization to random net_withinput.fclatent.reset_parameters() net_withinput.rnn1.weight_ih_l0.requires_grad = False net_withinput.rnn1.weight_hh_l0.requires_grad = False net_withinput.rnn1.bias_ih_l0.requires_grad = False net_withinput.rnn1.bias_hh_l0.requires_grad = False if bidi: net_withinput.rnn1.weight_ih_l0_reverse.requires_grad = False net_withinput.rnn1.weight_hh_l0_reverse.requires_grad = False net_withinput.rnn1.bias_ih_l0_reverse.requires_grad = False net_withinput.rnn1.bias_hh_l0_reverse.requires_grad = False if verboseFlag: print(net_withinput) # we define the Poisson log-likelihood loss # def Poisson_loss(lam, spk): # return lam - spk * torch.log(lam) loss = nn.MSELoss() optimizer = torch.optim.Adam(net_withinput.parameters(), lr=learning_rate_start) # this is very important training_cost2 = [] val_cost2 = [] for k in range(nIter): ### training optimizer.zero_grad() # the network outputs the single-neuron prediction and the latents z, y = net_withinput(x0_train,x1_train) # our log-likelihood cost cost = loss(z, x1_train).mean() # train the network as usual cost.backward() optimizer.step() training_cost2.append(cost.item()) ### test on validation data z_val,_ = net_withinput(x0_val,x1_val) cost = loss(z_val, x1_val).mean() val_cost2.append(cost.item()) if k % 100 == 0: if verboseFlag: print(f'iteration {k}, cost {cost.item():.4f}') corr2 = pearson_corr_tensor(x1_val, z_val) return 
training_cost, training_cost2, val_cost, val_cost2, corr, corr2 # + id="jKPYawfX7qpJ" colab={"base_uri": "https://localhost:8080/"} outputId="48ddab8a-e4eb-417d-b05e-7e502df726e1" numSplits = 2 doVec = np.linspace(0, 0.9, numSplits) bootstraps = 10 train_cost = np.zeros((numSplits, nIter, bootstraps)) train_cost2 = np.zeros((numSplits, nIter, bootstraps)) val_cost = np.zeros((numSplits, nIter, bootstraps)) val_cost2 = np.zeros((numSplits, nIter, bootstraps)) corr = np.zeros((numSplits, nIter, bootstraps)) corr2 = np.zeros((numSplits, nIter, bootstraps)) firstArea = 'MOs' secondArea = 'TH' thirdArea = 'OLF' latentSize = 3 nIter = 1000 dropout = .5 bidi = False lr = 0.002 for boot in range(bootstraps): print(f'Bootstrap num ',{boot}, ' of ', {bootstraps}) train_cost[0,:, boot], train_cost2[0,:, boot], val_cost[0,:, boot], val_cost2[0,:, boot], corr[0,:, boot], corr2[0,:, boot] = runAreaModels(s31, lr, firstArea, secondArea, latentSize, nIter, validtrials, dropout, bidi, verboseFlag = False) train_cost[1,:, boot], train_cost2[1,:, boot], val_cost[1,:, boot], val_cost2[1,:, boot], corr[1,:, boot], corr2[1,:, boot] = runAreaModels(s31, lr, firstArea, thirdArea, latentSize, nIter, validtrials, dropout, bidi, verboseFlag = False) plt.figure() plt.plot(val_cost) plt.plot(val_cost2) # + [markdown] id="8XRIIHV8rRxo" # --------------------- The end ------------------- #
IndividualScriptTests/runModelScriptAreaComparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Machine Learning and Statistics for Physicists # Material for a [UC Irvine](https://uci.edu/) course offered by the [Department of Physics and Astronomy](https://www.physics.uci.edu/). # # Content is maintained on [github](github.com/dkirkby/MachineLearningStatistics) and distributed under a [BSD3 license](https://opensource.org/licenses/BSD-3-Clause). # # ##### &#9658; [View table of contents](Contents.ipynb) # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns; sns.set() import numpy as np import pandas as pd from mls import locate_data from sklearn import model_selection, neighbors, tree, ensemble, preprocessing import scipy.stats # ## Case Study: Redshift Inference # Our goal is to predict the [cosmological redshift](https://en.wikipedia.org/wiki/Redshift) of a galaxy based on its brightness measured through 17 different filters. Redshift is a proxy for distance or, equivalently, look back time, so is a key observable for learning about past conditions in the universe. 
# + [markdown] heading_collapsed=true # ### Load and Explore Data # + [markdown] hidden=true # Read the data to train and test on: # + hidden=true X = pd.read_hdf(locate_data('photoz_data.hf5')) y = pd.read_hdf(locate_data('photoz_targets.hf5')) # + hidden=true X.describe() # + hidden=true y.describe() # + hidden=true sns.pairplot(X[:500], vars=X.columns.tolist()[:6]); # + hidden=true plt.hist(y['Z'], bins=np.arange(0, 6, 0.2)) plt.xlabel('Redshift $z$'); plt.ylabel('Galaxies / ($\Delta z=0.2$)'); # + [markdown] heading_collapsed=true # ### Split Data Randomly into Training and Testing Subsamples # + hidden=true gen = np.random.RandomState(seed=123) X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=gen) # + hidden=true print(f'{len(X)} = {len(X_train)} TRAIN + {len(X_test)} TEST') # + [markdown] heading_collapsed=true # ### Nearest Neighbor Regression # + [markdown] hidden=true # Use the K-nearest neighbors (KNN) of an input sample to estimate its properties with [KNeighborsRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor): # + hidden=true knn_fit = neighbors.KNeighborsRegressor(n_jobs=8).fit(X_train, y_train) # + [markdown] hidden=true # Scores are calculated using the [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination) $R^2$, for which perfect accuracy is $R^2 = 1$: # + hidden=true knn_fit.score(X_train, y_train), knn_fit.score(X_test, y_test) # + hidden=true knn_fit.n_neighbors # + [markdown] hidden=true # #### Hyperparameter Optimization # + [markdown] hidden=true # The main hyperparameter is the value of K: the number of nearest neighbors that contribute to the final decision. 
# + hidden=true
def knn_study(n=(1, 2, 4, 6, 8, 12, 16), max_score_samples=2000):
    """Plot train/test $R^2$ of a KNN regressor versus the neighbor count.

    One regressor is fit per value in ``n``; scores are evaluated on at
    most ``max_score_samples`` rows to keep the neighbor lookups fast.
    """
    scores = {'TRAIN': [], 'TEST': []}
    for k in n:
        model = neighbors.KNeighborsRegressor(n_neighbors=k, n_jobs=8).fit(X_train, y_train)
        scores['TRAIN'].append(model.score(X_train[:max_score_samples], y_train[:max_score_samples]))
        scores['TEST'].append(model.score(X_test[:max_score_samples], y_test[:max_score_samples]))
    plt.plot(n, scores['TRAIN'], 'rx-', label='TRAIN')
    plt.plot(n, scores['TEST'], 'bo-', label='TEST')
    plt.xlabel('KNN n_neighbors')
    plt.ylabel('KNN $R^2$ score')
    plt.legend()

# + hidden=true
knn_study()
# -

# ### Decision Tree Regression

# Use a [binary decision tree](https://en.wikipedia.org/wiki/Decision_tree_learning) to sort each input sample into a small "peer group" with [DecisionTreeRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html). Note the similarity with KNN, except now we ask a set of questions to identify the "peer group", instead of using nearest neighbors.

gen = np.random.RandomState(seed=123)
tree_fit = tree.DecisionTreeRegressor(random_state=gen).fit(X_train, y_train)

tree_fit.score(X_train, y_train), tree_fit.score(X_test, y_test)

tree_fit.tree_.max_depth

# #### Feature Importance

# A decision tree is highly interpretable since we can see exactly what questions it is asking at each step. As a first step, look at the "feature importance" of your trained model, which quantifies how much the performance suffers when a single feature is shuffled to make it non-informative.
# Rank the features by how much the fitted tree relies on each of them,
# most important first.
importance = pd.DataFrame(
    {'feature': X.columns, 'importance': tree_fit.feature_importances_}
).sort_values(by='importance', ascending=False)

importance.plot('feature', 'importance', 'barh', figsize=(10, 10), legend=False);

# Re-train using only the 8 most important features:

importance[:8]

# best_features is a pandas Series of column names; it is reused below to
# select the reduced feature set from X_train / X_test.
best_features = importance[:8]['feature']

# The re-trained tree is much simpler and almost equally accurate on the test data:

tree_fit = tree.DecisionTreeRegressor(random_state=gen).fit(X_train[best_features], y_train)

tree_fit.score(X_train[best_features], y_train), tree_fit.score(X_test[best_features], y_test)

tree_fit.tree_.max_depth

# #### Hyperparameter Optimization

def tree_study(n=(3, 4, 5, 6, 8, 10, 15, 20, 25, 30, 35), seed=123):
    # Plot train/test R^2 of a depth-limited tree for each max_depth in n.
    # A fresh RandomState per call keeps repeated runs reproducible; this
    # local `gen` shadows the module-level one only inside this function.
    gen = np.random.RandomState(seed)
    train_score, test_score = [], []
    for max_depth in n:
        fit = tree.DecisionTreeRegressor(max_depth=max_depth, random_state=gen).fit(X_train[best_features], y_train)
        train_score.append(fit.score(X_train[best_features], y_train))
        test_score.append(fit.score(X_test[best_features], y_test))
    plt.plot(n, train_score, 'rx-', label='TRAIN')
    plt.plot(n, test_score, 'bo-', label='TEST')
    plt.xlabel('DecisionTree max_depth')
    plt.ylabel('DecisionTree $R^2$ score')
    plt.legend()

tree_study()

# Chose a `max_depth` of 5 to minimize overfitting the training data (or choose 10 to balance overfitting with accuracy on the test data):

gen = np.random.RandomState(seed=123)
tree_fit = tree.DecisionTreeRegressor(max_depth=5, random_state=gen).fit(X_train[best_features], y_train)

tree_fit.score(X_train[best_features], y_train), tree_fit.score(X_test[best_features], y_test)

# Note that a tree of depth $n$ sorts each sample into one of $2^n$ leaf nodes, each with a fixed prediction.
# This leads to a visible discretization error for small $n$, which is not
# necessarily a problem if the uncertainties are even larger:

y_predict = tree_fit.predict(X_test[best_features])

plt.scatter(y_test, y_predict, lw=0)
plt.xlabel('Target value')
plt.ylabel('Predicted value');

# <span style="color:blue">Systematic error is kind of the spacing between the horizontal lines above.</span>

# #### Tree Visualization

tree.export_graphviz(tree_fit, out_file='tree.dot')

# <span style="color:blue">.dot is like a text file for describing plots. Doesn't say where the nodes are. It just describes the relationships</span>

# !dot -Tpng tree.dot -o tree.png

def plot_branch(path=(), fit=tree_fit, X=X_train[best_features], y=y_train.values):
    """Visualize one split of a fitted decision tree.

    Follow *path* (a sequence of booleans: True takes the left / "below
    threshold" child) from the root, then plot for the samples reaching
    that node: (left) the distribution of the split feature, colored by
    which side of the threshold each sample falls on, and (right) the
    target distributions of the two children with their mean predictions.

    BUGFIX: the default was the mutable ``path=[]``; a tuple is the safe
    equivalent (the argument is only iterated, never mutated).

    NOTE(review): the ``fit``, ``X`` and ``y`` defaults are evaluated once
    at definition time, so they stay bound to the ``tree_fit`` / training
    data that existed when this cell first ran — confirm that is intended.
    """
    tree_ = fit.tree_  # renamed from `tree`, which shadowed the sklearn.tree module
    children_left = tree_.children_left
    children_right = tree_.children_right
    feature = tree_.feature
    threshold = tree_.threshold

    # Traverse the tree using the specified path, narrowing the sample mask.
    node = 0
    sel = np.ones(len(X), bool)  # mask of samples reaching the current node
    cut = threshold[node]
    x = X.iloc[:, feature[node]]
    print('nsel', np.count_nonzero(sel), 'cut', cut, 'value', np.mean(y[sel]))
    for below_threshold in path:
        if below_threshold:
            sel = sel & (x <= cut)
            node = children_left[node]
        else:
            sel = sel & (x > cut)
            node = children_right[node]
        cut = threshold[node]
        x = X.iloc[:, feature[node]]
        print('nsel', np.count_nonzero(sel), 'cut', cut, 'value', np.mean(y[sel]))

    fig, ax = plt.subplots(1, 2, figsize=(12, 6))
    # Left panel: the split feature at this node, red = below cut, blue = above.
    xlim = np.percentile(x[sel], (1, 95))
    below = sel & (x <= cut)
    above = sel & (x > cut)
    ax[0].hist(x[below], range=xlim, bins=50, histtype='stepfilled', color='r', alpha=0.5)
    ax[0].hist(x[above], range=xlim, bins=50, histtype='stepfilled', color='b', alpha=0.5)
    ax[0].set_xlim(*xlim)
    ax[0].set_xlabel(X.columns[feature[node]])

    # Right panel: target distributions of the two children and their means.
    ylim = np.percentile(y, (1, 99))
    y_pred = np.empty_like(y)
    y_pred[below] = np.mean(y[below])
    y_pred[above] = np.mean(y[above])
    mse2 = np.mean((y[sel] - y_pred[sel]) ** 2)  # kept only for the debug print below
    n_below = np.count_nonzero(below)
    n_above = np.count_nonzero(above)
    mse = (np.var(y[below]) * n_below + np.var(y[above]) * n_above) / (n_below + n_above)
    #print('mse', mse, mse2)
    ax[1].hist(y[below], range=ylim, bins=25, histtype='stepfilled', color='r', alpha=0.5)
    ax[1].axvline(np.mean(y[below]), c='r', ls='--')
    ax[1].hist(y[above], range=ylim, bins=25, histtype='stepfilled', color='b', alpha=0.5)
    ax[1].axvline(np.mean(y[above]), c='b', ls='--')
    ax[1].set_xlabel('Redshift target')

plot_branch([])

plot_branch([True,])

plot_branch([False,])

# ### Random Forest Regression

# Use an ensemble of decision trees that are individually less accurate but collectively more accurate, with [RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html). The individual trees are trained on random sub-samples of the data and the resulting "forest" of predictions are averaged. The random subsets for each tree are created by:
# - Using a "bootstrap" resampling of the rows, and
# - Finding the best split at each branch from a random subset of `max_features` features (columns).
gen = np.random.RandomState(seed=123)
# max_features=0.5: each split considers a random half of the columns.
# y is flattened to 1-d because the regressor expects a 1-d target array.
forest_fit = ensemble.RandomForestRegressor(
    n_estimators=15, max_features=0.5, random_state=gen, n_jobs=8).fit(X_train, y_train.values.reshape(-1))

forest_fit.score(X_train, y_train), forest_fit.score(X_test, y_test)

# Compare the first branch for two of the trees in our forest:

plot_branch(fit=forest_fit.estimators_[0], X=X_train)

plot_branch(fit=forest_fit.estimators_[1], X=X_train)

# + [markdown] heading_collapsed=true
# #### Hyperparameter Optimization

# + hidden=true
def forest_study(n=(1, 2, 3, 5, 10, 15, 20, 25, 30), seed=123):
    # Plot train/test R^2 as a function of the number of trees in the forest.
    gen = np.random.RandomState(seed)
    train_score, test_score = [], []
    for n_estimators in n:
        fit = ensemble.RandomForestRegressor(
            n_estimators=n_estimators, max_features=0.5, random_state=gen, n_jobs=8).fit(
            X_train, y_train.values.reshape(-1))
        train_score.append(fit.score(X_train, y_train))
        test_score.append(fit.score(X_test, y_test))
    plt.plot(n, train_score, 'rx-', label='TRAIN')
    plt.plot(n, test_score, 'bo-', label='TEST')
    plt.xlabel('RandomForest n_estimators')
    plt.ylabel('RandomForest $R^2$ score')
    plt.legend()

# + hidden=true
forest_study()

# + [markdown] heading_collapsed=true
# #### Feature Importance (again)

# + hidden=true
importance = pd.DataFrame(
    {'feature': X.columns, 'importance': forest_fit.feature_importances_}
).sort_values(by='importance', ascending=False)

# + hidden=true
importance.plot('feature', 'importance', 'barh', figsize=(10, 10), legend=False);
# -

# #### Prediction uncertainty

# Since we now have multiple predictions for each sample, we can use their spread as an estimate of the uncertainty in the mean prediction:

y_pred = forest_fit.predict(X_test)

# NOTE(review): the loop variable `tree` shadows the imported sklearn `tree`
# module inside this comprehension — harmless here, but confusing.
y_pred_each = np.array([tree.predict(X_test) for tree in forest_fit.estimators_])
y_pred_each.shape

# Exact float equality is being asserted here — presumably the forest
# prediction is the mean of the per-tree predictions; confirm for this version.
np.all(y_pred == np.mean(y_pred_each, axis=0))

# Per-sample signed error and per-sample spread of the tree predictions.
y_pred_error = y_test.values.reshape(-1) - y_pred

y_pred_spread = np.std(y_pred_each, axis=0)

# Check that the estimated uncertainty increases when the estimated values are farther
# from the true values:

plt.scatter(np.abs(y_pred_error), y_pred_spread, lw=0)
plt.xlabel('$|y_{true} - y_{pred}|$')
plt.ylabel('Forest prediction spread')
plt.xlim(0, 3)
plt.ylim(0, 3);

# <span style="color:blue">
# eash pt is 1 gal<br>
# x axis is diff between true answer and pred answer<br>
# Larger the true error, larger the estimated error
# </span>

# For a more quantitative check of the estimated uncertainties, plot the distribution of "pulls":

# pull = (true - predicted) / estimated sigma; a calibrated sigma gives a
# unit Gaussian.
bins = np.linspace(-2.5, 2.5, 50)
plt.hist(y_pred_error / y_pred_spread, bins=bins, density=True)
# bin centers, used as the x grid for the reference Gaussians
pull = 0.5 * (bins[1:] + bins[:-1])
plt.plot(pull, scipy.stats.norm.pdf(pull), 'r-', label='$\sigma=$ spread')
correction = 2.0
plt.plot(pull, correction * scipy.stats.norm.pdf(correction * pull), 'r--',
         label=('$\sigma=$ spread / %.1f' % correction))
plt.legend()
plt.xlabel('pull = dy / $\sigma$');

# <span style="color:blue">x coord divided by y cord</span>

# This test reveals that the spread overestimates the true uncertainty by about a factor of 2, which is not surprising since the individual trees do not use the full training data samples or features. The lesson is that the spread is a useful indicator but needs to be calibrated using a study like this.

# <span style="color:blue">This says we're overestimates the uncertainty by a factor of two<br>
# blue lines is just the histogram of the scatter plot.<br>
# If the errors are perfect, it should fit a gaussian of mean 0 and sigma 1. But it doesn't.
# </span>

# #### "Out-of-bag" Testing

# Combining the trees in a forest is known as "bagging". Since each tree leaves out some samples, we can use these omitted (aka "out-of-bag") samples to test our model. This means we no longer need to set aside a separate test dataset and can use all of our data for training the forest.
#
# *Technical note: since RandomForestRegressor does not support a max_samples parameter, the out-of-bag samples are only due to bootstrap sampling with replacement, which generally needs more estimators for reasonable statistics.*

# NOTE(review): newer scikit-learn releases do accept max_samples — confirm
# against the version this course pins.
gen = np.random.RandomState(seed=123)
# Fit on ALL of the data; oob_score=True scores each sample using only the
# trees whose bootstrap sample did not include it.
forest_fit = ensemble.RandomForestRegressor(
    n_estimators=100, max_features=0.5, oob_score=True, random_state=gen, n_jobs=8).fit(X, y.values.reshape(-1))

# The first score is computed on data the model has already seen (X_train is
# a subset of X), so oob_score_ is the honest generalization estimate here.
forest_fit.score(X_train, y_train), forest_fit.oob_score_
notebooks/Redshift.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Train a small VGG-style CNN on CIFAR-10: two conv/conv/pool/dropout stages
# followed by a dense softmax classifier, optimized with RMSprop on
# categorical cross-entropy.
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD, Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
#from quiver_engine import server

import matplotlib.pyplot as plt

# CIFAR_10 is a set of 60K images 32x32 pixels on 3 channels
IMG_CHANNELS = 3
IMG_ROWS = 32
IMG_COLS = 32

# constant
BATCH_SIZE = 128
NB_EPOCH = 40
NB_CLASSES = 10
VERBOSE = 1
VALIDATION_SPLIT = 0.2
OPTIM = RMSprop()

# load dataset
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert integer class labels to one-hot vectors
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# float and normalization to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# network
model = Sequential()

model.add(Conv2D(32, kernel_size=3, padding='same',
                 input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)))
model.add(Activation('relu'))
model.add(Conv2D(32, kernel_size=3, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, kernel_size=3, padding='same'))
model.add(Activation('relu'))
# BUGFIX: was Conv2D(64, 3, 3). In the Keras 2 Conv2D signature the third
# positional argument is `strides`, so that call built a 3x3 convolution
# with stride 3 — an unintended aggressive downsampling left over from the
# Keras 1 Convolution2D(64, 3, 3) idiom (3x3 kernel, stride 1).
# kernel_size=3 restores the stride-1 3x3 convolution.
model.add(Conv2D(64, kernel_size=3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))

model.summary()

model.compile(loss='categorical_crossentropy', optimizer=OPTIM,
              metrics=['accuracy'])

# NOTE(review): this augmentation generator is configured and fitted but never
# used — training below calls model.fit on the raw arrays, and the
# fit_generator call that would consume it is commented out.
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False)  # randomly flip images

datagen.fit(X_train)

# train
history = model.fit(X_train, Y_train, batch_size=BATCH_SIZE,
                    epochs=NB_EPOCH, validation_split=VALIDATION_SPLIT,
                    verbose=VERBOSE)

#model.fit_generator(datagen.flow(X_train, Y_train,
#                       batch_size=BATCH_SIZE),
#                       samples_per_epoch=X_train.shape[0],
#                       nb_epoch=NB_EPOCH,
#                       verbose=VERBOSE)

#server.launch(model)

print('Testing...')
score = model.evaluate(X_test, Y_test,
                       batch_size=BATCH_SIZE, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])

# save model architecture (JSON) and weights (HDF5) separately
model_json = model.to_json()
open('cifar10_architecture.json', 'w').write(model_json)
model.save_weights('cifar10_weights.h5', overwrite=True)

# list all data in history
print(history.history.keys())
# summarize history for accuracy
# NOTE(review): the history keys 'acc'/'val_acc' are version-dependent —
# Keras >= 2.3 records them as 'accuracy'/'val_accuracy'; confirm against
# the installed Keras version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
Chapter03/keras_CIFAR10_V1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example Map Plotting

# +
# By line: RRB 2020-07-20
# Script aims to:
# - Load a netCDF file
# - Extract one variable: CO
# - Calculate column values: load model pressure, multiply by ppb -> column conversion factor
# - Add cyclic point
# - Create contour plot of variable as world map with coastlines
# - Customize contours and colorbar
# - Add axes labels
# - Add grid lines
# -

# ### At the start of a Jupyter notebook you need to import all modules that you will use.

import matplotlib.pyplot as plt
import cartopy.crs as ccrs                  # For plotting maps
import cartopy.feature as cfeature          # For plotting maps
from cartopy.util import add_cyclic_point   # For plotting maps
from pathlib import Path                    # System agnostic paths
import xarray as xr                         # For loading the data arrays
import numpy as np                          # For array creation and calculations

# ### Define the directories and file of interest for your results.

result_dir = Path("../../data")
file = "CAM_chem_merra2_FCSD_1deg_QFED_monthoutput_CO_201801.nc"
file_to_open = result_dir / file
# The netCDF file is now held in an xarray dataset named 'nc_load'
# and can be referenced later in the notebook.
nc_load = xr.open_dataset(file_to_open)
# To see what the netCDF file contains, uncomment below:
#nc_load

# ### Extract the variable of choice at the time and level of choice.

# +
# Extract variable (first time step only)
var_sel = nc_load['CO'].isel(time=0)
#print(var_sel)

# Select the surface level at a specific time and convert to ppbv from vmr
# Select the surface level for an average over three times and convert to ppbv from vmr
var_sel = var_sel*1e09   # 10-9 to ppb
print(var_sel.shape)

# Extract grid variables
lat = var_sel.coords['lat']
lon = var_sel.coords['lon']
# -

# ### Define constants for converting to column amounts.

#-------------------------------
# CONSTANTS and conversion factor
#-------------------------------
NAv = 6.0221415e+23                        #--- Avogadro's number
g = 9.81                                   #--- m/s - gravity
MWair = 28.94                              #--- g/mol
xp_const = (NAv* 10)/(MWair*g)*1e-09       #--- scaling factor for turning vmr into pcol
                                           #--- (note 1*e-09 because in ppb)

# ### Create 3d Pressure array.
# Calculates pressures at each hybrid level using the formula: p(k) = a(k)*p0 + b(k)*ps.

# +
# Load values to create true model pressure array
psurf = nc_load['PS'].isel(time=0)
hyai = nc_load['hyai']
hybi = nc_load['hybi']
p0 = nc_load['P0']
lev = var_sel.coords['lev']
num_lev = lev.shape[0]

# Initialize pressure edge arrays
mod_press_low = xr.zeros_like(var_sel)
mod_press_top = xr.zeros_like(var_sel)

# Calculate pressure edge arrays
# CAM-chem layer indices start at the top and end at the bottom
for i in range(num_lev):
    mod_press_top[i,:,:] = hyai[i]*p0 + hybi[i]*psurf
    mod_press_low[i,:,:] = hyai[i+1]*p0 + hybi[i+1]*psurf

# Delta P in hPa (layer thickness in pressure units)
mod_deltap = (mod_press_low - mod_press_top)/100

#print(mod_press_low[:,0,0])
#print(mod_press_top[:,0,0])
#print(mod_deltap[:,0,0])
# -

# ### Calculate columns.

# Weighted sum over the vertical (lev) dimension: layer thickness times
# scaled mixing ratio gives the partial-column amounts, summed to total column.
var_tcol = xr.dot(mod_deltap, xp_const*var_sel, dims=["lev"])

# ### Add cyclic point to avoid white stripe at lon=0.

var_tcol_cyc, lon_cyc = add_cyclic_point(var_tcol, coord=lon)

# ### Plot the value over the globe.

# +
import matplotlib.colors as colors

# NOTE(review): this class is defined but never used below — the plot uses
# BoundaryNorm instead. Presumably kept from an earlier iteration.
class MidpointNormalize(colors.Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        res = np.ma.masked_array(np.interp(value, x, y))
        return res
# -

# +
plt.figure(figsize=(20,8))

# Define projection
ax = plt.axes(projection=ccrs.PlateCarree())

# Define contour levels (earlier attempts kept for reference)
#clev = np.arange(0.5, 3.2, 0.1)
#clev = np.log10([0.1, 0.3, 0.5, 0.75, 1, 3, 5, 7.5,])
#clev = ([0.1, 1/3, 1/2, 1, 10/3, 10/2])
clev = np.logspace(-2, 0.6, num=50)
print(clev)
cmap=plt.get_cmap('Spectral_r')

from matplotlib.ticker import MaxNLocator
#levels = MaxNLocator(nbins=15).tick_values(0.4, 4.0)
levels = clev
from matplotlib.colors import BoundaryNorm
# Discrete normalization: maps the logspace levels onto the colormap bins.
norm = BoundaryNorm(clev, ncolors=cmap.N, clip=True)
#log10((/ 1, 3, 5, 7.5, 10, 15, 20, 25, 30/))

# Plot the data (contourf attempts kept for reference; pcolormesh used)
#plt.contourf(lon_cyc,lat,var_tcol_cyc/1e18,clev,cmap='Spectral_r',extend='both')
#plt.contourf(lon_cyc,lat,var_tcol_cyc/1e18,clev,cmap='YlOrRd',extend='both')
from matplotlib import ticker
#plt.contourf(lon_cyc,lat,var_tcol_cyc/1e18,locator=ticker.LogLocator(subs=clev),cmap='YlOrRd',extend='both')
plt.pcolormesh(lon_cyc,lat,var_tcol_cyc/1e18,cmap='Spectral_r', transform=ccrs.PlateCarree(), norm=norm)
#plt.pcolormesh(lon_cyc,lat,var_tcol_cyc/1e18,cmap='Spectral_r', transform=ccrs.PlateCarree())

# Add coastlines
ax.add_feature(cfeature.COASTLINE)

# Add lat lon grids
gl = ax.gridlines(draw_labels=True, color='grey', alpha=0.8, linestyle='--')
# NOTE(review): xlabels_top/ylabels_right are the old gridliner attribute
# names; newer cartopy renames them top_labels/right_labels — verify against
# the installed cartopy version.
gl.xlabels_top = False
gl.ylabels_right = False

# Titles
# Main
plt.title("Global map of CAM-chem column CO, January 2018",fontsize=18)

# y-axis
ax.text(-0.04, 0.5, 'Latitude', va='bottom', ha='center', rotation='vertical', rotation_mode='anchor', transform=ax.transAxes)
# x-axis
ax.text(0.5, -0.08, 'Longitude', va='bottom', ha='center', rotation='horizontal', rotation_mode='anchor', transform=ax.transAxes)
# legend
ax.text(1.15, 0.5, 'CO (x 10$^{18}$ molec/cm$^{2}$)', va='bottom', ha='center', rotation='vertical', rotation_mode='anchor', transform=ax.transAxes)

#cbar.ax.set_xticklabels(clev)  # horizontal colorbar
cbar = plt.colorbar(extend='both')
#cbar.set_ticks(clev[1::3])
#cbar.set_ticklabels(clev[1::3])

plt.show()
# -
Python/maps/plot_map_basic_co_column_log.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Motor Encoder Analysis - Baron
#
# Reads two GPIO sample logs, extracts every digit reading from each, plots
# both traces against their own sample indices, and saves the figure to
# motor_encoder_graph.png.

import numpy as np


def extract_digits(lines):
    """Return every decimal digit character found in *lines*, in order, as ints.

    Mirrors the original per-character scan: non-digit characters (labels,
    separators, newlines) are simply skipped.
    """
    return [int(ch) for line in lines for ch in line if ch.isdigit()]


def read_gpio_values(path):
    """Read one GPIO log file and return its digit samples as a list of ints."""
    # 'with' guarantees the handle is closed even if parsing raises,
    # replacing the original open()/close() pair.
    with open(path, "r") as fh:
        return extract_digits(fh.readlines())


def main():
    # matplotlib is imported lazily so this module can be imported (e.g. to
    # test extract_digits) without requiring a plotting backend.
    from matplotlib import pyplot as plt

    all_gpio_val = read_gpio_values("gpio_values.txt")
    all_gpio_val_2 = read_gpio_values("gpio_values_2.txt")

    # Sample indices 0..len-1 (replaces the linspace(...)+pop() construction,
    # which produced exactly these values as floats).
    x = np.arange(len(all_gpio_val))
    x2 = np.arange(len(all_gpio_val_2))

    plt.plot(x, all_gpio_val, 'r')
    # BUG FIX: the second trace was plotted against x (the first log's axis);
    # that raises a ValueError whenever the two logs differ in length.
    plt.plot(x2, all_gpio_val_2, 'b')
    plt.title('Motor Encoder Analysis - Baron')
    plt.xlabel('GPIO Input Reading')
    plt.ylabel('Encoder State : Baron')
    # BUG FIX: save after the axis labels are set, so they appear in the png
    # (the original called savefig before xlabel/ylabel).
    plt.savefig('motor_encoder_graph.png')
    plt.show()


if __name__ == "__main__":
    main()
Assignment 7/Motor_Encoder_Analysis_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ShreyasJothish/DS-Sprint-02-Storytelling-With-Data/blob/master/module3-make-explanatory-visualizations/LS_DS_123_Make_explanatory_visualizations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="vwnZeuKfM7TO" colab_type="text" # _Lambda School Data Science_ # # # Choose appropriate visualizations # # # Recreate this [example by FiveThirtyEight:](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/) # # ![](https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png?w=575) # # Using this data: # # https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel # # ### Stretch goals # # Recreate more examples from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/). # # For example: # - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) ([`altair`](https://altair-viz.github.io/gallery/index.html#maps)) # - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) ([`statsmodels`](https://www.statsmodels.org/stable/index.html)) # + [markdown] id="oRFTcx7Yu3Mx" colab_type="text" # # Trying to recreate # **Fault Line No. 3:** # # Men vs. 
# women

# + id="ZcKSqVjwM7TU" colab_type="code"
# Display the FiveThirtyEight reference chart we are trying to recreate.
from IPython.display import display, Image
url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/methahickey-inconvenient-0830-6.png?w=450'
#example = Image(url=url,width=575) - width of image can be set here also.
example = Image(url=url)
display(example)

# + id="0B1PXJ-5vUEq" colab_type="code"
# Load and perform initial data validation
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Using raw URL from https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel
df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv')

# + id="oWAeL7KmwinY" colab_type="code"
df.shape

# + id="yCOhgrAQwuro" colab_type="code"
df.isnull().sum()

# + id="6jPNAbZvwwj2" colab_type="code"
pd.options.display.max_columns = None
df.describe()

# + id="Gb3yErXbw8wi" colab_type="code"
df.describe(exclude=[np.number])

# + id="_Z2BT9S9z9kA" colab_type="code"
df.timestamp.describe()

# + id="oWagHSP1yBhP" colab_type="code"
# Parse the timestamp strings into datetimes so they can index a time series.
df.timestamp = pd.to_datetime(df.timestamp)
df.timestamp.describe()

# + id="tgJNCNAlzdFi" colab_type="code"
df.set_index('timestamp', inplace=True)

# + id="LLKkhQhmzpBk" colab_type="code"
df.category.value_counts()

# + id="2_YbR-SEzr4W" colab_type="code"
display(example)

# + id="YwELyeSA1YNG" colab_type="code"
# To be checked on combining all the information in one go
#df[df.category=='Males' or df.category=='Females']
#df.category=='Males' or df.category=='Females'
malesdf = df[df.category=='Males']
femalesdf = df[df.category=='Females']

# + id="R-WMcNdK3Efy" colab_type="code"
# Cumulative series to plot: totals per gender, and the male ratings broken
# out by score bucket (1, 10, and the 2-9 middle band).
malesdf_total = malesdf['respondents']
femalesdf_total = femalesdf['respondents']
malesdf_1_of_10 = malesdf['1_votes']
malesdf_10_of_10 = malesdf['10_votes']
malesdf_2_to_9 = (malesdf['2_votes'] + malesdf['3_votes'] + malesdf['4_votes'] +
                  malesdf['5_votes'] + malesdf['6_votes'] + malesdf['7_votes'] +
                  malesdf['8_votes'] + malesdf['9_votes'])

# To be checked on optimizing the above code for malesdf_2_to_9
#values = ['{}_votes'.format(k) for k in range(2,10)]
#print(values)
#malesdf_2_to_9 = pd.pivot_table(malesdf,index=['respondents'],values=values,aggfunc=[np.sum])
#print(malesdf_2_to_9)

# + id="03T-HrPz3nQs" colab_type="code"
import matplotlib.dates as mdates
from matplotlib.dates import SU
import datetime

plt.style.use('fivethirtyeight')

ax = malesdf_total.plot(color='#EC713B', linewidth=3)
femalesdf_total.plot(color='blue', linewidth=2.5)
malesdf_1_of_10.plot(color='#EC713B', linewidth=2)
malesdf_10_of_10.plot(color='#EC713B', linewidth=2)
malesdf_2_to_9.plot(color='#EC713B', linewidth=2)

ax.set(yticks=range(0,2000,500))

# Title/annotation text placed in data coordinates (dates on x, counts on y).
ax.text(x=datetime.date(2017,7,17), y=1950, s="Men dominated Al Gore's IMDb movie rating", fontsize=16, fontweight='bold')
ax.text(x=datetime.date(2017,7,17), y=1850, s="Cumulative number of IMDb rating for an 'An Inconvenient Sequel' by gender and score", fontsize=12)
ax.text(x=datetime.date(2017, 7, 17), y=1750, s="gender and score, July 17 through Aug.29", fontsize=12)
ax.text(x=datetime.date(2017, 8, 14), y=1300, s="All ratings", fontsize=14, fontweight='bold', color='#EC713B')
ax.text(x=datetime.date(2017, 8, 14), y=1200, s="from men", fontsize=14, fontweight='bold', color='#EC713B')
ax.text(x=datetime.date(2017, 8, 21), y=700, s="1 out of 10 ratings", fontsize=12, color='#EC713B')
ax.text(x=datetime.date(2017, 8, 24), y=550, s="10 out of 10", fontsize=12, color='#EC713B')
ax.text(x=datetime.date(2017, 8, 24), y=350, s="2-9 out of 10", fontsize=12, color='#EC713B')
ax.text(x=datetime.date(2017, 8, 14), y=100, s="All ratings from women", fontsize=14, fontweight='bold', color='blue')

# This didnot work to set limit on x axis.
#ax.set_xticklabels(['7/23','7/30','8/6','8/13','8/20','8/27'])
#ax.xaxis.set_data_interval(datetime.date(2017,7,17))
#print("ax.xaxis.get_data_interval",ax.xaxis.get_data_interval())
# Clamp the x range and label every Sunday as mm/dd.
ax.set_xlim([datetime.date(2017, 7, 17), datetime.date(2017, 8, 30)])
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=SU))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d'))
ax.tick_params(labelrotation=0)

plt.xlabel('');

# This wont work since the axis shall be scaled to match x and y coordinates.
#ax.text(x=0, y=0, s="Men dominated Al Gore's IMDb movie rating")
#ax.text(x=0, y=0, s="Cumulative number of IMDb rating for an 'An Inconvenient Sequel' by")"""

# + id="ZxT9YOeFZPTP" colab_type="code" colab={}
module3-make-explanatory-visualizations/LS_DS_123_Make_explanatory_visualizations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Hypocycloid definition and animation ## # ### Deriving the parametric equations of a hypocycloid ### # On May 11 @fermatslibrary posted a gif file, https://twitter.com/fermatslibrary/status/862659602776805379, illustrating the motion of eight cocircular points. The Fermat's Library followers found it so fascinating that the tweet picked up more than 1000 likes and 800 retweets. Soon after I saw the gif I created a similar Python Plotly animation # although the tweet did not mention how it was generated. @plotlygraphs tweeted a link # to my [Jupyter notebook](http://nbviewer.jupyter.org/github/empet/Math/blob/master/fermat-circle-moving-online.ipynb) presenting the animation code. # # How I succeeded to reproduce it so fast? Here I explain the secret: # # At the first sight you can think that the gif displays an illusory rectiliniar motion of the eight points, but it is a real one. I noticed that the moving points lie on a rolling circle along another circle, and I knew that a fixed point on a rolling circle describes a curve called hypocycloid. In the particular case when the ratio of the two radii is 2 the hypocycloid degenerates to a diameter in the base (fixed) circle. # # In this Jupyter notebook I deduce the parametric equations of a hypoycyloid, animate its construction # and explain why when $R/r=2$ any point on the rolling circle runs a diameter in the base circle. from IPython.display import Image Image(filename='generate-hypocycloid.png') # We refer to the figure in the above cell to explain how we get the parameterization of the hypocycloid generated by a fixed point of a circle of center $O'_0$ and radius r, rolling without slipping along the circle # of center O and radius $R>r$. 
# # Suppose that initially the hypocycloid generating point, $P$, is located at $(R,0)$. # After the small circle was rolling along the greater circle a length corresponding to an angle of measure, $t$, it reaches the point $P'$ on the circle $C(O'_t, r)$. # # Rolling without slipping means that the length the arc $\stackrel{\frown}{PQ}$ of the greater circle equals the length of the arc $\stackrel{\frown}{P'Q}$ on the smaller one, i.e $Rt=r\omega$, where $\omega$ is the measure of the non-oriented angle $\widehat{P'O'_tQ}$ (i.e. we consider $\omega>0$) . Thus $\omega=(R/r)t$ # # The center $O'_t$ has the coordinates $x=(R-r)\cos(t), (R-r)\sin(t)$. The clockwise parameterization of the circle $C(O'_t,r)$ with respect to the coordinate system $x'O'_ty'$ is as follows: # # $$\begin{array}{llr} # x'(\tau)&=&r\cos(\tau)\\ # y'(\tau)&=&-r\sin(\tau), # \end{array}$$ # $\tau\in[0,2\pi]$. # # Hence the point $P'$ on the hypocycloid has the coordinates: # $x'=r\cos(\omega-t), y'=-r\sin(\omega-t)$, and with respect to $xOy$, the coordinates: # # $x=(R-r)\cos(t)+r\cos(\omega-t), y=(R-r)\sin(t)-r\sin(\omega-t)$. # # Replacing $\omega=(R/r)t$ we get the parameterization of the hypocycloid generated by the initial point $P$: # # $$\begin{array}{lll} # x(t)&=&(R-r)\cos(t)+r\cos(t(R-r)/r)\\ # y(t)&=&(R-r)\sin(t)-r\sin(t(R-r)/r), \quad t\in[0,2\pi] # \end{array}$$ # # If $R/r=2$ the parametric equations of the corresponding hypocycloid are: # # $$\begin{array}{lll} # x(t)&=&2r\cos(t)\\ # y(t)&=&0 # \end{array}$$ # # i.e. the moving point $P$ runs the diameter $y=0$, from the position $(R=2r, 0)$ to $(-R,0)$ when $t\in[0,\pi]$, # and back to $(R,0)$, for $t\in[\pi, 2\pi]$. # # What about the trajectory of any other point, $A$, on the rolling circle that at $t=0$ has the angular coordinate $\varphi$ with respect to the center $O'_0$? 
# # We show that it is also a diameter in the base circle, referring to the figure in the next cell that is a particularization of # the above figure to the case $R=2r$. Image(filename='hypocycloid-2r.png') # The arbitrary point $A$ on the rolling circle has, for t=0, the coordinates: # $x=r+r\cos(\varphi), y=r\sin(\varphi)$. # # The angle $\widehat{QO'_tP'}=\omega$ is in this case $2t$, and $\widehat{B'O'_tP'}=t$. Since $\widehat{A'O'_tP'}=\varphi$, we get that the position of the fixed point on the smaller circle, after rolling along an arc of length $r(2t-\varphi)$, # is $A'(x(t)=r\cos(t)+r\cos(t-\varphi), y(t)=r\sin(t)-r\sin(t-\varphi))$, with $\varphi$ constant, and $t$ variable in the interval $[\varphi, 2\pi+\varphi]$. # # Let us show that $y(t)/x(t)=$constant for all $t$, i.e. the generating point of the hypocycloid lies on a segment of line (diameter in the base circle): # # $$\displaystyle\frac{y(t)}{x(t)}=\frac{r\sin(t)-r\sin(t-\varphi)}{r\cos(t)+r\cos(t-\varphi)}=\left\{\begin{array}{ll}\tan(\varphi/2)& \mbox{if}\:\: t=\varphi/2\\ # \displaystyle\frac{2\cos(t-\varphi/2)\sin(\varphi/2)}{2\cos(t-\varphi/2)\cos(\varphi/2)}=\tan(\varphi/2)& \mbox{if}\:\: t\neq\varphi/2 \end{array}\right.$$ # # Hence the @fermatslibrary animation, illustrated by a Python Plotly code in my [Jupyter notebook](http://nbviewer.jupyter.org/github/empet/Math/blob/master/fermat-circle-moving-online.ipynb), displays the motion of the eight points placed on the rolling # circle of radius $r=R/2$, along the corresponding diameters in the base circle. 
# ### Animating the hypocycloid generation ###

import numpy as np
from numpy import pi, cos, sin
import copy
import plotly.plotly as py
from plotly.grid_objs import Grid, Column
import time

# Set the layout of the plot:

# +
axis=dict(showline=False,
          zeroline=False,
          showgrid=False,
          showticklabels=False,
          range=[-1.1,1.1],
          autorange=False,
          title=''
          )

layout=dict(title='',
            font=dict(family='Balto'),
            autosize=False,
            width=600,
            height=600,
            showlegend=False,
            xaxis=dict(axis),
            yaxis=dict(axis),
            hovermode='closest',
            shapes=[],
            updatemenus=[dict(type='buttons',
                              showactive=False,
                              y=1,
                              x=1.2,
                              xanchor='right',
                              yanchor='top',
                              pad=dict(l=10),
                              buttons=[dict(label='Play',
                                            method='animate',
                                            args=[None,
                                                  dict(frame=dict(duration=90, redraw=False),
                                                       transition=dict(duration=0),
                                                       fromcurrent=True,
                                                       mode='immediate'
                                                       )]
                                            )]
                              )]
            )
# -

# Define the base circle:

layout['shapes'].append(dict(type= 'circle',
                             layer= 'below',
                             xref= 'x', yref='y',
                             fillcolor= 'rgba(245,245,245, 0.95)',
                             x0= -1.005, y0= -1.005, x1= 1.005, y1= 1.005,
                             line= dict(color= 'rgb(40,40,40)', width=2
                                        )
                             )
                        )

def circle(C, rad):
    # C=center, rad=radius; returns 100 points on the circle, traversed
    # clockwise (note the minus sign on the sine term).
    theta=np.linspace(0,1,100)
    return C[0]+rad*cos(2*pi*theta), C[1]-rad*sin(2*pi*theta)

# Prepare data for animation to be uploaded to Plotly cloud:

def set_my_columns(R=1.0, ratio=3):
    # R=the radius of base circle
    # ratio=R/r, where r is the radius of the rolling circle
    r=R/float(ratio)
    # NOTE(review): radius 0 collapses the initial rolling circle to the
    # single point (R-r, 0); presumably intentional since every frame
    # replaces it with the real circle — confirm against the rendered plot.
    xrol, yrol=circle([R-r, 0], 0)
    my_columns=[Column(xrol, 'xrol'), Column(yrol, 'yrol')]
    my_columns.append(Column([R-r, R], 'xrad'))
    my_columns.append(Column([0,0], 'yrad'))
    my_columns.append(Column([R], 'xstart'))
    my_columns.append(Column([0], 'ystart'))
    a=R-r           # distance from origin to the rolling circle's center
    b=(R-r)/float(r)  # angular speed ratio from the rolling-without-slipping condition
    frames=[]  # NOTE(review): never used in this function
    t=np.linspace(0,1,50)
    xpts=[]
    ypts=[]
    for k in range(t.shape[0]):
        # Rolling circle at parameter t[k]
        X,Y=circle([a*cos(2*pi*t[k]), a*sin(2*pi*t[k])], r)
        my_columns.append(Column(X, 'xrcirc{}'.format(k+1)))
        my_columns.append(Column(Y, 'yrcirc{}'.format(k+1)))
        # The generator point has the coordinates (xp, yp)
        xp=a*cos(2*pi*t[k])+r*cos(2*pi*b*t[k])
        yp=a*sin(2*pi*t[k])-r*sin(2*pi*b*t[k])
        xpts.append(xp)
        ypts.append(yp)
        my_columns.append(Column([a*cos(2*pi*t[k]), xp], 'xrad{}'.format(k+1)))
        my_columns.append(Column([a*sin(2*pi*t[k]), yp], 'yrad{}'.format(k+1)))
        # Snapshot of the hypocycloid arc traced so far (deep copy so later
        # appends don't mutate already-stored columns).
        my_columns.append(Column(copy.deepcopy(xpts), 'xpt{}'.format(k+1)))
        my_columns.append(Column(copy.deepcopy(ypts), 'ypt{}'.format(k+1)))
    return t, Grid(my_columns)

def set_data(grid):
    # Three traces: the rolling circle, the radius to the generating point,
    # and the hypocycloid arc (updated frame by frame).
    return [dict(xsrc=grid.get_column_reference('xrol'),   # rolling circle
                 ysrc= grid.get_column_reference('yrol'),
                 mode='lines',
                 line=dict(width=2, color='blue'),
                 name='',
                 ),
            dict(xsrc=grid.get_column_reference('xrad'),   # radius in the rolling circle
                 ysrc= grid.get_column_reference('yrad'),
                 mode='markers+lines',
                 line=dict(width=1.5, color='blue'),
                 marker=dict(size=4, color='blue'),
                 name=''),
            dict(xsrc=grid.get_column_reference('xstart'),  # starting point on the hypocycloid
                 ysrc= grid.get_column_reference('ystart'),
                 # NOTE(review): 'marker+lines' looks like a typo for
                 # 'markers+lines' — confirm against the plotly mode strings.
                 mode='marker+lines',
                 line=dict(width=2, color='red', shape='spline'),
                 name='')
            ]

# Set data for each animation frame:

def set_frames(t, grid):
    return [dict(data=[dict(xsrc=grid.get_column_reference('xrcirc{}'.format(k+1)),  # update rolling circ position
                            ysrc=grid.get_column_reference('yrcirc{}'.format(k+1))
                            ),
                       dict(xsrc=grid.get_column_reference('xrad{}'.format(k+1)),    # update the radius
                            ysrc=grid.get_column_reference('yrad{}'.format(k+1))     # of generating point
                            ),
                       dict(xsrc=grid.get_column_reference('xpt{}'.format(k+1)),     # update hypocycloid arc
                            ysrc=grid.get_column_reference('ypt{}'.format(k+1))
                            )
                       ],
                 traces=[0,1,2])
            for k in range(t.shape[0])
            ]

# Animate the generation of a hypocycloid with 3 cusps (i.e. R/r=3):

# +
py.sign_in('empet', 'my_api_key')  # access my Plotly account
t, grid=set_my_columns(R=1, ratio=3)
py.grid_ops.upload(grid, 'animdata-hypo3'+str(time.time()), auto_open=False)  # upload data to Plotly cloud
# -

data1=set_data(grid)
frames1=set_frames(t, grid)

title='Hypocycloid with '+str(3)+' cusps, '+'<br>generated by a fixed point of a circle rolling inside another circle; R/r=3'
layout.update(title=title)
fig1=dict(data=data1, layout=layout, frames=frames1)
py.icreate_animations(fig1, filename='anim-hypocycl3'+str(time.time()))

# Hypocycloid with four cusps (astroid):

t, grid=set_my_columns(R=1, ratio=4)
py.grid_ops.upload(grid, 'animdata-hypo4'+str(time.time()), auto_open=False)  # upload data to Plotly cloud
data2=set_data(grid)
frames2=set_frames(t, grid)
title2='Hypocycloid with '+str(4)+' cusps, '+'<br>generated by a fixed point of a circle rolling inside another circle; R/r=4'
layout.update(title=title2)
fig2=dict(data=data2, layout=layout, frames=frames2)
py.icreate_animations(fig2, filename='anim-hypocycl4'+str(time.time()))

# Degenerate hypocycloid (R/r=2):

t, grid=set_my_columns(R=1, ratio=2)
py.grid_ops.upload(grid, 'animdata-hypo2'+str(time.time()), auto_open=False)  # upload data to Plotly cloud
data3=set_data(grid)
frames3=set_frames(t, grid)
title3='Degenerate Hypocycloid; R/r=2'
layout.update(title=title3)
fig3=dict(data=data3, layout=layout, frames=frames3)
py.icreate_animations(fig3, filename='anim-hypocycl2'+str(time.time()))

from IPython.core.display import HTML
def css_styling():
    # Inject the notebook's custom stylesheet.
    styles = open("./custom.css", "r").read()
    return HTML(styles)
css_styling()
hypocycloid-online.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Fibonacci

# +
def recur_Fib(n):
    """Return the n-th Fibonacci number by naive recursion.

    Time => T(n-1) + T(n-2), i.e. exponential.
    """
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return recur_Fib(n-1) + recur_Fib(n-2)


# +
def iter_Fib(n):
    """Return the n-th Fibonacci number iteratively.  Time => O(n)."""
    if n == 0:
        return 0
    elif n == 1:
        return 1
    f1 = 0
    f2 = 1
    temp = 0
    for i in range(2, n+1):
        temp = f1 + f2
        f1 = f2
        f2 = temp
    return temp


# +
# Fast-doubling identities:
#   If n is even, with k = n/2:       F(n) = [2*F(k-1) + F(k)]*F(k)
#   If n is odd,  with k = (n+1)/2:   F(n) = F(k)*F(k) + F(k-1)*F(k-1)

MAX = 1000
f = [0]*MAX  # memo table; f[n] == 0 means "not computed yet"


def formula_Fib(n):
    """Return the n-th Fibonacci number via fast doubling with memo table f[].

    Time => O(log n).  Closed form: {[(sqrt(5) + 1)/2]^n} / sqrt(5).
    """
    if n == 0:
        return 0
    if (n == 1 or n == 2):
        f[n] = 1
        return (f[n])
    # If this Fibonacci number is already computed, reuse it.
    if f[n]:
        return (f[n])
    # (n & 1) is 1 if n is odd, else 0.
    if (n & 1):
        k = (n+1) // 2
    else:
        k = n // 2
    # Apply the doubling formulas above.
    if (n & 1):
        f[n] = (formula_Fib(k)*formula_Fib(k) +
                formula_Fib(k-1)*formula_Fib(k-1))
    else:
        f[n] = (2*formula_Fib(k-1) + formula_Fib(k))*formula_Fib(k)
    return f[n]


# +
# DP using Memoization (Top-down approach)
dp = [-1 for i in range(10)]  # NOTE: this table size only supports n <= 9


def dp_Fib(n):
    """Return the n-th Fibonacci number with top-down memoization in dp[]."""
    if n <= 1:
        return n
    global dp
    f1 = 0
    f2 = 1
    if (dp[n-1] != -1):
        f1 = dp[n-1]
    else:
        f1 = dp_Fib(n-1)  # first time: compute the value
    if (dp[n-2] != -1):
        f2 = dp[n-2]
    else:
        f2 = dp_Fib(n-2)
    dp[n] = f1 + f2
    return dp[n]
# -

recur_Fib(5)
iter_Fib(5)
formula_Fib(5)
dp_Fib(5)

# # Pow(x,n)
# x^n = x * x^(n-1) if n > 0, and 1 if n == 0.


def pow(x, n):
    """Naive power: peel one factor per call.  O(n) multiplies.

    NOTE: shadows the built-in pow(); kept for the lesson's progression.
    """
    if n == 0:
        return 1
    else:
        return x * pow(x, n-1)


# x^n = x^(n/2) * x^(n/2) if n is EVEN, x * x^(n-1) if n is ODD.


def pow_1(x, n):
    """Fast power by repeated squaring.  O(log n) multiplies."""
    if n == 0:
        return 1
    elif n % 2 == 0:
        even_res = pow_1(x, n//2)
        return even_res * even_res
    else:
        # BUG FIX: recurse into pow_1 (the original called the O(n) pow
        # above here, which made the "fast" version linear after the first
        # odd exponent).
        return x * pow_1(x, n-1)


pow(2, 5)
pow_1(2, 5)

# # Modular Exponential
# x^n mod M, e.g. 5^2 mod 7 => 25 % 7 => 4
# (a * b) % M == { (a % M) * (b % M) } % M
# x^n mod M = { (x^(n/2) % M) * (x^(n/2) % M) } % M  if n is EVEN
#           = { (x % M) * (x^(n-1) % M) } % M        if n is ODD
#           = 1                                      if n is 0


def mod_Expo(x, n, M):
    """Return (x**n) % M by fast modular exponentiation.  O(log n)."""
    if n == 0:
        return 1
    elif n % 2 == 0:
        # BUG FIX: use floor division; the original n/2 produced floats on
        # Python 3, silently doing float arithmetic down the recursion.
        res = mod_Expo(x, n//2, M)
        return (res * res) % M
    else:
        res = mod_Expo(x, n-1, M)
        return (x * res) % M
        # return { (x % M) * (mod_Expo(x,n-1,M))} % M


mod_Expo(5, 3, 7)

# # Count ways to reach nth stair
# ## METHOD 1: recursion (Fibonacci-like)
# stair_Ways(s, m) counts ways to climb s stairs taking 1..m steps at a time.
# Time => O(2^n), Space => O(1)


# +
def countWays(n, m):
    if n <= 1:
        return n
    res = 0
    i = 1
    while i <= m and i <= n:
        res += countWays(n-i, m)
        i += 1
    return res


def stair_Ways(s, m):
    """Ways to reach stair s with steps of size 1..m (naive recursion)."""
    return countWays(s+1, m)
# -

stair_Ways(4, 2)

# # METHOD 2: bottom-up DP
# res[i] += res[i-j] for every (i-j) >= 0.
# Time => O(m*n), Space => O(n)


# +
def countWays(n, m):  # redefines METHOD 1's helper, as in the original notebook
    res = [0 for i in range(n)]
    res[0], res[1] = 1, 1
    for i in range(2, n):  # stairs from 2 onwards
        j = 1  # check each step size that can be taken
        while j <= m and j <= i:
            res[i] += res[i-j]
            j += 1
    return res[n-1]


def stair_Ways_1(s, m):
    """Ways to reach stair s with steps of size 1..m (bottom-up DP)."""
    return countWays(s+1, m)
# -

stair_Ways_1(5, 2)

# # METHOD 3: sliding window over the DP table
# Time => O(n), Space => O(n)


def stair_Ways_2(n, m):
    """Ways to reach stair n with steps of size 1..m (sliding window sum)."""
    temp = 0
    res = [1]
    for i in range(1, n + 1):
        start = i - m - 1
        end = i - 1
        if (start >= 0):
            temp -= res[start]  # drop the element leaving the window
        temp += res[end]        # add the newest element
        res.append(temp)
    return res[n]


stair_Ways_2(5, 2)

# # METHOD 4: simple math
# Valid only when order does NOT matter (steps of 1 and 2):
# e.g. s = 4 => {1,1,2}, {1,2,1}, {2,1,1} collapse to one arrangement.
# Time => O(1), Space => O(1)


def stair_Ways_3(s):
    """Order-insensitive count of 1/2-step combinations summing to s."""
    return 1 + (s // 2)


stair_Ways_3(4)
CODING/.ipynb_checkpoints/Recursion-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.9 64-bit (''.venv'': venv)' # name: python3 # --- import course;course.header() # + [markdown] slideshow={"slide_type": "slide"} # # # Recursive Functions # - # Writing function to solve small problems and divide bigger problems into smaller ones # + [markdown] slideshow={"slide_type": "subslide"} # ## Fibonacci - the classic # + [markdown] slideshow={"slide_type": "subslide"} # "In mathematics, the Fibonacci numbers, commonly denoted Fn, form a sequence, called the Fibonacci sequence, such that each number is the sum of the two preceding ones, starting from 0 and 1. " -- Wikipedia # # [background](https://en.wikipedia.org/wiki/Fibonacci_number) # - # ### How would you code that? # + def fibonacci(Fn): if Fn in (0, 1): return Fn return fibonacci(Fn - 1) + fibonacci(Fn - 2) return total_sum # - [fibonacci(x) for x in range(13)] # + [markdown] slideshow={"slide_type": "slide"} # ## Real world example - the nested dicts ... # - import json tree = json.load(open("../data/itol.json")) tree # #### find the cummulaitve distance to Homosapiens def dive(child, distances=None): if distances is None: distances_1 = [child.distance] distances_2 = [child.distance] if child.get("leaf", False) is True: if child['name'] == "Homo_sapiens": return child['distance'] else: return None else: distance_1.append(dive(child["child_1"]), distan) distance_2.append(dive(child["child_2"])) # + slideshow={"slide_type": "subslide"} def dive(child): if child.get("leaf", False) is True: if child['name'] == "Homo_sapiens": return child['distance'] else: return None for ckey in ["child_1", "child_2"]: distance = dive(child[ckey]) if distance is not None: return distance + child["distance"] # - dive(tree) # ## How deep did we have to dive ? 
def dive_2(child):
    """Like `dive`, but also report how many levels deep the target leaf sits.

    Returns {"distance": <cumulative distance>, "depth": <levels traversed>}
    when Homo_sapiens is found below `child`, otherwise None.
    """
    if child.get("leaf", False) is True:
        # At a leaf: only the node we are hunting for counts as a hit.
        if child['name'] == "Homo_sapiens":
            return {"distance": child['distance'], "depth": 1}
        return None
    for branch in ["child_1", "child_2"]:
        hit = dive_2(child[branch])
        if hit is None:
            continue
        # Bubble the result up, accumulating distance and depth on the way.
        return {
            "distance": hit["distance"] + child["distance"],
            "depth": hit["depth"] + 1,
        }

dive_2(tree)

# ## What Path did we take?

def dive_3(child):
    """Like `dive_2`, but additionally record (under the "1_2" key) which
    branch keys ("child_1" / "child_2") were taken on the way down."""
    if child.get("leaf", False) is True:
        if child['name'] == "Homo_sapiens":
            # [None] is the sentinel for "we are at the leaf itself".
            return {"distance": child['distance'], "depth": 1, "1_2": [None]}
        return None
    for branch in ["child_1", "child_2"]:
        hit = dive_3(child[branch])
        if hit is None:
            continue
        return {
            "distance": hit["distance"] + child["distance"],
            "depth": hit["depth"] + 1,
            "1_2": hit["1_2"] + [branch],
        }

dive_3(tree)
notebooks/03.c_Recursive_functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## The preprocessing of the dataset import pandas as pd import sklearn as skn import matplotlib.pyplot as plt from bs4 import BeautifulSoup as bs import re import os import numpy as np from systemtools.location import * # Remove all the tags, scripts in the html def clean_html(html): soup = bs(html) try: title = soup.title.contents[0] except (AttributeError, IndexError): title = '' for s in soup(['script','style']): s.decompose() return ' '.join(soup.stripped_strings), title html_folder = dataDir() + "/Misc/error404/from-newslist/" train = [] # This step I will transfer all the data original in to titles and bodies by tags <title> and <p>. for html_part in os.listdir(html_folder): print(html_part) if not html_part.startswith('.'): if html_part == 'ok': mark = True elif html_part == '404': mark = False else: mark = 'NaN' html_part_path = html_folder+html_part for html in os.listdir(html_part_path): html_path = html_part_path+"/"+html #print(html_path) temp = {} temp['body'], temp['title'] = clean_html(open(html_path)) temp['type'] = mark temp['path'] = html_part+html train.append(temp) train_df = pd.DataFrame(train, columns=['title', 'body', 'type']) train_df.info() train_df.head() # ## Data Analysis # ### Length Analysis # Illustre Respectivement The Length Variation of words of title and body of 404 and ok. 
# Collect character lengths of titles and bodies, split by page type.
# itertuples() rows: row[0]=Index, row[1]=title, row[2]=body, row[3]=type.
length_title_True = []
length_body_True = []
length_title_False = []
length_body_False = []
for row in train_df.itertuples():
    #print(row)
    if row[3] == False:
        length_title_False.append(len(row[1]))
        length_body_False.append(len(row[2]))
    elif row[3] == True:
        length_title_True.append(len(row[1]))
        length_body_True.append(len(row[2]))

plt.hist(length_title_False, bins=50, color='red', label='title_False', alpha=0.5)
plt.title('title_False')
plt.show()
plt.hist(length_title_True, bins=50, color='blue', label='title_True', alpha=0.5)
plt.title('title_True')
plt.show()
plt.hist(length_body_False, bins=50, color='red', label='body_False', alpha=0.5)
# FIX: title was misspelled 'body_Flase'.
plt.title('body_False')
plt.show()
# FIX: legend label said 'title_True' although this plots body lengths.
plt.hist(length_body_True, bins=50, color='blue', label='body_True', alpha=0.5)
plt.title('body_True')
plt.show()

# Seeing the graph 'body_True', we'd like to remove the outliers, so we are
# going to do some small modifications.
def lessthan1(element):
    # Keep 'ok' bodies shorter than 50k characters.
    return element < 50000

def lessthan2(element):
    # Keep '404' bodies shorter than 20k characters.
    return element < 20000

length_body_True_modified = list(filter(lessthan1, length_body_True))
length_body_False_modified = list(filter(lessthan2, length_body_False))
plt.hist(length_body_False_modified, bins=50, color='red', label='body_False', alpha=0.5)
plt.title('body_False_modified')
plt.show()
# FIX: legend label said 'title_True' although this plots body lengths.
plt.hist(length_body_True_modified, bins=50, color='blue', label='body_True', alpha=0.5)
plt.title('body_True_modified')
plt.show()

# #### Conclusion: As we can see from these graphs, these two features, length
# of titles and length of bodies, are interesting.

# ### NLP Analysis

#
error404detector/chenle/chenli-404-v1/404+error.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
from PIL import Image
#importing specific functions
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import scale

# +
#pre-define functions
def cv_optimize(clf, parameters, X, y, n_jobs=1, n_folds=5, score_func=None):
    """Grid-search `parameters` for `clf` on (X, y); return the best estimator."""
    if score_func:
        gs = GridSearchCV(clf, param_grid=parameters, cv=n_folds, n_jobs=n_jobs, scoring=score_func)
    else:
        gs = GridSearchCV(clf, param_grid=parameters, n_jobs=n_jobs, cv=n_folds)
    gs.fit(X, y)
    #print("BEST", gs.best_params_, gs.best_score_, gs.grid_scores_)
    best = gs.best_estimator_
    return best

def do_classify_aa(clf, parameters, indf, featurenames, targetname, target1val, score_func=None, n_folds=5, n_jobs=1):
    """Cross-validate `clf` on `indf[featurenames]` against a binarized target.

    Prints summary metrics and returns (fitted clf, per-fold test accuracy,
    per-fold test AUC).
    NOTE(review): relies on the module-level `skf` splitter defined further
    down in this notebook, and hard-codes 10 folds in the metric arrays —
    keep both in sync with skf's n_splits.
    """
    subdf=indf[featurenames]
    X=subdf.values
    X=scale(X)  # standardize features (zero mean, unit variance)
    y=(indf[targetname].values==target1val)*1  # binarize the target to {0, 1}
    training_accuracy = np.zeros(10)
    test_accuracy=np.zeros(10)
    test_auc=np.zeros(10)
    for idx,train_test in enumerate(skf.split(X,y)):
        # train_test is (train_indices, test_indices) for this fold.
        X_train, X_test = X[train_test[0]], X[train_test[1]]
        y_train, y_test = y[train_test[0]], y[train_test[1]]
        if parameters:
            # Re-tune hyperparameters on each training fold.
            clf = cv_optimize(clf, parameters, X_train, y_train, n_jobs=n_jobs, n_folds=n_folds, score_func=score_func)
        clf=clf.fit(X_train, y_train)
        pred = clf.predict(X_test)
        probs = clf.predict_proba(X_test)
        training_accuracy[idx] = clf.score(X_train, y_train)
        test_accuracy[idx] = accuracy_score(y_test, pred)
        test_auc[idx] = roc_auc_score(y_test, probs[:,1])
        #print(idx)
    print("############# based on k-fold cross-validation predictions ################")
    print("Training Accuracy %0.2f +/- %0.3f" % (training_accuracy.mean(), training_accuracy.std()))
    #print(")
    print("***** Target : GBM vs METS")
    print(clf)
    print("Accuracy on test data: %0.2f +/- %0.3f" % (test_accuracy.mean(), test_accuracy.std()))
    print("AUC on test data: %0.2f +/- %0.3f" % (test_auc.mean(), test_auc.std()))
    #print(confusion_matrix(y, pred))
    print("########################################################")
    return clf, test_accuracy, test_auc
# -

#read in the pre-extracted features
dfglioma=pd.read_csv("../data/glioma_all_featarray.csv")
dfglioma.head()
colswewant_cont = list(dfglioma)
colswewant_cont.pop()  # drop the last column name (the target) from the feature list
Targets=['Targets']

# +
# cross validation methods
from sklearn.model_selection import LeaveOneOut
loo = LeaveOneOut()
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=10, random_state=2652124)
from sklearn.model_selection import train_test_split
#tts = train_test_split(n_splits=10, random_state=2652124)
# -

from sklearn import svm
clfsvc = svm.SVC(probability=True)
parameters = [{'kernel': ['linear'], 'C': [1]}]
clfsvc, test_accuracy, test_auc = do_classify_aa(clfsvc, parameters, dfglioma, colswewant_cont, 'Targets', 1)
ipynb_notebooks/Classification_Code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ART for TensorFlow v2 - Callable Class/Function

# This notebook demonstrates applying ART with TensorFlow v2 using callable classes or functions to define models. The code follows and extends the examples on www.tensorflow.org.

# +
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
import numpy as np
from matplotlib import pyplot as plt

from art.estimators.classification import TensorFlowV2Classifier
from art.attacks.evasion import FastGradientMethod, CarliniLInfMethod
# -

if tf.__version__[0] != '2':
    raise ImportError('This notebook requires Tensorflow v2.')

# # Load MNIST dataset

# +
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.astype(np.float32)
x_test = x_test.astype(np.float32)
# Keep only 100 test samples to make the attacks below fast.
x_test = x_test[0:100]
y_test = y_test[0:100]
# -

# Add a dimension for color channel
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]

# Create loss object and optimizer
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()

# Define metrics for training and testing

# +
# NOTE(review): these streaming metrics are never reset between epochs
# (no reset_states() call), so the per-epoch numbers printed below are
# running averages over all epochs so far — confirm this is intended.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
# -

# # Tensorflow with callable class

# Create a custom model class.

class KerasModel(Model):
    # Minimal conv net: 3-filter conv -> flatten -> 10-way softmax.
    def __init__(self):
        super(KerasModel, self).__init__()
        self.conv1 = Conv2D(filters=3, kernel_size=3, activation='relu')
        self.flatten = Flatten()
        self.dense1 = Dense(10, activation='softmax')

    def call(self, x):
        # Forward pass; returns class probabilities.
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        return x

# Create callable model
model = KerasModel()

# Create input pipelines for training and testing
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)

# Define the training step.
@tf.function
def train_step(images, labels):
    # One optimizer step on a batch; updates the streaming train metrics.
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)

# Define the testing step.
@tf.function
def test_step(images, labels):
    # Forward pass only; updates the streaming test metrics.
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)

# Fit the model on training data and collect metrics for training and testing.

# +
epochs = 3
for epoch in range(epochs):
    for images, labels in train_ds:
        train_step(images, labels)
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    template = 'Epoch {}, Loss: {:4.2f}, Accuracy: {:4.2f}, Test Loss: {:4.2f}, Test Accuracy: {:4.2f}'
    print(template.format(epoch + 1, train_loss.result(), train_accuracy.result() * 100, test_loss.result(), test_accuracy.result() * 100))
# -

# Evaluate model accuracy on test data.

y_test_pred = np.argmax(model(x_test), axis=1)
accuracy_test = np.sum(y_test_pred == y_test) / y_test.shape[0]
print('Accuracy on test data: {:4.2f}%'.format(accuracy_test * 100))

# Create a ART Tensorflow v2 classifier for the Tensorflow custom model class.

classifier = TensorFlowV2Classifier(model=model, nb_classes=10, input_shape=(28, 28, 1), loss_object=loss_object, clip_values=(0, 1), channels_first=False)

# ## Fast Gradient Sign Method attack

# Create a ART Fast Gradient Sign Method attack.

attack_fgsm = FastGradientMethod(estimator=classifier)

# Generate adversarial test data.

x_test_adv = attack_fgsm.generate(x_test)

# Evaluate accuracy on adversarial test data and calculate average perturbation.

y_test_pred = np.argmax(model(x_test_adv), axis=1)
accuracy_test_adv = np.sum(y_test_pred == y_test) / y_test.shape[0]
perturbation = np.mean(np.abs((x_test_adv - x_test)))
print('Accuracy on adversarial test data: {:4.2f}%'.format(accuracy_test_adv * 100))
print('Average perturbation: {:4.2f}'.format(perturbation))

# Visualise the first adversarial test sample.

plt.matshow(x_test_adv[0, :, :, 0])
plt.show()

# ## Carlini&Wagner Infinity-norm attack

# Create a ART Carlini&Wagner Infinity-norm attack.

attack_cw = CarliniLInfMethod(classifier=classifier, eps=0.3, max_iter=100, learning_rate=0.01)

# Generate adversarial test data.

x_test_adv = attack_cw.generate(x_test)

# Evaluate accuracy on adversarial test data and calculate average perturbation.

y_test_pred = np.argmax(model(x_test_adv), axis=1)
accuracy_test_adv = np.sum(y_test_pred == y_test) / y_test.shape[0]
perturbation = np.mean(np.abs((x_test_adv - x_test)))
print('Accuracy on adversarial test data: {:4.2f}%'.format(accuracy_test_adv * 100))
print('Average perturbation: {:4.2f}'.format(perturbation))

# Visualise the first adversarial test sample.

plt.matshow(x_test_adv[0, :, :, 0])
plt.show()

# # Tensorflow with custom function

# Reshape dataset in feature vectors because the model in this example requires feature vectors.

x_train = x_train.reshape((60000, 784))
x_test = x_test.reshape((100, 784))

# Create input pipelines for training and testing

train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)

# Create variables and keep track of them

W = tf.Variable(initial_value=tf.random.normal(shape=(784, 10)), name="W")
b = tf.Variable(tf.zeros(shape=(10)), name="b")

# Define a function representing the model

@tf.function
def forward(x):
    # Linear layer followed by a hand-written softmax over the class axis.
    x = tf.matmul(x, W) + b
    denominator = tf.expand_dims(tf.reduce_sum(tf.exp(x), axis=1), axis=1)
    softmax = (1.0 / denominator) * tf.exp(x)
    return softmax

# Define the training step.

@tf.function
def train_step(images, labels):
    # One optimizer step on the explicit variable list [W, b].
    with tf.GradientTape() as tape:
        predictions = forward(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))
    train_loss(loss)
    train_accuracy(labels, predictions)

# Define the testing step.

@tf.function
def test_step(images, labels):
    predictions = forward(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)

# Fit the model on training data and collect metrics for training and testing.

# +
epochs = 3
for epoch in range(epochs):
    for images, labels in train_ds:
        train_step(images, labels)
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    template = 'Epoch {}, Loss: {:4.2f}, Accuracy: {:4.2f}, Test Loss: {:4.2f}, Test Accuracy: {:4.2f}'
    print(template.format(epoch + 1, train_loss.result(), train_accuracy.result() * 100, test_loss.result(), test_accuracy.result() * 100))
# -

# Evaluate model accuracy on test data.

y_test_pred = np.argmax(forward(x_test), axis=1)
accuracy_test = np.sum(y_test_pred == y_test) / y_test.shape[0]
print('Accuracy on test data: {:4.2f}%'.format(accuracy_test * 100))

# Create a ART Tensorflow v2 classifier for the Tensorflow custom model function.
# FIX: the feature-vector model `forward` consumes flat inputs of shape
# (784,), not images of shape (28, 28, 1); the previous input_shape value was
# copied from the convolutional example above.
classifier = TensorFlowV2Classifier(model=forward, nb_classes=10, input_shape=(784,), loss_object=loss_object, clip_values=(0, 1), channels_first=False)

# ## Fast Gradient Sign Method attack

# Create a ART Fast Gradient Sign Method attack.

attack_fgsm = FastGradientMethod(estimator=classifier)

# Generate adversarial test data.

x_test_adv = attack_fgsm.generate(x_test)

# Evaluate accuracy on adversarial test data and calculate average perturbation.

y_test_pred = np.argmax(forward(x_test_adv), axis=1)
accuracy_test_adv = np.sum(y_test_pred == y_test) / y_test.shape[0]
perturbation = np.mean(np.abs((x_test_adv - x_test)))
print('Accuracy on adversarial test data: {:4.2f}%'.format(accuracy_test_adv * 100))
print('Average perturbation: {:4.2f}'.format(perturbation))

# Visualise the first adversarial test sample (flattened back to 28x28).

plt.matshow(x_test_adv[0, :].reshape((28, 28)))
plt.show()

# ## Carlini&Wagner Infinity-norm attack

# Create a ART Carlini&Wagner Infinity-norm attack.

attack_cw = CarliniLInfMethod(classifier=classifier, eps=0.3, max_iter=100, learning_rate=0.01)

# Generate adversarial test data.

# %%capture
x_test_adv = attack_cw.generate(x_test);

# Evaluate accuracy on adversarial test data and calculate average perturbation.

y_test_pred = np.argmax(forward(x_test_adv), axis=1)
accuracy_test_adv = np.sum(y_test_pred == y_test) / y_test.shape[0]
perturbation = np.mean(np.abs((x_test_adv - x_test)))
print('Accuracy on adversarial test data: {:4.2f}%'.format(accuracy_test_adv * 100))
print('Average perturbation: {:4.6f}'.format(perturbation))

# Visualise the first adversarial test sample.

plt.matshow(x_test_adv[0, :].reshape((28, 28)))
plt.show()
notebooks/art-for-tensorflow-v2-callable.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from mxnet import gluon
import mxnet as mx
from mxnet import nd
from mxnet.gluon import nn
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from PIL import Image, ImageOps

# from gluoncv.data import VOCSegmentation, VOCAugSegmentation
from gluoncv.utils.metrics.voc_segmentation import batch_pix_accuracy, batch_intersection_union
from mylib.deeplabv3p import DeepLabv3p
from mylib.dataset import VOCAugSegmentation

# %matplotlib inline
# -

# Sanity-check the dataset; the bare expressions below are notebook display cells.
dataset = VOCAugSegmentation()
dataset.classes

dataset = VOCAugSegmentation(split='train_aug')
x, y = dataset[4]
x.shape
y.shape

l = y.copyto(mx.gpu()).mean()
l.asscalar()

dataloader = gluon.data.DataLoader(dataset, batch_size=8)

# Evaluation run 1: output stride 8, train_aug weights.
model = DeepLabv3p(OS=8)
model.initialize(ctx=mx.gpu())
# weights = "/home/jiancheng/code/segmentation/deeplabv3p_gluon/tmp_weights/pascal_trainval/pascal_trainval.params"
weights = '/home/jiancheng/code/segmentation/deeplabv3p_gluon/tmp_weights/pascal_train_aug/pascal_train_aug.params'
model.load_params(filename=weights, ctx=mx.gpu())
# FIX: removed a dangling `model.` fragment that followed here — it was an
# incomplete expression (syntax error) left over from autocompletion.

# Accumulate pixel accuracy and mean IoU over the whole split.
tbar = tqdm(dataloader)
total_inter, total_union, total_correct, total_label = (0,)*4
for i, (x, y) in enumerate(tbar):
    x = x.copyto(mx.gpu())
    y = y.copyto(mx.gpu())
    pred = model(x)
    correct, labeled = batch_pix_accuracy(output=pred,target=y)
    inter, union = batch_intersection_union(output=pred,target=y,nclass=21)
    total_correct += correct.astype('int64')
    total_label += labeled.astype('int64')
    total_inter += inter.astype('int64')
    total_union += union.astype('int64')
    # np.spacing(1) keeps the division safe when a class never appears.
    pix_acc = np.float64(1.0) * total_correct / (np.spacing(1, dtype=np.float64) + total_label)
    IoU = np.float64(1.0) * total_inter / (np.spacing(1, dtype=np.float64) + total_union)
    mIoU = IoU.mean()
    tbar.set_description('iter%s: pix_acc: %.4f, mIoU: %.4f' % (i, pix_acc, mIoU))

# Evaluation run 2: output stride 16, same train_aug weights.
model = DeepLabv3p(OS=16)
model.initialize(ctx=mx.gpu())
# weights = "/home/jiancheng/code/segmentation/deeplabv3p_gluon/tmp_weights/pascal_trainval/pascal_trainval.params"
weights = '/home/jiancheng/code/segmentation/deeplabv3p_gluon/tmp_weights/pascal_train_aug/pascal_train_aug.params'
model.load_params(filename=weights, ctx=mx.gpu())

tbar = tqdm(dataloader)
total_inter, total_union, total_correct, total_label = (0,)*4
for i, (x, y) in enumerate(tbar):
    x = x.copyto(mx.gpu())
    y = y.copyto(mx.gpu())
    pred = model(x)
    correct, labeled = batch_pix_accuracy(output=pred,target=y)
    inter, union = batch_intersection_union(output=pred,target=y,nclass=21)
    total_correct += correct.astype('int64')
    total_label += labeled.astype('int64')
    total_inter += inter.astype('int64')
    total_union += union.astype('int64')
    pix_acc = np.float64(1.0) * total_correct / (np.spacing(1, dtype=np.float64) + total_label)
    IoU = np.float64(1.0) * total_inter / (np.spacing(1, dtype=np.float64) + total_union)
    mIoU = IoU.mean()
    tbar.set_description('iter%s: pix_acc: %.4f, mIoU: %.4f' % (i, pix_acc, mIoU))

# Evaluation run 3: output stride 8, trainval weights.
# NOTE(review): here the active/commented weight lines are swapped relative to
# the two runs above — confirm the trainval checkpoint is the intended one.
model = DeepLabv3p(OS=8)
model.initialize(ctx=mx.gpu())
weights = "/home/jiancheng/code/segmentation/deeplabv3p_gluon/tmp_weights/pascal_trainval/pascal_trainval.params"
# weights = '/home/jiancheng/code/segmentation/deeplabv3p_gluon/tmp_weights/pascal_train_aug/pascal_train_aug.params'
model.load_params(filename=weights, ctx=mx.gpu())

tbar = tqdm(dataloader)
total_inter, total_union, total_correct, total_label = (0,)*4
for i, (x, y) in enumerate(tbar):
    x = x.copyto(mx.gpu())
    y = y.copyto(mx.gpu())
    pred = model(x)
    correct, labeled = batch_pix_accuracy(output=pred,target=y)
    inter, union = batch_intersection_union(output=pred,target=y,nclass=21)
    total_correct += correct.astype('int64')
    total_label += labeled.astype('int64')
    total_inter += inter.astype('int64')
    total_union += union.astype('int64')
    pix_acc = np.float64(1.0) * total_correct / (np.spacing(1, dtype=np.float64) + total_label)
    IoU = np.float64(1.0) * total_inter / (np.spacing(1, dtype=np.float64) + total_union)
    mIoU = IoU.mean()
    tbar.set_description('iter%s: pix_acc: %.4f, mIoU: %.4f' % (i, pix_acc, mIoU))

# Final display cells for the last run's metrics.
IoU.mean()

np.float64(1.0) * total_inter / (np.spacing(1, dtype=np.float64) + total_union)

mIoU
workspace/8.train_voc_pascal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Multi-Channel Live Display

# This notebook allows for live display of multiple channels

# ## Plot Setup

# %matplotlib widget

# ## Import required modules

import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pydaqhat as py

# ## Plot

# +
plt.rcParams['animation.html'] = 'jshtml'
plt.style.use('default')

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylabel("Voltage (V)")
ax.set_xlabel("Sample")
ax.set_title("PiDAQ Live Data")
#fig.show()

# +
channels = [0,1]        # Channels to use
iepe_enable = False     # IEPE enable/disable
sensitivity = 1000      # Sensitivity in mV/unit
sample_rate = 20000     # Number of samples per second
buffer_size = 1000000   # Number of samples to keep in buffer before overwriting

def plot_continous(hat):
    """Continuously redraw the most recent 500 interleaved samples from `hat`.

    Samples arrive channel-interleaved (ch0, ch1, ..., ch0, ch1, ...), so
    channel i is recovered by slicing with a stride of len(channels).
    Runs until the kernel is interrupted.
    """
    print("Plotting")
    sample_count = 0
    # FIX: the de-interleave stride and the x-axis limit were hard-coded to 2
    # channels; they now follow the `channels` configuration above.
    # (Also removed the unused `last_start_index` variable.)
    n_channels = len(channels)
    while True:
        new = hat.a_in_scan_read(-1,0).data[-500:]
        ax.cla()
        for i in range(n_channels):
            # De-interleave channel i from the flat sample buffer.
            ax.plot(new[i::n_channels], label="Channel {}".format(i))
        #ax.plot(range(sample_count, sample_count + len(new)), new)
        ax.set_xlim(left=0, right=len(new)//n_channels)
        ax.xaxis.set_major_formatter(ticker.EngFormatter())
        ax.set_ylabel("Voltage (V)")
        ax.set_xlabel("Sample")
        ax.set_title("Len {}".format(len(new)))
        ax.legend(loc=1)
        sample_count += len(new)
        fig.canvas.draw()
        #sleep(0.1)

hat = py.continous_scan_start(
    channels = channels,
    iepe_enable = iepe_enable,
    sensitivity = sensitivity,
    sample_rate = sample_rate,
    buffer_size = buffer_size
)

plot_continous(hat)

# +
hat.a_in_scan_stop()
hat.a_in_scan_cleanup()
print("Scan has stopped")
# -
Notebooks/Multi-Channel Live Display.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Assemble augmented training sets: load per-batch .npy augmentation arrays,
# concatenate them, truncate to fixed sizes (full / 2250 / 1125 / 560),
# round to one decimal place, and save the results to the Desktop.

# +
import os, sys
sys.path.append(os.getcwd())
import numpy as np
import tensorflow as tf
import scipy.misc
import imageio
from imageio import imwrite
from scipy.misc import imsave, imread
import keras
from keras.datasets import mnist, cifar10

# Reference datasets; only the shapes are inspected here.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
(x_traincifar, y_traincifar), (x_testcifar, y_testcifar) = cifar10.load_data()
print(x_traincifar.shape)
print(y_traincifar.shape)

# +
# Interpolation 1, sampling method: load the 11 augmentation batches.
x1 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_1.npy')
y1 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_1.npy')
x2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_2.npy')
y2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_2.npy')
x3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_3.npy')
y3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_3.npy')
x4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_4.npy')
y4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_4.npy')
x5 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_5.npy')
y5 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_5.npy')
x6 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_6.npy')
y6 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_6.npy')
x7 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_7.npy')
y7 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_7.npy')
x8 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_8.npy')
y8 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_8.npy')
x9 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_9.npy')
y9 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_9.npy')
x10 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_10.npy')
y10 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_10.npy')
x11 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/x_augmentation_array_11.npy')
y11 = np.load('/Users/wildflowerlyi/Desktop/aug_sets/y_augmentation_array_11.npy')

#TODO: change original image files to round
# Full concatenation of all 11 batches.
x_full = np.concatenate((x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11), axis=0)
y_full = np.concatenate((y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11), axis=0)
print(x_full.shape)
print(y_full.shape)

# 2250-sample subset (first 7 batches, truncated).
x_2250 = np.concatenate((x1,x2,x3,x4,x5,x6,x7), axis=0)
y_2250 = np.concatenate((y1,y2,y3,y4,y5,y6,y7), axis=0)
x_2250 = x_2250[0:2250,:]
y_2250 = y_2250[0:2250,:]
print(x_2250.shape)
print(y_2250.shape)
x_augmentation_set_2250 = np.around(x_2250, 1)
y_augmentation_set_2250 = np.around(y_2250, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_2250.npy', x_augmentation_set_2250)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_2250.npy', y_augmentation_set_2250)

# 1125-sample subset (first 4 batches, truncated).
x_1125 = np.concatenate((x1,x2,x3,x4), axis=0)
y_1125 = np.concatenate((y1,y2,y3,y4), axis=0)
x_1125 = x_1125[0:1125,:]
y_1125 = y_1125[0:1125,:]
print(x_1125.shape)
print(y_1125.shape)
x_augmentation_set_1125 = np.around(x_1125, 1)
y_augmentation_set_1125 = np.around(y_1125, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_1125.npy', x_augmentation_set_1125)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_1125.npy', y_augmentation_set_1125)

# 560-sample subset (first 2 batches, truncated).
x_560 = np.concatenate((x1,x2), axis=0)
y_560 = np.concatenate((y1,y2), axis=0)
x_560 = x_560[0:560,:]
y_560 = y_560[0:560,:]
print(x_560.shape)
print(y_560.shape)
x_augmentation_set_560 = np.around(x_560, 1)
y_augmentation_set_560 = np.around(y_560, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_560.npy', x_augmentation_set_560)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_560.npy', y_augmentation_set_560)

# +
# Load, concatenate, and then round - for interpolation 1 mean method
x1mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/x_augmentation_array_mean_1.npy')
y1mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/y_augmentation_array_mean_1.npy')
x2mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/x_augmentation_array_mean_2.npy')
y2mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/y_augmentation_array_mean_2.npy')
x3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/x_augmentation_array_mean_3.npy')
y3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/y_augmentation_array_mean_3.npy')
x4mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/x_augmentation_array_mean_4.npy')
y4mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean/y_augmentation_array_mean_4.npy')

# Full set, capped at 4500 samples.
x_meanfull = np.concatenate((x1mean,x2mean,x3mean,x4mean), axis=0)
y_meanfull = np.concatenate((y1mean,y2mean,y3mean,y4mean), axis=0)
x_meanfull = x_meanfull[0:4500,:]
y_meanfull = y_meanfull[0:4500,:]
print(x_meanfull.shape)
print(y_meanfull.shape)
x_augmentation_set_mean_full = np.around(x_meanfull, 1)
y_augmentation_set_mean_full = np.around(y_meanfull, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_mean_full.npy', x_augmentation_set_mean_full)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_mean_full.npy', y_augmentation_set_mean_full)

# 2250-sample subset.
x_mean2250 = x_meanfull[0:2250,:]
y_mean2250 = y_meanfull[0:2250,:]
print(x_mean2250.shape)
print(y_mean2250.shape)
x_augmentation_set_mean_2250 = np.around(x_mean2250, 1)
y_augmentation_set_mean_2250 = np.around(y_mean2250, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_mean_2250.npy', x_augmentation_set_mean_2250)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_mean_2250.npy', y_augmentation_set_mean_2250)

# 1125-sample subset.
x_mean1125 = x_meanfull[0:1125,:]
y_mean1125 = y_meanfull[0:1125,:]
print(x_mean1125.shape)
print(y_mean1125.shape)
x_augmentation_set_mean_1125 = np.around(x_mean1125, 1)
y_augmentation_set_mean_1125 = np.around(y_mean1125, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_mean_1125.npy', x_augmentation_set_mean_1125)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_mean_1125.npy', y_augmentation_set_mean_1125)

# 560-sample subset.
x_mean560 = x_meanfull[0:560,:]
y_mean560 = y_meanfull[0:560,:]
print(x_mean560.shape)
print(y_mean560.shape)
x_augmentation_set_mean_560 = np.around(x_mean560, 1)
y_augmentation_set_mean_560 = np.around(y_mean560, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_mean_560.npy', x_augmentation_set_mean_560)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_mean_560.npy', y_augmentation_set_mean_560)

# +
# Load, concatenate, and then round - for interpolation 2 sampling method
x1_interpol2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_2/x_augmentation_array.npy')
y1_interpol2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_2/y_augmentation_array.npy')
x2_interpol2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_2/x_augmentation_array_2.npy')
y2_interpol2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_2/y_augmentation_array_2.npy')
x3_interpol2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_2/x_augmentation_array_3.npy')
y3_interpol2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_2/y_augmentation_array_3.npy')

# Full set, capped at 4500 samples.
x_interpol2full = np.concatenate((x1_interpol2,x2_interpol2,x3_interpol2), axis=0)
y_interpol2full = np.concatenate((y1_interpol2,y2_interpol2,y3_interpol2), axis=0)
x_interpol2full = x_interpol2full[0:4500,:]
y_interpol2full = y_interpol2full[0:4500,:]
print(x_interpol2full.shape)
print(y_interpol2full.shape)
x_augmentation_set_interpol2_full = np.around(x_interpol2full, 1)
y_augmentation_set_interpol2_full = np.around(y_interpol2full, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2.npy', x_augmentation_set_interpol2_full)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2.npy', y_augmentation_set_interpol2_full)

# 2250-sample subset.
x_interpol22250 = x_interpol2full[0:2250,:]
y_interpol22250 = y_interpol2full[0:2250,:]
print(x_interpol22250.shape)
print(y_interpol22250.shape)
x_augmentation_set_interpol2_2250 = np.around(x_interpol22250, 1)
y_augmentation_set_interpol2_2250 = np.around(y_interpol22250, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2_2250.npy', x_augmentation_set_interpol2_2250)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2_2250.npy', y_augmentation_set_interpol2_2250)

# 1125-sample subset.
x_interpol21125 = x_interpol2full[0:1125,:]
y_interpol21125 = y_interpol2full[0:1125,:]
print(x_interpol21125.shape)
print(y_interpol21125.shape)
x_augmentation_set_interpol2_1125 = np.around(x_interpol21125, 1)
y_augmentation_set_interpol2_1125 = np.around(y_interpol21125, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2_1125.npy', x_augmentation_set_interpol2_1125)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2_1125.npy', y_augmentation_set_interpol2_1125)

# 560-sample subset.
x_interpol2560 = x_interpol2full[0:560,:]
y_interpol2560 = y_interpol2full[0:560,:]
print(x_interpol2560.shape)
print(y_interpol2560.shape)
x_augmentation_set_interpol2_560 = np.around(x_interpol2560, 1)
y_augmentation_set_interpol2_560 = np.around(y_interpol2560, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2_560.npy', x_augmentation_set_interpol2_560)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2_560.npy', y_augmentation_set_interpol2_560)

# +
# Load, concatenate, and then round - for interpolation 2 mean method
y_interpol2mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/y_augmentation_array_interpol2mean.npy')
x_interpol2mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/x_augmentation_array_interpol2mean.npy')
y_interpol2mean_2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/y_augmentation_array_interpol2mean_2.npy')
x_interpol2mean_2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/x_augmentation_array_interpol2mean_2.npy')
y_interpol2mean_3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/y_augmentation_array_interpol2mean_3.npy')
x_interpol2mean_3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/x_augmentation_array_interpol2mean_3.npy')
y_interpol2mean_4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/y_augmentation_array_interpol2mean_4.npy')
x_interpol2mean_4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_2/x_augmentation_array_interpol2mean_4.npy')

# Full set (no truncation in this section).
x_interpol2fullmean = np.concatenate((x_interpol2mean, x_interpol2mean_2, x_interpol2mean_3, x_interpol2mean_4), axis=0)
y_interpol2fullmean = np.concatenate((y_interpol2mean, y_interpol2mean_2, y_interpol2mean_3, y_interpol2mean_4), axis=0)
x_augmentation_set_interpol2mean = np.around(x_interpol2fullmean, 1)
y_augmentation_set_interpol2mean = np.around(y_interpol2fullmean, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2mean.npy', x_augmentation_set_interpol2mean)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2mean.npy', y_augmentation_set_interpol2mean)

# 2250-sample subset.
x_interpol2mean2250 = x_interpol2fullmean[0:2250,:]
y_interpol2mean2250 = y_interpol2fullmean[0:2250,:]
print(x_interpol2mean2250.shape)
print(y_interpol2mean2250.shape)
x_augmentation_set_interpol2mean_2250 = np.around(x_interpol2mean2250, 1)
y_augmentation_set_interpol2mean_2250 = np.around(y_interpol2mean2250, 1)
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2mean_2250.npy', x_augmentation_set_interpol2mean_2250)
np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2mean_2250.npy', y_augmentation_set_interpol2mean_2250)

# 1125-sample subset (continues below).
x_interpol2mean1125 = x_interpol2fullmean[0:1125,:]
y_interpol2mean1125 = y_interpol2fullmean[0:1125,:]
print(x_interpol2mean1125.shape)
print(y_interpol2mean1125.shape) x_augmentation_set_interpol2mean_1125 = np.around(x_interpol2mean1125, 1) y_augmentation_set_interpol2mean_1125 = np.around(y_interpol2mean1125, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2mean_1125.npy', x_augmentation_set_interpol2mean_1125) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2mean_1125.npy', y_augmentation_set_interpol2mean_1125) x_interpol2mean560 = x_interpol2fullmean[0:560,:] y_interpol2mean560 = y_interpol2fullmean[0:560,:] print(x_interpol2mean560.shape) print(y_interpol2mean560.shape) x_augmentation_set_interpol2mean_560 = np.around(x_interpol2mean560, 1) y_augmentation_set_interpol2mean_560 = np.around(y_interpol2mean560, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol2mean_560.npy', x_augmentation_set_interpol2mean_560) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol2mean_560.npy', y_augmentation_set_interpol2mean_560) # + # Load, concatenate, and then round - for interpolation 3 mean method x1_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/x_augmentation_array_interpol3mean.npy') y1_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/y_augmentation_array_interpol3mean.npy') x2_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/x_augmentation_array_interpol3mean_2.npy') y2_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/y_augmentation_array_interpol3mean_2.npy') x3_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/x_augmentation_array_interpol3mean_3.npy') y3_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/y_augmentation_array_interpol3mean_3.npy') x4_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/x_augmentation_array_interpol3mean_4.npy') y4_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/y_augmentation_array_interpol3mean_4.npy') x5_interpol3mean = 
np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/x_augmentation_array_interpol3mean_5.npy') y5_interpol3mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_3/y_augmentation_array_interpol3mean_5.npy') x_interpol3meanfull = np.concatenate((x1_interpol3mean,x2_interpol3mean,x3_interpol3mean,x4_interpol3mean, x5_interpol3mean), axis=0) y_interpol3meanfull = np.concatenate((y1_interpol3mean,y2_interpol3mean,y3_interpol3mean,y4_interpol3mean, y5_interpol3mean), axis=0) x_interpol3meanfull = x_interpol3meanfull[0:4500,:] y_interpol3meanfull = y_interpol3meanfull[0:4500,:] print(x_interpol3meanfull.shape) print(y_interpol3meanfull.shape) x_augmentation_set_interpol3mean_full = np.around(x_interpol3meanfull, 1) y_augmentation_set_interpol3mean_full = np.around(y_interpol3meanfull, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3mean_full.npy', x_augmentation_set_interpol3mean_full) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3mean_full.npy', y_augmentation_set_interpol3mean_full) x_interpol3mean2250 = x_interpol3meanfull[0:2250,:] y_interpol3mean2250 = y_interpol3meanfull[0:2250,:] print(x_interpol3mean2250.shape) print(y_interpol3mean2250.shape) x_augmentation_set_interpol3mean_2250 = np.around(x_interpol3mean2250, 1) y_augmentation_set_interpol3mean_2250 = np.around(y_interpol3mean2250, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3mean_2250.npy', x_augmentation_set_interpol3mean_2250) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3mean_2250.npy', y_augmentation_set_interpol3mean_2250) x_interpol3mean1125 = x_interpol3meanfull[0:1125,:] y_interpol3mean1125 = y_interpol3meanfull[0:1125,:] print(x_interpol3mean1125.shape) print(y_interpol3mean1125.shape) x_augmentation_set_interpol3mean_1125 = np.around(x_interpol3mean1125, 1) y_augmentation_set_interpol3mean_1125 = np.around(y_interpol3mean1125, 1) 
np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3mean_1125.npy', x_augmentation_set_interpol3mean_1125) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3mean_1125.npy', y_augmentation_set_interpol3mean_1125) x_interpol3mean560 = x_interpol3meanfull[0:560,:] y_interpol3mean560 = y_interpol3meanfull[0:560,:] print(x_interpol3mean560.shape) print(y_interpol3mean560.shape) x_augmentation_set_interpol3mean_560 = np.around(x_interpol3mean560, 1) y_augmentation_set_interpol3mean_560 = np.around(y_interpol3mean560, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3mean_560.npy', x_augmentation_set_interpol3mean_560) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3mean_560.npy', y_augmentation_set_interpol3mean_560) # + # Load, concatenate, and then round - for interpolation 3 sampling method x1_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array.npy') y1_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array.npy') x2_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_2.npy') y2_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_2.npy') x3_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_3.npy') y3_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_3.npy') x4_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_4.npy') y4_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_4.npy') x5_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_5.npy') y5_interpol3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_5.npy') x_interpol3full = np.concatenate((x1_interpol3,x2_interpol3,x3_interpol3,x4_interpol3, x5_interpol3), axis=0) y_interpol3full = np.concatenate((y1_interpol3,y2_interpol3,y3_interpol3,y4_interpol3, 
y5_interpol3), axis=0) x_interpol3full = x_interpol3full[0:4500,:] y_interpol3full = y_interpol3full[0:4500,:] print(x_interpol3full.shape) print(y_interpol3full.shape) x_augmentation_set_interpol3_full = np.around(x_interpol3full, 1) y_augmentation_set_interpol3_full = np.around(y_interpol3full, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_interpol3.npy', x_augmentation_set_interpol3_full) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_interpol3.npy', y_augmentation_set_interpol3_full) x_interpol32250 = x_interpol3full[0:2250,:] y_interpol32250 = y_interpol3full[0:2250,:] print(x_interpol32250.shape) print(y_interpol32250.shape) x_augmentation_set_interpol3_2250 = np.around(x_interpol32250, 1) y_augmentation_set_interpol3_2250 = np.around(y_interpol32250, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_interpol3_2250.npy', x_augmentation_set_interpol3_2250) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_interpol3_2250.npy', y_augmentation_set_interpol3_2250) x_interpol31125 = x_interpol3full[0:1125,:] y_interpol31125 = y_interpol3full[0:1125,:] print(x_interpol31125.shape) print(y_interpol31125.shape) x_augmentation_set_interpol3_1125 = np.around(x_interpol31125, 1) y_augmentation_set_interpol3_1125 = np.around(y_interpol31125, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_interpol3_1125.npy', x_augmentation_set_interpol3_1125) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_interpol3_1125.npy', y_augmentation_set_interpol3_1125) x_interpol3560 = x_interpol3full[0:560,:] y_interpol3560 = y_interpol3full[0:560,:] print(x_interpol3560.shape) print(y_interpol3560.shape) x_augmentation_set_interpol3_560 = np.around(x_interpol3560, 1) y_augmentation_set_interpol3_560 = np.around(y_interpol3560, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/x_augmentation_array_interpol3_560.npy', x_augmentation_set_interpol3_560) 
np.save('/Users/wildflowerlyi/Desktop/aug_sets_3/y_augmentation_array_interpol3_560.npy', y_augmentation_set_interpol3_560) # + # Load, concatenate, and then round - for interpolation 4 mean method y_interpol4mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean.npy') x_interpol4mean = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean.npy') np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_1.npy', x_interpol4mean) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_1.npy', y_interpol4mean) y_interpol4mean_2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_2.npy') x_interpol4mean_2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_2.npy') y_interpol4mean_3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_3.npy') x_interpol4mean_3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_3.npy') y_interpol4mean_4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_4.npy') x_interpol4mean_4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_4.npy') x_interpol4fullmean = np.concatenate((x_interpol4mean, x_interpol4mean_2, x_interpol4mean_3, x_interpol4mean_4), axis=0) y_interpol4fullmean = np.concatenate((y_interpol4mean, y_interpol4mean_2, y_interpol4mean_3, y_interpol4mean_4), axis=0) x_augmentation_set_interpol4mean = np.around(x_interpol4fullmean, 1) y_augmentation_set_interpol4mean = np.around(y_interpol4fullmean, 1) x_interpol4fullmean = x_interpol4fullmean[0:4500,:] y_interpol4fullmean = y_interpol4fullmean[0:4500,:] print(x_interpol4fullmean.shape) print(y_interpol4fullmean.shape) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean.npy', 
x_interpol4fullmean) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean.npy', y_interpol4fullmean) x_interpol4mean2250 = x_interpol4fullmean[0:2250,:] y_interpol4mean2250 = y_interpol4fullmean[0:2250,:] print(x_interpol4mean2250.shape) print(y_interpol4mean2250.shape) x_augmentation_set_interpol4mean_2250 = np.around(x_interpol4mean2250, 1) y_augmentation_set_interpol4mean_2250 = np.around(y_interpol4mean2250, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_2250.npy', x_augmentation_set_interpol4mean_2250) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_2250.npy', y_augmentation_set_interpol4mean_2250) x_interpol4mean1125 = x_interpol4fullmean[0:1125,:] y_interpol4mean1125 = y_interpol4fullmean[0:1125,:] print(x_interpol4mean1125.shape) print(y_interpol4mean1125.shape) x_augmentation_set_interpol4mean_1125 = np.around(x_interpol4mean1125, 1) y_augmentation_set_interpol4mean_1125 = np.around(y_interpol4mean1125, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_1125.npy', x_augmentation_set_interpol4mean_1125) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_1125.npy', y_augmentation_set_interpol4mean_1125) x_interpol4mean560 = x_interpol4fullmean[0:560,:] y_interpol4mean560 = y_interpol4fullmean[0:560,:] print(x_interpol4mean560.shape) print(y_interpol4mean560.shape) x_augmentation_set_interpol4mean_560 = np.around(x_interpol4mean560, 1) y_augmentation_set_interpol4mean_560 = np.around(y_interpol4mean560, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/x_augmentation_array_interpol4mean_560.npy', x_augmentation_set_interpol4mean_560) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_4/y_augmentation_array_interpol4mean_560.npy', y_augmentation_set_interpol4mean_560) # + # Load, concatenate, and then round - for interpolation 4 sampling method 
y_interpol4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4.npy') x_interpol4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4.npy') np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_1.npy', x_interpol4) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_1.npy', y_interpol4) y_interpol4_2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_2.npy') x_interpol4_2 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_2.npy') y_interpol4_3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_3.npy') x_interpol4_3 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_3.npy') y_interpol4_4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_4.npy') x_interpol4_4 = np.load('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_4.npy') x_interpol4full = np.concatenate((x_interpol4, x_interpol4_2, x_interpol4_3, x_interpol4_4), axis=0) y_interpol4full = np.concatenate((y_interpol4, y_interpol4_2, y_interpol4_3, y_interpol4_4), axis=0) x_augmentation_set_interpol4 = np.around(x_interpol4full, 1) y_augmentation_set_interpol4 = np.around(y_interpol4full, 1) x_augmentation_set_interpol4 = x_augmentation_set_interpol4[0:4500,:] y_augmentation_set_interpol4 = y_augmentation_set_interpol4[0:4500,:] print(x_augmentation_set_interpol4.shape) print(y_augmentation_set_interpol4.shape) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4.npy', x_augmentation_set_interpol4) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4.npy', y_augmentation_set_interpol4) x_interpol42250 = x_interpol4full[0:2250,:] y_interpol42250 = y_interpol4full[0:2250,:] print(x_interpol42250.shape) print(y_interpol42250.shape) x_augmentation_set_interpol4_2250 = 
np.around(x_interpol42250, 1) y_augmentation_set_interpol4_2250 = np.around(y_interpol42250, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_2250.npy', x_augmentation_set_interpol4_2250) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_2250.npy', y_augmentation_set_interpol4_2250) x_interpol41125 = x_interpol4full[0:1125,:] y_interpol41125 = y_interpol4full[0:1125,:] print(x_interpol41125.shape) print(y_interpol41125.shape) x_augmentation_set_interpol4_1125 = np.around(x_interpol41125, 1) y_augmentation_set_interpol4_1125 = np.around(y_interpol41125, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_1125.npy', x_augmentation_set_interpol4_1125) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_1125.npy', y_augmentation_set_interpol4_1125) x_interpol4560 = x_interpol4full[0:560,:] y_interpol4560 = y_interpol4full[0:560,:] print(x_interpol4560.shape) print(y_interpol4560.shape) x_augmentation_set_interpol4_560 = np.around(x_interpol4560, 1) y_augmentation_set_interpol4_560 = np.around(y_interpol4560, 1) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/x_augmentation_array_interpol4_560.npy', x_augmentation_set_interpol4_560) np.save('/Users/wildflowerlyi/Desktop/aug_sets_4/y_augmentation_array_interpol4_560.npy', y_augmentation_set_interpol4_560) # + # Load, concatenate, and then round - for interpolation 1 mean beta method x1mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/x_augmentation_array_mean_beta.npy') y1mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/y_augmentation_array_mean_beta.npy') x2mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/x_augmentation_array_mean_beta_2.npy') y2mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/y_augmentation_array_mean_beta_2.npy') x3mean_beta = 
np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/x_augmentation_array_mean_beta_3.npy') y3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/y_augmentation_array_mean_beta_3.npy') x4mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/x_augmentation_array_mean_beta_4.npy') y4mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/y_augmentation_array_mean_beta_4.npy') x5mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/x_augmentation_array_mean_beta_5.npy') y5mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/y_augmentation_array_mean_beta_5.npy') x_mean_betafull = np.concatenate((x1mean_beta,x2mean_beta,x3mean_beta,x4mean_beta,x5mean_beta), axis=0) y_mean_betafull = np.concatenate((y1mean_beta,y2mean_beta,y3mean_beta,y4mean_beta,y5mean_beta), axis=0) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/x_augmentation_array_mean_beta_1.npy', x1mean_beta) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta/y_augmentation_array_mean_beta_1.npy', y1mean_beta) x_mean_beta = x_mean_betafull[0:4500,:] y_mean_beta = y_mean_betafull[0:4500,:] print(x_mean_beta.shape) print(y_mean_beta.shape) x_augmentation_set_mean_beta = np.around(x_mean_beta, 1) y_augmentation_set_mean_beta = np.around(y_mean_beta, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_mean_beta.npy', x_augmentation_set_mean_beta) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_mean_beta.npy', y_augmentation_set_mean_beta) x_mean_beta2250 = x_mean_betafull[0:2250,:] y_mean_beta2250 = y_mean_betafull[0:2250,:] print(x_mean_beta2250.shape) print(y_mean_beta2250.shape) x_augmentation_set_mean_beta_2250 = np.around(x_mean_beta2250, 1) y_augmentation_set_mean_beta_2250 = np.around(y_mean_beta2250, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_2250mean_beta.npy', x_augmentation_set_mean_beta_2250) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_2250mean_beta.npy', 
y_augmentation_set_mean_beta_2250) x_mean_beta1125 = x_mean_betafull[0:1125,:] y_mean_beta1125 = y_mean_betafull[0:1125,:] print(x_mean_beta1125.shape) print(y_mean_beta1125.shape) x_augmentation_set_mean_beta_1125 = np.around(x_mean_beta1125, 1) y_augmentation_set_mean_beta_1125 = np.around(y_mean_beta1125, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_1125mean_beta.npy', x_augmentation_set_mean_beta_1125) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_1125mean_beta.npy', y_augmentation_set_mean_beta_1125) x_mean_beta560 = x_mean_betafull[0:560,:] y_mean_beta560 = y_mean_betafull[0:560,:] print(x_mean_beta560.shape) print(y_mean_beta560.shape) x_augmentation_set_mean_beta_560 = np.around(x_mean_beta560, 1) y_augmentation_set_mean_beta_560 = np.around(y_mean_beta560, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_560mean_beta.npy', x_augmentation_set_mean_beta_560) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_560mean_beta.npy', y_augmentation_set_mean_beta_560) # + # Load, concatenate, and then round - for interpolation 3 mean beta method x1_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/x_augmentation_array_interpol3mean_beta.npy') y1_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/y_augmentation_array_interpol3mean_beta.npy') np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/x_augmentation_array_interpol3mean_beta_1.npy', x1_interpol3mean_beta) np.save('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/y_augmentation_array_interpol3mean_beta_1.npy', y1_interpol3mean_beta) x2_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/x_augmentation_array_interpol3mean_beta_2.npy') y2_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/y_augmentation_array_interpol3mean_beta_2.npy') x3_interpol3mean_beta = 
np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/x_augmentation_array_interpol3mean_beta_3.npy') y3_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/y_augmentation_array_interpol3mean_beta_3.npy') x4_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/x_augmentation_array_interpol3mean_beta_4.npy') y4_interpol3mean_beta = np.load('/Users/wildflowerlyi/Desktop/aug_sets_mean_beta_3/y_augmentation_array_interpol3mean_beta_4.npy') x_interpol3mean_betafull = np.concatenate((x1_interpol3mean_beta,x2_interpol3mean_beta,x3_interpol3mean_beta, x4_interpol3mean_beta), axis=0) y_interpol3mean_betafull = np.concatenate((y1_interpol3mean_beta,y2_interpol3mean_beta,y3_interpol3mean_beta, y4_interpol3mean_beta), axis=0) x_interpol3mean_beta = x_interpol3mean_betafull[0:4500,:] y_interpol3mean_beta = y_interpol3mean_betafull[0:4500,:] print(x_interpol3mean_beta.shape) print(y_interpol3mean_beta.shape) x_augmentation_set_interpol3mean_beta = np.around(x_interpol3mean_beta, 1) y_augmentation_set_interpol3mean_beta = np.around(y_interpol3mean_beta, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3mean_beta.npy', x_augmentation_set_interpol3mean_beta) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3mean_beta.npy', y_augmentation_set_interpol3mean_beta) x_interpol3mean_beta2250 = x_interpol3mean_betafull[0:2250,:] y_interpol3mean_beta2250 = y_interpol3mean_betafull[0:2250,:] print(x_interpol3mean_beta2250.shape) print(y_interpol3mean_beta2250.shape) x_augmentation_set_interpol3mean_beta_2250 = np.around(x_interpol3mean_beta2250, 1) y_augmentation_set_interpol3mean_beta_2250 = np.around(y_interpol3mean_beta2250, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3_2250mean_beta.npy', x_augmentation_set_interpol3mean_beta_2250) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3_2250mean_beta.npy', y_augmentation_set_interpol3mean_beta_2250) 
x_interpol3mean_beta1125 = x_interpol3mean_betafull[0:1125,:] y_interpol3mean_beta1125 = y_interpol3mean_betafull[0:1125,:] print(x_interpol3mean_beta1125.shape) print(y_interpol3mean_beta1125.shape) x_augmentation_set_interpol3mean_beta_1125 = np.around(x_interpol3mean_beta1125, 1) y_augmentation_set_interpol3mean_beta_1125 = np.around(y_interpol3mean_beta1125, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3_1125mean_beta.npy', x_augmentation_set_interpol3mean_beta_1125) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3_1125mean_beta.npy', y_augmentation_set_interpol3mean_beta_1125) x_interpol3mean_beta560 = x_interpol3mean_betafull[0:560,:] y_interpol3mean_beta560 = y_interpol3mean_betafull[0:560,:] print(x_interpol3mean_beta560.shape) print(y_interpol3mean_beta560.shape) x_augmentation_set_interpol3mean_beta_560 = np.around(x_interpol3mean_beta560, 1) y_augmentation_set_interpol3mean_beta_560 = np.around(y_interpol3mean_beta560, 1) np.save('/Users/wildflowerlyi/Desktop/x_augmentation_array_interpol3_560mean_beta.npy', x_augmentation_set_interpol3mean_beta_560) np.save('/Users/wildflowerlyi/Desktop/y_augmentation_array_interpol3_560mean_beta.npy', y_augmentation_set_interpol3mean_beta_560)
gen_models/PixelVAE/.ipynb_checkpoints/data_augmentations-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (3.8)
#     language: python
#     name: quantum
# ---

# # Draper Adder Demo
#
# This is a quick demo of the draperry package which includes a draper adder and sum subset finder.

from draper_adder.main import subset_finder

# ## QOSF Challenge

# We'll begin with demonstrating the QOSF challenge. The function defaults to [5,7,8,9,1] and output=16.

subset_finder()

# Given that the output was denoted as a register in the problem, a quantum circuit is provided here to get the indexes of the elements that sum to the desired output.

# In addition, I've provided a tuple with the binary representation of the integers used as well.

# Checking our results....

ans = subset_finder()

# Let's print out the circuit

print(ans[0][0])

# As we can tell the numbers chosen were 7 and 9 which are placed at indices 1 and 3 respectively which is reflected in the circuit!

# ## General Addition

# We can also find subsets of various other input lists. Let's check it out!

result = subset_finder([1, 2, 3, 7], 10)
print(result)

# We get our two desired results of [1,2,7] and [3,7]!

print(result[0][0])

# The circuit also matches with the correct index.

# ## Draper Adder

# There is also a draper adder available for use with a quantum state vector simulator provided by qiskit. It can take in binary strings and integers.

from draper_adder.draper_adder import draper_adder

result = draper_adder('010', 7)
print(result)

# We added 2 and 7 and got our desired result of 9! Nice!

# ## Conclusion

# Further improvements to this module can be made by including different quantum arithmetic methods and including various simulators and hardware.
#
# Enjoy :) Thank you so much for reading this demo!
demo/draper_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Posterior inference for GGP graph model

# In this notebook, we'll infer the posterior distribution of yeast dataset using generalised gamma process graph model.
#
# Original source of the dataset with detailed description: http://www.cise.ufl.edu/research/sparse/matrices/Pajek/yeast.html

# +
import os
import pickle
import time
from collections import defaultdict

import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat

from sgp import GGPgraphmcmc

# %matplotlib inline
# -

# ### Loading yeast dataset

mat = loadmat('../data/yeast/yeast.mat')
# The adjacency matrix sits inside the nested MATLAB 'Problem' struct.
graph = mat['Problem'][0][0][2]

# ### Run MCMC sampler

# +
# NOTE(review): the (0, 0) pairs presumably tell the sampler to infer
# alpha/sigma/tau rather than fixing them - confirm against GGPgraphmcmc.
modelparam = {
    'alpha': (0, 0),
    'sigma': (0, 0),
    'tau': (0, 0),
}
mcmcparam = {
    'niter': 500,                 # total MCMC iterations
    'nburn': 1,                   # burn-in iterations
    'thin': 1,                    # keep every sample
    'leapfrog.L': 5,              # leapfrog steps per HMC update
    'leapfrog.epsilon': 0.1,      # leapfrog step size
    'leapfrog.nadapt': 1,
    'latent.MH_nb': 1,
    'hyper.MH_nb': 2,
    'hyper.rw_std': [0.02, 0.02],  # random-walk std for hyper-parameter MH
    'store_w': True,               # keep the sampled weights w
}
typegraph = 'undirected'  # or simple

samples, stats = GGPgraphmcmc(graph, modelparam, mcmcparam, typegraph, verbose=True)
# -

# The invalid values are carefully handled in the inference codes. It is safe to ignore the warning messages.

# ## Trace plots of some variables of interest

plt.plot(samples['sigma'])
# Raw string avoids the invalid '\s' escape sequence warning (same text).
plt.title(r'Trace plot of $\sigma$ variable')

# When the sigma is less than 0, the inferred graph is dense.

plt.plot(stats['w_rate'])
plt.title('MH acceptance rate for weight w')

plt.plot(stats['hyper_rate'])
plt.title('MH acceptance rate for hyper-params')
# checking the acceptance ratio
notebooks/PosteriorInferenceGGPgraph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import scipy.sparse as sp
import torch

# # pygcn

# +
def encode_onehot(labels):
    """Map each distinct label to a one-hot row vector.

    Labels are sorted before enumeration so the label -> column mapping is
    deterministic across runs (iterating a plain set is not).
    """
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                    enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot

def normalize(mx):
    """Row-normalize sparse matrix `D^{-1}A` """
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.  # rows that sum to 0 stay all-zero
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx

def normalize_adj(adj):
    """Symmetrically normalize adjacency matrix. D^{-0.5}AD^{-0.5}"""
    adj = sp.coo_matrix(adj)
    rowsum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # isolated nodes -> zero degree
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()

def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)

def load_data(path="../data0/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now)"""
    print('Loading {} dataset...'.format(dataset))
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    labels = encode_onehot(idx_features_labels[:, -1])

    # build graph: map paper ids to consecutive indices, then load edges
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)

    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

    features = normalize(features)
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))  # add self-loops first

    # idx_train = range(140)
    # idx_val = range(200, 500)
    # idx_test = range(500, 1500)
    idx_train = range(140)
    idx_val = range(140, 140+500)
    idx_test = range(1708, 2708)

    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    return adj, features, labels, idx_train, idx_val, idx_test

# adj (2708,2708)
# features (2708,1433) binary
# labels (2708) int0-6
# idx_train (140) int0-139
# idx_val (300) int200-499
# idx_test (1000) int500-1499

# +
# load_data(path="../data0/cora/", dataset="cora")
# -

# # gcn

# +
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
# Modern import path; scipy.sparse.linalg.eigen.arpack was removed in
# scipy >= 1.8.  NOTE(review): eigsh is not used anywhere in this notebook.
from scipy.sparse.linalg import eigsh
import sys

def parse_index_file(filename):
    """Parse index file."""
    index = []
    for line in open(filename):
        index.append(int(line.strip()))
    return index

def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)

def normalize(mx):
    """Row-normalize sparse matrix `D^{-1}A` """
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.  # rows that sum to 0 stay all-zero
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx

def normalize_adj(adj):
    """Symmetrically normalize adjacency matrix. D^{-0.5}AD^{-0.5}"""
    adj = sp.coo_matrix(adj)
    rowsum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # isolated nodes -> zero degree
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()

def load_data(dataset_str):
    """
    Loads input data from gcn/data directory

    ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
        (a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
    ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
        object;
    ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.

    All objects above must be saved using python pickle module.

    :param dataset_str: Dataset name
    :return: All data input files loaded (as well the training/test data).
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("../data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file(
        "../data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder),
                                    max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended

    # Re-order the shuffled test rows back into their graph positions.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    features = normalize(features)

    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    adj = normalize_adj(adj)

    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)

    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.argmax(labels, axis=1))
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    return adj, features, labels, idx_train, idx_val, idx_test

# x.shape # (140, 1433)
# y.shape # (140, 7)
# tx.shape # (1000, 1433)
# allx.shape # (1708, 1433)
# ally.shape # (1708, 7)
# min(test_idx_reorder) # int1708-2707
# graph # dict node:neighbor
# numpy are below
# adj (2708,2708)
# features (2708,1433)
# y_train, y_val, y_test (2708, 7)
#
train_mask, val_mask, test_mask (2708,) # + # load_data('cora') # -
# source notebook: pygcn/loadDataAsTfGcn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python Flair Basics # **(C) 2018-2021 by [<NAME>](http://damir.cavar.me/)** # **Version:** 0.4, January 2021 # **Download:** This and various other Jupyter notebooks are available from my [GitHub repo](https://github.com/dcavar/python-tutorial-for-ipython). # **License:** [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)) # This material was used in my Advanced Topics in AI class, introduction to Deep Learning environments in Spring 2019 at Indiana University. # Two types of central objects: # # - Sentence # - Token # # A Sentence holds a textual sentence and is essentially a list of Token objects. # For creating a Sentence object we first import the Sentence class from the *flair.data* module: from flair.data import Sentence # We can now define a sentence: sentence = Sentence('The grass is green .') print(sentence) # We can access the tokens of a sentence via their token id or with their index: print(sentence.get_token(4)) print(sentence[3]) # We can also iterate over all tokens in a sentence: for token in sentence: print(token) # ## Tokenization # There is a simple tokenizer included in the library using the lightweight segtok library to tokenize your text for such a Sentence definition. In the Sentence constructor use the flag *use_tokenize* to tokenize the input string before instantiating a Sentence object: sentence = Sentence('The grass is green.', use_tokenizer=True) print(sentence) # ## Tags on Tokens # A Token has fields for linguistic annotation: # # - lemma # - part-of-speech tag # - named entity tag # # We can add a tag by specifying the tag type and the tag value. 
# # We will adding an NER tag of type 'color' to the word 'green'. This means that we've tagged this word as an entity of type color: sentence[3].add_tag('ner', 'color') print(sentence.to_tagged_string()) # Each tag is of class Label. An associated score indicates confidence: # + from flair.data import Label tag: Label = sentence[3].get_tag('ner') print(f'"{sentence[3]}" is tagged as "{tag.value}" with confidence score "{tag.score}"') # - # The manually added color tag has a score of 1.0. A tag predicted by a sequence labeler will have a score value that indicates the classifier confidence. # A Sentence can have one or multiple labels that can for example be used in text classification tasks. For instance, the example below shows how we add the label 'sports' to a sentence, thereby labeling it as belonging to the sports category. sentence = Sentence('France is the current world cup winner.') sentence.add_label('topic', 'sports') sentence.add_label('language', 'English') print(sentence) # sentence.add_labels(1, ['sports', 'world cup']) sentence = Sentence('France is the current world cup winner.').add_label('topic', 'sports') print(sentence) # Labels are also of the Label class. So, you can print a sentence's labels like this: # + sentence = Sentence('France is the current world cup winner.').add_label('sports', 'world cup') print(sentence) for label in sentence.labels: print(label) # - # ## Tagging Text # ### Using Pre-Trained Sequence Tagging Models # Flair has numerous pre-trained models. For the named entity recognition (NER) task there is a model that was trained on the English CoNLL-03 task and can recognize 4 different entity types. 
Import it using: # + from flair.models import SequenceTagger tagger = SequenceTagger.load('ner') # - # We use the predict() method of the tagger on a sentence to add predicted tags to the tokens in the sentence: # + sentence = Sentence('<NAME> went to New York City .') tagger.predict(sentence) print(sentence.to_tagged_string()) # - # Getting annotated spans for multi-word expressions can be achieved using the following command: for entity in sentence.get_spans('ner'): print(entity) # Which indicates that "<NAME>" is a person (PER) and "Washington" is a location (LOC). Each such Span has a text, a tag value, its position in the sentence and "score" that indicates how confident the tagger is that the prediction is correct. You can also get additional information, such as the position offsets of each entity in the sentence by calling: print(sentence.to_dict(tag_type='ner')) print(sentence.to_dict()) # Flair contains various sequence tagger models. You choose which pre-trained model you load by passing the appropriate string to the load() method of the SequenceTagger class. Currently, the following pre-trained models are provided: # As indicated in the list above, we also provide pre-trained models for languages other than English. Currently, we support German, French, and Dutch other languages are forthcoming. To tag a German sentence, just load the appropriate model: # + tagger = SequenceTagger.load('de-ner') sentence = Sentence('<NAME> ging nach Washington .') tagger.predict(sentence) print(sentence.to_tagged_string()) # - # Flair offers access to multi-lingual models for multi-lingual text. # + tagger = SequenceTagger.load('pos-multi') sentence = Sentence('<NAME> lebte in Washington . Dort kaufte er a horse .') tagger.predict(sentence) print(sentence.to_tagged_string()) # - # ## Semantic Frames # For English, Flair provides a pre-trained model that detects semantic frames in text, trained using Propbank 3.0 frames. 
This provides some sort of word sense disambiguation for frame evoking words. # + tagger = SequenceTagger.load('frame') sentence_1 = Sentence('George returned to Berlin to return his hat .') sentence_2 = Sentence('He had a look at different hats .') tagger.predict(sentence_1) tagger.predict(sentence_2) print(sentence_1.to_tagged_string()) print(sentence_2.to_tagged_string()) # - # The frame detector makes a distinction in sentence 1 between two different meanings of the word 'return'. 'return.01' means returning to a location, while 'return.02' means giving something back. # # Similarly, in sentence 2 the frame detector finds a light verb construction in which 'have' is the light verb and 'look' is a frame evoking word. # ## Sentence Tagging # To tag an entire text corpus, one needs to split the corpus into sentences and pass a list of Sentence objects to the .predict() method. # + text = "This is a sentence. John read a book. This is another sentence. I love Berlin." from segtok.segmenter import split_single sentences = [Sentence(sent, use_tokenizer=True) for sent in split_single(text)] tagger: SequenceTagger = SequenceTagger.load('ner') tagger.predict(sentences) for i in sentences: print(i.to_tagged_string()) # - # Using the mini_batch_size parameter of the .predict() method, you can set the size of mini batches passed to the tagger. Depending on your resources, you might want to play around with this parameter to optimize speed. # ## Pre-Trained Text Classification Models # Flair provides a pre-trained model for detecting positive or negative comments. It was trained on the IMDB dataset and it can recognize positive and negative sentiment in English text. The IMDB data set can be downloaded from the [linked site](http://ai.stanford.edu/~amaas/data/sentiment/). # + from flair.models import TextClassifier classifier = TextClassifier.load('en-sentiment') # - # We call the predict() method of the classifier on a sentence. 
This will add the predicted label to the sentence: # + sentence = Sentence('This film hurts. It is so bad that I am confused.') sentence = Sentence("I love this film.") classifier.predict(sentence) print(sentence.labels) # + sentence = Sentence('This film is fantastic. I love it.') classifier.predict(sentence) print(sentence.labels) # - # Flair has a pre-trained German and English model. # ## Using Word Embeddings # Flair provides a set of classes with which we can embed the words in sentences in various ways. # All word embedding classes inherit from the TokenEmbeddings class and implement the embed() method which we need to call to embed our text. This means that for most users of Flair, the complexity of different embeddings remains hidden behind this interface. Simply instantiate the embedding class we require and call embed() to embed our text. # # All embeddings produced with Flair's methods are Pytorch vectors, so they can be immediately used for training and fine-tuning. # Classic word embeddings are static and word-level, meaning that each distinct word gets exactly one pre-computed embedding. Most embeddings fall under this class, including the popular GloVe or Komninos embeddings. # We instantiate the WordEmbeddings class and pass a string identifier of the embedding we wish to load. If we want to use GloVe embeddings, we pass the string 'glove' to the constructor: # + from flair.embeddings import WordEmbeddings glove_embedding = WordEmbeddings('glove') # - # We create an example sentence and call the embedding's embed() method. We can also pass a list of sentences to this method since some embedding types make use of batching to increase speed. # + sentence = Sentence('The grass is green .') glove_embedding.embed(sentence) for token in sentence: print(token) print(token.embedding) print(sentence.embedding) # - # GloVe embeddings are Pytorch vectors of dimensionality 100. 
# # We choose which pre-trained embeddings we want to load by passing the appropriate id string to the constructor of the WordEmbeddings class. We would use the two-letter language code to init an embedding, so 'en' for English and 'de' for German and so on. By default, this will initialize FastText embeddings trained over Wikipedia. We can also always use FastText embeddings over Web crawls, by instantiating with '-crawl'. The 'de-crawl' option would use embeddings trained over German web crawls. # # For English, Flair provides a few more options. We can choose between instantiating 'en-glove', 'en-extvec' and so on. # If we want to load German FastText embeddings, instantiate as follows: german_embedding = WordEmbeddings('de') # If we want to load German FastText embeddings trained over crawls, we instantiate as follows: german_embedding = WordEmbeddings('de-crawl') # If the models are not locally available, Flair will automatically download them and install them into the local user cache. # It is recommended to use the FastText embeddings, or GloVe if we want a smaller model. # # If we want to use any other embeddings (not listed in the list above), we can load those by calling: custom_embedding = WordEmbeddings('path/to/your/custom/embeddings.gensim') # If we want to load custom embeddings, we need to make sure that the custom embeddings are correctly formatted to gensim. # # We can, for example, convert FastText embeddings to gensim using the following code snippet: # + import gensim word_vectors = gensim.models.KeyedVectors.load_word2vec_format('/path/to/fasttext/embeddings.txt', binary=False) word_vectors.save('/path/to/converted') # - # ## Character Embeddings # Some embeddings - such as character-features - are not pre-trained but rather trained on the downstream task. Normally this requires an implementation of a hierarchical embedding architecture. # # With Flair, we don't need to worry about such things. 
Just choose the appropriate embedding class and the character features will then automatically train during downstream task training. # + from flair.embeddings import CharacterEmbeddings embedding = CharacterEmbeddings() sentence = Sentence('The grass is green .') embedding.embed(sentence) # - # ### Sub-Word Embeddings # Flair now also includes the byte pair embeddings calulated by @bheinzerling that segment words into subsequences. This can dramatically reduce the model size vis-a-vis using normal word embeddings at nearly the same accuracy. So, if we want to train small models try out the new BytePairEmbeddings class. # We initialize with a language code (275 languages supported), a number of 'syllables', and a number of dimensions (one of 50, 100, 200 or 300). The following initializes and uses byte pair embeddings for English: # + from flair.embeddings import BytePairEmbeddings embedding = BytePairEmbeddings('en') sentence = Sentence('The grass is green .') embedding.embed(sentence) # - # [Sub-word embeddings](https://nlp.h-its.org/bpemb/) are interesting, since # # [BPEmb](https://nlp.h-its.org/bpemb/) is a collection of pre-trained subword embeddings in 275 languages, based on Byte-Pair Encoding (BPE) and trained on Wikipedia. Its intended use is as input for neural models in natural language processing. # # - subwords allow guessing the meaning of unknown / out-of-vocabulary words. E.g., the suffix -shire in Melfordshire indicates a location. # - Byte-Pair Encoding gives a subword segmentation that is often good enough, without requiring tokenization or morphological analysis. In this case the BPE segmentation might be something like melf ord shire. # - Pre-trained byte-pair embeddings work surprisingly well, while requiring no tokenization and being much smaller than alternatives: an 11 MB BPEmb English model matches the results of the 6 GB FastText model in our evaluation. 
# If you are using word embeddings like word2vec or GloVe, you have probably encountered out-of-vocabulary words, i.e., words for which no embedding exists. A makeshift solution is to replace such words with an UNK token and train a generic embedding representing such unknown words. # # Subword approaches try to solve the unknown word problem differently, by assuming that you can reconstruct a word's meaning from its parts. For example, the suffix -shire lets you guess that Melfordshire is probably a location, or the suffix -osis that Myxomatosis might be a sickness. # # There are many ways of splitting a word into subwords. A simple method is to split into characters and then learn to transform this character sequence into a vector representation by feeding it to a convolutional neural network (CNN) or a recurrent neural network (RNN), usually a long-short term memory (LSTM). This vector representation can then be used like a word embedding. # # Another, more linguistically motivated way is a morphological analysis, but this requires tools and training data which might not be available for your language and domain of interest. # # Enter Byte-Pair Encoding (BPE) [Sennrich et al, 2016], an unsupervised subword segmentation method. BPE starts with a sequence of symbols, for example characters, and iteratively merges the most frequent symbol pair into a new symbol. # # For example, applying BPE to English might first merge the characters h and e into a new symbol he, then t and h into th, then t and he into the, and so on. # # Learning these merge operations from a large corpus (e.g. all Wikipedia articles in a given language) often yields reasonable subword segementations. For example, a BPE model trained on English Wikipedia splits melfordshire into mel, ford, and shire. # ### Stacked Embeddings # Stacked embeddings are one of the most important concepts of Flair. 
We can use them to combine different embeddings together, for instance if we want to use both traditional embeddings together with contextual string embeddings (see below). Stacked embeddings allow us to mix and match. We find that a combination of embeddings often gives best results. # # All we need to do is use the StackedEmbeddings class and instantiate it by passing a list of embeddings that we wish to combine. For instance, lets combine classic GloVe embeddings with character embeddings. This is effectively the architecture proposed in (Lample et al., 2016). # + from flair.embeddings import WordEmbeddings, CharacterEmbeddings glove_embedding = WordEmbeddings('glove') character_embeddings = CharacterEmbeddings() # - # Now instantiate the StackedEmbeddings class and pass it a list containing these two embeddings. # + from flair.embeddings import StackedEmbeddings stacked_embeddings = StackedEmbeddings( embeddings=[glove_embedding, character_embeddings]) # - # We use this embedding like all the other embeddings, i.e. call the embed() method over our sentences. # + sentence = Sentence('The grass is green .') stacked_embeddings.embed(sentence) for token in sentence: print(token) print(token.embedding) # - # Words are now embedded using a concatenation of two different embeddings. This means that the resulting embedding vector is still a single PyTorch vector. # ## Other Embeddings: BERT, ELMo, Flair # Next to standard WordEmbeddings and CharacterEmbeddings, Flair also provides classes for BERT, ELMo and Flair embeddings. These embeddings enable us to train truly state-of-the-art NLP models. # # All word embedding classes inherit from the TokenEmbeddings class and implement the embed() method which we need to call to embed our text. This means that for most users of Flair, the complexity of different embeddings remains hidden behind this interface. We instantiate the embedding class we require and call embed() to embed our text. 
# # All embeddings produced with Flair's methods are Pytorch vectors, so they can be immediately used for training and fine-tuning. # ### Flair Embeddings # Contextual string embeddings are powerful embeddings that capture latent syntactic-semantic information that goes beyond standard word embeddings. Key differences are: (1) they are trained without any explicit notion of words and thus fundamentally model words as sequences of characters. And (2) they are contextualized by their surrounding text, meaning that the same word will have different embeddings depending on its contextual use. # # Recent advances in language modeling using recurrent neural networks have made it viable to model language as distributions over characters. By learning to predict the next character on the basis of previous characters, such models have been shown to automatically internalize linguistic concepts such as words, sentences, subclauses and even sentiment. In Flair the internal states of a trained character language model is leveraged to produce a novel type of word embedding which the authors refer to as contextual string embeddings. The proposed embeddings have the distinct properties that they (a) are trained without any explicit notion of words and thus fundamentally model words as sequences of characters, and (b) are contextualized by their surrounding text, meaning that the same word will have different embeddings depending on its contextual use. The authors conduct a comparative evaluation against previous embeddings and find that their embeddings are highly useful for downstream tasks: across four classic sequence labeling tasks they consistently outperform the previous state-of-the-art. In particular, they significantly outperform previous work on English and German named entity recognition (NER), allowing them to report new state-of-the-art F1-scores on the CONLL03 shared task. 
# # With Flair, we can use these embeddings simply by instantiating the appropriate embedding class, same as standard word embeddings: # + from flair.embeddings import FlairEmbeddings flair_embedding_forward = FlairEmbeddings('news-forward') sentence = Sentence('The grass is green .') flair_embedding_forward.embed(sentence) # - # We can choose which embeddings we load by passing the appropriate string to the constructor of the FlairEmbeddings class. Currently, there are numerous contextual string embeddings provided in models (more coming). See [list](https://github.com/zalandoresearch/flair/blob/master/resources/docs/TUTORIAL_4_ELMO_BERT_FLAIR_EMBEDDING.md). # The recommendation is to combine both forward and backward Flair embeddings. Depending on the task, it is also recommended to add standard word embeddings into the mix. So, the recommendation is to use StackedEmbedding for most English tasks: # + from flair.embeddings import WordEmbeddings, FlairEmbeddings, StackedEmbeddings stacked_embeddings = StackedEmbeddings([ WordEmbeddings('glove'), FlairEmbeddings('news-forward'), FlairEmbeddings('news-backward'), ]) # - # We would use this embedding like all the other embeddings, i.e. call the embed() method over our sentences. # ### BERT Embeddings # BERT embeddings were developed by Devlin et al. (2018) and are a different kind of powerful word embedding based on a bidirectional transformer architecture. Flair is using the implementation of huggingface. The embeddings are wrapped into our simple embedding interface, so that they can be used like any other embedding. 
# + from flair.embeddings import TransformerWordEmbeddings embedding = TransformerWordEmbeddings() sentence = Sentence('The grass is green .') embedding.embed(sentence) # - for i in sentence: print(i, i.embedding) # We can load any of the pre-trained BERT models by providing the model string during initialization: # - 'bert-base-uncased': English; 12-layer, 768-hidden, 12-heads, 110M parameters # - 'bert-large-uncased': English; 24-layer, 1024-hidden, 16-heads, 340M parameters # - 'bert-base-cased': English; 12-layer, 768-hidden, 12-heads , 110M parameters # - 'bert-large-cased': English; 24-layer, 1024-hidden, 16-heads, 340M parameters # - 'bert-base-multilingual-cased': 104 languages; 12-layer, 768-hidden, 12-heads, 110M parameters # - 'bert-base-chinese': Chinese Simplified and Traditional; 12-layer, 768-hidden, 12-heads, 110M parameters # ### ELMo Embeddings # ELMo embeddings were presented by Peters et al. in 2018. They are using a bidirectional recurrent neural network to predict the next word in a text. Flair is using the implementation of AllenNLP. As this implementation comes with a lot of sub-dependencies, which Flair authors don't want to include in Flair, you need to first install the library via `pip install allennlp` before we can use it in Flair. Using the embeddings is as simple as using any other embedding type. To specify the correct model, pick one of *small*, *medium*, *portugese*, *original*: # + from flair.embeddings import ELMoEmbeddings embedding = ELMoEmbeddings(model="small") sentence = Sentence('The grass is green .') embedding.embed(sentence) # - # AllenNLP provides the following pre-trained models. To use any of the following models inside Flair simple specify the embedding id when initializing the ELMoEmbeddings. 
# - 'small': English; 1024-hidden, 1 layer, 14.6M parameters # - 'medium': English; 2048-hidden, 1 layer, 28.0M parameters # - 'original': English; 4096-hidden, 2 layers, 93.6M parameters # - 'pt': Portuguese # - 'pubmed': English biomedical data; more information # ### BERT and Flair Combined # We can very easily mix and match Flair, ELMo, BERT and classic word embeddings. We instantiate each embedding we wish to combine and use them in a StackedEmbedding. # # For instance, let's say we want to combine the multilingual Flair and BERT embeddings to train a hyper-powerful multilingual downstream task model. # # First, instantiate the embeddings we wish to combine: # + from flair.embeddings import FlairEmbeddings, BertEmbeddings flair_forward_embedding = FlairEmbeddings('multi-forward') flair_backward_embedding = FlairEmbeddings('multi-backward') bert_embedding = BertEmbeddings('bert-base-multilingual-cased') # - # Now we instantiate the StackedEmbeddings class and pass it a list containing these three embeddings: # + from flair.embeddings import StackedEmbeddings stacked_embeddings = StackedEmbeddings( embeddings=[flair_forward_embedding, flair_backward_embedding, bert_embedding]) # - # We use this embedding like all the other embeddings, i.e. call the embed() method over our sentences. # + sentence = Sentence('The grass is green .') stacked_embeddings.embed(sentence) for token in sentence: print(token) print(token.embedding) # - # Words are now embedded using a concatenation of three different embeddings. This means that the resulting embedding vector is still a single PyTorch vector. # ## Document Embeddings # Document embeddings are different from word embeddings in that they provide one embedding for an entire text, whereas word embeddings provide embeddings for individual words. # ... # **(C) 2018-2021 by [<NAME>](http://damir.cavar.me/)**
# source notebook: notebooks/Flair Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] toc=true id="kvKENgNcjiOh" colab_type="text" # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#TP-intro-aux-RNNs-en-PyTorch" data-toc-modified-id="TP-intro-aux-RNNs-en-PyTorch-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>TP intro aux RNNs en PyTorch</a></span></li><li><span><a href="#Manipulation-basique-de-RNNCell" data-toc-modified-id="Manipulation-basique-de-RNNCell-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Manipulation basique de RNNCell</a></span></li><li><span><a href="#Manipulation-basique-de-RNN" data-toc-modified-id="Manipulation-basique-de-RNN-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Manipulation basique de RNN</a></span></li><li><span><a href="#Données-séquentielles" data-toc-modified-id="Données-séquentielles-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Données séquentielles</a></span></li><li><span><a href="#Limites-des-RNNs-&quot;vanilla&quot;" data-toc-modified-id="Limites-des-RNNs-&quot;vanilla&quot;-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Limites des RNNs "vanilla"</a></span></li><li><span><a href="#RNNs-bidirectionnels" data-toc-modified-id="RNNs-bidirectionnels-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>RNNs bidirectionnels</a></span></li></ul></div> # + [markdown] id="hpQ3QL6ijiOj" colab_type="text" # # TP intro aux RNNs en PyTorch # + [markdown] id="K2KLS5FWjiOj" colab_type="text" # Lorsque l'on travaille avec des données séquentielles (séries temporelles, phrases, etc.), l'ordre des entrées est crucial pour la tâche à accomplir. Les réseaux neuronaux récurrents (RNN) traitent les données séquentielles en tenant compte de l'entrée courante et de ce qui a été appris des entrées précédentes. 
Dans ce notebook, nous apprendrons comment encoder des séries temporelles, comment créer et former des RNNs. # # ![rnn](https://www.irit.fr/~Thomas.Pellegrini/ens/RNN/images/rnn.png) # # + [markdown] id="iMTA_-KSjiOk" colab_type="text" # * **Objectif:** Traiter les données séquentielles en tenant compte de l'entrée courante et de ce qui a été appris des entrées précédentes. # * **Avantages:** # * Rendre compte de l'ordre et des entrées précédentes. # * Génération conditionnée pour générer des séquences. # * **Désavantages:**Désavantages # * La prédiction à chaque pas de temps dépend de la prédiction précédente, il est donc difficile de paralléliser les calculs avec un RNN. # * Le traitement de longues séquences peut entraîner des problèmes de mémoire et de calcul. # * L'interprétabilité est difficile, mais il y a quelques [techniques](https://arxiv.org/abs/1506.02078) qui utilisent les activations des RNN pour voir quelles parties des entrées sont traitées. # * **Remarque:** # * L'amélioration de l'architecture pour rendre les RNNs plus rapides et interprétables est un domaine de recherche en cours. 
# + [markdown] id="qMkRVSfcjiOl" colab_type="text" # ![rnn2](https://www.irit.fr/~Thomas.Pellegrini/ens/RNN/images/rnn2.png) # # Passe "forward" d'un RNN pour un pas de temps $X_t$ : # # $h_t = tanh(W_{hh}h_{t-1} + W_{xh}X_t+b_h)$ # # $y_t = W_{hy}h_t + b_y $ # # $ P(y) = softmax(y_t) = \frac{e^y}{\sum e^y} $ # # *avec*: # # * $X_t$ = input au temps t, dans $\mathbb{R}^{NXE}$, avec $N$ la batch size, $E$ la dimension des features (des embeddings si on traite des mots) # * $W_{hh}$ = poids des neurones cachés, dans $\mathbb{R}^{HXH}$, avec $H$ la dim du hidden # * $h_{t-1}$ = état caché au temps précédent, dans $\mathbb{R}^{NXH}$ # * $W_{xh}$ = poids sur l'entrée, dans $\mathbb{R}^{EXH}$ # * $b_h$ = biais des neurones cachés, dans $\mathbb{R}^{HX1}$ # * $W_{hy}$ = poids de la sortie, dans $\mathbb{R}^{HXC}$, avec $C$ le nombre de classes # * $b_y$ = biais des neurones de sortie, dans $\mathbb{R}^{CX1}$ # # On répète ces calculs pour tous les pas de temps en entrée ($X_{t+1}, X_{t+2}, ..., X_{N})$ pour obtenir une prédiction en sortie à chaque pas de temps. # # **Remarque**: Au premier pas de temps, l'état caché précédent $h_{t-1}$ peut être soit un vecteur de zéros ("non-conditionné"), soit initialisé avec certaines valeurs tirées au hasard ou bien fixées par une condition ("conditionné"). 
# + [markdown] id="d2x3ANXKjiOl" colab_type="text" # # Manipulation basique de RNNCell # + id="P3zorw_3jiOm" colab_type="code" colab={} import torch import torch.nn as nn import torch.nn.functional as F # + id="dmeamaCsv6ur" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt # + id="Yf54xt9ujiOo" colab_type="code" colab={} batch_size = 5 seq_size = 10 # taille max par input (on utilisera du masking pour les séquences qui sont plus petites que cette valeur) x_lengths = [8, 5, 4, 10, 5] # taille de chaque séquence en input embedding_dim = 100 rnn_hidden_dim = 256 output_dim = 4 # + id="zD8j_pz1jiOq" colab_type="code" outputId="f18e7c1b-2e66-4325-f4f7-f90e1dc9881b" colab={"base_uri": "https://localhost:8080/", "height": 34} # Initialisation des inputs synthétiques x_in = torch.randn(batch_size, seq_size, embedding_dim) x_lengths = torch.tensor(x_lengths) print (x_in.size()) # + id="n_DheI3-jiOt" colab_type="code" outputId="8c2dc6f1-8f05-4b94-8b68-7ccb8a5797dd" colab={"base_uri": "https://localhost:8080/", "height": 34} # Initialisation des hidden states (états cachés) à zéro hidden_t = torch.zeros((batch_size, rnn_hidden_dim)) print (hidden_t.size()) # + id="fQitTx5qjiOv" colab_type="code" outputId="9fa59ec6-fc95-4b66-b046-a08d209d481f" colab={"base_uri": "https://localhost:8080/", "height": 34} # Initialisation de la cellule RNN rnn_cell = nn.RNNCell(embedding_dim, rnn_hidden_dim) print (rnn_cell) # + id="DNCHdwS9jiOx" colab_type="code" outputId="ef2f1411-b0fa-4035-a7c6-5a0881370e13" colab={} # Passe forward à travers le RNN x_in = x_in.permute(1, 0, 2) # Le RNN prend la batch_size en dim 1 # On loop sur les pas de temps hiddens = [] for t in range(seq_size): hidden_t = rnn_cell(x_in[t], hidden_t) hiddens.append(hidden_t) hiddens = torch.stack(hiddens) hiddens = hiddens.permute(1, 0, 2) # on remet la batch_size à la dim 0 (plus logique) print (hiddens.size()) # + id="imb6EH2SjiOz" colab_type="code" colab={} def 
gather_last_relevant_hidden(hiddens, x_lengths): x_lengths = x_lengths.long().detach().cpu().numpy() - 1 out = [] for batch_index, column_index in enumerate(x_lengths): out.append(hiddens[batch_index, column_index]) return torch.stack(out) # + id="5rKgUhiRjiO0" colab_type="code" outputId="9e15d217-b931-4540-9138-49c8860a63fc" colab={} # Gather the last relevant hidden state z = gather_last_relevant_hidden(hiddens, x_lengths) print (z.size()) # + id="v3gKnCVbjiO2" colab_type="code" outputId="4b3a9c84-edb5-4762-b820-5681ac4a10e0" colab={} # Passe forward dans une couche full-connected fc1 = nn.Linear(rnn_hidden_dim, output_dim) y_pred = fc1(z) y_pred = F.softmax(y_pred, dim=1) print (y_pred.size()) print (y_pred) # + [markdown] id="lUIBriOWjiO4" colab_type="text" # # Manipulation basique de RNN # + [markdown] id="fGbtYfj6jiO4" colab_type="text" # Nous pouvons utiliser la couche RNN qui est plus haut-niveau que RNNCell (plus abstraite) # pour éviter de faire une boucle (nn.RNN est plus optimisé qu'une boucle) # # + id="EFKHhoM6jiO5" colab_type="code" outputId="c701e1ed-5fee-4bb9-bb6e-7f1168ccaf6a" colab={} x_in = torch.randn(batch_size, seq_size, embedding_dim) rnn = nn.RNN(embedding_dim, rnn_hidden_dim, batch_first=True) # l'option batch_first=True permet de garder la dim batch en premier out, h_n = rnn(x_in) # out : la série temporelle des prédictions # h_n : le dernier état caché à récupérer pour faire de la classification par exemple print ("in: ", x_in.size()) print ("out: ", out.size()) print ("h_n: ", h_n.size()) # Y a-t'il une différence entre le dernier vecteur de out et h_n ? 
print(out[0, 9, :10] == h_n[0, 0, :10]) # + id="mcyA-QvGjiO7" colab_type="code" outputId="11b74de3-cd00-4f7b-ecd8-7b89ce55f1d3" colab={} # Passe forward dans une couche full-connected fc1 = nn.Linear(rnn_hidden_dim, output_dim) y_pred = fc1(h_n) y_pred = F.softmax(y_pred, dim=1) print (y_pred.size()) print (y_pred) # + [markdown] id="X0ZWnjQ_jiO8" colab_type="text" # # Données séquentielles # + [markdown] id="NmJ6R2iOjiO9" colab_type="text" # Plusieurs types de tâches séquentielles peuvent être réalisées par des RNNs. # # 1. **One-to-one** : une entrée génère une sortie. # * Ex. Donner un mot et prédire sa catégorie syntaxique (verbe, nom, etc.). # # 2. **One-to-Many** : une entrée génère plusieurs sorties. # * Ex. Prédire une opinion (positive, négative, etc., on parle de sentiment analysis), générer une critique. # # 3. **Many-to-one** : de nombreuses entrées sont traitées séquentiellement pour générer une seule sortie. # * Ex. Traiter les mots dans une critique pour prédire sa "valence" (positive ou négative). # # 4. **Many-to-many** : de nombreuses entrées sont traitées séquentiellement pour générer de nombreuses sorties. # * Ex. Le modèle encode une phrase en français, il traite toute la phrase, puis en produit la traduction anglaise. # * Ex. Étant donnée une série de données chronologiques, prédire la probabilité d'un événement (risque de maladie) à chaque temps. # # ![seq2seq](https://www.irit.fr/~Thomas.Pellegrini/ens/RNN/images/seq2seq.jpeg) # + [markdown] id="K_giXwhYjiO-" colab_type="text" # # Limites des RNNs "vanilla" # + [markdown] id="EHKLPTchjiO-" colab_type="text" # Il y a plusieurs problèmes avec les RNN simples (dits "vanilla" en anglais) que nous avons vus ci-dessus. # # 1. Lorsque nous avons une très longue séquence d'entrée, il devient difficile pour le modèle de conserver l'information vue plus tôt à mesure que nous traitons la séquence. 
L'objectif du modèle est de conserver les informations utiles des pas de temps précédents, mais cela devient impossible pour une taille de séquence trop grande. # # 2. Pendant la rétropropropagation, le gradient de la fonction de perte doit remonter jusqu'au premier pas de temps. Si notre gradient est supérieur à 1 (${1.01}^{1000} = 20959$) ou inférieur à 1 (${{0.99}^{1000} = 4.31e-5$) et que nous avons beaucoup de pas de temps, cela peut rapidement dégénérer. # # Pour répondre à ces deux questions, le concept de "porte" ("gate") a été introduit dans les RNN. Les gates permettent aux RNN de contrôler le flux d'information entre les étapes temporelles afin d'optimiser la tâche à réaliser. Le fait de laisser passer sélectivement l'information permet au modèle de traiter des données séquentielles très longues. Les variantes les plus courantes des RNN sont les unités de mémoire à court terme, appelées [LSTM](https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM), et les unités récurrentes à "porte" [GRU](https://pytorch.org/docs/stable/nn.html#torch.nn.GRU). Vous pouvez en savoir plus sur le fonctionnement de ces unités [ici](http://colah.github.io/posts/2015-08-Understanding-LSTMs/). 
# # # ![rnn](https://www.irit.fr/~Thomas.Pellegrini/ens/RNN/images/gates.png) # + id="ogKGDbFLjiO_" colab_type="code" colab={} # GRU in PyTorch gru = nn.GRU(input_size=embedding_dim, hidden_size=rnn_hidden_dim, batch_first=True) # + id="f_4Zu6rHjiPA" colab_type="code" outputId="8c2d5863-e2cc-4be1-c351-6e200670aee6" colab={} # Initialize synthetic input x_in = torch.randn(batch_size, seq_size, embedding_dim) print (x_in.size()) # + id="ZRLXtfEMjiPC" colab_type="code" outputId="98c2de7e-5da5-40d1-aa61-05b902831216" colab={} # Forward pass out, h_n = gru(x_in) print ("out:", out.size()) print ("h_n:", h_n.size()) # + id="mAImUKqgpF9R" colab_type="code" outputId="1a44f150-12c6-4a0c-ad60-ee9bfb882f5e" colab={"base_uri": "https://localhost:8080/", "height": 34} x = torch.randn(5, 3, 7) gru = nn.GRU(7, 20) out = gru(x) print(out[0].size(), out[1].size()) # + [markdown] id="VOGIdv4OjiPE" colab_type="text" # **Remarque**: Le choix d'utiliser des GRU ou des LSTM dépend des données et des performances empiriques. Les GRU offrent des performances comparables avec un nombre réduit de paramètres, tandis que les LSTM sont plus efficaces et peuvent faire la différence en termes de performances pour une tâche particulière. # + [markdown] id="mOc2POqNjiPF" colab_type="text" # # RNNs bidirectionnels # + [markdown] id="P9Jdafd1jiPF" colab_type="text" # Beaucoup de progrès ont été réalisés ces dernières années avec les RNN, comme par exemple l'introduction de mécanismes d'[attention](https://www.oreilly.com/ideas/interpretability-via-attentional-and-memory-based-interfaces-using-tensorflow), les Quasi-RNNs, etc. L'une de ces avancées, largement utilisée, sont les RNNNs bidirectionnels (Bi-RNNs). La motivation derrière les RNN bidirectionnels est de traiter une séquence d'entrée dans les deux sens. La prise en compte du contexte dans les deux sens peut améliorer la performance lorsque toute la séquence d'entrée est connue au moment de l'inférence. 
Une application courante des Bi-RNNs est la traduction automatique : il est avantageux de considérer une phrase entière dans les deux sens pour la traduire dans une autre langue. # # # ![rnn](https://www.irit.fr/~Thomas.Pellegrini/ens/RNN/images/birnn.png) # + id="Q9IRCtr4jiPG" colab_type="code" colab={} # BiGRU en PyTorch bi_gru = nn.GRU(input_size=embedding_dim, hidden_size=rnn_hidden_dim, batch_first=True, bidirectional=True) # + id="fkNPj75LjiPI" colab_type="code" outputId="a8a717cd-2b8a-4ba7-c716-43581c58b160" colab={"base_uri": "https://localhost:8080/", "height": 51} # Passe forward out, h_n = bi_gru(x_in) print ("out:", out.size()) # tenseur contenant tous les hidden states du RNN print ("h_n:", h_n.size()) # le dernier hidden state du RNN # + [markdown] id="nS-i_DwljiPL" colab_type="text" # La sortie à chaque temps a une taille de 512, le double de la dim cachée précisée lors de la création de la couche GRU. Cela s'explique par le fait qu'elle inclut à la fois les directions avant et arrière encodées par le BiRNNN. # # https://medium.com/dair-ai/building-rnns-is-fun-with-pytorch-and-google-colab-3903ea9a3a79 # # + [markdown] id="uqJPyG9sYGA6" colab_type="text" # # Gérer les séquences de taille variable # + id="dlav3WbujiPM" colab_type="code" colab={} import torch.nn.utils.rnn as rnn_utils from torch.autograd import Variable # + [markdown] id="lIEQt-H-YRPB" colab_type="text" # ## Solution 1 : zero-padding # # Pour construire un tensor qui regroupe des séquences de taille différente, on complète les séquences avec des zéros jusqu'à obtenir la taille de la séquence la plus grande. # # # Cela est réalisé à l'aide de pad_sequence. 
# + id="93jITqN0YIkb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="bccb5334-521d-4b9a-df57-a098ae1f96c6" # sur des séquences 1-d a = torch.Tensor([1, 2, 3]) b = torch.Tensor([4, 5]) c = torch.Tensor([6]) rnn_utils.pad_sequence([a, b, c], batch_first=False, padding_value=0) # ce sont les arguments par défaut. # + id="HtmKLzObYT0T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="efd3ee3d-5d75-469c-fbf2-fd94c5d32a5c" # sur des séquences 2-d (ici des "embeddings") embedding_dim = 100 lengths = [10, 15, 20] a = torch.randn(lengths[0], embedding_dim) b = torch.randn(lengths[1], embedding_dim) c = torch.randn(lengths[2], embedding_dim) x_in = rnn_utils.pad_sequence([a, b, c]) print (x_in.size()) # + [markdown] id="LxH27ID5Ya_B" colab_type="text" # On a obtenu un tenseur de dimensions : T x B x d # # La fonction pad_sequence suppose que les dimensions des séquences, autres que leur taille, sont identiques. D'autre part, les tenseurs doivent être de même type. 
# # + [markdown] id="Z-Mtv6GEYdzB" colab_type="text" # ## Solution 2 : "empaqueter" les séquences avec pack_sequence # + id="o4So3NlLYYCD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cfbdc211-8982-4ddc-8201-8523f2da508e" # sur des séquences 1-d a = torch.Tensor([1, 2, 3]) b = torch.Tensor([4, 5]) c = torch.Tensor([6]) # rnn_utils.pack_sequence([a, b, c], enforce_sorted=True) # enforce_sorted: vérifie que les sequences sont déjà ordenées par longueur décroissante rnn_utils.pack_sequence([a, b, c]) # + id="mENK9xWmYga7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="1fff6c6f-3f99-4757-e9e3-427cfbef8848" # sur des séquences 2-d (ici des "embeddings") embedding_dim = 100 lengths = [10, 15, 20] # lengths = lengths[::-1] a = torch.randn(lengths[0], embedding_dim) b = torch.randn(lengths[1], embedding_dim) c = torch.randn(lengths[2], embedding_dim) # x_in = rnn_utils.pack_sequence([a, b, c], enforce_sorted=False) # x_in = rnn_utils.pack_sequence([a, b, c]) # --> RuntimeError: 'lengths' array has to be sorted in decreasing order x_in = rnn_utils.pack_sequence([c, b, a]) print (x_in.data.size()) print(x_in.batch_sizes) print(len(x_in.batch_sizes)) print(x_in.batch_sizes.sum()) # + [markdown] id="XkuqEr-xYlsB" colab_type="text" # ## Solution 3 : pack_padded_sequence # + id="rmEI87sdYiYL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="883d0c62-58b1-44c2-c878-6dd78f7aa4a0" # sur des séquences 2-d (ici des "embeddings") embedding_dim = 100 lengths = [10, 15, 20] lengths = lengths[::-1] a = torch.randn(lengths[0], embedding_dim) b = torch.randn(lengths[1], embedding_dim) c = torch.randn(lengths[2], embedding_dim) x_pad = rnn_utils.pad_sequence([a, b, c]) print (x_pad.size()) # x_in = rnn_utils.pack_padded_sequence(x_pad, torch.Tensor(lengths), enforce_sorted=False) x_in = rnn_utils.pack_padded_sequence(x_pad, torch.Tensor(lengths)) print(x_in.data.size(), 
x_in.batch_sizes) # + [markdown] id="PYASx_WnYrUB" colab_type="text" # ## Application : mini-batch de sequences de tailles variables dans un RNN # + id="k7CB_fO9Yni7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6c01a307-8d63-4d70-a6e4-ef8a1bca8a1d" # sur des séquences 2-d (ici des "embeddings") embedding_dim = 100 lengths = [10, 15, 20] lengths = lengths[::-1] a = torch.randn(lengths[0], embedding_dim) b = torch.randn(lengths[1], embedding_dim) c = torch.randn(lengths[2], embedding_dim) x_pad = rnn_utils.pad_sequence([a, b, c], batch_first=False) print (x_pad.size()) # x_in = rnn_utils.pack_padded_sequence(x_pad, torch.Tensor(lengths), enforce_sorted=False) x_in = rnn_utils.pack_padded_sequence(x_pad, torch.Tensor(lengths), batch_first=False) print(x_in.data.size(), x_in.batch_sizes) # + id="K7FSskqEYter" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="ae54303e-0160-4a64-87e5-38d2ada8d515" # now run through LSTM num_layers=1 hidden_size=10 batch_size=3 rnn = nn.LSTM( input_size=embedding_dim, hidden_size=hidden_size, num_layers=num_layers, batch_first=False, ) h0 = Variable(torch.randn(num_layers, batch_size, hidden_size)) c0 = Variable(torch.randn(num_layers, batch_size, hidden_size)) out, (h, c) = rnn(x_in, (h0, c0)) # # h: hidden state for t = seq_len # c: cell state for t = seq_len print(h.size(), c.size()) print(type(out)) # + [markdown] id="MHwMFBRhYyQR" colab_type="text" # out est un objet PackedSequence. 
On peut défaire le packing avec pad_packed_sequence : # + id="GAryruTzYvfM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="63484d39-eace-4215-b015-7a86dd4d10a4" # undo the packing operation out_unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=False) out_unpacked.size() # + id="zVm-XhpfY0Y8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0243156d-4c15-4002-e61a-865996e64b99" # Pour passer out à une couche linéaire, on fait un reshape, avec un contiguous avant pour # accélérer les accès mémoire : out_vec = out_unpacked.contiguous() out_vec = out_vec.view(-1, out_vec.shape[2]) out_vec.size() # + [markdown] id="i2lYGeNhY5FS" colab_type="text" # out_vec est maintenant une matrice de size (T*B, H). # # On peut appliquer maintenant une couche linéaire. # + id="uW5lsDVAY2Yo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="316b438d-e0b6-497f-a947-d88e245ee3c9" num_classes = 5 classif_layer = nn.Linear(hidden_size, num_classes) classif_out = classif_layer(out_vec) classif_out.size() # + [markdown] id="HujLqA_pY-Nh" colab_type="text" # Nous pouvons ffaire un reshape pour récupérer des séquences de prédictions # + id="g0p-12yqY7gw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0e1221aa-31bb-4163-9b72-72936778d55a" preds = classif_out.reshape(-1, batch_size, num_classes) print(preds.size()) # + id="xGUxiN3cZALm" colab_type="code" colab={}
tp0_intro_RNN/colab/TP0_intro_RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: S2S Challenge # language: python # name: s2s # --- # %load_ext autoreload # %autoreload 2 import climetlab as cml import climetlab_s2s_ai_challenge def fix_dataset_dims(d): day_of_year = d.forecast_time[0].dt.dayofyear.data.item() new_d = d.expand_dims("forecast_dayofyear").assign_coords( forecast_dayofyear=[day_of_year] ) new_d = new_d.assign_coords(forecast_year=new_d.forecast_time.dt.year).swap_dims( forecast_time="forecast_year" ) dims = set(new_d.dims) dims.difference_update( ("forecast_dayofyear", "forecast_year", "latitude", "longitude") ) new_d = new_d.transpose( "forecast_year", "forecast_dayofyear", *dims, "latitude", "longitude" ) return new_d # # First surprise # + cmlds = cml.load_dataset("s2s-ai-challenge-training-output-reference", date = 20200220, parameter='tp' ) xrds = fix_dataset_dims(cmlds.to_xarray()) # - xrds.sel(forecast_year=2012).isnull().sum(dim=['latitude', 'longitude']).tp.plot() # Numer of null values increases after a certain lead time. # I think there are many cases of this in the dataset. # Here is an example of correct lead time. xrds.sel(forecast_year=2012).isel(lead_time=5).tp.plot() # Lead time 20 has only nulls. xrds.sel(forecast_year=2012).isel(lead_time=20).tp.plot() # # Second surprise # + cmlds = cml.load_dataset("s2s-ai-challenge-training-output-reference", date = 20200326, parameter='tp' ) xrds = fix_dataset_dims(cmlds.to_xarray()) # - xrds.forecast_dayofyear xrds.isel(lead_time=1).sel(forecast_year=2015, forecast_dayofyear=86).tp.plot() # Note the gap inside of Africa. # I saw different gaps of differents sizes in that same region.
notebooks/precipitation-bug-report-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Jxv6goXm7oGF" # ##### Copyright 2018 The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + cellView="form" id="llMNufAK7nfK" #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="8Byow2J6LaPl" # # tf.data: Build TensorFlow input pipelines # + [markdown] id="kGXS3UWBBNoc" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="9Qo3HgDjbDcI" # The `tf.data` API 
enables you to build complex input pipelines from simple, # reusable pieces. For example, the pipeline for an image model might aggregate # data from files in a distributed file system, apply random perturbations to each # image, and merge randomly selected images into a batch for training. The # pipeline for a text model might involve extracting symbols from raw text data, # converting them to embedding identifiers with a lookup table, and batching # together sequences of different lengths. The `tf.data` API makes it possible to # handle large amounts of data, read from different data formats, and perform # complex transformations. # # The `tf.data` API introduces a `tf.data.Dataset` abstraction that represents a # sequence of elements, in which each element consists of one or more components. # For example, in an image pipeline, an element might be a single training # example, with a pair of tensor components representing the image and its label. # # There are two distinct ways to create a dataset: # # * A data **source** constructs a `Dataset` from data stored in memory or in # one or more files. # # * A data **transformation** constructs a dataset from one or more # `tf.data.Dataset` objects. # # + id="UJIEjEIBdf-h" import tensorflow as tf # + id="7Y0JtWBNR9E5" import pathlib import os import matplotlib.pyplot as plt import pandas as pd import numpy as np np.set_printoptions(precision=4) # + [markdown] id="0l4a0ALxdaWF" # ## Basic mechanics # <a id="basic-mechanics"/> # # To create an input pipeline, you must start with a data *source*. For example, # to construct a `Dataset` from data in memory, you can use # `tf.data.Dataset.from_tensors()` or `tf.data.Dataset.from_tensor_slices()`. # Alternatively, if your input data is stored in a file in the recommended # TFRecord format, you can use `tf.data.TFRecordDataset()`. # # Once you have a `Dataset` object, you can *transform* it into a new `Dataset` by # chaining method calls on the `tf.data.Dataset` object. 
For example, you can # apply per-element transformations such as `Dataset.map()`, and multi-element # transformations such as `Dataset.batch()`. See the documentation for # `tf.data.Dataset` for a complete list of transformations. # # The `Dataset` object is a Python iterable. This makes it possible to consume its # elements using a for loop: # + id="0F-FDnjB6t6J" dataset = tf.data.Dataset.from_tensor_slices([8, 3, 0, 8, 2, 1]) dataset # + id="pwJsRJ-FbDcJ" for elem in dataset: print(elem.numpy()) # + [markdown] id="m0yy80MobDcM" # Or by explicitly creating a Python iterator using `iter` and consuming its # elements using `next`: # + id="03w9oxFfbDcM" it = iter(dataset) print(next(it).numpy()) # + [markdown] id="Q4CgCL8qbDcO" # Alternatively, dataset elements can be consumed using the `reduce` # transformation, which reduces all elements to produce a single result. The # following example illustrates how to use the `reduce` transformation to compute # the sum of a dataset of integers. # + id="C2bHAeNxbDcO" print(dataset.reduce(0, lambda state, value: state + value).numpy()) # + [markdown] id="B2Fzwt2nbDcR" # <!-- TODO(jsimsa): Talk about `tf.function` support. --> # # <a id="dataset_structure"></a> # ### Dataset structure # # A dataset produces a sequence of *elements*, where each element is # the same (nested) structure of *components*. Individual components # of the structure can be of any type representable by # `tf.TypeSpec`, including `tf.Tensor`, `tf.sparse.SparseTensor`, # `tf.RaggedTensor`, `tf.TensorArray`, or `tf.data.Dataset`. # # The Python constructs that can be used to express the (nested) # structure of elements include `tuple`, `dict`, `NamedTuple`, and # `OrderedDict`. In particular, `list` is not a valid construct for # expressing the structure of dataset elements. This is because # early tf.data users felt strongly about `list` inputs (e.g. 
passed # to `tf.data.Dataset.from_tensors`) being automatically packed as # tensors and `list` outputs (e.g. return values of user-defined # functions) being coerced into a `tuple`. As a consequence, if you # would like a `list` input to be treated as a structure, you need # to convert it into `tuple` and if you would like a `list` output # to be a single component, then you need to explicitly pack it # using `tf.stack`. # # The `Dataset.element_spec` property allows you to inspect the type # of each element component. The property returns a *nested structure* # of `tf.TypeSpec` objects, matching the structure of the element, # which may be a single component a tuple of components, or a nested # tuple of components. For example: # + id="Mg0m1beIhXGn" dataset1 = tf.data.Dataset.from_tensor_slices(tf.random.uniform([4, 10])) dataset1.element_spec # + id="cwyemaghhXaG" dataset2 = tf.data.Dataset.from_tensor_slices( (tf.random.uniform([4]), tf.random.uniform([4, 100], maxval=100, dtype=tf.int32))) dataset2.element_spec # + id="1CL7aB0ahXn_" dataset3 = tf.data.Dataset.zip((dataset1, dataset2)) dataset3.element_spec # + id="m5bz7R1xhX1f" # Dataset containing a sparse tensor. dataset4 = tf.data.Dataset.from_tensors(tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])) dataset4.element_spec # + id="lVOPHur_hYQv" # Use value_type to see the type of value represented by the element spec dataset4.element_spec.value_type # + [markdown] id="r5xNsFFvhUnr" # The `Dataset` transformations support datasets of any structure. 
When using the # `Dataset.map()`, and `Dataset.filter()` transformations, # which apply a function to each element, the element structure determines the # arguments of the function: # + id="2myAr3Pxd-zF" dataset1 = tf.data.Dataset.from_tensor_slices( tf.random.uniform([4, 10], minval=1, maxval=10, dtype=tf.int32)) dataset1 # + id="woPXMP14gUTg" for z in dataset1: print(z.numpy()) # + id="53PA4x6XgLar" dataset2 = tf.data.Dataset.from_tensor_slices( (tf.random.uniform([4]), tf.random.uniform([4, 100], maxval=100, dtype=tf.int32))) dataset2 # + id="2ju4sNSebDcR" dataset3 = tf.data.Dataset.zip((dataset1, dataset2)) dataset3 # + id="BgxsfAS2g6gk" for a, (b,c) in dataset3: print('shapes: {a.shape}, {b.shape}, {c.shape}'.format(a=a, b=b, c=c)) # + [markdown] id="M1s2K0g-bDcT" # ## Reading input data # # + [markdown] id="F3JG2f0h2683" # ### Consuming NumPy arrays # # See [Loading NumPy arrays](../tutorials/load_data/numpy.ipynb) for more examples. # # If all of your input data fits in memory, the simplest way to create a `Dataset` # from them is to convert them to `tf.Tensor` objects and use # `Dataset.from_tensor_slices()`. # + id="NmaE6PjjhQ47" train, test = tf.keras.datasets.fashion_mnist.load_data() # + id="J6cNiuDBbDcU" images, labels = train images = images/255 dataset = tf.data.Dataset.from_tensor_slices((images, labels)) dataset # + [markdown] id="XkwrDHN5bDcW" # Note: The above code snippet will embed the `features` and `labels` arrays # in your TensorFlow graph as `tf.constant()` operations. This works well for a # small dataset, but wastes memory---because the contents of the array will be # copied multiple times---and can run into the 2GB limit for the `tf.GraphDef` # protocol buffer. # + [markdown] id="pO4ua2gEmIhR" # ### Consuming Python generators # # Another common data source that can easily be ingested as a `tf.data.Dataset` is the python generator. # # Caution: While this is a convienient approach it has limited portability and scalibility. 
It must run in the same python process that created the generator, and is still subject to the Python [GIL](https://en.wikipedia.org/wiki/Global_interpreter_lock). # + id="9njpME-jmDza" def count(stop): i = 0 while i<stop: yield i i += 1 # + id="xwqLrjnTpD8Y" for n in count(5): print(n) # + [markdown] id="D_BB_PhxnVVx" # The `Dataset.from_generator` constructor converts the python generator to a fully functional `tf.data.Dataset`. # # The constructor takes a callable as input, not an iterator. This allows it to restart the generator when it reaches the end. It takes an optional `args` argument, which is passed as the callable's arguments. # # The `output_types` argument is required because `tf.data` builds a `tf.Graph` internally, and graph edges require a `tf.dtype`. # + id="GFga_OTwm0Je" ds_counter = tf.data.Dataset.from_generator(count, args=[25], output_types=tf.int32, output_shapes = (), ) # + id="fel1SUuBnDUE" for count_batch in ds_counter.repeat().batch(10).take(10): print(count_batch.numpy()) # + [markdown] id="wxy9hDMTq1zD" # The `output_shapes` argument is not *required* but is highly recomended as many tensorflow operations do not support tensors with unknown rank. If the length of a particular axis is unknown or variable, set it as `None` in the `output_shapes`. # # It's also important to note that the `output_shapes` and `output_types` follow the same nesting rules as other dataset methods. # # Here is an example generator that demonstrates both aspects, it returns tuples of arrays, where the second array is a vector with unknown length. # + id="allFX1g8rGKe" def gen_series(): i = 0 while True: size = np.random.randint(0, 10) yield i, np.random.normal(size=(size,)) i += 1 # + id="6Ku26Yb9rcJX" for i, series in gen_series(): print(i, ":", str(series)) if i > 5: break # + [markdown] id="LmkynGilx0qf" # The first output is an `int32` the second is a `float32`. 
# # The first item is a scalar, shape `()`, and the second is a vector of unknown length, shape `(None,)` # + id="zDTfhEzhsliM" ds_series = tf.data.Dataset.from_generator( gen_series, output_types=(tf.int32, tf.float32), output_shapes=((), (None,))) ds_series # + [markdown] id="WWxvSyQiyN0o" # Now it can be used like a regular `tf.data.Dataset`. Note that when batching a dataset with a variable shape, you need to use `Dataset.padded_batch`. # + id="A7jEpj3As1lO" ds_series_batch = ds_series.shuffle(20).padded_batch(10) ids, sequence_batch = next(iter(ds_series_batch)) print(ids.numpy()) print() print(sequence_batch.numpy()) # + [markdown] id="_hcqOccJ1CxG" # For a more realistic example, try wrapping `preprocessing.image.ImageDataGenerator` as a `tf.data.Dataset`. # # First download the data: # + id="g-_JCFRQ1CXM" flowers = tf.keras.utils.get_file( 'flower_photos', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', untar=True) # + [markdown] id="UIjPhvQ87jUT" # Create the `image.ImageDataGenerator` # + id="vPCZeBQE5DfH" img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=20) # + id="<KEY>" images, labels = next(img_gen.flow_from_directory(flowers)) # + id="Hd96nH1w3eKH" print(images.dtype, images.shape) print(labels.dtype, labels.shape) # + id="KvRwvt5E2rTH" ds = tf.data.Dataset.from_generator( lambda: img_gen.flow_from_directory(flowers), output_types=(tf.float32, tf.float32), output_shapes=([32,256,256,3], [32,5]) ) ds.element_spec # + id="LcaULBCXj_2_" for images, label in ds.take(1): print('images.shape: ', images.shape) print('labels.shape: ', labels.shape) # + [markdown] id="ma4XoYzih2f4" # ### Consuming TFRecord data # # See [Loading TFRecords](../tutorials/load_data/tf_records.ipynb) for an end-to-end example. # # The `tf.data` API supports a variety of file formats so that you can process # large datasets that do not fit in memory. 
For example, the TFRecord file format # is a simple record-oriented binary format that many TensorFlow applications use # for training data. The `tf.data.TFRecordDataset` class enables you to # stream over the contents of one or more TFRecord files as part of an input # pipeline. # + [markdown] id="LiatWUloRJc4" # Here is an example using the test file from the French Street Name Signs (FSNS). # + id="jZo_4fzdbDcW" # Creates a dataset that reads all of the examples from two files. fsns_test_file = tf.keras.utils.get_file("fsns.tfrec", "https://storage.googleapis.com/download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001") # + [markdown] id="seD5bOH3RhBP" # The `filenames` argument to the `TFRecordDataset` initializer can either be a # string, a list of strings, or a `tf.Tensor` of strings. Therefore if you have # two sets of files for training and validation purposes, you can create a factory # method that produces the dataset, taking filenames as an input argument: # # + id="e2WV5d7DRUA-" dataset = tf.data.TFRecordDataset(filenames = [fsns_test_file]) dataset # + [markdown] id="62NC3vz9U8ww" # Many TensorFlow projects use serialized `tf.train.Example` records in their TFRecord files. These need to be decoded before they can be inspected: # + id="3tk29nlMl5P3" raw_example = next(iter(dataset)) parsed = tf.train.Example.FromString(raw_example.numpy()) parsed.features.feature['image/text'] # + [markdown] id="qJAUib10bDcb" # ### Consuming text data # # See [Loading Text](../tutorials/load_data/text.ipynb) for an end to end example. # # Many datasets are distributed as one or more text files. The # `tf.data.TextLineDataset` provides an easy way to extract lines from one or more # text files. Given one or more filenames, a `TextLineDataset` will produce one # string-valued element per line of those files. 
# + id="hQMoFu2TbDcc" directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/' file_names = ['cowper.txt', 'derby.txt', 'butler.txt'] file_paths = [ tf.keras.utils.get_file(file_name, directory_url + file_name) for file_name in file_names ] # + id="il4cOjiVwj95" dataset = tf.data.TextLineDataset(file_paths) # + [markdown] id="MevIbDiwy4MC" # Here are the first few lines of the first file: # + id="vpEHKyvHxu8A" for line in dataset.take(5): print(line.numpy()) # + [markdown] id="lJyVw8ro7fey" # To alternate lines between files use `Dataset.interleave`. This makes it easier to shuffle files together. Here are the first, second and third lines from each translation: # + id="1UCveWOt7fDE" files_ds = tf.data.Dataset.from_tensor_slices(file_paths) lines_ds = files_ds.interleave(tf.data.TextLineDataset, cycle_length=3) for i, line in enumerate(lines_ds.take(9)): if i % 3 == 0: print() print(line.numpy()) # + [markdown] id="2F_pOIDubDce" # By default, a `TextLineDataset` yields *every* line of each file, which may # not be desirable, for example, if the file starts with a header line, or contains comments. These lines can be removed using the `Dataset.skip()` or # `Dataset.filter()` transformations. Here, you skip the first line, then filter to # find only survivors. 
# + id="X6b20Gua2jPO" titanic_file = tf.keras.utils.get_file("train.csv", "https://storage.googleapis.com/tf-datasets/titanic/train.csv") titanic_lines = tf.data.TextLineDataset(titanic_file) # + id="5M1pauNT68B2" for line in titanic_lines.take(10): print(line.numpy()) # + id="dEIP95cibDcf" def survived(line): return tf.not_equal(tf.strings.substr(line, 0, 1), "0") survivors = titanic_lines.skip(1).filter(survived) # + id="odQ4618h1XqD" for line in survivors.take(10): print(line.numpy()) # + [markdown] id="x5z5B11UjDTd" # ### Consuming CSV data # + [markdown] id="ChDHNi3qbDch" # See [Loading CSV Files](../tutorials/load_data/csv.ipynb), and [Loading Pandas DataFrames](../tutorials/load_data/pandas.ipynb) for more examples. # # The CSV file format is a popular format for storing tabular data in plain text. # # For example: # + id="kj28j5u49Bjm" titanic_file = tf.keras.utils.get_file("train.csv", "https://storage.googleapis.com/tf-datasets/titanic/train.csv") # + id="ghvtmW40LM0B" df = pd.read_csv(titanic_file) df.head() # + [markdown] id="J9uBqt5oGsR-" # If your data fits in memory the same `Dataset.from_tensor_slices` method works on dictionaries, allowing this data to be easily imported: # + id="JmAMCiPJA0qO" titanic_slices = tf.data.Dataset.from_tensor_slices(dict(df)) for feature_batch in titanic_slices.take(1): for key, value in feature_batch.items(): print(" {!r:20s}: {}".format(key, value)) # + [markdown] id="47yippqaHFk6" # A more scalable approach is to load from disk as necessary. # # The `tf.data` module provides methods to extract records from one or more CSV files that comply with [RFC 4180](https://tools.ietf.org/html/rfc4180). # # The `experimental.make_csv_dataset` function is the high level interface for reading sets of csv files. It supports column type inference and many other features, like batching and shuffling, to make usage simple. 
# + id="zHUDrM_s_brq" titanic_batches = tf.data.experimental.make_csv_dataset( titanic_file, batch_size=4, label_name="survived") # + id="TsZfhz79_Wlg" for feature_batch, label_batch in titanic_batches.take(1): print("'survived': {}".format(label_batch)) print("features:") for key, value in feature_batch.items(): print(" {!r:20s}: {}".format(key, value)) # + [markdown] id="k_5N7CdNGYAa" # You can use the `select_columns` argument if you only need a subset of columns. # + id="H9KNHyDwF2Sc" titanic_batches = tf.data.experimental.make_csv_dataset( titanic_file, batch_size=4, label_name="survived", select_columns=['class', 'fare', 'survived']) # + id="7C2uosFnGIT8" for feature_batch, label_batch in titanic_batches.take(1): print("'survived': {}".format(label_batch)) for key, value in feature_batch.items(): print(" {!r:20s}: {}".format(key, value)) # + [markdown] id="TSVgJJ1HJD6M" # There is also a lower-level `experimental.CsvDataset` class which provides finer grained control. It does not support column type inference. Instead you must specify the type of each column. # + id="wP1Y_NXA8bYl" titanic_types = [tf.int32, tf.string, tf.float32, tf.int32, tf.int32, tf.float32, tf.string, tf.string, tf.string, tf.string] dataset = tf.data.experimental.CsvDataset(titanic_file, titanic_types , header=True) for line in dataset.take(10): print([item.numpy() for item in line]) # + [markdown] id="oZSuLVsTbDcj" # If some columns are empty, this low-level interface allows you to provide default values instead of column types. # + id="Qry-g90FMo2I" # %%writefile missing.csv 1,2,3,4 ,2,3,4 1,,3,4 1,2,,4 1,2,3, ,,, # + id="d5_hbiE9bDck" # Creates a dataset that reads all of the records from two CSV files, each with # four float columns which may have missing values. 
record_defaults = [999,999,999,999] dataset = tf.data.experimental.CsvDataset("missing.csv", record_defaults) dataset = dataset.map(lambda *items: tf.stack(items)) dataset # + id="__jc7iD9M9FC" for line in dataset: print(line.numpy()) # + [markdown] id="z_4g0cIvbDcl" # By default, a `CsvDataset` yields *every* column of *every* line of the file, # which may not be desirable, for example if the file starts with a header line # that should be ignored, or if some columns are not required in the input. # These lines and fields can be removed with the `header` and `select_cols` # arguments respectively. # + id="p2IF_K0obDcm" # Creates a dataset that reads all of the records from two CSV files with # headers, extracting float data from columns 2 and 4. record_defaults = [999, 999] # Only provide defaults for the selected columns dataset = tf.data.experimental.CsvDataset("missing.csv", record_defaults, select_cols=[1, 3]) dataset = dataset.map(lambda *items: tf.stack(items)) dataset # + id="-5aLprDeRNb0" for line in dataset: print(line.numpy()) # + [markdown] id="-CJfhb03koVN" # ### Consuming sets of files # + [markdown] id="yAO7SZDSk57_" # There are many datasets distributed as a set of files, where each file is an example. # + id="1dZwN3CS-jV2" flowers_root = tf.keras.utils.get_file( 'flower_photos', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', untar=True) flowers_root = pathlib.Path(flowers_root) # + [markdown] id="4099UU8n-jHP" # Note: these images are licensed CC-BY, see LICENSE.txt for details. 
# + [markdown] id="FCyTYpmDs_jE" # The root directory contains a directory for each class: # + id="_2iCXsHu6jJH" for item in flowers_root.glob("*"): print(item.name) # + [markdown] id="Ylj9fgkamgWZ" # The files in each class directory are examples: # + id="lAkQp5uxoINu" list_ds = tf.data.Dataset.list_files(str(flowers_root/'*/*')) for f in list_ds.take(5): print(f.numpy()) # + [markdown] id="91CPfUUJ_8SZ" # Read the data using the `tf.io.read_file` function and extract the label from the path, returning `(image, label)` pairs: # + id="-xhBRgvNqRRe" def process_path(file_path): label = tf.strings.split(file_path, os.sep)[-2] return tf.io.read_file(file_path), label labeled_ds = list_ds.map(process_path) # + id="kxrl0lGdnpRz" for image_raw, label_text in labeled_ds.take(1): print(repr(image_raw.numpy()[:100])) print() print(label_text.numpy()) # + [markdown] id="yEh46Ee0oSH5" # <!-- # TODO(mrry): Add this section. # # ### Handling text data with unusual sizes # --> # # ## Batching dataset elements # # + [markdown] id="gR-2xY-8oSH4" # ### Simple batching # # The simplest form of batching stacks `n` consecutive elements of a dataset into # a single element. The `Dataset.batch()` transformation does exactly this, with # the same constraints as the `tf.stack()` operator, applied to each component # of the elements: i.e. for each component *i*, all elements must have a tensor # of the exact same shape. # + id="xB7KeceLoSH0" inc_dataset = tf.data.Dataset.range(100) dec_dataset = tf.data.Dataset.range(0, -100, -1) dataset = tf.data.Dataset.zip((inc_dataset, dec_dataset)) batched_dataset = dataset.batch(4) for batch in batched_dataset.take(4): print([arr.numpy() for arr in batch]) # + [markdown] id="LlV1tpFdoSH0" # While `tf.data` tries to propagate shape information, the default settings of `Dataset.batch` result in an unknown batch size because the last batch may not be full. 
Note the `None`s in the shape: # + id="yN7hn7OBoSHx" batched_dataset # + [markdown] id="It1fPA3NoSHw" # Use the `drop_remainder` argument to ignore that last batch, and get full shape propagation: # + id="BycWC7WCoSHt" batched_dataset = dataset.batch(7, drop_remainder=True) batched_dataset # + [markdown] id="mj9nRxFZoSHs" # ### Batching tensors with padding # # The above recipe works for tensors that all have the same size. However, many # models (e.g. sequence models) work with input data that can have varying size # (e.g. sequences of different lengths). To handle this case, the # `Dataset.padded_batch` transformation enables you to batch tensors of # different shape by specifying one or more dimensions in which they may be # padded. # + id="kycwO0JooSHn" dataset = tf.data.Dataset.range(100) dataset = dataset.map(lambda x: tf.fill([tf.cast(x, tf.int32)], x)) dataset = dataset.padded_batch(4, padded_shapes=(None,)) for batch in dataset.take(2): print(batch.numpy()) print() # + [markdown] id="wl3yhth1oSHm" # The `Dataset.padded_batch` transformation allows you to set different padding # for each dimension of each component, and it may be variable-length (signified # by `None` in the example above) or constant-length. It is also possible to # override the padding value, which defaults to 0. # # <!-- # TODO(mrry): Add this section. # # ### Dense ragged -> tf.SparseTensor # --> # # + [markdown] id="G8zbAxMwoSHl" # ## Training workflows # # + [markdown] id="UnlhzF_AoSHk" # ### Processing multiple epochs # # The `tf.data` API offers two main ways to process multiple epochs of the same # data. # # The simplest way to iterate over a dataset in multiple epochs is to use the # `Dataset.repeat()` transformation. 
First, create a dataset of titanic data: # + id="0tODHZzRoSHg" titanic_file = tf.keras.utils.get_file("train.csv", "https://storage.googleapis.com/tf-datasets/titanic/train.csv") titanic_lines = tf.data.TextLineDataset(titanic_file) # + id="LMO6mlXxoSHc" def plot_batch_sizes(ds): batch_sizes = [batch.shape[0] for batch in ds] plt.bar(range(len(batch_sizes)), batch_sizes) plt.xlabel('Batch number') plt.ylabel('Batch size') # + [markdown] id="WfVzmqL7oSHa" # Applying the `Dataset.repeat()` transformation with no arguments will repeat # the input indefinitely. # # The `Dataset.repeat` transformation concatenates its # arguments without signaling the end of one epoch and the beginning of the next # epoch. Because of this a `Dataset.batch` applied after `Dataset.repeat` will yield batches that straddle epoch boundaries: # + id="nZ0G1cztoSHX" titanic_batches = titanic_lines.repeat(3).batch(128) plot_batch_sizes(titanic_batches) # + [markdown] id="moH-4gBEoSHW" # If you need clear epoch separation, put `Dataset.batch` before the repeat: # + id="wmbmdK1qoSHS" titanic_batches = titanic_lines.batch(128).repeat(3) plot_batch_sizes(titanic_batches) # + [markdown] id="DlEM5f9loSHR" # If you would like to perform a custom computation (e.g. to collect statistics) at the end of each epoch then it's simplest to restart the dataset iteration on each epoch: # + id="YyekyeY7oSHO" epochs = 3 dataset = titanic_lines.batch(128) for epoch in range(epochs): for batch in dataset: print(batch.shape) print("End of epoch: ", epoch) # + [markdown] id="_Bci79WCoSHN" # ### Randomly shuffling input data # # The `Dataset.shuffle()` transformation maintains a fixed-size # buffer and chooses the next element uniformly at random from that buffer. # # Note: While large buffer_sizes shuffle more thoroughly, they can take a lot of memory, and significant time to fill. Consider using `Dataset.interleave` across files if this becomes a problem. 
# + [markdown] id="6YvXr-qeoSHL" # Add an index to the dataset so you can see the effect: # + id="Io4iJH1toSHI" lines = tf.data.TextLineDataset(titanic_file) counter = tf.data.experimental.Counter() dataset = tf.data.Dataset.zip((counter, lines)) dataset = dataset.shuffle(buffer_size=100) dataset = dataset.batch(20) dataset # + [markdown] id="T6tNYRcsoSHH" # Since the `buffer_size` is 100, and the batch size is 20, the first batch contains no elements with an index over 120. # + id="ayM3FFFAoSHC" n,line_batch = next(iter(dataset)) print(n.numpy()) # + [markdown] id="PLrfIjTHoSHB" # As with `Dataset.batch` the order relative to `Dataset.repeat` matters. # # `Dataset.shuffle` doesn't signal the end of an epoch until the shuffle buffer is empty. So a shuffle placed before a repeat will show every element of one epoch before moving to the next: # + id="YX3pe7zZoSG6" dataset = tf.data.Dataset.zip((counter, lines)) shuffled = dataset.shuffle(buffer_size=100).batch(10).repeat(2) print("Here are the item ID's near the epoch boundary:\n") for n, line_batch in shuffled.skip(60).take(5): print(n.numpy()) # + id="H9hlE-lGoSGz" shuffle_repeat = [n.numpy().mean() for n, line_batch in shuffled] plt.plot(shuffle_repeat, label="shuffle().repeat()") plt.ylabel("Mean item ID") plt.legend() # + [markdown] id="UucIgCxWoSGx" # But a repeat before a shuffle mixes the epoch boundaries together: # + id="Bhxb5YGZoSGm" dataset = tf.data.Dataset.zip((counter, lines)) shuffled = dataset.repeat(2).shuffle(buffer_size=100).batch(10) print("Here are the item ID's near the epoch boundary:\n") for n, line_batch in shuffled.skip(55).take(15): print(n.numpy()) # + id="VAM4cbpZoSGL" repeat_shuffle = [n.numpy().mean() for n, line_batch in shuffled] plt.plot(shuffle_repeat, label="shuffle().repeat()") plt.plot(repeat_shuffle, label="repeat().shuffle()") plt.ylabel("Mean item ID") plt.legend() # + [markdown] id="ianlfbrxbDco" # ## Preprocessing data # # The `Dataset.map(f)` transformation produces a new 
dataset by applying a given # function `f` to each element of the input dataset. It is based on the # [`map()`](https://en.wikipedia.org/wiki/Map_\(higher-order_function\)) function # that is commonly applied to lists (and other structures) in functional # programming languages. The function `f` takes the `tf.Tensor` objects that # represent a single element in the input, and returns the `tf.Tensor` objects # that will represent a single element in the new dataset. Its implementation uses # standard TensorFlow operations to transform one element into another. # # This section covers common examples of how to use `Dataset.map()`. # # + [markdown] id="UXw1IZVdbDcq" # ### Decoding image data and resizing it # # <!-- TODO(markdaoust): link to image augmentation when it exists --> # When training a neural network on real-world image data, it is often necessary # to convert images of different sizes to a common size, so that they may be # batched into a fixed size. # # Rebuild the flower filenames dataset: # + id="rMGlj8V-u-NH" list_ds = tf.data.Dataset.list_files(str(flowers_root/'*/*')) # + [markdown] id="GyhZLB8N5jBm" # Write a function that manipulates the dataset elements. # + id="fZObC0debDcr" # Reads an image from a file, decodes it into a dense tensor, and resizes it # to a fixed shape. def parse_image(filename): parts = tf.strings.split(filename, os.sep) label = parts[-2] image = tf.io.read_file(filename) image = tf.image.decode_jpeg(image) image = tf.image.convert_image_dtype(image, tf.float32) image = tf.image.resize(image, [128, 128]) return image, label # + [markdown] id="e0dVJlCA5qHA" # Test that it works. # + id="y8xuN_HBzGup" file_path = next(iter(list_ds)) image, label = parse_image(file_path) def show(image, label): plt.figure() plt.imshow(image) plt.title(label.numpy().decode('utf-8')) plt.axis('off') show(image, label) # + [markdown] id="d3P8N-S55vDu" # Map it over the dataset. 
# + id="SzO8LI_H5Sk_" images_ds = list_ds.map(parse_image) for image, label in images_ds.take(2): show(image, label) # + [markdown] id="3Ff7IqB9bDcs" # ### Applying arbitrary Python logic # # For performance reasons, use TensorFlow operations for # preprocessing your data whenever possible. However, it is sometimes useful to # call external Python libraries when parsing your input data. You can use the `tf.py_function()` operation in a `Dataset.map()` transformation. # + [markdown] id="R2u7CeA67DU8" # For example, if you want to apply a random rotation, the `tf.image` module only has `tf.image.rot90`, which is not very useful for image augmentation. # # Note: `tensorflow_addons` has a TensorFlow compatible `rotate` in `tensorflow_addons.image.rotate`. # # To demonstrate `tf.py_function`, try using the `scipy.ndimage.rotate` function instead: # + id="tBUmbERt7Czz" import scipy.ndimage as ndimage def random_rotate_image(image): image = ndimage.rotate(image, np.random.uniform(-30, 30), reshape=False) return image # + id="_wEyL7bS9S6t" image, label = next(iter(images_ds)) image = random_rotate_image(image) show(image, label) # + [markdown] id="KxVx7z-ABNyq" # To use this function with `Dataset.map` the same caveats apply as with `Dataset.from_generator`, you need to describe the return shapes and types when you apply the function: # + id="Cn2nIu92BMp0" def tf_random_rotate_image(image, label): im_shape = image.shape [image,] = tf.py_function(random_rotate_image, [image], [tf.float32]) image.set_shape(im_shape) return image, label # + id="bWPqKbTnbDct" rot_ds = images_ds.map(tf_random_rotate_image) for image, label in rot_ds.take(2): show(image, label) # + [markdown] id="ykx59-cMBwOT" # ### Parsing `tf.Example` protocol buffer messages # # Many input pipelines extract `tf.train.Example` protocol buffer messages from a # TFRecord format. 
Each `tf.train.Example` record contains one or more "features", # and the input pipeline typically converts these features into tensors. # + id="6wnE134b32KY" fsns_test_file = tf.keras.utils.get_file("fsns.tfrec", "https://storage.googleapis.com/download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001") dataset = tf.data.TFRecordDataset(filenames = [fsns_test_file]) dataset # + [markdown] id="HGypdgYOlXZz" # You can work with `tf.train.Example` protos outside of a `tf.data.Dataset` to understand the data: # + id="4znsVNqnF73C" raw_example = next(iter(dataset)) parsed = tf.train.Example.FromString(raw_example.numpy()) feature = parsed.features.feature raw_img = feature['image/encoded'].bytes_list.value[0] img = tf.image.decode_png(raw_img) plt.imshow(img) plt.axis('off') _ = plt.title(feature["image/text"].bytes_list.value[0]) # + id="cwzqp8IGC_vQ" raw_example = next(iter(dataset)) # + id="y2X1dQNfC8Lu" def tf_parse(eg): example = tf.io.parse_example( eg[tf.newaxis], { 'image/encoded': tf.io.FixedLenFeature(shape=(), dtype=tf.string), 'image/text': tf.io.FixedLenFeature(shape=(), dtype=tf.string) }) return example['image/encoded'][0], example['image/text'][0] # + id="lGJhKDp_61A_" img, txt = tf_parse(raw_example) print(txt.numpy()) print(repr(img.numpy()[:20]), "...") # + id="8vFIUFzD5qIC" decoded = dataset.map(tf_parse) decoded # + id="vRYNYkEej7Ix" image_batch, text_batch = next(iter(decoded.batch(10))) image_batch.shape # + [markdown] id="ry1n0UBeczit" # <a id="time_series_windowing"></a> # # ### Time series windowing # + [markdown] id="t0JMgvXEz9y1" # For an end to end time series example see: [Time series forecasting](../../tutorials/text/time_series.ipynb). # + [markdown] id="hzBABBkAkkVJ" # Time series data is often organized with the time axis intact. 
# # Use a simple `Dataset.range` to demonstrate: # + id="kTQgo49skjuY" range_ds = tf.data.Dataset.range(100000) # + [markdown] id="o6GLGhxgpazJ" # Typically, models based on this sort of data will want a contiguous time slice. # # The simplest approach would be to batch the data: # + [markdown] id="ETqB7QvTCNty" # #### Using `batch` # + id="pSs9XqwQpvIN" batches = range_ds.batch(10, drop_remainder=True) for batch in batches.take(5): print(batch.numpy()) # + [markdown] id="mgb2qikEtk5W" # Or to make dense predictions one step into the future, you might shift the features and labels by one step relative to each other: # + id="47XfwPhetkIN" def dense_1_step(batch): # Shift features and labels one step relative to each other. return batch[:-1], batch[1:] predict_dense_1_step = batches.map(dense_1_step) for features, label in predict_dense_1_step.take(3): print(features.numpy(), " => ", label.numpy()) # + [markdown] id="DjsXuINKqsS_" # To predict a whole window instead of a fixed offset you can split the batches into two parts: # + id="FMmkQB1Gqo6x" batches = range_ds.batch(15, drop_remainder=True) def label_next_5_steps(batch): return (batch[:-5], # Take the first 5 steps batch[-5:]) # take the remainder predict_5_steps = batches.map(label_next_5_steps) for features, label in predict_5_steps.take(3): print(features.numpy(), " => ", label.numpy()) # + [markdown] id="5a611Qr3jlhl" # To allow some overlap between the features of one batch and the labels of another, use `Dataset.zip`: # + id="11dF3wyFjk2J" feature_length = 10 label_length = 3 features = range_ds.batch(feature_length, drop_remainder=True) labels = range_ds.batch(feature_length).skip(1).map(lambda labels: labels[:label_length]) predicted_steps = tf.data.Dataset.zip((features, labels)) for features, label in predicted_steps.take(5): print(features.numpy(), " => ", label.numpy()) # + [markdown] id="adew3o2mCURC" # #### Using `window` # + [markdown] id="fF6pEdlduq8E" # While using `Dataset.batch` works, there 
are situations where you may need finer control. The `Dataset.window` method gives you complete control, but requires some care: it returns a `Dataset` of `Datasets`. See [Dataset structure](#dataset_structure) for details. # + id="ZEI2W_EBw2OX" window_size = 5 windows = range_ds.window(window_size, shift=1) for sub_ds in windows.take(5): print(sub_ds) # + [markdown] id="r82hWdk4x-46" # The `Dataset.flat_map` method can take a dataset of datasets and flatten it into a single dataset: # + id="SB8AI03mnF8u" for x in windows.flat_map(lambda x: x).take(30): print(x.numpy(), end=' ') # + [markdown] id="sgLIwq9Anc34" # In nearly all cases, you will want to `.batch` the dataset first: # + id="5j_y84rmyVQa" def sub_to_batch(sub): return sub.batch(window_size, drop_remainder=True) for example in windows.flat_map(sub_to_batch).take(5): print(example.numpy()) # + [markdown] id="hVugrmND3Grp" # Now, you can see that the `shift` argument controls how much each window moves over. # # Putting this together you might write this function: # + id="LdFRv_0D4FqW" def make_window_dataset(ds, window_size=5, shift=1, stride=1): windows = ds.window(window_size, shift=shift, stride=stride) def sub_to_batch(sub): return sub.batch(window_size, drop_remainder=True) windows = windows.flat_map(sub_to_batch) return windows # + id="-iVxcVfEdf5b" ds = make_window_dataset(range_ds, window_size=10, shift = 5, stride=3) for example in ds.take(10): print(example.numpy()) # + [markdown] id="fMGMTPQ4w8pr" # Then it's easy to extract labels, as before: # + id="F0fPfZkZw6j_" dense_labels_ds = ds.map(dense_1_step) for inputs,labels in dense_labels_ds.take(3): print(inputs.numpy(), "=>", labels.numpy()) # + [markdown] id="vyi_-ft0kvy4" # ### Resampling # # When working with a dataset that is very class-imbalanced, you may want to resample the dataset. `tf.data` provides two methods to do this. The credit card fraud dataset is a good example of this sort of problem. 
# # Note: See [Imbalanced Data](../tutorials/keras/imbalanced_data.ipynb) for a full tutorial. # # + id="U2e8dxVUlFHO" zip_path = tf.keras.utils.get_file( origin='https://storage.googleapis.com/download.tensorflow.org/data/creditcard.zip', fname='creditcard.zip', extract=True) csv_path = zip_path.replace('.zip', '.csv') # + id="EhkkM4Wx75S_" creditcard_ds = tf.data.experimental.make_csv_dataset( csv_path, batch_size=1024, label_name="Class", # Set the column types: 30 floats and an int. column_defaults=[float()]*30+[int()]) # + [markdown] id="A8O47EmHlxYX" # Now, check the distribution of classes, it is highly skewed: # + id="a8-Ss69XlzXD" def count(counts, batch): features, labels = batch class_1 = labels == 1 class_1 = tf.cast(class_1, tf.int32) class_0 = labels == 0 class_0 = tf.cast(class_0, tf.int32) counts['class_0'] += tf.reduce_sum(class_0) counts['class_1'] += tf.reduce_sum(class_1) return counts # + id="O1a3t_B4l_f6" counts = creditcard_ds.take(10).reduce( initial_state={'class_0': 0, 'class_1': 0}, reduce_func = count) counts = np.array([counts['class_0'].numpy(), counts['class_1'].numpy()]).astype(np.float32) fractions = counts/counts.sum() print(fractions) # + [markdown] id="z1b8lFhSnDdv" # A common approach to training with an imbalanced dataset is to balance it. `tf.data` includes a few methods which enable this workflow: # + [markdown] id="y8jQWsgMnjQG" # #### Datasets sampling # + [markdown] id="ov14SRrQyQE3" # One approach to resampling a dataset is to use `sample_from_datasets`. This is more applicable when you have a separate `data.Dataset` for each class. 
# # Here, just use filter to generate them from the credit card fraud data: # + id="6YKfCPa-nioA" negative_ds = ( creditcard_ds .unbatch() .filter(lambda features, label: label==0) .repeat()) positive_ds = ( creditcard_ds .unbatch() .filter(lambda features, label: label==1) .repeat()) # + id="8FNd3sQjzl9-" for features, label in positive_ds.batch(10).take(1): print(label.numpy()) # + [markdown] id="GxLAr-7p0ATX" # To use `tf.data.experimental.sample_from_datasets` pass the datasets, and the weight for each: # + id="vjdPVIFCngOb" balanced_ds = tf.data.experimental.sample_from_datasets( [negative_ds, positive_ds], [0.5, 0.5]).batch(10) # + [markdown] id="2K4ObOms082B" # Now the dataset produces examples of each class with 50/50 probability: # + id="Myvkw21Rz-fH" for features, labels in balanced_ds.take(10): print(labels.numpy()) # + [markdown] id="OUTE3eb9nckY" # #### Rejection resampling # + [markdown] id="kZ9ezkK6irMD" # One problem with the above `experimental.sample_from_datasets` approach is that # it needs a separate `tf.data.Dataset` per class. Using `Dataset.filter` # works, but results in all the data being loaded twice. # # The `data.experimental.rejection_resample` function can be applied to a dataset to rebalance it, while only loading it once. Elements will be dropped from the dataset to achieve balance. # # `data.experimental.rejection_resample` takes a `class_func` argument. This `class_func` is applied to each dataset element, and is used to determine which class an example belongs to for the purposes of balancing. # # The elements of `creditcard_ds` are already `(features, label)` pairs. 
So the `class_func` just needs to return those labels: # + id="zC_Cuzw8lhI5" def class_func(features, label): return label # + [markdown] id="DdKmE8Jumlp0" # The resampler also needs a target distribution, and optionally an initial distribution estimate: # + id="9tv0tWNxmkzM" resampler = tf.data.experimental.rejection_resample( class_func, target_dist=[0.5, 0.5], initial_dist=fractions) # + [markdown] id="YxJrOZVToGuE" # The resampler deals with individual examples, so you must `unbatch` the dataset before applying the resampler: # + id="fY6VIhr3oGHG" resample_ds = creditcard_ds.unbatch().apply(resampler).batch(10) # + [markdown] id="L-HnC1s8idqV" # The resampler returns creates `(class, example)` pairs from the output of the `class_func`. In this case, the `example` was already a `(feature, label)` pair, so use `map` to drop the extra copy of the labels: # + id="KpfCGU6BiaZq" balanced_ds = resample_ds.map(lambda extra_label, features_and_label: features_and_label) # + [markdown] id="j3d2jyEhx9kD" # Now the dataset produces examples of each class with 50/50 probability: # + id="XGLYChBQwkDV" for features, labels in balanced_ds.take(10): print(labels.numpy()) # + [markdown] id="vYFKQx3bUBeU" # ## Iterator Checkpointing # + [markdown] id="SOGg1UFhUE4z" # Tensorflow supports [taking checkpoints](https://www.tensorflow.org/guide/checkpoint) so that when your training process restarts it can restore the latest checkpoint to recover most of its progress. In addition to checkpointing the model variables, you can also checkpoint the progress of the dataset iterator. This could be useful if you have a large dataset and don't want to start the dataset from the beginning on each restart. Note however that iterator checkpoints may be large, since transformations such as `shuffle` and `prefetch` require buffering elements within the iterator. # # To include your iterator in a checkpoint, pass the iterator to the `tf.train.Checkpoint` constructor. 
# + id="3Fsm9wvKUsNC" range_ds = tf.data.Dataset.range(20) iterator = iter(range_ds) ckpt = tf.train.Checkpoint(step=tf.Variable(0), iterator=iterator) manager = tf.train.CheckpointManager(ckpt, '/tmp/my_ckpt', max_to_keep=3) print([next(iterator).numpy() for _ in range(5)]) save_path = manager.save() print([next(iterator).numpy() for _ in range(5)]) ckpt.restore(manager.latest_checkpoint) print([next(iterator).numpy() for _ in range(5)]) # + [markdown] id="gxWglTwX9Fex" # Note: It is not possible to checkpoint an iterator which relies on external state such as a `tf.py_function`. Attempting to do so will raise an exception complaining about the external state. # + [markdown] id="uLRdedPpbDdD" # ## Using tf.data with tf.keras # + [markdown] id="JTQe8daMcgFz" # The `tf.keras` API simplifies many aspects of creating and executing machine # learning models. Its `.fit()` and `.evaluate()` and `.predict()` APIs support datasets as inputs. Here is a quick dataset and model setup: # + id="-bfjqm0hOfES" train, test = tf.keras.datasets.fashion_mnist.load_data() images, labels = train images = images/255.0 labels = labels.astype(np.int32) # + id="wDhF3rGnbDdD" fmnist_train_ds = tf.data.Dataset.from_tensor_slices((images, labels)) fmnist_train_ds = fmnist_train_ds.shuffle(5000).batch(32) model = tf.keras.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(10) ]) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) # + [markdown] id="Rdogg8CfHs-G" # Passing a dataset of `(feature, label)` pairs is all that's needed for `Model.fit` and `Model.evaluate`: # + id="9cu4kPzOHnlt" model.fit(fmnist_train_ds, epochs=2) # + [markdown] id="FzpAQfJMJF41" # If you pass an infinite dataset, for example by calling `Dataset.repeat()`, you just need to also pass the `steps_per_epoch` argument: # + id="Bp1BpzlyJinb" model.fit(fmnist_train_ds.repeat(), epochs=2, steps_per_epoch=20) # + [markdown] id="iTLsw_nqJpTw" # 
For evaluation you can pass the number of evaluation steps: # + id="TnlRHlaL-XUI" loss, accuracy = model.evaluate(fmnist_train_ds) print("Loss :", loss) print("Accuracy :", accuracy) # + [markdown] id="C8UBU3CJKEA4" # For long datasets, set the number of steps to evaluate: # + id="uVgamf9HKDon" loss, accuracy = model.evaluate(fmnist_train_ds.repeat(), steps=10) print("Loss :", loss) print("Accuracy :", accuracy) # + [markdown] id="aZYhJ_YSIl6w" # The labels are not required in when calling `Model.predict`. # + id="343lXJ-pIqWD" predict_ds = tf.data.Dataset.from_tensor_slices(images).batch(32) result = model.predict(predict_ds, steps = 10) print(result.shape) # + [markdown] id="YfzZORwLI202" # But the labels are ignored if you do pass a dataset containing them: # + id="mgQJTPrT-2WF" result = model.predict(fmnist_train_ds, steps = 10) print(result.shape)
# --- end of notebook: site/en/guide/data.ipynb ---
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Average Precision for test data

# #### Import data

# +
import pandas as pd
import numpy as np
import os
import glob

import matplotlib.pyplot as plt
# %matplotlib inline

pd.options.display.float_format = "{:,.4f}".format
# -

final_dir = "/scratch/jag2j/final_data/"
os.chdir(final_dir)
os.listdir()

result_list = glob.glob('result*.csv')
result_list


def _ranked_types(path):
    """Read one scored-result CSV and pivot it to one row per entity.

    Each row's columns hold that entity's suggested super-types in file
    order; entities with fewer suggestions are padded with NaN by
    ``.apply(pd.Series)``.
    """
    return pd.read_csv(path).groupby('entity')['type'].unique().apply(pd.Series)


# NOTE(review): ``glob.glob`` returns files in an arbitrary,
# filesystem-dependent order, so the index -> metric mapping below silently
# depends on the machine it runs on; inspect ``result_list`` before trusting
# these variable names.
cosine_10plus = _ranked_types(result_list[0])
pen_inner_prod_10plus = _ranked_types(result_list[1])
cosine = _ranked_types(result_list[2])
cosine_wgt_10plus = _ranked_types(result_list[3])
jaccard_wgt = _ranked_types(result_list[4])
innerproduct_wgt_10plus = _ranked_types(result_list[5])
manhattan_wgt = _ranked_types(result_list[6])
innerproduct_wgt = _ranked_types(result_list[7])
innerproduct_10plus = _ranked_types(result_list[8])
jaccard_10plus = _ranked_types(result_list[9])
manhattan_wgt_10plus = _ranked_types(result_list[10])
jaccard = _ranked_types(result_list[11])
cosine_wgt = _ranked_types(result_list[12])
pen_inner_prod = _ranked_types(result_list[13])
manhattan_10plus = _ranked_types(result_list[14])
manhattan = _ranked_types(result_list[15])
pen_inner_prod_wgt = _ranked_types(result_list[16])
jaccard_wgt_10plus = _ranked_types(result_list[17])
pen_inner_prod_wgt_10plus = _ranked_types(result_list[18])
innerproduct = _ranked_types(result_list[19])

# True types dataframe (entities restricted to 10+ properties)
true_type_10 = pd.read_csv('31-test-types_10plus.csv').rename({'type': 'true_type'}, axis=1)  # .set_index('entity')
true_type_10.head()

# True types dataframe (all entities)
true_type = pd.read_csv('31-test-types.csv').rename({'type': 'true_type'}, axis=1)  # .set_index('entity')
true_type.head()

true_type_10 = true_type_10.groupby('entity')['true_type'].unique().apply(pd.Series)
true_type_10

true_type = true_type.groupby('entity')['true_type'].unique().apply(pd.Series)
true_type


def get_avg_precision(entity_df, validation_df, n):
    """Gets the average precision for every entity.

    Keyword arguments:
    entity_df: scored entity -> super-entity dataframe (one suggestion per column)
    validation_df: true entity -> super-entity dataframe
    n: number of super-entity suggestions you are considering

    Returns a dict mapping entity -> average precision over its top-n
    suggestions (NaN when none of them is correct).
    """
    precision_dict = {}
    for ent_index, row in entity_df.iterrows():
        ranked = list(row.values)
        truth = validation_df.loc[ent_index].values
        hits = 0
        precision_sum = 0.0
        # Fix: cap iteration at the number of available suggestions. The
        # original indexed ranked[i] for i in range(n) and raised IndexError
        # whenever the entity frame had fewer than n columns. NaN padding
        # never matches a true type, so skipping it is behavior-preserving.
        for rank, suggestion in enumerate(ranked[:n], start=1):
            if suggestion in truth:
                hits += 1
                precision_sum += hits / rank
        precision_dict[ent_index] = np.nan if hits == 0 else precision_sum / hits
    return precision_dict


# ## 10 plus property test dataset

# ### Cosine 10 plus
avgp_cosine_10 = pd.Series(get_avg_precision(cosine_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_cosine_10.mean()
avgp_cosine_10.fillna(0).mean()

# ### Penalized Inner Product 10 plus
avgp_pip_10 = pd.Series(get_avg_precision(pen_inner_prod_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_pip_10.mean()
avgp_pip_10.fillna(0).mean()

# ### Cosine w/ weights 10 plus
avgp_cosine_wgt_10 = pd.Series(get_avg_precision(cosine_wgt_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_cosine_wgt_10.mean()
avgp_cosine_wgt_10.fillna(0).mean()

# ### Inner Product w/ weights 10 plus
avgp_ip_wgt_10 = pd.Series(get_avg_precision(innerproduct_wgt_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_ip_wgt_10.mean()
avgp_ip_wgt_10.fillna(0).mean()

# ### Inner Product 10 plus
avgp_ip_10 = pd.Series(get_avg_precision(innerproduct_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_ip_10.mean()
avgp_ip_10.fillna(0).mean()

# ### Jaccard 10 plus
avgp_jacc_10 = pd.Series(get_avg_precision(jaccard_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_jacc_10.mean()
avgp_jacc_10.fillna(0).mean()

# ### Manhattan w/ weights 10 plus
avgp_manh_wgt_10 = pd.Series(get_avg_precision(manhattan_wgt_10plus, true_type_10, 10), name='avg_p').to_frame()
avgp_manh_wgt_10.mean()
avgp_manh_wgt_10.fillna(0).mean()
def _avgp_frame(score_df, truth_df, n=10):
    """Per-entity average precision wrapped in a one-column ('avg_p') frame."""
    return pd.Series(get_avg_precision(score_df, truth_df, n), name='avg_p').to_frame()


def _map_table(rows):
    """Build the overall mean-average-precision comparison table.

    rows: iterable of (scoring_name, avgp_frame) pairs.
    MAP1 skips no-hit entities (their AP is NaN); MAP2 counts them as zero.
    Scores are kept numeric (the original ``np.array(...).reshape`` build
    coerced every value to a string).
    """
    data = [(name, float(f['avg_p'].mean()), float(f['avg_p'].fillna(0).mean()))
            for name, f in rows]
    return pd.DataFrame(data, columns=['scoring', 'MAP1', 'MAP2']).set_index('scoring')


def _plot_map_by_count(avgp_df, count_df, fontsize=8):
    """Bar-plot mean average precision grouped by exact property count."""
    meanp = avgp_df.fillna(0).join(count_df).groupby('count')['avg_p'].mean()
    plt.figure(figsize=(25, 10))
    meanp.plot.bar('index', rot=0, fontsize=fontsize, ylim=(0, 1))
    plt.xlabel("Property Count", fontsize=20)
    plt.ylabel("Mean Average Precision", fontsize=20)
    return meanp


def _plot_map_binned(avgp_df, count_df):
    """Bar-plot mean average precision with property counts bucketed."""
    joined = avgp_df.fillna(0).join(count_df)
    joined['bin_count'] = pd.cut(joined['count'],
                                 bins=[0, 2, 4, 6, 8, float("inf")],
                                 labels=["1-2", "3-4", "5-6", "7-8", "9+"])
    meanp = joined.groupby('bin_count')['avg_p'].mean()
    plt.figure(figsize=(25, 10))
    meanp.plot.bar('index', rot=0, fontsize=13, ylim=(0, 1))
    plt.xlabel("Property Count", fontsize=20)
    plt.ylabel("Mean Average Precision", fontsize=20)
    return meanp


# ### Manhattan 10 plus
avgp_manh_10 = _avgp_frame(manhattan_10plus, true_type_10)
avgp_manh_10.mean()
avgp_manh_10.fillna(0).mean()

# ### Jaccard w/ weights 10 plus
avgp_jacc_wgt_10 = _avgp_frame(jaccard_wgt_10plus, true_type_10)
avgp_jacc_wgt_10.mean()
avgp_jacc_wgt_10.fillna(0).mean()

# ### Penalized Inner Product w/ weights 10 plus
avgp_pip_wgt_10 = _avgp_frame(pen_inner_prod_wgt_10plus, true_type_10)
avgp_pip_wgt_10.mean()
avgp_pip_wgt_10.fillna(0).mean()

# (Label-based lookup; the original ``.mean()[0]`` positional access on a
# Series is deprecated in modern pandas.)
avgp_pip_wgt_10['avg_p'].mean()

# ### Overall Scores
_map_table([
    ('cosine', avgp_cosine_10),
    ('pen_inner_prod', avgp_pip_10),
    ('cosine_wgt', avgp_cosine_wgt_10),
    ('innerprod_wgt', avgp_ip_wgt_10),
    ('innerproduct', avgp_ip_10),
    ('jaccard', avgp_jacc_10),
    ('manhattan_wgt', avgp_manh_wgt_10),
    ('manhattan', avgp_manh_10),
    ('jaccard_wgt', avgp_jacc_wgt_10),
    ('pen_inner_prod_wgt', avgp_pip_wgt_10),
])

# ## Any property count test dataset

# ### Cosine
avgp_cosine = _avgp_frame(cosine, true_type)
avgp_cosine.mean()
avgp_cosine.fillna(0).mean()

# ### Jaccard w/ weights
avgp_jacc_wgt = _avgp_frame(jaccard_wgt, true_type)
avgp_jacc_wgt.mean()
avgp_jacc_wgt.fillna(0).mean()

# ### Manhattan w/ weights
avgp_manh_wgt = _avgp_frame(manhattan_wgt, true_type)
avgp_manh_wgt.mean()
avgp_manh_wgt.fillna(0).mean()

# ### Inner Product w/ weights
avgp_ip_wgt = _avgp_frame(innerproduct_wgt, true_type)
avgp_ip_wgt.mean()
avgp_ip_wgt.fillna(0).mean()

# ### Jaccard
avgp_jacc = _avgp_frame(jaccard, true_type)
avgp_jacc.mean()
avgp_jacc.fillna(0).mean()

# ### Cosine w/ weights
avgp_cosine_wgt = _avgp_frame(cosine_wgt, true_type)
avgp_cosine_wgt.mean()
avgp_cosine_wgt.fillna(0).mean()

# ### Penalized Inner Product
avgp_pip = _avgp_frame(pen_inner_prod, true_type)
avgp_pip.mean()
avgp_pip.fillna(0).mean()

# ### Manhattan
avgp_manh = _avgp_frame(manhattan, true_type)
avgp_manh.mean()
avgp_manh.fillna(0).mean()

# ### Penalized Inner Product w/ weights
avgp_pip_wgt = _avgp_frame(pen_inner_prod_wgt, true_type)
avgp_pip_wgt.mean()
avgp_pip_wgt.fillna(0).mean()

# ### Inner Product
avgp_ip = _avgp_frame(innerproduct, true_type)
avgp_ip.mean()
avgp_ip.fillna(0).mean()

# ### Overall Scores
_map_table([
    ('cosine', avgp_cosine),
    ('pen_inner_prod', avgp_pip),
    ('cosine_wgt', avgp_cosine_wgt),
    ('innerprod_wgt', avgp_ip_wgt),
    ('innerproduct', avgp_ip),
    ('jaccard', avgp_jacc),
    ('manhattan_wgt', avgp_manh_wgt),
    ('manhattan', avgp_manh),
    ('jaccard_wgt', avgp_jacc_wgt),
    ('pen_inner_prod_wgt', avgp_pip_wgt),
])

# ### Plot the Average Precision vs Property Count

# #### Import data
gav_dir = '/sfs/lustre/bahamut/scratch/gtw4vx/'
os.chdir(gav_dir)
os.listdir()

# Per-entity property counts for the two test sets.
ep_count = pd.read_csv('ent_prop-result.csv').set_index('entity')
ep_count_10plus = pd.read_csv('ent10_prop-result.csv').set_index('entity')
ep_count.head()
ep_count_10plus.head()

# ## 10 plus property test dataset

# ### Cosine 10 plus
cosine10_meanp = _plot_map_by_count(avgp_cosine_10, ep_count_10plus)

# ### Penalized Inner Product 10 plus
pip10_meanp = _plot_map_by_count(avgp_pip_10, ep_count_10plus)

# ### Cosine w/ weights 10 plus
cosine_wgt10_meanp = _plot_map_by_count(avgp_cosine_wgt_10, ep_count_10plus)

# ### Inner Product w/ weights 10 plus
ip_wgt10_meanp = _plot_map_by_count(avgp_ip_wgt_10, ep_count_10plus)

# ### Inner Product 10 plus
ip10_meanp = _plot_map_by_count(avgp_ip_10, ep_count_10plus)

# ### Jaccard 10 plus
jacc10_meanp = _plot_map_by_count(avgp_jacc_10, ep_count_10plus)

# ### Jaccard w/ weights 10 plus
jacc_wgt10_meanp = _plot_map_by_count(avgp_jacc_wgt_10, ep_count_10plus)

# ### Penalized Inner Product w/ weights 10 plus
pip_wgt10_meanp = _plot_map_by_count(avgp_pip_wgt_10, ep_count_10plus)

# ## Any property count test dataset

# ### Cosine
cosine_meanp = _plot_map_by_count(avgp_cosine, ep_count)
cosinejoin_meanp = _plot_map_binned(avgp_cosine, ep_count)
cosinejoin_meanp

# ### Jaccard w/ weights
jacc_wgt_meanp = _plot_map_by_count(avgp_jacc_wgt, ep_count)
jacc_wgtjoin_meanp = _plot_map_binned(avgp_jacc_wgt, ep_count)
jacc_wgtjoin_meanp

# ### Inner Product w/ weights
ip_wgt_meanp = _plot_map_by_count(avgp_ip_wgt, ep_count)
ip_wgtjoin_meanp = _plot_map_binned(avgp_ip_wgt, ep_count)
ip_wgtjoin_meanp

# ### Jaccard
jacc_meanp = _plot_map_by_count(avgp_jacc, ep_count)
jaccjoin_meanp = _plot_map_binned(avgp_jacc, ep_count)
jaccjoin_meanp

# ### Cosine w/ weights
cosine_wgt_meanp = _plot_map_by_count(avgp_cosine_wgt, ep_count)
cosine_wgtjoin_meanp = _plot_map_binned(avgp_cosine_wgt, ep_count)
cosine_wgtjoin_meanp

# ### Penalized Inner Product
pip_meanp = _plot_map_by_count(avgp_pip, ep_count)
pipjoin_meanp = _plot_map_binned(avgp_pip, ep_count)
pipjoin_meanp

# ### Penalized Inner Product w/ weights
pip_wgt_meanp = _plot_map_by_count(avgp_pip_wgt, ep_count)
pip_wgtjoin_meanp = _plot_map_binned(avgp_pip_wgt, ep_count)
pip_wgtjoin_meanp

# ### Inner Product
ip_meanp = _plot_map_by_count(avgp_ip, ep_count)
ipjoin_meanp = _plot_map_binned(avgp_ip, ep_count)
ipjoin_meanp
400 Compute precision.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # NNI HPO Quickstart with PyTorch # This tutorial optimizes the model in `official PyTorch quickstart`_ with auto-tuning. # # There is also a :doc:`TensorFlow version<../hpo_quickstart_tensorflow/main>` if you prefer it. # # The tutorial consists of 4 steps: # # 1. Modify the model for auto-tuning. # 2. Define hyperparameters' search space. # 3. Configure the experiment. # 4. Run the experiment. # # # ## Step 1: Prepare the model # In first step, we need to prepare the model to be tuned. # # The model should be put in a separate script. # It will be evaluated many times concurrently, # and possibly will be trained on distributed platforms. # # In this tutorial, the model is defined in :doc:`model.py <model>`. # # In short, it is a PyTorch model with 3 additional API calls: # # 1. Use :func:`nni.get_next_parameter` to fetch the hyperparameters to be evalutated. # 2. Use :func:`nni.report_intermediate_result` to report per-epoch accuracy metrics. # 3. Use :func:`nni.report_final_result` to report final accuracy. # # Please understand the model code before continue to next step. # # # ## Step 2: Define search space # In model code, we have prepared 3 hyperparameters to be tuned: # *features*, *lr*, and *momentum*. # # Here we need to define their *search space* so the tuning algorithm can sample them in desired range. # # Assuming we have following prior knowledge for these hyperparameters: # # 1. *features* should be one of 128, 256, 512, 1024. # 2. *lr* should be a float between 0.0001 and 0.1, and it follows exponential distribution. # 3. *momentum* should be a float between 0 and 1. 
# # In NNI, the space of *features* is called ``choice``; # the space of *lr* is called ``loguniform``; # and the space of *momentum* is called ``uniform``. # You may have noticed, these names are derived from ``numpy.random``. # # For full specification of search space, check :doc:`the reference </hpo/search_space>`. # # Now we can define the search space as follow: # # search_space = { 'features': {'_type': 'choice', '_value': [128, 256, 512, 1024]}, 'lr': {'_type': 'loguniform', '_value': [0.0001, 0.1]}, 'momentum': {'_type': 'uniform', '_value': [0, 1]}, } # ## Step 3: Configure the experiment # NNI uses an *experiment* to manage the HPO process. # The *experiment config* defines how to train the models and how to explore the search space. # # In this tutorial we use a *local* mode experiment, # which means models will be trained on local machine, without using any special training platform. # # from nni.experiment import Experiment experiment = Experiment('local') # Now we start to configure the experiment. # # ### Configure trial code # In NNI evaluation of each hyperparameter set is called a *trial*. # So the model script is called *trial code*. # # experiment.config.trial_command = 'python model.py' experiment.config.trial_code_directory = '.' # When ``trial_code_directory`` is a relative path, it relates to current working directory. # To run ``main.py`` in a different path, you can set trial code directory to ``Path(__file__).parent``. # (`__file__ <https://docs.python.org/3.10/reference/datamodel.html#index-43>`__ # is only available in standard Python, not in Jupyter Notebook.) # # .. attention:: # # If you are using Linux system without Conda, # you may need to change ``"python model.py"`` to ``"python3 model.py"``. # # # ### Configure search space # # experiment.config.search_space = search_space # ### Configure tuning algorithm # Here we use :doc:`TPE tuner </hpo/tuners>`. 
# # experiment.config.tuner.name = 'TPE' experiment.config.tuner.class_args['optimize_mode'] = 'maximize' # ### Configure how many trials to run # Here we evaluate 10 sets of hyperparameters in total, and concurrently evaluate 2 sets at a time. # # experiment.config.max_trial_number = 10 experiment.config.trial_concurrency = 2 # <div class="alert alert-info"><h4>Note</h4><p>``max_trial_number`` is set to 10 here for a fast example. # In real world it should be set to a larger number. # With default config TPE tuner requires 20 trials to warm up.</p></div> # # You may also set ``max_experiment_duration = '1h'`` to limit running time. # # If neither ``max_trial_number`` nor ``max_experiment_duration`` are set, # the experiment will run forever until you press Ctrl-C. # # # ## Step 4: Run the experiment # Now the experiment is ready. Choose a port and launch it. (Here we use port 8080.) # # You can use the web portal to view experiment status: http://localhost:8080. # # experiment.run(8080) # ## After the experiment is done # Everything is done and it is safe to exit now. The following are optional. # # If you are using standard Python instead of Jupyter Notebook, # you can add ``input()`` or ``signal.pause()`` to prevent Python from exiting, # allowing you to view the web portal after the experiment is done. # # # input('Press enter to quit') experiment.stop() # :meth:`nni.experiment.Experiment.stop` is automatically invoked when Python exits, # so it can be omitted in your code. # # After the experiment is stopped, you can run :meth:`nni.experiment.Experiment.view` to restart web portal. # # .. tip:: # # This example uses :doc:`Python API </reference/experiment>` to create experiment. # # You can also create and manage experiments with :doc:`command line tool </reference/nnictl>`. # #
docs/source/tutorials/hpo_quickstart_pytorch/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="../../../../images/qiskit_header.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" align="middle"> # # _*Qiskit Aqua: Vehicle Routing*_ # # The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorials. # # *** # ### Contributors # <NAME><sup>[1]</sup>, <NAME><sup>[1]</sup>, <NAME><sup>[1]</sup> # # ### Affiliation # - <sup>[1]</sup>IBMQ # # ## The Introduction # # Logistics is a major industry, with some estimates valuing it at USD 8183 billion globally in 2015. Most service providers operate a number of vehicles (e.g., trucks and container ships), a number of depots, where the vehicles are based overnight, and serve a number of client locations with each vehicle during each day. There are many optimisation and control problems that consider these parameters. Computationally, the key challenge is how to design routes from depots to a number of client locations and back to the depot, so as to minimise vehicle-miles travelled, time spent, or similar objective functions. In this notebook we formalise an idealised version of the problem and showcase its solution using the quantum approximate optimization approach of Farhi, Goldstone, and Gutman (2014). # # The overall workflow we demonstrate comprises: # # 1. establish the client locations. Normally, these would be available ahead of the day of deliveries from a database. In our use case, we generate these randomly. # # 3. compute the pair-wise distances, travel times, or similar. In our case, we consider the Euclidean distance, "as the crow flies", which is perhaps the simplest possible. # # 4. compute the actual routes. This step is run twice, actually. 
First, we obtain a reference value by a run of a classical solver (IBM CPLEX) on the classical computer. Second, we run an alternative, hybrid algorithm partly on the quantum computer. # # 5. visualisation of the results. In our case, this is again a simplistic plot. # # In the following, we first explain the model, before we proceed with the installation of the pre-requisites and the data loading. # # ## The Model # # Mathematically speaking, the vehicle routing problem (VRP) is a combinatorial problem, wherein the best routes from a depot to a number of clients and back to the depot are sought, given a number of available vehicles. There are a number of formulations possible, extending a number of formulations of the travelling salesman problem [Applegate et al, 2006]. Here, we present a formulation known as MTZ [<NAME>, 1960]. # # Let $n$ be the number of clients (indexed as $1,\dots,n$), and $K$ be the number of available vehicles. Let $x_{ij} = \{0,1\}$ be the binary decision variable which, if it is $1$, activates the segment from node $i$ to node $j$. The node index runs from $0$ to $n$, where $0$ is (by convention) the depot. There are twice as many distinct decision variables as edges. For example, in a fully connected graph, there are $n(n+1)$ binary decision variables. # # If two nodes $i$ and $j$ have a link from $i$ to $j$, we write $i \sim j$. We also denote with $\delta(i)^+$ the set of nodes to which $i$ has a link, i.e., $j \in \delta(i)^+$ if and only if $i \sim j$. Similarly, we denote with # $\delta(i)^-$ the set of nodes which are connected to $i$, in the sense that $j \in \delta(i)^-$ if and only if $j \sim i$. # # In addition, we consider continuous variables, for all nodes $i = 1,\dots, n$, denoted $u_i$. These variables are needed in the MTZ formulation of the problem to eliminate sub-tours between clients. 
# # The VRP can be formulated as: # # $$ # (VRP) \quad f = \min_{\{x_{ij}\}_{i\sim j}\in \{0,1\}, \{u_i\}_{i=1,\dots,n}\in \mathbb{R}} \quad \sum_{i \sim j} w_{ij} x_{ij} # $$ # # subject to the node-visiting constraint: # # $$ # \sum_{j \in \delta(i)^+} x_{ij} = 1, \,\sum_{j \in \delta(i)^-} x_{ji} = 1,\, \forall i \in \{1,\dots,n\}, # $$ # # the depot-visiting constraints: # # $$ # \sum_{i \in \delta(0)^+} x_{0i} = K, \, \sum_{j \in \delta(0)^+} x_{j0} = K, # $$ # # and the sub-tour elimination constraints: # # $$ # u_i - u_j + Q x_{ij} \leq Q-q_j, \, \forall i \sim j, \,i ,j \neq 0, \quad q_i \leq u_i \leq Q,\, \forall i, i \neq 0. # $$ # # In particular, # - The cost function is linear in the cost functions and weighs the different arches based on a positive weight $w_{ij}>0$ (typically the distance between node $i$ and node $j$); # - The first set of constraints enforce that from and to every client, only one link is allowed; # - The second set of constraints enforce that from and to the depot, exactly $K$ links are allowed; # - The third set of constraints enforce the sub-tour elimination constraints and are bounds on $u_i$, with $Q>q_j>0$, and $Q,q_i \in \mathbb{R}$. # # # ## Classical solution # # We can solve the VRP classically, e.g., by using CPLEX. CPLEX uses a branch-and-bound-and-cut method to find an approximate solution of the VRP, which, in this formulation, is a mixed-integer linear program (MILP). For the sake of notation, we pack the decision variables in one vector as # # $$ # {\bf z} = [x_{01},x_{02},\ldots,x_{10}, x_{12},\ldots,x_{n(n-1)}]^T, # $$ # # wherein ${\bf z} \in \{0,1\}^N$, with $N = n (n+1)$. So the dimension of the problem scales quadratically with the number of nodes. Let us denote the optimal solution by ${\bf z}^*$, and the associated optimal cost $f^*$. 
# # # ## Quantum solution # # Here, we demonstrate an approach that combines classical and quantum computing steps, following the quantum approximate optimization approach of Farhi, Goldstone, and Gutman (2014). In particular, we use the variational quantum eigensolver (VQE). We stress that given the use of limited depth of the quantum circuits employed (variational forms), it is hard to discuss the speed-up of the algorithm, as the solution obtained is heuristic in nature. At the same time, due to the nature and importance of the target problems, it is worth investigating heuristic approaches, which may be worthwhile for some problem classes. # # Following [5], the algorithm can be summarised as follows: # - Preparation steps: # - Transform the combinatorial problem into a binary polynomial optimization problem with equality constraints only; # - Map the resulting problem into an Ising Hamiltonian ($H$) for variables ${\bf z}$ and basis $Z$, via penalty methods if necessary; # - Choose the depth of the quantum circuit $m$. Note that the depth can be modified adaptively. # - Choose a set of controls $\theta$ and make a trial function $\big|\psi(\boldsymbol\theta)\rangle$, built using a quantum circuit made of C-Phase gates and single-qubit Y rotations, parameterized by the components of $\boldsymbol\theta$. # # # - Algorithm steps: # - Evaluate $C(\boldsymbol\theta) = \langle\psi(\boldsymbol\theta)\big|H\big|\psi(\boldsymbol\theta)\rangle$ by sampling the outcome of the circuit in the Z-basis and adding the expectation values of the individual Ising terms together. In general, different control points around $\boldsymbol\theta$ have to be estimated, depending on the classical optimizer chosen. # - Use a classical optimizer to choose a new set of controls. # - Continue until $C(\boldsymbol\theta)$ reaches a minimum, close enough to the solution $\boldsymbol\theta^*$. 
# - Use the last $\boldsymbol\theta$ to generate a final set of samples from the distribution $\Big|\langle z_i\big|\psi(\boldsymbol\theta)\rangle\Big|^2\;\forall i$ to obtain the answer. # # # There are many parameters throughout, notably the choice of the trial wavefunction. Below, we consider: # # $$ # \big|\psi(\theta)\rangle = [U_\mathrm{single}(\boldsymbol\theta) U_\mathrm{entangler}]^m \big|+\rangle # $$ # # where $U_\mathrm{entangler}$ is a collection of C-Phase gates (fully-entangling gates), and $U_\mathrm{single}(\theta) = \prod_{i=1}^N Y(\theta_{i})$, where $N$ is the number of qubits and $m$ is the depth of the quantum circuit. # # # ### Construct the Ising Hamiltonian # # From $VRP$ one can construct a binary polynomial optimization with equality constraints only by considering cases in which $K=n-1$. In these cases the sub-tour elimination constraints are not necessary and the problem is only on the variable ${\bf z}$. In particular, we can write an augmented Lagrangian as # # $$ # (IH) \quad H = \sum_{i \sim j} w_{ij} x_{ij} + A \sum_{i \in \{1,\dots,n\}} \Big(\sum_{j \in \delta(i)^+} x_{ij} - 1\Big)^2 + A \sum_{i \in \{1,\dots,n\}}\Big(\sum_{j \in \delta(i)^-} x_{ji} - 1\Big)^2 +A \Big(\sum_{i \in \delta(0)^+} x_{0i} - K\Big)^2 + A\Big(\sum_{j \in \delta(0)^+} x_{j0} - K\Big)^2 # $$ # # where $A$ is a big enough parameter. # # ### From Hamiltonian to QP formulation # # In the vector ${\bf z}$, and for a complete graph ($\delta(i)^+ = \delta(i)^- = \{0,1,\dots,i-1,i+1,\dots,n\}$), $H$ can be written as follows. # # $$ # \min_{{\bf z}\in \{0,1\}^{n(n+1)}} {\bf w}^T {\bf z} + A \sum_{i \in \{1,\dots,n\}} \Big({\bf e}_i \otimes {\bf 1}_n^T {\bf z} - 1\Big)^2 + A \sum_{i \in \{1,\dots,n\}}\Big({\bf v}_i^T {\bf z} - 1\Big)^2 + A \Big(({\bf e}_0 \otimes {\bf 1}_n)^T{\bf z} - K\Big)^2 + A\Big({\bf v}_0^T{\bf z} - K\Big)^2. 
# $$ # # That is: # # $$ # \min_{\bf z\in \{0,1\}^{n(n+1)}} \bf z^T {\bf Q} \bf z + {\bf g}^T \bf z + c, # $$ # # Where: first term: # # $$ # {\bf Q} = A \sum_{i \in \{0,1,\dots,n\}} \Big[({\bf e}_i \otimes {\bf 1}_n)({\bf e}_i \otimes {\bf 1}_n)^T + {\bf v}_i{\bf v}_i^T \Big] # $$ # # Second term: # # $$ # {\bf g} = {\bf w} -2 A \sum_{i \in \{1,\dots,n\}} \Big[({\bf e}_i \otimes {\bf 1}_n) + {\bf v}_i \Big] -2 A K \Big[({\bf e}_0 \otimes {\bf 1}_n) + {\bf v}_0 \Big] # $$ # # Third term: # # $$ # c = 2An +2AK^2. # $$ # # The QP formulation of the Ising Hamiltonian is ready for the use of VQE. # # # # ## References # # [1] <NAME>, <NAME>, <NAME> e-print arXiv 1411.4028, 2014 # # [2] https://github.com/Qiskit/qiskit-tutorial/blob/master/qiskit/aqua/optimization/maxcut_and_tsp.ipynb # # [3] <NAME>, <NAME>, and <NAME> (1960). "Integer Programming Formulations and Travelling Salesman Problems". J. ACM. 7: 326–329. doi:10.1145/321043.321046. # # [4] <NAME>, <NAME>, <NAME>, and <NAME> (2006). The Traveling Salesman Problem. Princeton University Press, ISBN 978-0-691-12993-8. # ## Initialization # # First of all we load all the packages that we need: # - Python 3.6 or greater is required; # - CPLEX 12.8 or greater is required for the classical computations; # - Latest Qiskit is required for the quantum computations. 
# +
# Load the packages that are required
import numpy as np
import operator
import matplotlib.pyplot as plt
import sys
if sys.version_info < (3, 6):
    raise Exception('Please use Python version 3.6 or greater.')
try:
    import cplex
    from cplex.exceptions import CplexError
# NOTE(review): narrowed from a bare `except:` — only a failed import should
# trigger the fallback; a bare except would also swallow KeyboardInterrupt etc.
except ImportError:
    print("Warning: Cplex not found.")
import math

# Qiskit packages
from qiskit.quantum_info import Pauli
from qiskit.aqua.input import EnergyInput
from qiskit.aqua import run_algorithm
from qiskit.aqua.operators import WeightedPauliOperator

# setup aqua logging
import logging
from qiskit.aqua._logging import set_logging_config, build_logging_config
#set_logging_config(build_logging_config(logging.DEBUG))  # choose INFO, DEBUG to see the log
# -

# We then initialize the variables

# Initialize the problem by defining the parameters
n = 3  # number of nodes + depot (n+1)
K = 2  # number of vehicles

# We define an initializer class that randomly places the nodes in a 2-D plane and computes the distance between them.

# Get the data
class Initializer():
    """Random VRP instance generator.

    Places ``n`` nodes uniformly at random in the square [-5, 5] x [-5, 5]
    and builds the symmetric matrix of squared Euclidean distances between
    them (zero diagonal).
    """

    def __init__(self, n):
        self.n = n  # total number of nodes, including the depot (node 0)

    def generate_instance(self):
        """Return ``(xc, yc, instance)``: node coordinates and the n x n
        symmetric cost matrix."""
        n = self.n

        # Fixed seed so the notebook is reproducible from run to run.
        # np.random.seed(33)
        np.random.seed(1543)

        xc = (np.random.rand(n) - 0.5) * 10
        yc = (np.random.rand(n) - 0.5) * 10

        instance = np.zeros([n, n])
        for ii in range(0, n):
            for jj in range(ii + 1, n):
                # Squared Euclidean distance, mirrored to keep the matrix symmetric.
                instance[ii, jj] = (xc[ii] - xc[jj]) ** 2 + (yc[ii] - yc[jj]) ** 2
                instance[jj, ii] = instance[ii, jj]

        return xc, yc, instance

# Initialize the problem by randomly generating the instance
initializer = Initializer(n)
xc, yc, instance = initializer.generate_instance()

# ## Classical solution using IBM ILOG CPLEX
#
# For a classical solution, we use IBM ILOG CPLEX. CPLEX is able to find the exact solution of this problem. We first define a ClassicalOptimizer class that encodes the problem in a way that CPLEX can solve, and then instantiate the class and solve it.
#
class ClassicalOptimizer:
    """Exact (classical) VRP solver built on the CPLEX MILP engine.

    Encodes the vehicle-routing problem with ``n`` nodes (node 0 is the
    depot) and ``K`` vehicles as a mixed-integer program with MTZ-style
    sub-tour elimination variables (cf. reference [3]) and solves it.
    """

    def __init__(self, instance, n, K):
        self.instance = instance  # n x n cost matrix
        self.n = n  # number of nodes
        self.K = K  # number of vehicles

    def compute_allowed_combinations(self):
        """Return the binomial coefficient C(n, K) = n! / (K! (n-K)!)."""
        f = math.factorial
        return f(self.n) / f(self.K) / f(self.n - self.K)

    def cplex_solution(self):
        """Solve the MILP with CPLEX.

        Returns ``(x, cost)`` where ``x`` is the flat variable vector
        (n^2 binary arc variables followed by n-1 continuous MTZ
        variables) and ``cost`` is the optimal objective value.
        Returns ``None`` (implicitly) if CPLEX raises an error.
        """
        # refactoring
        instance = self.instance
        n = self.n
        K = self.K

        # Objective: arc costs for the n^2 binary variables, zero for the
        # n-1 continuous sub-tour-elimination (MTZ) variables.
        my_obj = list(instance.reshape(1, n**2)[0]) + [0. for x in range(0, n - 1)]
        my_ub = [1 for x in range(0, n**2 + n - 1)]
        my_lb = [0 for x in range(0, n**2)] + [0.1 for x in range(0, n - 1)]
        my_ctype = "".join(['I' for x in range(0, n**2)]) + "".join(['C' for x in range(0, n - 1)])

        # RHS: degree equalities (depot degree K, other nodes 1, for both
        # outgoing and incoming arcs), MTZ inequalities, zero self-loops.
        my_rhs = 2 * ([K] + [1 for x in range(0, n - 1)]) + [1 - 0.1 for x in range(0, (n - 1)**2 - (n - 1))] + [0 for x in range(0, n)]
        my_sense = "".join(['E' for x in range(0, 2 * n)]) + "".join(['L' for x in range(0, (n - 1)**2 - (n - 1))]) + "".join(['E' for x in range(0, n)])

        try:
            my_prob = cplex.Cplex()
            self.populatebyrow(my_prob, my_obj, my_ub, my_lb, my_ctype, my_sense, my_rhs)
            my_prob.solve()
        except CplexError as exc:
            print(exc)
            return

        x = my_prob.solution.get_values()
        x = np.array(x)
        cost = my_prob.solution.get_objective_value()

        return x, cost

    def populatebyrow(self, prob, my_obj, my_ub, my_lb, my_ctype, my_sense, my_rhs):
        """Fill a ``cplex.Cplex`` model with the VRP variables and constraints."""
        n = self.n

        prob.objective.set_sense(prob.objective.sense.minimize)
        prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub, types=my_ctype)

        # Silence all CPLEX console output.
        prob.set_log_stream(None)
        prob.set_error_stream(None)
        prob.set_warning_stream(None)
        prob.set_results_stream(None)

        rows = []
        # Out-degree constraints: sum of outgoing arcs of each node.
        for ii in range(0, n):
            col = [x for x in range(0 + n * ii, n + n * ii)]
            coef = [1 for x in range(0, n)]
            rows.append([col, coef])

        # In-degree constraints: sum of incoming arcs of each node.
        for ii in range(0, n):
            col = [x for x in range(0 + ii, n**2, n)]
            coef = [1 for x in range(0, n)]
            rows.append([col, coef])

        # Sub-tour elimination constraints:
        for ii in range(0, n):
            for jj in range(0, n):
                if (ii != jj) and (ii * jj > 0):
                    col = [ii + (jj * n), n**2 + ii - 1, n**2 + jj - 1]
                    coef = [1, 1, -1]
                    rows.append([col, coef])

        # Forbid self-loops: the diagonal variables x_ii are forced to 0.
        for ii in range(0, n):
            col = [(ii) * (n + 1)]
            coef = [1]
            rows.append([col, coef])

        prob.linear_constraints.add(lin_expr=rows, senses=my_sense, rhs=my_rhs)

# +
# Instantiate the classical optimizer class
classical_optimizer = ClassicalOptimizer(instance, n, K)

# Print number of feasible solutions
print('Number of feasible solutions = ' + str(classical_optimizer.compute_allowed_combinations()))
# -

# Solve the problem in a classical fashion via CPLEX
x = None
z = None
try:
    x, classical_cost = classical_optimizer.cplex_solution()

    # Put the solution in the z variable (dropping the diagonal self-loop entries)
    z = [x[ii] for ii in range(n**2) if ii // n != ii % n]
    # Print the solution
    print(z)
# NOTE(review): narrowed from a bare `except:`. This branch fires when the
# cplex module is absent (NameError inside cplex_solution) or when
# cplex_solution() returned None and the tuple unpacking failed (TypeError).
except Exception:
    print("CPLEX may be missing.")

# +
# Visualize the solution
def visualize_solution(xc, yc, x, C, n, K, title_str):
    """Plot the nodes and the arcs selected by a solution vector.

    xc, yc : node coordinates; x : flat arc-variable vector where
    x[i*n + j] > 0 means arc i->j is used; C : solution cost, shown
    (truncated to two decimals) in the plot title.
    """
    plt.figure()
    plt.scatter(xc, yc, s=200)
    for i in range(len(xc)):
        plt.annotate(i, (xc[i] + 0.15, yc[i]), size=16, color='r')
    plt.plot(xc[0], yc[0], 'r*', ms=20)  # the depot is marked with a star

    plt.grid()

    for ii in range(0, n ** 2):
        if x[ii] > 0:
            ix = ii // n
            iy = ii % n
            plt.arrow(xc[ix], yc[ix], xc[iy] - xc[ix], yc[iy] - yc[ix], length_includes_head=True, head_width=.25)

    plt.title(title_str + ' cost = ' + str(int(C * 100) / 100.))
    plt.show()

# BUGFIX: `if x:` raises "the truth value of an array with more than one
# element is ambiguous" whenever CPLEX produced a (numpy array) solution;
# test explicitly against None instead.
if x is not None:
    visualize_solution(xc, yc, x, classical_cost, n, K, 'Classical')
# -

# If you have CPLEX, the solution shows the depot with a star and the selected routes for the vehicles with arrows.

# ## Quantum solution from the ground up
#
# For the quantum solution, we use Qiskit.
#
# First, we derive the solution from the ground up, using a class QuantumOptimizer that encodes the quantum approach to solve the problem and then we instantiate it and solve it.
# We define the following methods inside the class:
# - `binary_representation` : encodes the problem $(M)$ into the Ising Hamiltonian QP (that's basically linear algebra);
# - `construct_hamiltonian` : constructs the Ising Hamiltonian in terms of the $Z$ basis;
# - `check_hamiltonian` : makes sure that the Ising Hamiltonian is correctly encoded in the $Z$ basis: to do this, it solves an eigenvalue-eigenvector problem for a symmetric matrix of dimension $2^N \times 2^N$. For the problem at hand $n=3$, that is $N = 12$ seems the limit;
# - `vqe_solution` : solves the problem $(M)$ via VQE by using the SPSA solver (with default parameters);
# - `_q_solution` : internal routine to represent the solution in a usable format.

#
class QuantumOptimizer:
    """From-scratch quantum VRP solver.

    Builds the QP form (Q, g, c) of the augmented-Lagrangian Hamiltonian
    described above, maps it to a Pauli-Z Ising operator, and solves it
    either exactly (ExactEigensolver) or variationally (VQE + SPSA).
    """

    def __init__(self, instance, n, K, max_trials=1000):
        # instance: n x n cost matrix; n: nodes incl. depot; K: vehicles;
        # max_trials: SPSA iteration budget used by vqe_solution().
        self.instance = instance
        self.n = n
        self.K = K
        self.max_trials = max_trials

    def binary_representation(self,x_sol=0):
        """Return (Q, g, c, cost): the QP matrices of the Hamiltonian and,
        if a binary solution vector ``x_sol`` is supplied, its QP cost."""
        instance = self.instance
        n = self.n
        K = self.K

        A = np.max(instance) * 100  # A parameter of cost function

        # Determine the weights w (the off-diagonal costs, flattened)
        instance_vec = instance.reshape(n ** 2)
        w_list = [instance_vec[x] for x in range(n ** 2) if instance_vec[x] > 0]
        w = np.zeros(n * (n - 1))
        for ii in range(len(w_list)):
            w[ii] = w_list[ii]

        # Some variables I will use
        Id_n = np.eye(n)
        Im_n_1 = np.ones([n - 1, n - 1])
        Iv_n_1 = np.ones(n)
        Iv_n_1[0] = 0
        Iv_n = np.ones(n-1)
        neg_Iv_n_1 = np.ones(n) - Iv_n_1

        # v[i] selects the incoming-arc variables of node i
        # (the {\bf v}_i vectors of the markdown derivation).
        v = np.zeros([n, n*(n-1)])
        for ii in range(n):
            count = ii-1
            for jj in range(n*(n-1)):

                if jj//(n-1) == ii:
                    count = ii

                if jj//(n-1) != ii and jj%(n-1) == count:
                    v[ii][jj] = 1.

        vn = np.sum(v[1:], axis=0)

        # Q defines the interactions between variables
        Q = A*(np.kron(Id_n, Im_n_1) + np.dot(v.T, v))

        # g defines the contribution from the individual variables
        g = w - 2 * A * (np.kron(Iv_n_1,Iv_n) + vn.T) - \
                2 * A * K * (np.kron(neg_Iv_n_1, Iv_n) + v[0].T)

        # c is the constant offset
        c = 2 * A * (n-1) + 2 * A * (K ** 2)

        try:
            # max() fails (TypeError) for the scalar default x_sol=0, so the
            # cost falls back to 0 when no solution vector is supplied.
            max(x_sol)
            # Evaluates the cost distance from a binary representation of a path
            fun = lambda x: np.dot(np.around(x), np.dot(Q, np.around(x))) + np.dot(g, np.around(x)) + c
            cost = fun(x_sol)
        # NOTE(review): bare except — it also hides genuine errors in fun();
        # catching TypeError alone would be safer.
        except:
            cost = 0

        return Q,g,c,cost

    def construct_hamiltonian(self):
        """Map the QP form to the Z basis; return (cz, pauli_list) where
        cz is the constant offset and pauli_list the weighted Pauli terms."""
        instance = self.instance
        n = self.n
        K = self.K

        N = (n - 1) * n  # number of qubits
        Q,g,c,_ = self.binary_representation()

        # Defining the new matrices in the Z-basis (x = (1 - z) / 2 substitution)
        Iv = np.ones(N)
        Qz = (Q / 4)
        gz = (-g / 2 - np.dot(Iv, Q / 4) - np.dot(Q / 4, Iv))
        cz = (c + np.dot(g / 2, Iv) + np.dot(Iv, np.dot(Q / 4, Iv)))

        # Fold the diagonal of Qz (Z_i Z_i = I) into the constant term.
        cz = cz + np.trace(Qz)
        Qz = Qz - np.diag(np.diag(Qz))

        # Getting the Hamiltonian in the form of a list of Pauli terms
        pauli_list = []
        # Single-qubit Z terms from gz.
        for i in range(N):
            if gz[i] != 0:
                wp = np.zeros(N)
                vp = np.zeros(N)
                vp[i] = 1
                pauli_list.append((gz[i], Pauli(vp, wp)))
        # Two-qubit Z_i Z_j terms from the off-diagonal of Qz.
        for i in range(N):
            for j in range(i):
                if Qz[i, j] != 0:
                    wp = np.zeros(N)
                    vp = np.zeros(N)
                    vp[i] = 1
                    vp[j] = 1
                    pauli_list.append((2 * Qz[i, j], Pauli(vp, wp)))

        # Identity term carrying the constant offset.
        pauli_list.append((cz, Pauli(np.zeros(N), np.zeros(N))))
        return cz, pauli_list

    def check_hamiltonian(self):
        """Diagonalize the Ising operator exactly; return the ground-state
        bit vector and the ground-state energy."""
        cz, op = self.construct_hamiltonian()
        Op = WeightedPauliOperator(paulis=op)
        qubitOp, offset = Op, 0
        algo_input = EnergyInput(qubitOp)

        # Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
        algorithm_cfg = {
            'name': 'ExactEigensolver',
        }

        params = {
            'problem': {'name': 'ising'},
            'algorithm': algorithm_cfg
        }
        result = run_algorithm(params, algo_input)

        # NOTE(review): padding length self.n*(self.n+1) differs from the
        # qubit count N = (n-1)*n used in construct_hamiltonian — verify.
        quantum_solution = self._q_solution(result['eigvecs'][0],self.n*(self.n+1))
        ground_level = result['energy'] + offset

        return quantum_solution, ground_level

    def vqe_solution(self):
        """Run VQE (RY variational form, SPSA optimizer, qasm simulator);
        return (counts_dict, most_likely_bit_vector, its QP cost)."""
        cz, op = self.construct_hamiltonian()
        Op = WeightedPauliOperator(paulis=op)
        qubitOp, offset = Op, cz
        algo_input = EnergyInput(qubitOp)

        algorithm_cfg = {
            'name': 'VQE'
        }

        optimizer_cfg = {
            'name': 'SPSA',
            'max_trials': self.max_trials
        }

        var_form_cfg = {
            'name': 'RY',
            'depth': 5,
            'entanglement': 'linear'
        }

        params = {
            'problem': {'name': 'ising', 'random_seed': 10598},
            'algorithm': algorithm_cfg,
            'optimizer': optimizer_cfg,
            'variational_form': var_form_cfg,
            'backend': {'name': 'qasm_simulator' }
        }
        result = run_algorithm(params, algo_input)

        #quantum_solution = self._q_solution(result['eigvecs'][0], self.n * (self.n + 1))
        # Pick the most frequently sampled bitstring from the counts dict.
        quantum_solution_dict = result['eigvecs'][0]
        q_s = max(quantum_solution_dict.items(), key=operator.itemgetter(1))[0]
        quantum_solution= [int(chars) for chars in q_s]
        # Reverse to match the little-endian qubit ordering of the encoding.
        quantum_solution = np.flip(quantum_solution, axis=0)

        _,_,_,level = self.binary_representation(x_sol=quantum_solution)
        return quantum_solution_dict, quantum_solution, level

    def _q_solution(self, v, N):
        """Convert a statevector ``v`` into an N-bit 0/1 vector by taking
        the index of its largest amplitude (little-endian order)."""
        index_value = [x for x in range(len(v)) if v[x] == max(v)][0]
        string_value = "{0:b}".format(index_value)

        # Left-pad the binary string to N characters.
        while len(string_value)<N:
            string_value = '0'+string_value

        sol = list()
        for elements in string_value:
            if elements == '0':
                sol.append(0)
            else:
                sol.append(1)

        sol = np.flip(sol, axis=0)

        return sol

# ### Step 1
#
# Instantiate the quantum optimizer class with parameters:
# - the instance;
# - the number of nodes and vehicles `n` and `K`;
# - the number of iterations for SPSA in VQE (default 1000)

# Instantiate the quantum optimizer class with parameters:
quantum_optimizer = QuantumOptimizer(instance, n, K, 100)

# ### Step 2
#
# Encode the problem as a binary formulation (IH-QP).
#
# Sanity check: make sure that the binary formulation in the quantum optimizer is correct (i.e., yields the same cost given the same solution).
# Check if the binary representation is correct
try:
    if z:
        Q, g, c, binary_cost = quantum_optimizer.binary_representation(x_sol=z)
        print(binary_cost, classical_cost)
        # The two formulations should agree up to floating-point noise.
        if np.abs(binary_cost - classical_cost) < 0.01:
            print('Binary formulation is correct')
        else: print('Error in the binary formulation')
    else:
        print('Could not verify the correctness, due to CPLEX solution being unavailable.')
        Q, g, c, binary_cost = quantum_optimizer.binary_representation()
except NameError as e:
    print("Warning: Please run the cells above first.")
    print(e)

# ### Step 3
#
# Encode the problem as an Ising Hamiltonian in the Z basis.
#
# Sanity check: make sure that the formulation is correct (i.e., yields the same cost given the same solution)

# +
ground_state, ground_level = quantum_optimizer.check_hamiltonian()
print(ground_state)

if z:
    # The exact ground-state energy must match the classical optimum.
    if np.abs(ground_level - classical_cost) < 0.01:
        print('Ising Hamiltonian in Z basis is correct')
    else: print('Error in the Ising Hamiltonian formulation')
# -

# ### Step 4
#
# Solve the problem via VQE. N.B. Depending on the number of qubits, the state-vector simulation can take a while; for example with 12 qubits, it takes more than 12 hours. Logging is useful to see what the program is doing.

# +
quantum_dictionary, quantum_solution, quantum_cost = quantum_optimizer.vqe_solution()

print(quantum_solution, quantum_cost)
# -

# ### Step 5
# Visualize the solution

# +
# Put the solution in a way that is compatible with the classical variables
x_quantum = np.zeros(n**2)
kk = 0
for ii in range(n ** 2):
    # Skip the diagonal (self-loop) entries, which have no quantum variable.
    if ii // n != ii % n:
        x_quantum[ii] = quantum_solution[kk]
        kk += 1

# visualize the solution
visualize_solution(xc, yc, x_quantum, quantum_cost, n, K, 'Quantum')

# and visualize the classical for comparison
# BUGFIX: `if x:` is ambiguous for a multi-element numpy array and would
# raise ValueError when CPLEX produced a solution; compare to None instead.
if x is not None:
    visualize_solution(xc, yc, x, classical_cost, n, K, 'Classical')
# -

# The plots present the depot with a star and the selected routes for the vehicles with arrows. Note that in this particular case, we can find the optimal solution of the QP formulation, which happens to coincide with the optimal solution of the ILP.
#
# Keep in mind that VQE is a heuristic working on the QP formulation of the Ising Hamiltonian, though. For suitable choices of A, local optima of the QP formulation will be feasible solutions to the ILP. While for some small instances, as above, we can find optimal solutions of the QP formulation which coincide with optima of the ILP, finding optimal solutions of the ILP is harder than finding local optima of the QP formulation, in general, which in turn is harder than finding feasible solutions of the ILP. Even within the VQE, one may provide stronger guarantees, for specific variational forms (trial wave functions).
#
# Last but not least, you may be pleased to learn that the above has been packaged in Qiskit Aqua.

# +
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance
from qiskit.aqua import run_algorithm
from qiskit.aqua.input import EnergyInput
from qiskit.aqua.algorithms import VQE, QAOA, ExactEigensolver
from qiskit.aqua.components.optimizers import COBYLA
from qiskit.aqua.components.variational_forms import RY
from qiskit.aqua.translators.ising.vehicle_routing import *

# Build the VRP qubit operator directly from the instance via Aqua's
# packaged translator, then solve it with VQE (RY ansatz + COBYLA).
qubitOp = get_vehiclerouting_qubitops(instance, n, K)
backend = BasicAer.get_backend('statevector_simulator')
seed = 50
cobyla = COBYLA()
cobyla.set_options(maxiter=250)
ry = RY(qubitOp.num_qubits, depth=3, entanglement='full')
vqe = VQE(qubitOp, ry, cobyla)
vqe.random_seed = seed
quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed)
result = vqe.run(quantum_instance)
# print(result)
x_quantum2 = get_vehiclerouting_solution(instance, n, K, result)
print(x_quantum2)
quantum_cost2 = get_vehiclerouting_cost(instance, n, K, x_quantum2)
print(quantum_cost2)
# -

import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
qiskit/advanced/aqua/optimization/vehicle_routing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch from torch import optim import torch.nn as nn import matplotlib.pyplot as plt import numpy as np import imp import time import os import itertools as it import imp import utils.movie_readin as mru import utils.plotutils as plu import utils.model as mod import utils.dataset as vdataset #import utils.model_analysis as man from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torchvideotransforms import video_transforms, volume_transforms #torch.cuda.set_device(0) cuda_device = ('cuda:0') # + # movie parameters frame_rate = 15 patch_size = 32 patch_seconds = 5 patch_frames = patch_seconds * frame_rate movies_folder = '/data/vasha/duckmovie/ducks2_128_finalOut' # model params lambda_activations = [1., 100.] 
lambda_biophysical = 0 conv_width = 10 num_hidden_nodes = [64] noises = [0] # training hyperparameters num_epochs = 1000 batch_size = 5 learning_rates = [1e-4] optimizer_types = ['adam'] #reporting parameters print_epocs = 50 # + import imp imp.reload(vdataset) my_transforms = video_transforms.Compose([video_transforms.RandomCrop(patch_size)]) video_dataset = vdataset.VideoDataset(path=movies_folder, nframes=patch_frames, patch_size=patch_size, data_type='ducks', transform=None) video_dataloader = DataLoader(video_dataset, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True) # + for j, video in enumerate(video_dataloader): frames = video['frames'] break; plt.hist(np.array(frames).flatten(),bins=100) plt.show() plt.imshow(np.array(frames[0,0,0,:,:])) # + imp.reload(mod) iterator = it.product(num_hidden_nodes, learning_rates, lambda_activations, optimizer_types, noises) for iteration in iterator: num_hidden_node, learning_rate, lambda_activation, optimizer_type, noise = iteration compression = patch_size**2 / num_hidden_node print(f'Model:{patch_size}^2 = {patch_size**2} pixels by {conv_width} frames, to {num_hidden_node} hidden nodes for {compression}x compression') #record current param setting print(f'Optimizer:{optimizer_type}; Conv Width:{conv_width}; Hidden Nodes:{num_hidden_node}; Lambda Act:{lambda_activation};Learning Rate:{learning_rate}; Batch Size:{batch_size}') params = f'{optimizer_type}_conv{conv_width}_hn{num_hidden_node}_lact{lambda_activation}_lr{learning_rate}_bs{batch_size}_noise{noise}' save_folder = os.path.join('./output',params) if not os.path.exists(save_folder): os.mkdir(save_folder) model = mod.AEC(num_hidden_node, conv_width, patch_size, lambda_activation, noise, cuda_device).to(cuda_device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) loss_history = [] psnr_history = [] for i, epoch in enumerate(range(num_epochs)): for j, video in enumerate(video_dataloader): frames = video['frames'].to(cuda_device) 
optimizer.zero_grad() #zero out our gradients acts = model.encode(frames) recon = model.decode(acts) loss = model.loss_func(frames, recon, acts) loss_history.append(loss) psnr = model.calc_psnr(recon.cpu().detach().numpy(), frames.cpu().detach().numpy()) psnr_history.append(psnr) loss.backward() optimizer.step() if((j+1)%print_epocs==0): print(f'Epoch {i+1}/{num_epochs}') for name, parameter in model.named_parameters(): if(name in ['tconv.module.weight_v', 'tconv.weight_v']): inw = np.array(parameter.cpu().squeeze().detach()) elif(name in ['tdeconv.module.weight', 'tdeconv.weight']): outw = np.array(parameter.cpu().squeeze().detach()) #plot weigghts p = plu.plot_temporal_weights(inw) plt.savefig(os.path.join(save_folder,f'inw_{params}_{round(i/num_epochs,2)}.png')) plt.show() p = plu.plot_temporal_weights(outw) plt.savefig(os.path.join(save_folder,f'outw_{params}_{round(i/num_epochs,2)}.png')) plt.show() #plot stats plt.figure(figsize=(30,5)) plt.subplot(1,5,1) p = plt.hist(np.array(acts.cpu().squeeze().detach()).flatten(),bins=100) plt.title('Activations') plt.subplot(1,5,2) loss_evolution = [np.float(loss) for loss in loss_history] plt.plot(loss_evolution) plt.title('Loss Evolution') plt.subplot(1,5,3) psnr_evolution = [np.float(psnr) for psnr in psnr_history] plt.plot(psnr_evolution) plt.title('PSNR') plt.subplot(1,5,4) plt.imshow(frames[0,0,0,:,:].cpu().detach().numpy()) plt.title('Input Frame') plt.subplot(1,5,5) plt.imshow(recon[0,0,0,:,:].cpu().detach().numpy()) plt.title('Recon Frame') plt.savefig(os.path.join(save_folder,f'stats_{params}.png')) plt.show() else: print('*',end='') # + iterator = it.product(num_hidden_nodes, learning_rates, lambda_activations, optimizer_types, learning_momentums, noises) for iteration in iterator: num_hidden_node, learning_rate, lambda_activation, optimizer_type, learning_momentum, noise = iteration compression = patch_size**2 / num_hidden_node print(f'Model:{patch_size}^2 = {patch_size**2} pixels by {patch_seconds} frames, 
to {num_hidden_node} hidden nodes for {compression}x compression') #record current param setting print(f'Optimizer:{optimizer_type}; Conv Width:{conv_width}; Hidden Nodes:{num_hidden_node}; Lambda Act:{lambda_activation};Learning Rate:{learning_rate}; Batch Size:{batch_size}; Momentum:{learning_momentum}') params = f'{optimizer_type}_conv{conv_width}_hn{num_hidden_node}_lact{lambda_activation}_lr{learning_rate}_bs{batch_size}_mm{learning_momentum}_noise{noise}' save_folder = os.path.join('./output',params) if not os.path.exists(save_folder): os.mkdir(save_folder) #define model model = mod.AEC(num_hidden_node, conv_width, patch_size, lambda_activation, noise) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if torch.cuda.is_available(): #print('Found GPU - Running Model on it.') model.cuda() #define optimizer if(optimizer_type == 'adam'): optimizer = optim.Adam(model.parameters(), lr=learning_rate) elif(optimizer_type == 'sgd'): optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=learning_momentum) #train over epochs print(f'Training {num_epochs} Epochs. 
',end="") printing_modulo = num_epochs/print_epocs loss_history = [] snr_history = [] for i in range(num_epochs): batches_start = np.arange(0,np.shape(movie_dataset)[0], batch_size) for bs in batches_start: movie_batch = torch.unsqueeze(torch.tensor(movie_dataset[bs:bs+batch_size]),1).cuda() optimizer.zero_grad() #zero out our gradients acts = model.encode(movie_batch) recon_batch = model.decode(acts) loss = model.loss_func(movie_batch, recon_batch, acts) loss.backward() optimizer.step() end = time.time() times.append(end-start) # record loss and snr after each epoch loss_history.append(loss.item()) snr_history.append(model.calc_psnr(movie_batch.cpu.cdetach(), recon_batch).detach()) # if it's our first iteration, give an estimate of time if(i==0): plt.clf() print(f'Estimated run time: {round(times[-1]*num_epochs/60,1)}mins.') elif((i+1)%printing_modulo==0): print(f'Epoch {i+1}/{num_epochs} (mean time per epoch: {round(np.mean(times),1)}s)') for name, parameter in model.named_parameters(): if(name in ['tconv.module.weight_v', 'tconv.weight_v']): inw = np.array(parameter.cpu().squeeze().detach()) elif(name in ['tdeconv.module.weight', 'tdeconv.weight']): outw = np.array(parameter.cpu().squeeze().detach()) p = plt.hist(np.mean(np.array(acts.cpu().squeeze().detach()),axis=(0,2))) p = plu.plot_temporal_weights(inw) plt.savefig(os.path.join(save_folder,f'inw_{params}_{round(i/num_epochs,2)}.png')) plt.show() p = plu.plot_temporal_weights(outw) plt.savefig(os.path.join(save_folder,f'outw_{params}_{round(i/num_epochs,2)}.png')) plt.show() loss_evolution = [np.float(loss) for loss in loss_history] plt.plot(loss_evolution) plt.title('Loss Evolution') plt.savefig(os.path.join(save_folder,f'loss_evolution_{params}.png')) plt.show() else: print('*',end='') #get final weights & losses for name, parameter in model.named_parameters(): if(name in ['tconv.module.weight_v', 'tconv.weight_v']): inw = np.array(parameter.cpu().squeeze().detach()) elif(name in ['tdeconv.module.weight', 
'tdeconv.weight']): outw = np.array(parameter.cpu().squeeze().detach()) loss_evolution = [np.float(loss) for loss in loss_history] snr_evolution = [np.float(snr) for snr in snr_history] # plot everything p = plu.plot_temporal_weights(inw) plt.savefig(os.path.join(save_folder,f'final_inw_{params}.png')) plt.clf() p = plu.plot_temporal_weights(outw) plt.savefig(os.path.join(save_folder,f'final_outw_{params}.png')) plt.clf() plt.plot(loss_evolution) plt.title('Loss Evolution') plt.savefig(os.path.join(save_folder,f'final_loss_{params}.png')) plt.clf() plt.plot(np.log(loss_evolution)) plt.title('Log Loss Evolution') plt.savefig(os.path.join(save_folder,f'final_lgloss_{params}.png')) plt.clf() p = plt.plot(snr_evolution) plt.title('SNR Evolution') plt.savefig(os.path.join(save_folder,f'final_snr_{params}.png')) plt.clf() plt.figure() for i in range(10): plu.plot_movies_recons(np.squeeze(movie_batch), np.squeeze(recon_batch), i) plt.savefig(os.path.join(save_folder,f'finaol_recons_{params}.png')) print('Finished this Parameter') print('Finished Sweep!!!') # - loss_evolution = [loss for loss in loss_history] plt.plot(loss_evolution) plt.show() plt.plot(np.log(loss_evolution)) plt.show() plt.plot(np.log(snr_history)) plt.show() p = plu.plot_temporal_weights(inw) p = plu.plot_temporal_weights(outw) tiw = inw * 10 plu.plot_temporal_weights(tiw) plt.hist(tiw.flatten(),bins=500); plt.hist(inw.flatten(),bins=500); plt.hist(outw.flatten(),bins=500); movie_dataset.shape torch.cuda.empty_cache() # + def debug_memory(): import collections, gc, torch tensors = collections.Counter((str(o.device), o.dtype, tuple(o.shape)) for o in gc.get_objects() if torch.is_tensor(o)) for line in sorted(tensors.items()): print('{}\t{}'.format(*line)) debug_memory() # + for name, parameter in model.named_parameters(): print(name) if(name in ['tconv.module.weight_v', 'tconv.weight_v']): inw = np.array(parameter.squeeze().detach().cpu()) elif(name in ['tdeconv.module.weight', 'tdeconv.weight']): outw = 
np.array(parameter.squeeze().detach().cpu()) #elif(name=='tconv.weight_g'): # print(np.shape(np.array(parameter.squeeze().detach()))) #print(inw.shape) #print(bias.shape) #print(wnorm.shape) #print(outw.shape) p = plu.plot_temporal_weights(inw) p = plu.plot_temporal_weights(outw) # - plt.hist(inw.flatten()) plt.hist(outw.flatten()) imp.reload(plu) #movies = movie_batch[0] #recons = recon_batch[0] print('Movies:') for i in range(10): plu.plot_movies_recons(np.squeeze(movie_batch), np.squeeze(recon_batch), i) #plt.colorbar() plt.show() outw print(inw.shape) print(np.max(inw)) print(np.min(inw)) for i in range(inw.shape[1]): plt.imshow(inw[9,i,:,:],cmap='Greys_r') plt.show() print(movie.shape) m = movie[7,0,1,:,:] plt.imshow(m) print(m) movie.shape if(False): moreepochs = 2000 print(f'Training for {moreepochs} more Epochs:') for i in range(moreepochs): start = time.time() times = [] for movie in train_loader: movie = torch.unsqueeze(movie,1) #print(movie.size()) movie = movie.float().cuda() optimizer.zero_grad() acts = model.encode(movie) recon = model.decode(acts) loss = loss_func(movie, recon, acts) loss_history.append(loss.detach()) loss.backward() optimizer.step() end=time.time() times.append(end-start) if((i+1)%printing_modulo==0): print(f'{i+1}th Epoch (mean time per epoch: {round(np.mean(times))}s)') else: print('*',end='') print('Done!') # + def visualize_aec(): with torch.no_grad(): # Get a batch of training data data = next(iter(train_loader))[0].to(device) input_tensor = data.cpu() transformed_input_tensor = model.encode(data).cpu() in_grid = convert_image_np( torchvision.utils.make_grid(input_tensor)) out_grid = convert_image_np( torchvision.utils.make_grid(transformed_input_tensor)) # Plot the results side-by-side f, axarr = plt.subplots(1, 2) axarr[0].imshow(in_grid) axarr[0].set_title('Dataset Images') axarr[1].imshow(out_grid) axarr[1].set_title('Recon Images') visualize_aec() plt.ioff() plt.show() # - torchvision.utils.make_grid(input_tensor) # + # 
initialize figure f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2)) plt.ion() # continuously plot # original data (first row) for viewing view_data = train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255. for i in range(N_TEST_IMG): a[0][i].imshow(np.reshape(view_data.numpy()[i], (28, 28)), cmap='gray'); a[0][i].set_xticks(()); a[0][i].set_yticks(()) for epoch in range(EPOCH): for step, (x, b_label) in enumerate(train_loader): b_x = x.view(-1, 28*28) # batch x, shape (batch, 28*28) b_y = x.view(-1, 28*28) # batch y, shape (batch, 28*28) encoded, decoded = autoencoder(b_x) loss = loss_func(decoded, b_y) # mean square error optimizer.zero_grad() # clear gradients for this training step loss.backward() # backpropagation, compute gradients optimizer.step() # apply gradients if step % 100 == 0: print('Epoch: ', epoch, '| train loss: %.4f' % loss.numpy()) # plotting decoded image (second row) _, decoded_data = autoencoder(view_data) for i in range(N_TEST_IMG): a[1][i].clear() a[1][i].imshow(np.reshape(decoded_data.numpy()[i], (28, 28)), cmap='gray') a[1][i].set_xticks(()); a[1][i].set_yticks(()) plt.draw(); plt.pause(0.05) plt.ioff() plt.show() # - torch.cuda.empty_cache() model = 'a' # + def train(epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % 500 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) # # A simple test procedure to measure STN the performances on MNIST. 
# def test(): with torch.no_grad(): model.eval() test_loss = 0 correct = 0 for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) # sum up batch loss test_loss += F.nll_loss(output, target, size_average=False).item() # get the index of the max log-probability pred = output.max(1, keepdim=True)[1] correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n' .format(test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) # - model = AEC() if torch.cuda.is_available(): model.cuda() # +
taec_torch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tensorflow]
#     language: python
#     name: conda-env-tensorflow-py
# ---

# +
# import the necessary modules
# NOTE(review): this notebook targets TensorFlow 1.x (tf.contrib, placeholders,
# sessions); it will not run unmodified on TensorFlow 2.x.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from tensorflow.contrib.layers import xavier_initializer
# %matplotlib inline

# +
# Load data
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('MNIST_data', one_hot=True)

# define hyperparameters
batch_size = 128  # minibatch size for both networks
Z_dim = 100  # dimensionality of the generator's noise input
im_size = 28  # MNIST images are 28 x 28
h_size=128  # hidden-layer width of both networks
learning_rate_D = .0005
learning_rate_G = .0006

# +
#Create Placeholder for input X and random noise Z
X = tf.placeholder(tf.float32, shape=[None, im_size*im_size])
Z = tf.placeholder(tf.float32, shape=[None, Z_dim])

initializer=xavier_initializer()

# Define Discriminator and Generator training variables
#Discriminator: 784 -> 128 -> 1
D_W1 = tf.Variable(initializer([im_size*im_size, h_size]))
D_b1 = tf.Variable(tf.zeros(shape=[h_size]))

D_W2 = tf.Variable(initializer([h_size, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))

theta_D = [D_W1, D_W2, D_b1, D_b2]

#Generator: 100 -> 128 -> 784
G_W1 = tf.Variable(initializer([Z_dim, h_size]))
G_b1 = tf.Variable(tf.zeros(shape=[h_size]))

G_W2 = tf.Variable(initializer([h_size, im_size*im_size]))
G_b2 = tf.Variable(tf.zeros(shape=[im_size*im_size]))

theta_G = [G_W1, G_W2, G_b1, G_b2]

# +
def sample_Z(m, n):
    # Uniform noise in [-1, 1] of shape (m, n) for the generator input.
    return np.random.uniform(-1., 1., size=[m, n])

def generator(z):
    """
    Two layer Generator Network
    Z=>128=>784
    """
    G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
    # Sigmoid keeps pixel outputs in (0, 1), matching MNIST scaling.
    G_prob = tf.nn.sigmoid(G_log_prob)
    return G_prob

def discriminator(x):
    """
    Two layer Discriminator Network
    X=>128=>1
    """
    D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)
    # Both probability and raw logit are returned; the losses use the logit.
    return D_prob, D_logit

def plot(samples):
    """Plot up to 25 generated samples on a 5 x 5 grid; return the figure."""
    fig = plt.figure(figsize=(10, 10))
    gs = gridspec.GridSpec(5, 5)
    gs.update(wspace=0.05, hspace=0.05)

    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='gray')

    return fig

# +
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)

# losses:
# -------------------
# Discriminator: real images toward label 1, generated images toward label 0.
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
# Generator: non-saturating loss — push D's output on fakes toward 1.
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))

# Each optimizer only updates its own network's variables (var_list).
D_solver = tf.train.AdamOptimizer(learning_rate=learning_rate_D).minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer(learning_rate=learning_rate_G).minimize(G_loss, var_list=theta_G)

# +
sess = tf.Session()
sess.run(tf.global_variables_initializer())

GLoss = []
DLoss = []

if not os.path.exists('out/'):
    os.makedirs('out/')

for it in range(100000):
    # Every 100 iterations, dump a 5x5 grid of generated samples to disk.
    if it % 100 == 0:
        samples = sess.run(G_sample, feed_dict={Z: sample_Z(25, Z_dim)})

        fig = plot(samples)
        plt.savefig('out/{}.png'.format(str(it).zfill(3)), bbox_inches='tight')
        plt.close(fig)

    X_mb, _ = data.train.next_batch(batch_size)

    # Alternate one discriminator step and one generator step.
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(batch_size, Z_dim)})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(batch_size, Z_dim)})

    GLoss.append(G_loss_curr)
    DLoss.append(D_loss_curr)
    if it % 100 == 0:
        print('Iter: {} D loss: {:.4} G_loss: {:.4}'.format(it,D_loss_curr, G_loss_curr))
print('Done')
# -

plt.plot(DLoss, label = 'Discriminator Loss')
plt.plot(GLoss, label = 'Generator Loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('Loss')
Chapter07/Vanilla_GAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Load the dataset

with open("./datasets/football_key.tsv") as f:
    # Each line is of form: <country_id> <country_name>
    def fmt(line):
        # IDs in the file are 1-based; convert to 0-based indices.
        return (int(line[0])-1, line[1].strip('"'))
    data_key = [fmt(line.strip().split()) for line in f if line[0] != '*']

with open("./datasets/football_pairs.tsv") as f:
    # Each line is of form: <country_a_id> <country_b_id> <number_of_players>
    def fmt(pair):
        return (int(pair[0])-1, int(pair[1])-1, 1)
    data_pairs = [fmt(line.strip().split()) for line in f if line[0] != '*']

# Create the neighbours[] sets (undirected adjacency).
neighbours = [set() for _ in range(len(data_key))]
for p in data_pairs:
    neighbours[p[0]].add(p[1])
    neighbours[p[1]].add(p[0])

# Define the similarity metric: graph distance.
#
# Method to compute graph distance without Dijkstra or similar taken from
# _Scalable Proximity Estimation and Link Prediction in Online Social Networks_ -
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>:
#
# We initialize S = {x} and D = {y}. In each step we either expand set S to include
# its members' neighbors (i.e., S = S ∪ {v|⟨u, v⟩ ∈ E ∧ u ∈ S}) or expand set D to
# include its members' inverse neighbors (i.e., D = D ∪ {u|⟨u, v⟩ ∈ E ∧ v ∈ D}).
# We stop whenever S ∩ D != ∅. The number of steps taken so far gives the shortest
# path distance. For efficiency, we always expand the smaller set between S and D
# in each step.

def similarity_GD(x, y, ignore_set=None):
    """Return the negated shortest-path distance between nodes x and y.

    The search is capped at MAX_DIST steps, so disconnected (or very far)
    pairs all receive the same score.  `ignore_set` holds (u, v) edges that
    must be treated as absent — e.g. the held-out test links of a fold.
    The value is negated so that a larger score means "more similar".
    """
    MAX_DIST = 6

    def expand(nset):
        # Grow nset by one hop, skipping any edge present in ignore_set.
        for n in set(nset):
            for m in neighbours[n]:
                if (ignore_set is not None and
                        ((n, m) in ignore_set or (m, n) in ignore_set)):
                    # We should calculate without this link,
                    # as it is in the test set for this iter.
                    continue
                nset.add(m)

    s = set([x])
    d = set([y])
    dist = 0
    while len(s & d) == 0 and dist <= MAX_DIST:
        dist += 1
        # For efficiency, always expand the smaller frontier.
        if len(d) < len(s):
            expand(d)
        else:
            expand(s)
    return -dist

# Compute the similarities across the dataset.

def compute_similarities(ignore_set=None):
    """Return the full pairwise Graph Distance similarity matrix.

    S_GD[x][y] contains the similarity of nodes x and y using the Graph
    Distance (GD) metric.

    Bug fix: the original outer loop ran `for i in range(len(data_key)-1)`,
    which left the final row of the matrix as all zeros; every row is now
    filled in.
    """
    size = len(data_key)
    S_GD = [[0 for _ in range(size)] for _ in range(size)]
    for i in range(size):
        for j in range(size):
            S_GD[i][j] = similarity_GD(i, j, ignore_set=ignore_set)
    return S_GD

# +
# A quick eyeball check of a subset of the data.

# Similarities with all links included.
S_GD = compute_similarities()

num_to_print = len(data_key)//2
print(' '*4 + ' '.join(d[1] for d in data_key[:num_to_print]))
print('\n'.join(data_key[i][1] + ' ' +
                ','.join('{:>3}'.format(c) for c in S_GD[i][:num_to_print])
                for i in range(num_to_print)))
# -

# Split the data into 10 disjoint subsets.

# +
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for it in range(0, len(l), n):
        yield l[it:it + n]

e = []
predict = []
for i in range(len(data_key)):
    for j in range(i+1, len(data_key)):
        if i in neighbours[j]:
            e.append((i, j))
        else:
            predict.append((i, j))

# e now contains all link pairs
# predict contains all non-existing links from the original data
# each pair is a tuple (a, b), where a < b

# We now randomly shuffle this list
import random
random.shuffle(e)

print('len(e)', len(e))
print('len(e)//10 =', len(e)//10)

# Create e_prime, a list of 10 partitions.  The chunk size is rounded up
# so that at most 10 chunks are produced.
e_prime = (list(chunks(e, len(e)//10 + 1)))

# The following is a quick eyeball test to make sure the partitions look ok.
print('10 subsets:')
for i in range(len(e_prime)):
    entry = e_prime[i]
    print(entry)
# -

# +
aucs = []
n1s = []
n2s = []
n3s = []
ns = []

# Column headings.
print('\t\tn1 \tn2 \tn3 \tAUC')

# Iterate across the 10 folds.
for i in range(10):
    test = e_prime[i]
    # Recompute similarities with this fold's links held out.
    S_CN = compute_similarities(ignore_set=test)
    n1 = 0  # missing_link > nonexistant_link
    n2 = 0  # missing_link = nonexistant_link
    n3 = 0  # missing_link < nonexistant_link
    n = 0   # total link comparisons
    for missing_link in test:
        a_score = S_CN[missing_link[0]][missing_link[1]]
        for nonexistant_link in predict:
            b_score = S_CN[nonexistant_link[0]][nonexistant_link[1]]
            if abs(a_score-b_score) < 0.0005:
                n2 += 1
            elif a_score > b_score:
                n1 += 1
            else:
                n3 += 1
            n += 1
    auc = (n1 + 0.5*n2)/(n)
    aucs.append(auc)
    n1s.append(n1)
    n2s.append(n2)
    n3s.append(n3)
    ns.append(n)
    print('Fold {:<2}:\t{:<5}\t{:<5}\t{:<5}\t{:<.6f}'.format(i+1, n1, n2, n3, auc))

def avg(seq):
    """Arithmetic mean of a non-empty sequence."""
    return sum(seq)/len(seq)

print('Average:\t{:<5}\t{:<5}\t{:<5}\t{:<.6f}'.format(int(round(avg(n1s))),
                                                      int(round(avg(n2s))),
                                                      int(round(avg(n3s))),
                                                      avg(aucs)))
# -
research/graph_distance_football.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature Extraction
# * Read the video files. Run the dlib face-lip finder thing. Make the 100x50 frames
# * Note: Each video file (.mpg) is `(75, 576, 720)`

import os
import numpy as np
from keras import backend as K
from scipy import ndimage
from scipy.misc import imresize
import dlib
from utils import Align
import skvideo
skvideo.setFFmpegPath('C:\\Users\\Joseph\\Desktop\\Misc. Packages\\ffmpeg-20181203-8ef0fda-win64-static\\ffmpeg-20181203-8ef0fda-win64-static\\bin\\')
import skvideo.io


class Video(object):
    """Loads video frames and reduces them to mouth-region crops.

    After one of the `from_*` constructors runs, the instance exposes
    `face`, `mouth`, `data` (T x W x H x C, or C x T x W x H when Keras is
    configured channels-first) and `length`.
    """

    def __init__(self, vtype='mouth', face_predictor_path=None):
        # A 'face' video needs a dlib landmark predictor to locate the mouth.
        if vtype == 'face' and face_predictor_path is None:
            raise AttributeError('Face video need to be accompanied with face predictor')
        self.face_predictor_path = face_predictor_path
        self.vtype = vtype

    def from_frames(self, path):
        """Build from a directory of per-frame image files (sorted by name)."""
        ordered_paths = sorted(os.path.join(path, name) for name in os.listdir(path))
        loaded = [ndimage.imread(frame_path) for frame_path in ordered_paths]
        self.handle_type(loaded)
        return self

    def from_video(self, path):
        """Build from a single video file on disk."""
        self.handle_type(self.get_video_frames(path))
        return self

    def from_array(self, frames):
        """Build from an in-memory sequence of frames."""
        self.handle_type(frames)
        return self

    def handle_type(self, frames):
        """Dispatch frame processing according to the configured video type."""
        if self.vtype == 'mouth':
            self.process_frames_mouth(frames)
        elif self.vtype == 'face':
            self.process_frames_face(frames)
        else:
            raise Exception('Video type not found')

    def process_frames_face(self, frames):
        """Full-face input: locate and crop the mouth, then store both views."""
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(self.face_predictor_path)
        mouth_frames = self.get_frames_mouth(detector, predictor, frames)
        self.face = np.array(frames)
        self.mouth = np.array(mouth_frames)
        self.set_data(mouth_frames)

    def process_frames_mouth(self, frames):
        """Pre-cropped input: face and mouth views are the same frames."""
        self.face = np.array(frames)
        self.mouth = np.array(frames)
        self.set_data(frames)

    def get_frames_mouth(self, detector, predictor, frames):
        """Return MOUTH_WIDTH x MOUTH_HEIGHT crops centred on the mouth.

        The resize ratio is fixed from the first detected mouth so all
        frames are scaled consistently.  If no face is found in a frame,
        the original frames are returned unchanged.
        """
        MOUTH_WIDTH = 100
        MOUTH_HEIGHT = 50
        HORIZONTAL_PAD = 0.19
        normalize_ratio = None
        mouth_frames = []
        for frame in frames:
            detections = detector(frame, 1)
            shape = None
            for _, detection in enumerate(detections):
                shape = predictor(frame, detection)
            if shape is None:
                # Detector doesn't detect face, just return as is
                return frames

            # dlib's 68-point model puts the mouth at landmarks 48-67.
            mouth_points = [(part.x, part.y)
                            for idx, part in enumerate(shape.parts())
                            if idx >= 48]
            np_mouth_points = np.array(mouth_points)
            mouth_centroid = np.mean(np_mouth_points[:, -2:], axis=0)

            if normalize_ratio is None:
                mouth_left = np.min(np_mouth_points[:, :-1]) * (1.0 - HORIZONTAL_PAD)
                mouth_right = np.max(np_mouth_points[:, :-1]) * (1.0 + HORIZONTAL_PAD)
                normalize_ratio = MOUTH_WIDTH / float(mouth_right - mouth_left)

            new_img_shape = (int(frame.shape[0] * normalize_ratio),
                             int(frame.shape[1] * normalize_ratio))
            resized_img = imresize(frame, new_img_shape)

            mouth_centroid_norm = mouth_centroid * normalize_ratio
            mouth_l = int(mouth_centroid_norm[0] - MOUTH_WIDTH / 2)
            mouth_r = int(mouth_centroid_norm[0] + MOUTH_WIDTH / 2)
            mouth_t = int(mouth_centroid_norm[1] - MOUTH_HEIGHT / 2)
            mouth_b = int(mouth_centroid_norm[1] + MOUTH_HEIGHT / 2)

            mouth_frames.append(resized_img[mouth_t:mouth_b, mouth_l:mouth_r])
        return mouth_frames

    def get_video_frames(self, path):
        """Decode every frame of the video at `path` into one numpy array."""
        return np.array([frame for frame in skvideo.io.vreader(path)])

    def set_data(self, frames):
        """Store frames as T x W x H x C (C x T x W x H for channels_first)."""
        converted = []
        for frame in frames:
            frame = frame.swapaxes(0, 1)  # swap width and height to form format W x H x C
            if len(frame.shape) < 3:
                # Grayscale input: add an explicit channel axis.
                frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1)
            converted.append(frame)
        frame_count = len(converted)
        stacked = np.array(converted)  # T x W x H x C
        if K.image_data_format() == 'channels_first':
            stacked = np.rollaxis(stacked, 3)  # C x T x W x H
        self.data = stacked
        self.length = frame_count
lipreading/io/Feature Extraction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fix: `sys` was used below (sys.path.append) without ever being imported,
# which raised a NameError on the first run.
import sys
import os
import zipfile

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import tifffile as tiff
import cv2
from tqdm.notebook import tqdm
# NOTE(review): this deliberately shadows the notebook tqdm imported above —
# confirm which progress bar is actually wanted.
from tqdm import tqdm as tqdm

# Make the project-level utils package importable from this notebook.
sys.path.append('../')
from utils.ImageTransform001 import ImageTransform, make_datapath_list, Dataset

# # PATH
# Locations of the raw train/test images and the zipped 256x256 crops.
DATASET_PATH = "../data_ignore/datasets/"
TRAIN_PATH = os.path.join(DATASET_PATH, "train")
TEST_PATH = os.path.join(DATASET_PATH, "test")
ZIP_TRAIN = os.path.join(DATASET_PATH, "zip/train256")
ZIP_MASK = os.path.join(DATASET_PATH, "zip/masks256")
notebooks/004-data_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Using Bokeh

from bokeh.plotting import figure
from myst_nb_bokeh import glue_bokeh

# output_notebook()

# Build a simple scatter of a descending diagonal and glue it for MyST-NB.
bokeh_fig = figure()
bokeh_fig.circle(range(1, 10), range(10, 1, -1));

# show(bokeh_fig)

glue_bokeh("bokeh_figure", bokeh_fig)

# :::{glue:} bokeh_figure
# :::

print("ASDF")
tests/notebooks/bokeh.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

df_1 = pd.read_csv("Data_02.csv")
df_1

df_1.head(5)  # first 5 rows
df_1.tail(5)  # last 5 rows

df_1.to_csv("outro Data_02.csv")

df_2 = pd.read_excel("Data_03.xlsx")
df_2.head()

df_2.to_excel("outro Data_03.xlsx")

df_2.info()
df_2.dropna()
df_2.drop("Gas Exp", axis = 1)  # column named "Gas Exp"
df_2.shape

df_2.columns = ["ano", "gasolina", "populacao", "gasp", "lucro"]
df_2.head()

# Descriptive statistics
df_2.sum()
df_2.min()
df_2.max()
df_2.describe()
df_2.mean()
df_2.median()

# Fix: the original `df_2.sort_values(by = )` was a SyntaxError (empty
# keyword argument); sort by the year column renamed above.
df_2.sort_values(by="ano")

df_2.loc[0, "ano"]
pandas/curso pandas/pandas importar dados csv.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="LFS-PKCRjMag" colab_type="text"
# # Preparation
#
#
# ## Installing PyTorch

# + id="Jg32ZeiX4qZw" colab_type="code" outputId="8fd78e99-8e75-4fee-c140-55b4b72b1620" colab={"base_uri": "https://localhost:8080/", "height": 119}
# !pip install torch torchvision

# + id="Qaq_LyiCYlAJ" colab_type="code" colab={}
# Fix: torch was used here without ever being imported (NameError on a
# fresh runtime).
import torch
print(torch.__version__)

# + [markdown] id="K28hN0NWMhEc" colab_type="text"
# # Steps
#

# + [markdown] id="veMbc-mfN7pv" colab_type="text"
# ## Import the required packages

# + id="e6lAU8OCWnOB" colab_type="code" colab={}
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST

# + [markdown] id="q9jgkZvyN1Eg" colab_type="text"
# ## Download the dataset

# + id="YTuivz8y_Ae-" colab_type="code" colab={}
data_folder = '~/data'
BATCH_SIZE = 8

# + id="V7bUjDdIWnOG" colab_type="code" colab={}
mnist_data = MNIST(data_folder, train=True, download=True, transform=transforms.ToTensor())
# Fix: this line was commented out, so the cells below failed with a
# NameError on `data_loader`.
data_loader = DataLoader(mnist_data, batch_size=BATCH_SIZE, shuffle=False)

# + id="5-F0ROYk_TvI" colab_type="code" colab={}
# ls

# + [markdown] id="uyPaqK-9OBB3" colab_type="text"
# ## Inspect the data

# + id="f-9LSCZtWnON" colab_type="code" colab={}
data_iterator = iter(data_loader)
# Fix: `.next()` was removed from the DataLoader iterator API; use the
# builtin next() instead.
images, labels = next(data_iterator)

# + id="c1MMQaaS_hB6" colab_type="code" colab={}
print(len(images))
print(len(labels))

# + [markdown] id="0b1meGCNOHTX" colab_type="text"
# ## Visualize the first sample

# + id="Mb--62-MU-fe" colab_type="code" colab={}
# Index of the image to display
location = 4

# Convert the tensor to a numpy array and store it in `data`
data = images[location].numpy()
print(data.shape)

# Reshape so that matplotlib can draw the 28x28 image
reshaped_data = data.reshape(28, 28)

# Draw the image from the data
plt.imshow(reshaped_data, cmap='inferno', interpolation='bicubic')
plt.show()

print('ラベル:', labels[location])

# + [markdown] id="mER3dMmaOL4s" colab_type="text"
# ## Prepare the training and test data

# + id="uhcMc7_IWnOU" colab_type="code" colab={}
# Training data
train_data_with_labels = MNIST(data_folder, train=True, download=True,
                               transform=transforms.ToTensor())
train_data_loader = DataLoader(train_data_with_labels, batch_size=BATCH_SIZE, shuffle=True)

# Test data
test_data_with_labels = MNIST(data_folder, train=False, download=True,
                              transform=transforms.ToTensor())
test_data_loader = DataLoader(test_data_with_labels, batch_size=BATCH_SIZE, shuffle=True)

# + [markdown] id="bl9UnaJbRDp4" colab_type="text"
# ## Define the neural network

# + id="2tjPMjJXWnOX" colab_type="code" colab={}
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):
    """Three-layer perceptron: 784 -> 100 -> 50 -> 10.

    NOTE(review): there is no non-linearity between the layers, so this
    collapses to a single linear map; kept as-is to preserve the original
    behaviour.
    """

    def __init__(self):
        super().__init__()
        # Input layer
        self.layer1 = nn.Linear(28 * 28, 100)
        # Hidden layer
        self.layer2 = nn.Linear(100, 50)
        # Output layer
        self.layer3 = nn.Linear(50, 10)

    def forward(self, input_data):
        # Flatten the 28x28 image into a 784-vector.
        input_data = input_data.view(-1, 28 * 28)
        input_data = self.layer1(input_data)
        input_data = self.layer2(input_data)
        input_data = self.layer3(input_data)
        return input_data

# + [markdown] id="FRB1_hcPiUvx" colab_type="text"
# ## Model

# + id="4qJy6sJsiYcO" colab_type="code" colab={}
model = MLP()

# + [markdown] id="rzIoYxwTRJXb" colab_type="text"
# ## Define the loss function and the optimizer
#

# + id="9UzvWFHLWnOa" colab_type="code" colab={}
import torch.optim as optimizer

# Softmax cross-entropy loss
lossResult = nn.CrossEntropyLoss()
# SGD
optimizer = optimizer.SGD(model.parameters(), lr=0.01)

# + [markdown] id="1UdbDR-zj6jg" colab_type="text"
# ## Training
#

# + id="wPHZ4fX_WnOe" colab_type="code" colab={}
# Maximum number of training epochs
MAX_EPOCH = 4

for epoch in range(MAX_EPOCH):
    total_loss = 0.0
    for i, data in enumerate(train_data_loader):
        # Unpack the batch into inputs and teacher labels
        train_data, teacher_labels = data

        # Wrap the inputs in torch.autograd.Variable
        train_data, teacher_labels = Variable(train_data), Variable(teacher_labels)

        # Clear previously accumulated gradients
        optimizer.zero_grad()

        # Forward pass: compute predictions for the batch
        outputs = model(train_data)

        # Compute the loss and back-propagate
        loss = lossResult(outputs, teacher_labels)
        loss.backward()

        # Update the weights
        optimizer.step()

        # Accumulate the loss.
        # Fix: `loss.data[0]` was removed in PyTorch 0.4+; use loss.item().
        total_loss += loss.item()

        # Report progress every 2000 mini-batches
        if i % 2000 == 1999:
            print('学習進捗:[%d, %d] 学習誤差(loss): %.3f' % (epoch + 1, i + 1, total_loss / 2000))
            total_loss = 0.0

print('学習終了')

# + [markdown] id="npn5XGVsWQSZ" colab_type="text"
# ## Evaluation

# + id="Ep_R-2Z_WnOj" colab_type="code" colab={}
# Total number of test samples
total = 0
# Correct-prediction counter
count_when_correct = 0

for data in test_data_loader:
    # Unpack the batch from the test data loader
    test_data, teacher_labels = data
    # Run the model on the test batch
    results = model(Variable(test_data))
    # Take the class with the highest score as the prediction
    _, predicted = torch.max(results.data, 1)
    total += teacher_labels.size(0)
    # .item() converts the 0-dim tensor to a plain int for the %d prints below
    count_when_correct += (predicted == teacher_labels).sum().item()

print('count_when_correct:%d' % (count_when_correct))
print('total:%d' % (total))
print('正解率:%d / %d = %f' % (count_when_correct, total,
                              int(count_when_correct) / int(total)))

# + [markdown] id="nIP4Fic_Wj1w" colab_type="text"
# ## Evaluate an individual sample

# + id="55Xfqi2sWnOn" colab_type="code" colab={}
test_iterator = iter(test_data_loader)
# Advance the iterator more times to fetch different test batches.
# Fix: `.next()` replaced with the builtin next().
test_data, teacher_labels = next(test_iterator)

# Run the model on the batch and take the arg-max prediction
results = model(Variable(test_data))
_, predicted_label = torch.max(results.data, 1)

location = 1
plt.imshow(test_data[location].numpy().reshape(28, 28), cmap='inferno',
           interpolation='bicubic')
print('ラベル:', predicted_label[location])
Colaboratory/04_07(PyTorch_MNIST).ipynb
// -*- coding: utf-8 -*-
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .rs
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: Rust
//     language: rust
//     name: rust
// ---

// # EvCxR Jupyter Kernel tutorial
//
// In Jupyter Notebook and JupyterLab you write code into "cells" such as the boxes below
//
// As with this text, a cell may also contain markdown
//
// Each code cell is compiled and executed separately, but variables, defined functions and so on are preserved across cells
//
// ## Displaying output, evaluating expressions
//
// Display standard output (`stdout`) and standard error (`stderr`)
//
// NOTE: stdout and stderr are separate streams, so they do not necessarily appear in execution order

// +
// Print to stdout
println!("Hello, stdout!");

/**
 * Print to stderr
 * stdout and stderr are separate streams, so which one appears first is unspecified
 */
eprintln!("Hello, stderr!");
// -

// In Jupyter, the result of the last evaluated expression is shown in the output cell
//
// However, appending a semicolon makes it a statement (`Statement`) instead of an expression (`Expression`), so nothing is returned (and nothing appears in the output cell)

// Return a formatted string (no semicolon: evaluated as an expression)
format!("Hello {}!", "world")

// Return a formatted string (with semicolon: executed as a statement)
format!("Hello {}!", "world");

// ## About comments
//
// As already seen in the code above, Rust uses `//...` for single-line comments and `/* ... */` for multi-line comments
//
// Comment text is never interpreted by the program, so you can write it freely

// ## Assigning and using variables
//
// The next example assigns a string to a variable called `message`, manipulates it in another cell, and finally displays the string held by the variable
//
// In Rust, the declaration `let <name> = <value>` declares a variable and assigns a value, but variables are immutable by default
//
// To modify a variable or re-assign another value, declare it with `let mut <name>` to make it mutable

/**
 * Declare the message variable as mutable and assign the string "Hello "
 * String::from("Hello ") creates an owned String here; passing a bare "Hello " would instead yield a string reference (&str)
 */
let mut message = String::from("Hello ");

// Append the string "world!" to the message variable
message.push_str("world!");

// Inspect message (no semicolon: evaluated as an expression => shown in the output cell)
message

/**
 * If a bare "Hello " is assigned instead, the push_str() method raises an error
 * In that case message only holds a reference (&str) into the memory holding the string "Hello ", so string operations cannot be performed on it
 */
let mut message = "Hello ";
message.push_str("world!");
message

// ## Defining and re-defining functions
//
// In EvCxR, after defining a function you can re-define it when you want to change its contents
//
// Rust functions are defined as `fn <name>([params]) [-> return type] {<body>}`; by default a function is private and cannot be called from outside its scope
//
// To expose a function publicly, use the `pub` keyword: `pub fn <name>(...) {...}`

// +
/// Function returning Fibonacci numbers (deliberately wrong)
pub fn fib(x: i32) -> i32 {
    // A Rust function returns the result of its last evaluated expression
    if x <= 2 {0} else {fib(x - 2) + fib(x - 1)}
}

// Apply fib to the sequence 1..13 and collect into an array (Vec)
(1..13).map(fib).collect::<Vec<i32>>()

// +
/**
 * The result above was wrong, so fix (re-define) the fib function
 */

/// Function returning Fibonacci numbers
pub fn fib(x: i32) -> i32 {
    if x <= 2 {1} else {fib(x - 2) + fib(x - 1)}
}

(1..13).map(fib).collect::<Vec<i32>>()
// -

// ## Loading external crates
//
// A **crate** is Rust's unit of compilation
//
// Think of it as something like a library or module in other languages
//
// Normally external crates must be installed with the **Cargo** package manager, but in EvCxR the magic command `:dep <crate>` makes an external crate callable
//
// However, no expression, statement, comment or similar may precede the magic command (otherwise the `:dep` magic command is interpreted as a Rust expression and fails)

// +
// :dep base64 = "0.10.1"

// The magic command above makes the base64: 0.10.1 crate usable
base64::encode(&vec![1, 2, 3, 4])
// -

// ## Customising Jupyter display
//
// By implementing the `evcxr_display` method you can customise how the output cell displays a value
//
// In the example below, an `evcxr_display` method is defined on a `Matrix` struct so the output cell can show a `<table>` HTML element

// +
// Use the std::fmt::Debug trait
use std::fmt::Debug;

// Define the Matrix struct
pub struct Matrix<T> { pub values: Vec<T>, pub row_size: usize }

/**
 * Define shared methods (a trait impl) for the Matrix struct
 * Rust is not an object-oriented language and has no classes, but defining shared behaviour (traits) for a given type allows a flexible, OO-flavoured design
 */
impl<T: Debug> Matrix<T> {
    /// evcxr_display: customise rendering in the Jupyter output cell
    pub fn evcxr_display(&self) {
        let mut html = String::from("<table>");
        for r in 0..(self.values.len() / self.row_size) {
            html.push_str("<tr>");
            for c in 0..self.row_size {
                html.push_str("<td>");
                html.push_str(&format!("{:?}", self.values[r * self.row_size + c]));
                html.push_str("</td>");
            }
            html.push_str("</tr>");
        }
        html.push_str("</table>");
        /**
         * Emitting output in the following form renders it as HTML in Jupyter
         * EVCXR_BEGIN_CONTENT text/html
         * ...(HTML content)
         * EVCXR_END_CONTENT
         */
        println!("EVCXR_BEGIN_CONTENT text/html\n{}\nEVCXR_END_CONTENT", html);
    }
}

// +
let m = Matrix { values: vec![1, 2, 3, 4, 5, 6, 7, 8, 9], row_size: 3 };
m
// -

// Similarly, add-on crates such as `evcxr_image` can be used to display images
//
// This makes RGB and grayscale images displayable in Jupyter
//
// NOTE: the crate versions used must match the versions used by `evcxr_image` (if they differ the types are effectively distinct and the image is not displayed)

// +
// :dep image = "0.23"
// :dep evcxr_image = "1.1"
use evcxr_image::ImageDisplay;

// Create a 256 x 256 image from a function
// In Rust, `|[params]| {<expr>}` defines a closure (anonymous function)
image::ImageBuffer::from_fn(256, 256, |x, y| {
    if (x as i32 - y as i32).abs() < 3 {
        image::Rgb([0, 0, 255])
    } else {
        image::Rgb([0, 0, 0])
    }
})
// -

// ## Listing defined variables
//
// The `:vars` magic command lists the variables defined so far

// :vars

// The other available magic commands can be listed with `:help`

// :help
01_get-started/01_jupyter.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // # Load MXNet model // // In this tutorial, you learn how to load an existing MXNet model and use it to run a prediction task. // // // ## Preparation // // This tutorial requires the installation of Java Kernel. For more information on installing the Java Kernel, see the [README](https://github.com/deepjavalibrary/djl/blob/master/jupyter/README.md). // + // // %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/ // %maven ai.djl:api:0.12.0 // %maven ai.djl:model-zoo:0.12.0 // %maven ai.djl.mxnet:mxnet-engine:0.12.0 // %maven ai.djl.mxnet:mxnet-model-zoo:0.12.0 // %maven org.slf4j:slf4j-api:1.7.26 // %maven org.slf4j:slf4j-simple:1.7.26 // %maven net.java.dev.jna:jna:5.3.0 // See https://github.com/deepjavalibrary/djl/blob/master/engines/mxnet/mxnet-engine/README.md // for more MXNet library selection options // %maven ai.djl.mxnet:mxnet-native-auto:1.8.0 // - import java.awt.image.*; import java.nio.file.*; import ai.djl.*; import ai.djl.inference.*; import ai.djl.ndarray.*; import ai.djl.modality.*; import ai.djl.modality.cv.*; import ai.djl.modality.cv.util.*; import ai.djl.modality.cv.transform.*; import ai.djl.modality.cv.translator.*; import ai.djl.translate.*; import ai.djl.training.util.*; import ai.djl.util.*; // ## Step 1: Prepare your MXNet model // // This tutorial assumes that you have a MXNet model trained using Python. 
A MXNet symbolic model usually contains the following files: // * Symbol file: {MODEL_NAME}-symbol.json - a json file that contains network information about the model // * Parameters file: {MODEL_NAME}-{EPOCH}.params - a binary file that stores the parameter weight and bias // * Synset file: synset.txt - an optional text file that stores classification classes labels // // This tutorial uses a pre-trained MXNet `resnet18_v1` model. // We use `DownloadUtils` for downloading files from internet. DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-symbol.json", "build/resnet/resnet18_v1-symbol.json", new ProgressBar()); DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-0000.params.gz", "build/resnet/resnet18_v1-0000.params", new ProgressBar()); DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/synset.txt", "build/resnet/synset.txt", new ProgressBar()); // ## Step 2: Load your model Path modelDir = Paths.get("build/resnet"); Model model = Model.newInstance("resnet"); model.load(modelDir, "resnet18_v1"); // ## Step 3: Create a `Translator` Pipeline pipeline = new Pipeline(); pipeline.add(new CenterCrop()).add(new Resize(224, 224)).add(new ToTensor()); Translator<Image, Classifications> translator = ImageClassificationTranslator.builder() .setPipeline(pipeline) .optSynsetArtifactName("synset.txt") .optApplySoftmax(true) .build(); // ## Step 4: Load image for classification var img = ImageFactory.getInstance().fromUrl("https://resources.djl.ai/images/kitten.jpg"); img.getWrappedImage() // ## Step 5: Run inference // + Predictor<Image, Classifications> predictor = model.newPredictor(translator); Classifications classifications = predictor.predict(img); classifications // - // ## Summary // // Now, you can load any MXNet symbolic model and run inference. 
// // You might also want to check out [load_pytorch_model.ipynb](https://github.com/deepjavalibrary/djl/blob/master/jupyter/load_pytorch_model.ipynb) which demonstrates loading a local model using the ModelZoo API.
jupyter/load_mxnet_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split df = pd.read_csv('2013-19.csv', encoding = "ISO-8859-1") df.head(5) df.tail(5) print("Shape of the dataset:",df.shape) df.dtypes df.info() df.describe() df.isnull().sum() #take a peek on what is in each colume print("Checking Unique values\n") print("Mine County : \n",df['Mine County'].unique(),"\n") print("Mine Status : \n",df['Mine Status'].unique(),"\n") print("Mine Type : \n",df['Mine Type'].unique(),"\n") print("Company Type : \n",df['Company Type'].unique(),"\n") print("Operating Company : \n",df['Operating Company'].unique(),"\n") print("Union Code : \n",df['Union Code'].unique(),"\n") print("Company Type : \n",df['Company Type'].unique(),"\n") # ## Replacing the mispelling of the value indepedent in Company Type df['Company Type'].replace(to_replace='Indepedent Producer Operator', value = 'Independent Producer Operator', inplace=True) df.dtypes # ## Renaming columns by underscore to remove the spaces between the Column names df.rename(columns=lambda x: x.replace(" ", "_"), inplace=True) df.head() df.hist(bins=50,figsize=(7,7)) correlation = df.corr()['Production_(short_tons)'] correlation correlation = df.corr() correlation fig = plt.subplots(figsize=(8,8)) sns.heatmap(correlation,annot=True) plt.scatter(df.Average_Employees, df.Labor_Hours) plt.xlabel('Total Number of Employees') plt.ylabel('Total Number of Hours worked') plt.savefig('Employee_vs_Labourhrs_Scatter.png', dpi=300, bbox_inches='tight') # ### We used basic scatter plot to show the relationship between Employees vs Number of Hours Work. 
Here we can see that as the # ### number of emplyees goes up, the total number of hours worked increases at the mines in a linear relationship. #using seaborn regression plot sns_plot=sns.regplot(df.Average_Employees, df.Labor_Hours, ci=95, n_boot=1000,scatter_kws={"color": "orange"}, line_kws={"color": "red"}) fig = sns_plot.get_figure() fig.savefig("AverageEmp_vs_labourhrs.png") # ### Here using seaborn we showed the same trend, but by fitting a line in the data which gave it a bootstrapping in the middle of the line. plt.scatter(df.Labor_Hours, df['Production_(short_tons)'], color='#ff7f0e') df['Production_(short_tons)'].hist(bins=10) #wanted to see the minimum value min(df['Production_(short_tons)']) # + #look at where the production shorttons is eqaul to zero df['Production_(short_tons)'] == 0 #where is the production equal to zero df[df['Production_(short_tons)'] == 0] # - # ### There are quite a number of mines that had no production values. Next trying to look at the mines that produced at least 1 tons or more. # ## Slicing the Data #where is the production is at least 1 tonne df = df[df['Production_(short_tons)'] >1 ] df.head() len(df) #May be predictive variable: A good predictor in how productive the mine is. 
df.Mine_Status.unique() df[['Mine_Status','Production_(short_tons)']].groupby('Mine_Status').mean() df[['Mine_Status','Production_(short_tons)']].groupby('Mine_Status').mean() # ## Predict the production of coal mines in 2015 # What caused high or lower production # # ### Feature Engineering for columns in df: print(columns) df.Union_Code.unique() # Of the above features on two are number: labour hours and Average_employee--Split into numerical features; and categorical variables pd.get_dummies(df["Company_Type"]).sample(50).head() from sklearn import preprocessing le = preprocessing.LabelEncoder() df['Coal_Supply_Region']= le.fit_transform(df['Coal_Supply_Region']) df['Mine_State']= le.fit_transform(df['Mine_State']) df['Mine_County']= le.fit_transform(df['Mine_County']) df['Mine_Status']= le.fit_transform(df['Mine_Status']) df['Mine_Type']= le.fit_transform(df['Mine_Type']) df['Company_Type']= le.fit_transform(df['Company_Type']) df['Operation_Type']= le.fit_transform(df['Operation_Type']) df.drop(['Union_Code'],axis='columns',inplace=True) df.head() # # Build our Model from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor # + #len(dummy_categoricals) # - X = df.iloc[:,[12,13]] y = df.iloc[:,14] feature_list = list(X.columns) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) model = RandomForestRegressor(n_estimators=100, oob_score=True) model.fit(X_train,y_train) model.score(X_test,y_test) y_pred=model.predict(X_test) from sklearn import model_selection kfold = model_selection.KFold(n_splits=10, random_state=7, shuffle=True) scoring = 'r2' results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring) print("R^2: %.3f (%.3f)" % (results.mean(), results.std())) scoring = 'neg_mean_squared_error' results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring) print("MSE: %.3f (%.3f)" % (results.mean(), results.std())) # ## Make Predictions on the Test Set 
scoring = 'neg_mean_absolute_error' results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring) print("MAE: %.3f (%.3f)" % (results.mean(), results.std())) # Calculate the absolute errors errors = abs(y_pred - y_test) # Print out the mean absolute error (mae) print('Mean Absolute Error:', round(np.mean(errors), 2)) # ## Determine Performance Metrics # Calculate mean absolute percentage error (MAPE) mape = 100 * (errors / y_test) # Calculate and display accuracy accuracy = 100 - np.mean(mape) print('Accuracy:', round(accuracy, 2), '%.') # ## Interpret Model and Report Results # ## Visualizing a Single Decision Tree # !pip install pydot # Import tools needed for visualization from sklearn.tree import export_graphviz import pydot # Pull out one tree from the forest tree = model.estimators_[5] # Import tools needed for visualization from sklearn.tree import export_graphviz import pydot # Pull out one tree from the forest tree = model.estimators_[5] # Export the image to a dot file export_graphviz(tree, out_file = 'tree.dot', feature_names = feature_list, rounded = True, precision = 1) # Use dot file to create a graph (graph, ) = pydot.graph_from_dot_file('tree.dot') # Write graph to a png file graph.write_png('tree.png') # Limit depth of tree to 3 levels rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3) rf_small.fit(X_train,y_train) # Extract the small tree tree_small = rf_small.estimators_[5] # Save the tree as a png image export_graphviz(tree_small, out_file = 'small_tree.dot', feature_names = feature_list, rounded = True, precision = 1) (graph, ) = pydot.graph_from_dot_file('small_tree.dot') graph.write_png('small_tree.png'); # Get numerical feature importances importances = list(model.feature_importances_) # List of tuples with variable and importance feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)] # Sort the feature importances by most important first 
# Sort the feature importances, most important first.
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)

# Print each feature together with its importance.
[print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances];

# Import matplotlib for plotting and use magic command for Jupyter Notebooks
import matplotlib.pyplot as plt
# %matplotlib inline

# Set the style
plt.style.use('fivethirtyeight')

# Bar chart of the feature importances.
x_values = list(range(len(importances)))
plt.bar(x_values, importances, orientation='vertical')
plt.xticks(x_values, feature_list, rotation='vertical')
plt.ylabel('Importance'); plt.xlabel('Variable'); plt.title('Variable Importances');

# +
import pickle

# Persist the trained model to disk; use context managers so the file
# handles are closed deterministically.
with open('model.pkl', 'wb') as model_file:
    pickle.dump(model, model_file)

# Reload it to confirm the round-trip works.
with open('model.pkl', 'rb') as model_file:
    modell = pickle.load(model_file)
# -

# Predict with the reloaded model so the round-trip is actually exercised.
# (Typo fixed: "short tinnes" -> "short tons".)
print("We need 28774319 (short tons) production in 2022 with 20 number of employees."
      "\nSo Our 20 employees have to work total :",
      modell.predict([[28774319, 20]]), "hrs in 2022")
.ipynb_checkpoints/laborempnovsprod-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: qchem # name: qchem # --- # # Measurement Grouping # # Since current quantum hardware permits only single-qubit projective measurement, only terms commuting within individual qubit's subspace can be measured together. These terms are said to be qubit-wise commuting (QWC). # # Thus, one can not measure the entire electronic Hamiltonian $\hat H$ at once, and instead needs to separate it into fragments. # $$\hat H = \sum_n \hat H_n$$ # where each $\hat H_n$ is a QWC fragment. # # Here, we use $H_2$ as an example. Notice below that all 3 fragments have the same terms on all qubits. from utility import * # Here we use $H_2$ as an example. To show differences between QWC and more advanced grouping, we didn't use the qubit-tappering techinique. # + tags=[] h2 = get_qubit_hamiltonian(mol='h2', geometry=1, basis='sto3g') qwc_list = get_qwc_group(h2) print('1st fragments: \n{}\n'.format(qwc_list[0])) print('2nd fragments:\n{}\n'.format(qwc_list[1])) print('3rd fragments:\n{}\n'.format(qwc_list[2])) # - # By applying extra unitaries, one may rotate more terms of $\hat H$ into a QWC fragment. # # Recall that in digital quantum computing, the expectation value of $\hat H_n$ given a trial wavefunction $|\psi\rangle$ is # $$ E_n =\ \langle\psi| \hat H_n | \psi\rangle$$ # Inserting unitary transformation $\hat U_n$ does not change the expectation value. # $$ E_n =\ \langle\psi| \hat U_n^\dagger \hat U_n \hat H_n \hat U_n^\dagger \hat U_n |\psi\rangle$$ # This nonetheless changes the trial wavefunction and the terms to be measured. 
# $$ |\psi\rangle \rightarrow \hat U_n |\psi\rangle = |\phi\rangle$$ # $$ \hat H_n \rightarrow \hat U_n \hat H_n \hat U_n^\dagger = \hat A_n$$ # The transformation of $|\psi \rangle$ can be done on the quantum computer, and the transformation of $\hat H_n$ is possible on the classical computer. # # Now, $\hat A_n$ needs to be a QWC fragment to be measurable on a quantum computer. # But $\hat H_n$ does not. # Instead, if we restrict $\hat U_n$ to be a clifford operation, the terms in $\hat H$ need only mutually commute. # # Here, we obtain measurable parts of $H_2$ by partitioning its terms into mutually commuting fragments. # + tags=[] comm_groups = get_commuting_group(h2) print('Number of mutually commuting parts: {}'.format(len(comm_groups))) print('The first commuting group') print(comm_groups[1]) # - # To see this fragment is indeed measurable, one can construct the corresponding unitary operator $\hat U_n$. # + tags=[] u = get_qwc_unitary(comm_groups[1]) print('This is unitary. U * U^+ = I ') print(u * u) # - # The qubit-wise commuting form of the first mutually commuting group # + tags=[] print(u * comm_groups[1] * u)
Project_2_VQE_Molecules/S4_Measurement.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (unityml)
#     language: python
#     name: python3
# ---

# # Section 4 - Computer vision-based machine learning
#
# ## Mini-project 4: Introduction to `SciNet` architecture ##
#
# ## Dr. <NAME> (<EMAIL>)
# ## High Energy Physics Group
# ## 523 Blackett Lab

# #### The markdown comments were added by <NAME> (<EMAIL>) ####

# * [Original repository](https://github.com/fd17/SciNet_PyTorch)
# * [Reference](https://arxiv.org/abs/1807.10300)

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from matplotlib import pyplot as plt
import torch.optim as optim
from utils import pendulum as solution

# +
# Generate training data for the damped pendulum.
#
# Each training example is N_SAMPLE evenly spaced observations of the
# pendulum position on [0, tmax], plus one extra "question" time t';
# the label is the pendulum position at t'.
tmax = 10      # observation window [0, tmax]
A0 = 1         # initial amplitude
delta0 = 0     # initial phase
m = 1          # pendulum mass

train_outputs = []
train_inputs = []

N_TRAIN = 100000   # number of training examples to generate
N_SAMPLE = 50      # observations per example

while len(train_inputs) < N_TRAIN:
    # Draw a random damping coefficient and spring constant per example.
    b = np.random.uniform(0.5, 1)
    k = np.random.uniform(5, 10)
    tprime = np.random.uniform(0, tmax)
    question = tprime
    answer = solution(tprime, A0, delta0, k, b, m)
    # `solution` returns None for parameter draws it cannot solve; skip them.
    if answer is None:
        continue

    t_arr = np.linspace(0, tmax, N_SAMPLE)
    x = solution(t_arr, A0, delta0, k, b, m)

    # Input vector = N_SAMPLE observations followed by the question time.
    combined_inputs = np.append(x, question)
    train_inputs.append(combined_inputs)
    train_outputs.append(answer)

train_inputs = np.array(train_inputs)
train_outputs = np.array(train_outputs)

np.save("training_data/inputs.npy", train_inputs)
np.save("training_data/outputs.npy", train_outputs)
# -

# * `train_inputs` are composed of 50 points of pendulum oscillation and a time from the start as a question.
# * `train_outputs` are the answers to the questions (i.e. deviation of the pendulum from the center)

# Plot example training inputs (every entry except the trailing question time).
plt.plot(train_inputs[0][0:-1], ".", label='#0')
plt.plot(train_inputs[1][0:-1], ".", label='#1')
plt.plot(train_inputs[2][0:-1], ".", label='#2')
plt.legend()
print("question:", train_inputs[0][-1])
print("answer:", train_outputs[0])
Generate_Trainingdata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lesson 3: # # # Introduction to GridAPPS-D Python # This tutorial provides a first look at the GridAPPS-D Python Library # # __Learning Objectives:__ # # At the end of the tutorial, the user should be able to # # * Explain how API calls can be wrapped in a generic programming language # * Import required Python libraries and modules # * Import GridAPPS-D utilities and describe how they are used # * Establish a connection to the GridAPPS-D platform # * Pass a simple API call and print the response received # ------ # ## What is GridAPPSD-Python? # # GridAPPSD-Python is a Python library that can wrap API calls and pass them to the various GridAPPS-D APIs through the GOSS Message Bus # # The library has numerous shortcuts to help you develop applications faster and interface them with other applications, services, and GridAPPS-D compatible software packages. # # # # -------- # ## Import Required Python Libraries # # The first step is to import the required libraries for your sample application: # # * _argparse_ -- This is the recommended command-line parsing module in Python.([Online Documentation](https://docs.python.org/3/howto/argparse.html)) # # * _json_ -- Encoder and decoder for JavaScript Object Notation (JSON). ([Online Documentation](https://docs.python.org/3/library/json.html)) # # * _logging_ -- This module defines classes and functions for event logging. ([Online Documentation](https://docs.python.org/3/library/logging.html) # # * _sys_ -- Python module for system specific parameters. ([Online Documentation](https://docs.python.org/3/library/sys.html)) # # * _time_ -- Time access and conversions. 
([Online Documentation](https://docs.python.org/3/library/time.html)) # # * _pytz_ -- Library to enable resolution of cross-platform time zones and ambiguous times. ([Online Documentation](https://pypi.org/project/pytz/) # # * _stomp_ -- Python client for accessing messaging servers using the Simple Text Oriented Messaging Protocol (STOMP). ([Online Documentation](https://pypi.org/project/stomp.py/)) # # # # import argparse import json import logging import sys import time import pytz import stomp # ----------- # ## Import Required GridAPPS-D Libraries # # The GridAPPS-Python API contains several libraries, which are used to query for information, subscribe to measurements, and publish commands to the GOSS message bus. These inlcude # # ___GridAPPSD___ -- does something # # # ___utils___ -- A set of utilities to assist with common commands, inlcuding # # # # <p style='text-align: left;'> Function Call | <p style='text-align: left;'> Usage # --------------|----------- # <p style='text-align: left;'> _utils.validate_gridappsd_uri()_ | <p style='text-align: left;'> Checks if GridAPPS-D is hosted on the correct port # <p style='text-align: left;' > _utils.get_gridappsd_address()_ | <p style='text-align: left;'> Returns the platform address such that response can be passed directly to a socket or the STOMP library # <p style='text-align: left;'> _utils.get_gridappsd_user()_ | <p style='text-align: left;'> Returns the login username # <p style='text-align: left;'> _utils.get_gridappsd_pass()_ | <p style='text-align: left;'> Returns the login password # <p style='text-align: left;'> _utils.get_gridappsd_application_id()_ | <p style='text-align: left;'> Only applicable if the environment variable 'GRIDAPPSD_APPLICATION_ID' has been set # <p style='text-align: left;'> utils.get_gridappsd_simulation_id()_ | <p style='text-align: left;'> retrieves the simulation id from the environment. 
# # from gridappsd import GridAPPSD, utils # ------- # # ## Establish a Connection to GridAPPS-D Platform # # The next step is to establish a connection with the GridAPPS-D platform so that API calls can be passed and processed. # # This can be done by 1) manually specifying the connection and port or 2) using the GridAPPS-D utils to automatically determine the port # ### Start Docker containers # _Open the Ubuntu terminal and start the GridAPPS-D Platform if it is not running already:_ # # `cd gridappsd-docker` # # ~/gridappsd-docker$ `./run.sh -t develop` # # _Once containers are running,_ # # gridappsd@[container]:/gridappsd$ `./run-gridappsd.sh` # ### Option 1: Manually specify connection parameters # # By default, the GridAPPS-D API communicates with the platform on port 61613. gridappsd_conn = GridAPPSD("('localhost', 61613)", username='system', password='<PASSWORD>') # ### Option 2: Use GridAPPS-D utils to determine connection # # The GridAPPS-D utils include several functions to automatically determine the location of the platform and security credentials for passing API commands gridappsd_conn = GridAPPSD(address=utils.get_gridappsd_address(), username=utils.get_gridappsd_user(), password=utils.get_gridappsd_pass()) # ----- # ## Pass a Simple API Call # # # # There are three generic API call routines: # # * _send(self, topic, message)_ -- # * _get_response(self, topic, message)_ -- # * _subscribe(self, topic, callback)_ -- # # For this example, we will use a very short query to request the MRIDs of the models available in the GridAPPS-D Platform. We will explain how to make various kinds of queries in the upcoming lessons on how to use each API. # The first step is to define the topic, which specifies the channel on which to communicate with the API. The specific topic definitions and their purposes will be discussed in greater detail in the lessons on each GridAPPS-D API. 
topic = "goss.gridappsd.process.request.data.powergridmodel" # Next, we need to create the message the will be passed. The message must be a valid JSON format, concatenated into a string. We can do this two ways. # If it is a short query, we can write it as a single line with concatenating quotes (`'query text'`). message = '{"requestType": "QUERY_MODEL_NAMES", "resultFormat": "JSON"}' # If it is a long query, we use three quotes at the beginning and end of the JSON query text to concatenate it into a string that can be passed to the API, like this: # # `message = """ # { # QUERY TEXT LINE 1 # QUERY TEXT LINE 2 # ... # QUERY TEXT LINE N # }"""` # message = """ { "requestType": "QUERY_MODEL_NAMES", "resultFormat": "JSON" }""" # The GridAPPSD-Python Library then wraps that string and passes it as a message to the API through the GOSS Message Bus. gridappsd_conn.get_response(topic, message) # ---- # ## Conclusion # # Congratulations! You have passed your first API call using the GridAPPSD-Python Library. # # You should now be able to establish a connection with the GridAPPS-D Platform and pass API calls.
.ipynb_checkpoints/Lesson 1.3. Intro to GridAPPS-Python-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="339cuvjJcBVW"
# Connect to google drive

# + id="VaXOTQQkV8zG"
from google.colab import drive
drive.mount("/content/gdrive")

# + [markdown] id="ovxEfbBZdJBV"
# **DOWNLOAD DATA**
#
# From the link download "Repositories with more than 50 stars part 2" (56.8 GB)
#
# https://jetbrains.team/p/ccrm/repositories/fl-dataset/files/docs/README.md#download

# + [markdown] id="E5yDT_VidTp6"
# We only use Java files:
#
# - You can only keep "dataset-open-50-more-2/dataset/v3/languages/Java" which is around 13 GB
# - Extract the java files in colab local (reading through from google drive takes more time)

# + id="WoNF-NOJUwyr"
# !unzip "/content/gdrive/My Drive/Java.zip" -d "javadata"

dataDir = 'javadata/Java/.java'

# + [markdown] id="LMTAadMcb_iy"
# ***IMPORT PACKAGES***

# + id="j3c7vz75Y9Nf"
# !pip3 install tree_sitter
# !git clone https://github.com/tree-sitter/tree-sitter-java

# + id="m25Jc8GGBHHk"
from tree_sitter import Language, Parser
import os
from tqdm import tqdm
import matplotlib.pyplot as plt

# + [markdown] id="dj5UrxrhhVNZ"
# Create python dependency for tree_sitter

# + id="4hE2Db50ZHX6"
Language.build_library(
    # Store the library in the `build` directory
    'build/my-languages.so',
    # Include one or more languages
    [
        'tree-sitter-java'
    ]
)

JAVA_LANGUAGE = Language('build/my-languages.so', 'java')
parser = Parser()
parser.set_language(JAVA_LANGUAGE)

# + [markdown] id="3FFLZGsMhkNu"
# # **Traversing AST tree of java files**

# + id="4TFtThVvZ2IP"
def traverse_tree(tree):
    """Yield every node of a tree-sitter *tree* in depth-first pre-order."""
    cursor = tree.walk()
    reached_root = False
    while not reached_root:
        yield cursor.node
        # Descend first; otherwise try the next sibling.
        if cursor.goto_first_child():
            continue
        if cursor.goto_next_sibling():
            continue
        # No child or sibling: climb up until a sibling exists,
        # or the root is reached (which ends the traversal).
        retracing = True
        while retracing:
            if not cursor.goto_parent():
                retracing = False
                reached_root = True
            if cursor.goto_next_sibling():
                retracing = False


# + id="Rv8fCKwZbTQN"
# For every Java file, record (total line count, number of comment nodes).
content = []
processed = 0
for root, dirs, files in tqdm(os.walk(dataDir), position=0):
    for entry in files:
        file_name = os.path.join(root, entry)
        if not os.path.isfile(file_name):
            continue
        try:
            with open(file_name, mode='r') as src:
                lines = src.readlines()
            number_of_lines = len(lines)
            source_text = ''.join(lines)
            number_of_comment = 0
            ast = parser.parse(bytes(source_text, "utf8"))
            for node in traverse_tree(ast):
                if node.type == "comment":
                    number_of_comment += 1
            content.append((number_of_lines, number_of_comment))
            processed += 1
            print(processed)
        except Exception:
            # Skip unreadable or unparseable files (bad encoding, binary, ...).
            pass

# + [markdown] id="e-h9DHCLnGxg"
# # **Remove outliers**
#
# From around 1.6 million files, only a few thousand have more than 1K comments
# or are bigger than 5K lines, so we can safely ignore them as follows:

# + id="t6pzgepNfPoh"
# Total number of java files:
file_count = len(content)
print("Total number of Java Files: " + str(file_count))

# Drop outliers: files longer than 5000 lines or with 1000+ comments.
content_filtered = [x for x in content if x[0] <= 5000 and x[1] < 1000]

# + [markdown] id="Ns1jhfwu1R6x"
# # **GRAPHS PLOTTING**

# + [markdown] id="bjnqgMOA1c14"
# Graph of how many files exist per specific number-of-comments range

# + id="C5IZLFiQctxd"
comment_count_per_file_list = [x[1] for x in content_filtered]
bins = [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
plt.hist(comment_count_per_file_list, bins, histtype='bar', rwidth=0.7)
plt.xlabel('Number of comments')
plt.ylabel('Number of files')
plt.title("Count of files given number of comments")
plt.savefig("a.png")
### (1e6 = 10^6) ###
# + id="n_UCF6n6imfj"
# Files with zero comments vs. files with at least one comment.
nocom = len([x for x in comment_count_per_file_list if x == 0])
com = len([x for x in comment_count_per_file_list if x != 0])
bins = [nocom, com]
plt.bar(["number of files without any comment",
         "number of files with at least 1 comments"],
        bins, width=0.4)
plt.savefig('b.png')

# + id="LiC3P4P4cnyl"
# Histogram of the percentage of comment lines per file.
percent = lambda part, whole: float(part) * 100 / float(whole)

percents_arr = []
size_files = []
for size, number_comment in content_filtered:
    # Guard against zero-length files explicitly instead of silently
    # swallowing the ZeroDivisionError with a bare `except: pass`.
    if size > 0:
        size_files.append(size)
        percents_arr.append(percent(number_comment, size))

plt.xlabel('% of comments in file')
plt.ylabel('Count')
plt.title("")
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
plt.hist(percents_arr, bins, histtype='bar', rwidth=0.7)
plt.savefig('c.png')

# + id="3sxUsfDQiU9w"
from pyparsing import ParseExpression
import plotly.express as px
# #! pip install -U kaleido

# Scatter: file length vs. comment count, for files with at least one comment.
comment_count_per_file_list = []
size_files = []
for size, number_comment in content_filtered:
    if number_comment > 0:
        size_files.append(size)
        comment_count_per_file_list.append(number_comment)

fig = px.scatter(x=size_files, y=comment_count_per_file_list,
                 width=1000, height=800,
                 labels={'x': 'number of line in file',
                         'y': 'number of comment in the file'})
fig.show()

# + [markdown] id="zRJhhW-CL0bF"
# ## **THIS APPROACH IS ONLY AN APPROXIMATION FOR COUNTING COMMENTS IN A GIVEN CHUNK**
#
# - Create code chunks based on empty lines
#
# - If a line contains a comment special character, assume it is a comment:
#   this may count some non-comment lines as comments (such as URL links
#   containing "//"), but it is a fast approximation.

# + id="RDkubXQUQfin"
result = []
processed = 0


def averageLen(lst):
    """Return the mean length (in lines) of the chunks in *lst*, 0 if empty."""
    lengths = [len(i) for i in lst]
    return 0 if len(lengths) == 0 else float(sum(lengths)) / len(lengths)


for root, dirs, files in tqdm(os.walk(dataDir), position=0):
    for entry in files:
        file_name = os.path.join(root, entry)
        if not os.path.isfile(file_name):
            continue
        try:
            chunks_list = []
            chunks = []
            comment_count = 0
            comment = False
            with open(file_name, mode='r') as src:
                lines = src.readlines()
            for line in lines:
                if not line.split():
                    # Blank line: the current chunk ends here.
                    chunks_list.append(chunks)
                    chunks = []
                    if comment:
                        comment_count += 1
                        comment = False
                else:
                    chunks.append(line)
                    # Heuristic: a chunk containing "//" or "/*" is commented.
                    if "//" in line or '/*' in line:
                        comment = True
            processed += 1
            print(processed)
            result.append((file_name, len(chunks_list), comment_count,
                           averageLen(chunks_list)))
        except Exception:
            # Skip unreadable files.
            pass

# + id="PbYe9mfRQnF-"
# Scatter: number of chunks vs. number of commented chunks per file.
result_without_filename = [(x[1], x[2]) for x in result]
x = [line[0] for line in result_without_filename]
y = [line[1] for line in result_without_filename]
fig = px.scatter(x=x, y=y, width=1000, height=800,
                 labels={'x': 'number of chunks in file',
                         'y': 'number of commented chunks'})
fig.show()

# + id="g0zVUD0g-bGv"
# Histogram of the percentage of commented chunks per file.
percents_arr = []
size_files = []
for count_chunks, count_comments in result_without_filename:
    size_files.append(count_chunks)
    if count_chunks > 0:
        percents_arr.append(percent(count_comments, count_chunks))

plt.xlabel('Percentage of commented chunks in files')
plt.ylabel('Count')
plt.title("")
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
plt.hist(percents_arr, bins, histtype='bar', rwidth=0.7)
plt.show()

# + id="Z-9jQ0Ze_k5O"
# Histogram of the average chunk length per file.
avg_chunk_lengths = [x[3] for x in result]
plt.xlabel('Average length of chunks in files')
plt.ylabel('Count')
plt.title("")
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
plt.hist(avg_chunk_lengths, bins, histtype='bar', rwidth=0.7)
plt.show()
java-analysis/analysis_java_files.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="mmD0cqW2YKAh" # Initializing # + id="PNrSvJM1QACg" # !git clone https://github.com/qiuqiangkong/audioset_tagging_cnn.git # %cd audioset_tagging_cnn # + [markdown] id="qZmZblGxOatR" # Install required packages # + id="oXF6WP0OXB-8" # !pip install torch # !pip install torchlibrosa # + [markdown] id="uofx7uOWhwnu" # Download the dataset # + id="5ejNSipKeOjG" # %env DATASET_DIR=./datasets/audioset201906 # %env WORKSPACE=./workspaces/audioset_tagging # !mkdir -p $DATASET_DIR"/metadata" # !wget -O $DATASET_DIR"/metadata/eval_segments.csv" http://storage.googleapis.com/us_audioset/youtube_corpus/v1/csv/eval_segments.csv # !wget -O $DATASET_DIR"/metadata/balanced_train_segments.csv" http://storage.googleapis.com/us_audioset/youtube_corpus/v1/csv/balanced_train_segments.csv # !wget -O $DATASET_DIR"/metadata/class_labels_indices.csv" http://storage.googleapis.com/us_audioset/youtube_corpus/v1/csv/class_labels_indices.csv # !wget -O $DATASET_DIR"/metadata/qa_true_counts.csv" http://storage.googleapis.com/us_audioset/youtube_corpus/v1/qa/qa_true_counts.csv # + [markdown] id="qvSmVuxPOCMt" # Download and index balanced training dataset # # + id="why5QWEyETQt" # !python3 utils/dataset.py download_wavs --csv_path=$DATASET_DIR"/metadata/eval_segments.csv" --audios_dir=$DATASET_DIR"/audios/eval_segments" # !python3 utils/dataset.py pack_waveforms_to_hdf5 --audios_dir="/content/audioset_tagging_cnn/datasets/audioset201906/audios/eval_segments" --csv_path="/content/audioset_tagging_cnn/datasets/audioset201906/metadata/eval_segments.csv" --waveforms_hdf5_path=$WORKSPACE"/hdf5s/waveforms/eval.h5" --mini_data # !python3 utils/create_indexes.py create_indexes --waveforms_hdf5_path=$WORKSPACE"/hdf5s/waveforms/eval.h5" --indexes_hdf5_path=$WORKSPACE"/hdf5s/indexes/eval.h5" 
# + [markdown] id="wflwDeY0OGVz" # # + id="40FsJLS2EwVw" # !python3 utils/dataset.py download_wavs --csv_path=$DATASET_DIR"/metadata/balanced_train_segments.csv" --audios_dir=$DATASET_DIR"/audios/balanced_train_segments" # !python3 utils/dataset.py pack_waveforms_to_hdf5 --audios_dir="/content/audioset_tagging_cnn/datasets/audioset201906/audios/balanced_train_segments" --csv_path="/content/audioset_tagging_cnn/datasets/audioset201906/metadata/balanced_train_segments.csv" --waveforms_hdf5_path=$WORKSPACE"/hdf5s/waveforms/balanced_train.h5" # !python3 utils/create_indexes.py create_indexes --waveforms_hdf5_path=$WORKSPACE"/hdf5s/waveforms/balanced_train.h5" --indexes_hdf5_path=$WORKSPACE"/hdf5s/indexes/balanced_train.h5" # + [markdown] id="J9AwluN6OJjr" # Train the model using balanced training dataset # + id="hSx9xsMFiFEr" # %env WORKSPACE=./workspaces/audioset_tagging # %env CUDA_VISIBLE_DEVICES=0 # !python3 pytorch/main.py train --workspace=$WORKSPACE --data_type='balanced_train' --window_size=1024 --hop_size=320 --mel_bins=64 --fmin=50 --fmax=14000 --model_type='Cnn14' --loss_type='clip_bce' --balanced='balanced' --augmentation='mixup' --batch_size=32 --learning_rate=1e-3 --resume_iteration=0 --early_stop=1000000 --cuda
AudiosetTaggingCnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Introduction to Engneering for Computer Science # Every Thursday Class 6-7 from 13:30 # # ### Dr. <NAME> (github profile: fangli-ying.github.io/) # #### Department of Computer Science # #### East China University of Science and Technology # #### Email:<EMAIL> # + [markdown] slideshow={"slide_type": "slide"} # ### Class 1: A Brief Introduction to Artificial Intelligence and its Applications to Engineering Problems # # 2.24 Thursday 6-7 # + [markdown] slideshow={"slide_type": "slide"} # ## Before Starting: A brief self-introduction # # # + [markdown] slideshow={"slide_type": "fragment"} # #### Dr <NAME>(应方立) # - Courses:Image Processing, A.I., Information Security, Digital Circuits and Logic Desgin(En), Intro of CS # - Research:Computer Vision, Few-shot Learning, Computational Biology # - PhD in National University of Ireland Maynooth # - Bsc in Zhejiang University # - **Github Profile: fangli-ying.github.io** # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./img/bio.jpg" width=150 height=150> # + [markdown] slideshow={"slide_type": "slide"} # ## Teaching in Computer Science # # - 2014-2016, Information Security for Postgrads, Advances in Computer Science for Undergrads, Artifactual Intelligence for Adult Education # - 2016-2018, Information Security for Postgrads, Image Processing and Computer Vision for Undergrads, processing programming language for Art student, Multi-Media Technology and Introduction to logic design for Adult Education, Practice of Computer Science for 1st Year students, Artifactual Intelligence for cross model students # - 2018-present, Image Processing and Computer Vision for Undergrads, Practice of Computer Science for 1st Year students, Introduction to logic 
design for international student in 2021, Summer School Seminar for the international students # + [markdown] slideshow={"slide_type": "slide"} # ## Text Book download link is in the recourse folder in "学习通" app,but you won't use it too much. Because most of our course will be comprise of several practical workshops # # + [markdown] slideshow={"slide_type": "fragment"} # # <img src="./img/front.PNG" width=200 height=200> # # + [markdown] slideshow={"slide_type": "slide"} # ## Your grade will be comprised of: # # - Homework/quiz/attendance 30% # - Exam (Midterm and Final) 70% # # # + [markdown] slideshow={"slide_type": "slide"} # ## What is this course all about? # ### Introduction to Engineering for CS students. # # - the fundamental vocabulary and concepts of Artificial Intelligence and machine learning # - to see how A.I. and M.L.work and show some examples of its applications to the engineering problems. # - to take a deeper dive into the details of several of the most important machine learning approaches, and develop an intuition into how they work and when and where they are applicable for the engineering problems raised in industry. # # ### What will you learn? # - Understanding engineering issues in data science and design the practical solutions using machine learning, # - also learning how to evaluate our work in engineering perspective # + [markdown] slideshow={"slide_type": "slide"} # # Course Mission # # - This course is intended to provide you with an understanding of engineering issues in the field of Computer Science, especially for the issues raised in data science # # - Provide a number of practical A.I. and M.L. 
skills to work like a data scientist, and design potential solutions to the engineering problems in both the academic and industry perspectives # # # # + [markdown] slideshow={"slide_type": "fragment"} # - Programming Language: Python Only # - Tools: Jupyter Notebook, Anaconda, Numpy, matplotlib, SkLearn(ML), Pytorch(DL) # - All the slides are opensourced and will send over the platform # # + [markdown] slideshow={"slide_type": "slide"} # <table> # <td> # <img src="./img/MLtools.jpg" style="width:500;height:500px;"> <br> # </td> # <td> # <img src="./img/an.PNG" style="width:500;height:500px;"> <br> # </td> # </table> # # + [markdown] slideshow={"slide_type": "slide"} # ## Data Science VS Machine Learning VS Artificial Intelligence # # # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./img/rl.PNG" width=500 height=300> # + [markdown] slideshow={"slide_type": "slide"} # <img src="./img/dsmlai.PNG" width=800 height=300> # + [markdown] slideshow={"slide_type": "slide"} # ‘**What is Data Science?**’, # - Data science is a broad field of study pertaining to data systems and processes, aimed at maintaining data sets and deriving meaning out of them. # - Data scientists use a combination of tools, applications, principles and algorithms to make sense of random data clusters. Since almost all kinds of organizations today are generating exponential amounts of data around the world, it becomes difficult to monitor and store this data. # - Data science focuses on data modelling and data warehousing to track the ever-growing data set. The information extracted through data science applications are used to guide business processes and reach organisational goals. # + [markdown] slideshow={"slide_type": "slide"} # # [Video:What is Data Science)?](https://youtu.be/Nrfht_c3T7w) # + [markdown] slideshow={"slide_type": "slide"} # ### Data science uses a wide array of data-oriented technologies including SQL, Python, R, and Hadoop, etc. 
However, it also makes extensive use of statistical analysis, data visualization, distributed architecture, and more to extract meaning out of sets of data. # # ### Data scientists are skilled professionals whose expertise allows them to quickly switch roles at any point in the life cycle of data science projects. They can work with Artificial Intelligence and machine learning with equal ease. In fact, data scientists need machine learning skills for specific requirements # + [markdown] slideshow={"slide_type": "slide"} # # What is artificial intelligence (AI)? # + [markdown] slideshow={"slide_type": "slide"} # # [Video:What is AI (Artificial Intelligence)?](https://youtu.be/nASDYRkbQIY) # + [markdown] slideshow={"slide_type": "slide"} # ## Artificial intelligence is the simulation of human intelligence processes by machines, especially computer systems. Specific applications of AI include expert systems, natural language processing, speech recognition and machine vision # # - **How does AI work?** # In general, AI systems work by ingesting large amounts of labeled training data, analyzing the data for correlations and patterns, and using these patterns to make predictions about future states. In this way, a chatbot that is fed examples of text chats can learn to produce lifelike exchanges with people, or an image recognition tool can learn to identify and describe objects in images by reviewing millions of examples. Often what they refer to as AI is simply one component of AI, such as **machine learning**. AI requires a foundation of specialized hardware and software for writing and training machine learning algorithms # . # + [markdown] slideshow={"slide_type": "slide"} # # [Video:What is AI (Artificial Intelligence)?](https://youtu.be/0oRVLf16CMU) # + [markdown] slideshow={"slide_type": "slide"} # # Machine Learning # - In many ways, machine learning is the primary means by which data science manifests itself to the broader world. 
Machine learning is where these computational and algorithmic skills of data science meet the statistical thinking of data science, and the result is a collection of approaches to inference and data exploration that are not about effective theory so much as effective computation. # # - The term "**machine learning**" is sometimes thrown around as if it is some kind of magic pill: apply machine learning to your data, and all your problems will be solved! As you might expect, the reality is rarely this simple. # - While these methods can be incredibly powerful, to be effective they must be approached with a firm grasp of the strengths and weaknesses of each method, as well as a grasp of general concepts such as bias and variance, overfitting and underfitting, and more. # + [markdown] slideshow={"slide_type": "slide"} # ### Machine learning involves observing and studying data or experiences to identify patterns and set up a reasoning system based on the findings. The various components of machine learning include: # # - **Supervised machine learning**: This model uses historical data to understand behaviour and formulate future forecasts. This kind of learning algorithms analyse any given training data set to draw inferences which can be applied to output values. Supervised learning parameters are crucial in mapping the input-output pair. # - **Unsupervised machine learning**: This type of ML algorithm does not use any classified or labelled parameters. It focuses on discovering hidden structures from unlabeled data to help systems infer a function properly. Algorithms with unsupervised learning can use both generative learning models and a retrieval-based approach. # - **Semi-supervised machine learning**: This model combines elements of supervised and unsupervised learning yet isn’t either of them. It works by using both labelled and unlabeled data to improve learning accuracy. 
Semi-supervised learning can be a cost-effective solution when labelling data turns out to be expensive. # - **Reinforcement machine learnin**g: This kind of learning doesn’t use any answer key to guide the execution of any function. The lack of training data results in learning from experience. The process of trial and error finally leads to long-term rewards. # + [markdown] slideshow={"slide_type": "slide"} # [Video:Machine Learning & Artificial Intelligence](https://www.youtube.com/watch?v=z-EtmaFJieY) # + [markdown] slideshow={"slide_type": "slide"} # ## Qualitative Examples of Machine Learning Applications # # To make these ideas more concrete, let's take a look at a few very simple examples of a machine learning task. # These examples are meant to give an intuitive, non-quantitative overview of the types of machine learning tasks we will be looking at in this chapter. # In later sections, we will go into more depth regarding the particular models and how they are used. # For a preview of these more technical aspects, you can find the Python source that generates the following figures # + [markdown] slideshow={"slide_type": "slide"} # Classification: Predicting discrete labels # We will first take a look at a simple classification task, in which you are given a set of labeled points and want to use these to classify some unlabeled points. # # Imagine that we have the data shown in this figure: # + [markdown] slideshow={"slide_type": "slide"} # <img src="./figures/05.01-classification-1.png" > # + [markdown] slideshow={"slide_type": "slide"} # Here we have two-dimensional data: that is, we have two features for each point, represented by the (x,y) positions of the points on the plane. In addition, we have one of two class labels for each point, here represented by the colors of the points. From these features and labels, we would like to create a model that will let us decide whether a new point should be labeled "blue" or "red." 
# # There are a number of possible models for such a classification task, but here we will use an extremely simple one. We will make the assumption that the two groups can be separated by drawing a straight line through the plane between them, such that points on each side of the line fall in the same group. Here the model is a quantitative version of the statement "a straight line separates the classes", while the model parameters are the particular numbers describing the location and orientation of that line for our data. The optimal values for these model parameters are learned from the data (this is the "learning" in machine learning), which is often called training the model. # # The following figure shows a visual representation of what the trained model looks like for this data: # + [markdown] slideshow={"slide_type": "slide"} # Here we have two-dimensional data: that is, we have two *features* for each point, represented by the *(x,y)* positions of the points on the plane. # In addition, we have one of two *class labels* for each point, here represented by the colors of the points. # From these features and labels, we would like to create a model that will let us decide whether a new point should be labeled "blue" or "red." # # There are a number of possible models for such a classification task, but here we will use an extremely simple one. We will make the assumption that the two groups can be separated by drawing a straight line through the plane between them, such that points on each side of the line fall in the same group. # Here the *model* is a quantitative version of the statement "a straight line separates the classes", while the *model parameters* are the particular numbers describing the location and orientation of that line for our data. # The optimal values for these model parameters are learned from the data (this is the "learning" in machine learning), which is often called *training the model*. 
# # The following figure shows a visual representation of what the trained model looks like for this data: # + [markdown] slideshow={"slide_type": "slide"} # <img src="./figures/05.01-classification-2.png" > # + [markdown] slideshow={"slide_type": "slide"} # Now that this model has been trained, it can be generalized to new, unlabeled data. # In other words, we can take a new set of data, draw this model line through it, and assign labels to the new points based on this model. # This stage is usually called *prediction*. See the following figure: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-classification-3.png" > # + [markdown] slideshow={"slide_type": "slide"} # This is the basic idea of a classification task in machine learning, where "classification" indicates that the data has discrete class labels. # At first glance this may look fairly trivial: it would be relatively easy to simply look at this data and draw such a discriminatory line to accomplish this classification. # A benefit of the machine learning approach, however, is that it can generalize to much larger datasets in many more dimensions. # # For example, this is similar to the task of automated spam detection for email; in this case, we might use the following features and labels: # # - *feature 1*, *feature 2*, etc. $\to$ normalized counts of important words or phrases ("Viagra", "Nigerian prince", etc.) # - *label* $\to$ "spam" or "not spam" # # For the training set, these labels might be determined by individual inspection of a small representative sample of emails; for the remaining emails, the label would be determined using the model. # For a suitably trained classification algorithm with enough well-constructed features (typically thousands or millions of words or phrases), this type of approach can be very effective. 
# We will see an example of such text-based classification # + [markdown] slideshow={"slide_type": "slide"} # ### Regression: Predicting continuous labels # # In contrast with the discrete labels of a classification algorithm, we will next look at a simple *regression* task in which the labels are continuous quantities. # # Consider the data shown in the following figure, which consists of a set of points each with a continuous label: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-regression-1.png" > # + [markdown] slideshow={"slide_type": "slide"} # As with the classification example, we have two-dimensional data: that is, there are two features describing each data point. # The color of each point represents the continuous label for that point. # # There are a number of possible regression models we might use for this type of data, but here we will use a simple linear regression to predict the points. # This simple linear regression model assumes that if we treat the label as a third spatial dimension, we can fit a plane to the data. # This is a higher-level generalization of the well-known problem of fitting a line to data with two coordinates. # # We can visualize this setup as shown in the following figure: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-regression-2.png" > # + [markdown] slideshow={"slide_type": "slide"} # Notice that the *feature 1-feature 2* plane here is the same as in the two-dimensional plot from before; in this case, however, we have represented the labels by both color and three-dimensional axis position. # From this view, it seems reasonable that fitting a plane through this three-dimensional data would allow us to predict the expected label for any set of input parameters. 
# Returning to the two-dimensional projection, when we fit such a plane we get the result shown in the following figure: # + [markdown] slideshow={"slide_type": "slide"} # This plane of fit gives us what we need to predict labels for new points. Visually, we find the results shown in the following figure: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-regression-4.png " > # + [markdown] slideshow={"slide_type": "slide"} # As with the classification example, this may seem rather trivial in a low number of dimensions. # But the power of these methods is that they can be straightforwardly applied and evaluated in the case of data with many, many features. # # For example, this is similar to the task of computing the distance to galaxies observed through a telescope—in this case, we might use the following features and labels: # # - *feature 1*, *feature 2*, etc. $\to$ brightness of each galaxy at one of several wave lengths or colors # - *label* $\to$ distance or redshift of the galaxy # # The distances for a small number of these galaxies might be determined through an independent set of (typically more expensive) observations. # Distances to remaining galaxies could then be estimated using a suitable regression model, without the need to employ the more expensive observation across the entire set. # In astronomy circles, this is known as the "photometric redshift" problem. # + [markdown] slideshow={"slide_type": "slide"} # ### Clustering: Inferring labels on unlabeled data # # The classification and regression illustrations we just looked at are examples of supervised learning algorithms, in which we are trying to build a model that will predict labels for new data. # Unsupervised learning involves models that describe data without reference to any known labels. # # One common case of unsupervised learning is "clustering," in which data is automatically assigned to some number of discrete groups. 
# For example, we might have some two-dimensional data like that shown in the following figure: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-clustering-1.png " > # + [markdown] slideshow={"slide_type": "slide"} # By eye, it is clear that each of these points is part of a distinct group. # Given this input, a clustering model will use the intrinsic structure of the data to determine which points are related. # Using the very fast and intuitive *k*-means algorithm , we find the clusters shown in the following figure: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-clustering-2.png" > # + [markdown] slideshow={"slide_type": "slide"} # *k*-means fits a model consisting of *k* cluster centers; the optimal centers are assumed to be those that minimize the distance of each point from its assigned center. # Again, this might seem like a trivial exercise in two dimensions, but as our data becomes larger and more complex, such clustering algorithms can be employed to extract useful information from the dataset. # + [markdown] slideshow={"slide_type": "slide"} # ### Dimensionality reduction: Inferring structure of unlabeled data # # Dimensionality reduction is another example of an unsupervised algorithm, in which labels or other information are inferred from the structure of the dataset itself. # Dimensionality reduction is a bit more abstract than the examples we looked at before, but generally it seeks to pull out some low-dimensional representation of data that in some way preserves relevant qualities of the full dataset. 
# Different dimensionality reduction routines measure these relevant qualities in different ways # # As an example of this, consider the data shown in the following figure: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-dimesionality-1.png " > # + [markdown] slideshow={"slide_type": "slide"} # Visually, it is clear that there is some structure in this data: it is drawn from a one-dimensional line that is arranged in a spiral within this two-dimensional space. # In a sense, you could say that this data is "intrinsically" only one dimensional, though this one-dimensional data is embedded in higher-dimensional space. # A suitable dimensionality reduction model in this case would be sensitive to this nonlinear embedded structure, and be able to pull out this lower-dimensionality representation. # # The following figure shows a visualization of the results of the Isomap algorithm, a manifold learning algorithm that does exactly this: # + [markdown] slideshow={"slide_type": "fragment"} # <img src="./figures/05.01-dimesionality-2.png " > # + [markdown] slideshow={"slide_type": "slide"} # Notice that the colors (which represent the extracted one-dimensional latent variable) change uniformly along the spiral, which indicates that the algorithm did in fact detect the structure we saw by eye. # As with the previous examples, the power of dimensionality reduction algorithms becomes clearer in higher-dimensional cases. # For example, we might wish to visualize important relationships within a dataset that has 100 or 1,000 features. # Visualizing 1,000-dimensional data is a challenge, and one way we can make this more manageable is to use a dimensionality reduction technique to reduce the data to two or three dimensions. # # + [markdown] slideshow={"slide_type": "slide"} # ## Summary # # Here we have seen a few simple examples of some of the basic types of machine learning approaches. 
# Needless to say, there are a number of important practical details that we have glossed over, but I hope this section was enough to give you a basic idea of what types of problems machine learning approaches can solve. # # In short, we saw the following: # # - *Supervised learning*: Models that can predict labels based on labeled training data # # - *Classification*: Models that predict labels as two or more discrete categories # - *Regression*: Models that predict continuous labels # # - *Unsupervised learning*: Models that identify structure in unlabeled data # # - *Clustering*: Models that detect and identify distinct groups in the data # - *Dimensionality reduction*: Models that detect and identify lower-dimensional structure in higher-dimensional data # # In the following sections we will go into much greater depth within these categories, and see some more interesting examples of where these concepts can be useful. # # All of the figures in the preceding discussion are generated based on actual machine learning computations; the code behind them can be found in my github resource # -
Classnotebook/Class1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # start

# Experiment prefix used for the output directory and checkpoint names.
PRFX='CvCls0730_1'
dbg = False
if dbg: dbgsz=500

# - https://www.kaggle.com/drhabib/starter-kernel-for-0-79/

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# !pip install ../input/efficientnetpytorch/efficientnet_pytorch-0.3.0-py3-none-any.whl
# -

# !nvidia-smi

# +
# Downloading: "http://storage.googleapis.com/public-models/efficientnet-b3-c8376fa2.pth" to /tmp/.cache/torch/checkpoints/efficientnet-b3-c8376fa2.pth
import os
# Pre-seed torch's checkpoint cache so EfficientNet weights load offline.
if not os.path.exists('/tmp/.cache/torch/checkpoints/'):
    os.makedirs('/tmp/.cache/torch/checkpoints/')
# !cp ../input/efficientnetpytorch/*.pth /tmp/.cache/torch/checkpoints/
# -

# # params

# +
p_o = f'../output/{PRFX}'
from pathlib import Path
Path(p_o).mkdir(exist_ok=True, parents=True)

SEED = 111
BS = 64
FP16 = True
PERC_VAL = 0.1
WD = 0.01

MODEL_NAME = 'efficientnet-b5'
from efficientnet_pytorch import EfficientNet
SZ = EfficientNet.get_image_size(MODEL_NAME)
for i in range(6):
    print(f'efficientnet-b{i} size', EfficientNet.get_image_size(f'efficientnet-b{i}'))
# NOTE(review): SZ from get_image_size is deliberately overridden to 224 below —
# the model is trained at a smaller resolution than its native size.
SZ = 224
print('SZ:', SZ)

from fastai.vision import *
params_tfms = {}
# -

# ## img proc

# +
use_open_yz = True

import cv2

def load_ben_color(fn)->Image:
    """Load an image from path `fn` and apply the preprocessing pipeline:
    BGR->RGB, margin crop, center crop to 4:3, resize to 640x480, grayscale
    (kept as 3-channel RGB so pretrained weights still apply)."""
    image = cv2.imread(fn)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # image = crop_image_from_gray(image)
    image, _ = crop_margin(image)
    image = center_crop(image)
    image = cv2.resize(image, (640, 480))#most common in test
    # image = cv2.resize(image, (SZ, SZ))
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    # image = cv2.addWeighted ( image,4, cv2.GaussianBlur( image , (0,0) , sigmaX=10) , -4 ,128)
    return image
# -

# > get_transforms(do_flip:bool=True, flip_vert:bool=False, max_rotate:float=10.0, max_zoom:float=1.1, max_lighting:float=0.2, max_warp:float=0.2, p_affine:float=0.75, p_lighting:float=0.75, xtra_tfms:Optional[Collection[Transform]]=None) → Collection[Transform]

from fastai.vision import *
params_tfms = dict(
    do_flip=True,
    flip_vert=True,
    max_rotate=360,
)

# > By default, the library resizes the image while keeping its original ratio so that the smaller size corresponds to the given size, then takes a crop (ResizeMethod.CROP). You can choose to resize the image while keeping its original ratio so that the bigger size corresponds to the given size, then take a pad (ResizeMethod.PAD). Another way is to just squish the image to the given size (ResizeMethod.SQUISH).

kwargs_tfms = dict(
    resize_method=ResizeMethod.SQUISH,
    padding_mode='zeros'
)

# # setup

from fastai import *
from fastai.vision import *
from fastai.callbacks import *

# ## image processing

# +
import cv2

def crop_margin(image, keep_less=0.83):
    """Crop the black margin around the retina.

    Thresholds the image, finds the largest contour and its min enclosing
    circle, then crops a square around the circle shrunk by `keep_less`.
    Returns (image, flag) where flag==1 means a crop was applied and
    flag==0 means the input is returned unchanged.
    """
    output = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret,gray = cv2.threshold(gray,10,255,cv2.THRESH_BINARY)
    contours,hierarchy = cv2.findContours(gray,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        #print('no contours!')
        flag = 0
        return image, flag
    cnt = max(contours, key=cv2.contourArea)
    ((x, y), r) = cv2.minEnclosingCircle(cnt)
    r = r*keep_less
    x = int(x); y = int(y); r = int(r)
    flag = 1
    #print(x,y,r)
    # Only crop when the detected circle is reasonably large; the index
    # arithmetic guards against the circle extending past the image edges.
    if r > 100:
        return output[0 + (y-r)*int(r<y):-1 + (y+r+1)*int(r<y),0 + (x-r)*int(r<x):-1 + (x+r+1)*int(r<x)], flag
    else:
        #print('none!')
        flag = 0
        return image,flag

def crop_image1(img,tol=7):
    """Crop `img` to the bounding box of pixels brighter than `tol`."""
    # img is image data
    # tol is tolerance
    mask = img>tol
    return img[np.ix_(mask.any(1),mask.any(0))]

def crop_image_from_gray(img,tol=7):
    """Crop dark borders using a grayscale mask; works for 2-D and 3-D images.

    If the mask would crop out everything (image too dark), the original
    image is returned unchanged.
    """
    if img.ndim ==2:
        mask = img>tol
        return img[np.ix_(mask.any(1),mask.any(0))]
    elif img.ndim==3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img>tol
        check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
        if (check_shape == 0): # image is too dark so that we crop out everything,
            return img # return original image
        else:
            img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
            img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
            img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
            # print(img1.shape,img2.shape,img3.shape)
            img = np.stack([img1,img2,img3],axis=-1)
            # print(img.shape)
        return img

# https://stackoverflow.com/questions/16646183/crop-an-image-in-the-centre-using-pil
def center_crop(img):
    """Center-crop `img` to a 480:640 (3:4) aspect ratio."""
    h0, w0 = 480, 640 #most common in test
    ratio = h0/w0 #most common in test
    height, width, _= img.shape
    new_width, new_height = width, math.ceil(width*ratio)
    width = img.shape[1]
    height = img.shape[0]
    # NOTE(review): new_width/new_height are always assigned above, so the
    # two `is None` branches below are dead code kept from the original recipe.
    if new_width is None:
        new_width = min(width, height)
    if new_height is None:
        new_height = min(width, height)
    left = int(np.ceil((width - new_width) / 2))
    right = width - int(np.floor((width - new_width) / 2))
    top = int(np.ceil((height - new_height) / 2))
    bottom = height - int(np.floor((height - new_height) / 2))
    if len(img.shape) == 2:
        center_cropped_img = img[top:bottom, left:right]
    else:
        center_cropped_img = img[top:bottom, left:right, ...]
    return center_cropped_img

def open_yz(fn, convert_mode, after_open)->Image:
    """fastai image opener that routes loading through load_ben_color."""
    image = load_ben_color(fn)
    return Image(pil2tensor(image, np.float32).div_(255))

# Monkey-patch fastai so every image in the pipeline is preprocessed.
if use_open_yz:
    vision.data.open_image = open_yz
# -

# ## QWK

# +
import scipy as sp
from sklearn.metrics import cohen_kappa_score

def quadratic_weighted_kappa(y1, y2):
    """Quadratic-weighted Cohen's kappa between two label vectors."""
    return cohen_kappa_score(y1, y2, weights='quadratic')

def qwk(y_pred, y):
    """fastai metric wrapper: argmax the class scores, then QWK.

    NOTE(review): device='cuda:0' is hard-coded — assumes single-GPU training.
    """
    return torch.tensor(
        # quadratic_weighted_kappa(torch.round(y_pred), y),
        quadratic_weighted_kappa(np.argmax(y_pred,1), y),
        device='cuda:0')
# -

# ## set seed

# +
def set_torch_seed(seed=SEED):
    """Seed python, numpy and torch (incl. CUDA) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        # torch.backends.cudnn.benchmark = False

set_torch_seed()
# -

# ## TTTA

# +
from fastai.core import *
from fastai.basic_data import *
from fastai.basic_train import *
from fastai.torch_core import *

def _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, num_pred:int=5) -> Iterator[List[Tensor]]:
    "Computes the outputs for several augmented inputs for TTA"
    dl = learn.dl(ds_type)
    ds = dl.dataset
    old = ds.tfms
    # Drop zoom from the train-time transforms for test-time augmentation.
    aug_tfms = [o for o in learn.data.train_ds.tfms if o.tfm !=zoom]
    try:
        pbar = master_bar(range(num_pred))
        for i in pbar:
            ds.tfms = aug_tfms
            yield get_preds(learn.model, dl, pbar=pbar)[0]
    finally:
        # Always restore the dataset's original transforms.
        ds.tfms = old

Learner.tta_only = _tta_only

def _TTA(learn:Learner, beta:float=0, ds_type:DatasetType=DatasetType.Valid, num_pred:int=5, with_loss:bool=False) -> Tensors:
    "Applies TTA to predict on `ds_type` dataset."
    preds,y = learn.get_preds(ds_type)
    all_preds = list(learn.tta_only(ds_type=ds_type, num_pred=num_pred))
    avg_preds = torch.stack(all_preds).mean(0)
    if beta is None:
        return preds,avg_preds,y
    else:
        # Blend plain predictions with the TTA average; beta=0 means TTA only.
        final_preds = preds*beta + avg_preds*(1-beta)
        if with_loss:
            with NoneReduceOnCPU(learn.loss_func) as lf:
                loss = lf(final_preds, y)
            return final_preds, y, loss
        return final_preds, y

Learner.TTA = _TTA
# -

# # preprocess

# ## prep

# +
# img2grd accumulates (filename, grade, source-tag) triples from all datasets.
img2grd = []

p = '../input/aptos2019-blindness-detection'
pp = Path(p)
train = pd.read_csv(pp/'train.csv')
test = pd.read_csv(pp/'test.csv')
len_blnd = len(train)
len_blnd_test = len(test)

img2grd_blnd = [(f'{p}/train_images/{o[0]}.png',o[1],'blnd') for o in train.values]

len_blnd, len_blnd_test
# -

img2grd += img2grd_blnd
display(len(img2grd))
cnt = Counter(o[1] for o in img2grd)
t2c_trn_has = dict(cnt)
display(cnt.most_common())
sm = sum(cnt.values())
display([(o[0], o[1]/sm) for o in cnt.most_common()])

# +
p = '../input/diabetic-retinopathy-detection'
pp = Path(p)
# (fix: the original read this csv twice in a row; one read suffices)
train=pd.read_csv(pp/'trainLabels.csv')
img2grd_diab_train=[(f'../input/diabetic-retinopathy-detection/train_images/{o[0]}.jpeg',o[1],'diab') for o in train.values]
img2grd += img2grd_diab_train
display(len(img2grd))
display(Counter(o[1] for o in img2grd).most_common())

test=pd.read_csv(pp/'retinopathy_solution.csv')
img2grd_diab_test=[(f'../input/diabetic-retinopathy-detection/test_images/{o[0]}.jpeg',o[1],'diab') for o in test.values]
img2grd += img2grd_diab_test
display(len(img2grd))
display(Counter(o[1] for o in img2grd).most_common())

# +
p = '../input/IDRID/B. Disease Grading'
pp = Path(p)
train=pd.read_csv(pp/'2. Groundtruths/a. IDRiD_Disease Grading_Training Labels.csv')
img2grd_idrid_train=[(f'../input/IDRID/B. Disease Grading/1. Original Images/a. Training Set/{o[0]}.jpg',o[1],'idrid') for o in train.values]
img2grd += img2grd_idrid_train
display(len(img2grd))
display(Counter(o[1] for o in img2grd).most_common())

test=pd.read_csv(pp/'2. Groundtruths/b. IDRiD_Disease Grading_Testing Labels.csv')
img2grd_idrid_test=[(f'../input/IDRID/B. Disease Grading/1. Original Images/b. Testing Set/{o[0]}.jpg',o[1],'idrid') for o in test.values]
img2grd += img2grd_idrid_test
display(len(img2grd))
display(Counter(o[1] for o in img2grd).most_common())
# -

df = pd.DataFrame(img2grd)
df.columns = ['fnm', 'target', 'src']
df = df.reset_index()
df.shape

if not np.all([Path(o[0]).exists() for o in img2grd]):
    print('Some files are missing!!!')

# ## df2use

df.target.value_counts()

# +
# Start from the two smaller, cleaner datasets...
df2use = df[df.src.isin(['blnd', 'idrid'])].copy()
df2use.target.value_counts()
# -

# ...then top each grade up to n_t_wanted samples from the large 'diab' set.
n_t_wanted = 2000
n_t_extra = dict(n_t_wanted - df2use.target.value_counts())
n_t_extra

set_torch_seed()
for t,n in n_t_extra.items():
    df2use = pd.concat([df2use, df[(df.target==t) & (df.src=='diab')].sample(n)])

df2use.shape
df2use.target.value_counts()

if dbg:
    df2use = df2use.head(dbgsz)

# ## dataset

# +
# %%time
tfms = get_transforms(**params_tfms)

def get_data(sz=SZ, bs=BS):
    """Build the fastai DataBunch (80/20 random split, imagenet normalization)."""
    src = (ImageList.from_df(df=df2use,path='./',cols='fnm')
            .split_by_rand_pct(0.2)
            .label_from_df(cols='target',
                           #label_cls=FloatList
                          )
          )
    data= (src.transform(tfms,
                         size=sz,
                         **kwargs_tfms
                        ) #Data augmentation
            .databunch(bs=bs) #DataBunch
            .normalize(imagenet_stats) #Normalize
          )
    return data

set_torch_seed()
data = get_data()
# -

# %%time
data.show_batch(rows=3, figsize=(10, 10))

# ## add test dataset

p = '../input/aptos2019-blindness-detection'
pp = Path(p)
test = pd.read_csv(pp/'test.csv')
if dbg:
    test = test.head(dbgsz)
data.add_test(ImageList.from_df(test,
                                '../input/aptos2019-blindness-detection',
                                folder='test_images',
                                suffix='.png'))

# %%time
data.show_batch(rows=3, figsize=(10, 10), ds_type=DatasetType.Test)

# ## train

model = EfficientNet.from_pretrained(MODEL_NAME, num_classes=5)
learn = Learner(data,
                model,
                path=p_o,
                # wd=WD,
                metrics=[accuracy, qwk],
               )
if FP16:
    learn = learn.to_fp16()

# %%time
learn.lr_find()
# !nvidia-smi
learn.recorder.plot(suggestion=True, skip_end=15)

# +
set_torch_seed()
learn.fit_one_cycle(10, max_lr=1e-3,
                    callbacks=[SaveModelCallback(learn, every='epoch', name=f'{PRFX}_model')])
# -

learn.recorder.plot_losses()
learn.recorder.plot_metrics()

# # validate and thresholding

learn = learn.to_fp32()

# %%time
set_torch_seed()
preds_val, y_val = learn.get_preds(ds_type=DatasetType.Valid)
preds_val = preds_val.numpy().squeeze()
y_val= y_val.numpy()

preds_val = np.argmax(preds_val, 1)
quadratic_weighted_kappa(preds_val, y_val)

Counter(y_val).most_common()
Counter(preds_val).most_common()

# # testing

# %%time
set_torch_seed()
preds_tst, _ = learn.get_preds(ds_type=DatasetType.Test)
preds_tst = preds_tst.numpy().squeeze()
preds_tst = np.argmax(preds_tst, 1)

# +
# %%time
set_torch_seed()
preds_tst_tta, _ = learn.TTA(ds_type=DatasetType.Test)
preds_tst_tta = preds_tst_tta.numpy().squeeze()
preds_tst_tta = np.argmax(preds_tst_tta, 1)
# -

pd.Series(preds_tst.astype(int)).value_counts()

pd.Series(preds_tst_tta.astype(int)).value_counts()

# ## submit

# NOTE(review): the submission uses plain (non-TTA) predictions; preds_tst_tta
# is computed above but intentionally left unused here.
subm = pd.read_csv("../input/aptos2019-blindness-detection/test.csv")
subm['diagnosis'] = preds_tst
subm.head()

subm.diagnosis.value_counts()

subm.to_csv(f"{p_o}/submission.csv", index=False)
nbs/CvCls0730_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # # Characterization of Discrete Systems # # *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the bachelors module Signals and Systems, Comunications Engineering, Universität Rostock. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).* # - # ## Eigenfunctions # # An [eigenfunction](https://en.wikipedia.org/wiki/Eigenfunction) of a discrete system is defined as the input signal $x[k]$ which produces the output signal $y[k] = \mathcal{H}\{ x[k] \} = \lambda \cdot x[k]$ with $\lambda \in \mathbb{C}$. The weight $\lambda$ associated with $x[k]$ is known as scalar eigenvalue of the system. Hence besides a weighting factor, an eigenfunction is not modified by passing through the system. # # [Complex exponential signals](../discrete_signals/standard_signals.ipynb#Complex-Exponential-Signal) $z^k$ with $z \in \mathbb{C}$ are eigenfunctions of discrete linear time-invariant (LTI) systems. Let's assume a generic LTI system with input signal $x[k] = z^k$ and output signal $y[k] = \mathcal{H}\{ x[k] \}$. Due to the time-invariance of the system, the response to a shifted input signal $x(k-\kappa) = z^{k - \kappa}$ reads # # \begin{equation} # y[k- \kappa] = \mathcal{H}\{ x[k - \kappa] \} = \mathcal{H}\{ z^{- \kappa} \cdot z^k \} # \end{equation} # # Due to the linearity of the system this can be reformulated as # # \begin{equation} # y[k- \kappa] = z^{- \kappa} \cdot \mathcal{H}\{ z^k \} = z^{- \kappa} \cdot y[k] # \end{equation} # # If the complex exponential signal $z^k$ is an eigenfunction of the LTI system, the output # signal is a weighted exponential signal $y[k] = \lambda \cdot z^k$. 
Introducing $y[k]$ into the left- and right-hand side of above equation yields
#
# \begin{equation}
# \lambda z^k z^{- \kappa} = z^{- \kappa} \lambda z^k
# \end{equation}
#
# which obviously is fulfilled. This proves that the exponential signal $z^k$ is an eigenfunction of LTI systems.

# **Example**
#
# The output signal of the previously introduced [second-order recursive LTI system](difference_equation.ipynb#Second-Order-System) with the difference equation
#
# \begin{equation}
# y[k] - y[k-1] + \frac{1}{2} y[k-2] = x[k]
# \end{equation}
#
# is computed for a complex exponential signal $x[k] = z^k$ at the input. The output signal should be a weighted complex exponential due to above reasoning.

# +
# %matplotlib inline
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

# Filter coefficients of the difference equation y[k] - y[k-1] + 1/2 y[k-2] = x[k]
# in scipy's (b, a) convention: a holds the output-side, b the input-side weights.
a = [1.0, -1.0, 1/2]
b = [1.0]

# Complex frequency z = e^{Sigma + j Omega} and 30 samples of the eigenfunction x[k] = z^k.
z = np.exp(0.02 + .5j)
k = np.arange(30)
x = z**k
# Response of the system to x[k], computed by direct filtering.
y = signal.lfilter(b, a, x)
# -

# The real and imaginary part of the input and output signal is plotted.

# +
plt.figure(figsize=(10,8))

# Top row: real/imaginary part of the input x[k].
plt.subplot(221)
plt.stem(k, np.real(x))
plt.xlabel('$k$')
plt.ylabel(r'$\Re \{ x[k] \}$')

plt.subplot(222)
plt.stem(k, np.imag(x))
plt.xlabel('$k$')
plt.ylabel(r'$\Im \{ x[k] \}$')
plt.tight_layout()

# Bottom row: real/imaginary part of the output y[k].
plt.subplot(223)
plt.stem(k, np.real(y))
plt.xlabel('$k$')
plt.ylabel(r'$\Re \{ y[k] \}$')

plt.subplot(224)
plt.stem(k, np.imag(y))
plt.xlabel('$k$')
plt.ylabel(r'$\Im \{ y[k] \}$')
plt.tight_layout()
# -

# **Exercise**
#
# * From the in- and output signal only, can we conclude that the system is LTI?

# ## Transfer Function
#
# The complex eigenvalue $\lambda$ characterizes the properties of the transfer of a complex exponential signal $z^k$ with [complex frequency $z$](../discrete_signals/standard_signals.ipynb#Complex-Exponential-Signal) through a discrete LTI system. It is commonly termed as [*transfer function*](https://en.wikipedia.org/wiki/Transfer_function) and denoted by $H(z)=\lambda(z)$.
Using this definition, the output signal $y[k]$ of an LTI system with complex exponential signal at the input reads # # \begin{equation} # y[k] = \mathcal{H} \{ z^k \} = H(z) \cdot z^k # \end{equation} # # Note that the concept of the transfer function is directly linked to the linearity and time-invariance of a system. Only in this case, complex exponential signals are eigenfunctions of the system and $H(z)$ describes the properties of an LTI system with respect to these. # # Above equation can be rewritten in terms of the magnitude $| H(z) |$ and phase $\varphi(z)$ of the complex transfer function $H(z)$ # # \begin{equation} # y[k] = | H(z) | \cdot z^k = | H(z) | \cdot e^{\Sigma k + j \Omega k + j \varphi(z)} # \end{equation} # # where $z = e^{\Sigma + j \Omega}$ has been substituted to derive the last equality. The magnitude $| H(z) |$ provides the frequency dependent attenuation of the eigenfunction $z^k$ by the system, while $\varphi(z)$ provides the phase-shift introduced by the system. # ## Link between Transfer Function and Impulse Response # # In order to establish a link between the transfer function $H(z)$ and the impulse response $h[k]$, the output signal $y[k] = \mathcal{H} \{ x[k] \}$ of an LTI system with input signal $x[k]$ is computed. It is given by convolving the input signal with the impulse response # # \begin{equation} # y[k] = x[k] * h[k] = \sum_{\kappa = -\infty}^{\infty} x[k-\kappa] \cdot h[\kappa] # \end{equation} # # For a complex exponential signal as input $x[k] = z^k$ the output of the LTI system is given as $y[k] = \mathcal{H} \{ z^k \} = H(z) \cdot z^k$. 
Introducing both signals into the left- and right-hand side of the convolution yields # # \begin{equation} # H(z) \cdot z^k = \sum_{\kappa = -\infty}^{\infty} z^k \, z^{- \kappa} \cdot h[\kappa] # \end{equation} # # which after canceling out $z^k$ results in # # \begin{equation} # H(z) = \sum_{\kappa = -\infty}^{\infty} h[\kappa] \cdot z^{- \kappa} # \end{equation} # # The transfer function $H(z)$ can be computed from the impulse response by summing over the impulse response $h[k]$ multiplied with the complex exponential function $z^k$. This constitutes a transformation, which is later introduced in more detail as [$z$-transform](https://en.wikipedia.org/wiki/Z-transform). # + [markdown] nbsphinx="hidden" # ### Copyright # # <p xmlns:dct="http://purl.org/dc/terms/"> # <a rel="license" # href="http://creativecommons.org/publicdomain/zero/1.0/"> # <img src="http://i.creativecommons.org/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" /> # </a> # <br /> # To the extent possible under law, # <span rel="dct:publisher" resource="[_:publisher]">the person who associated CC0</span> # with this work has waived all copyright and related or neighboring # rights to this work. # </p>
discrete_systems/eigenfunctions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!--BOOK_INFORMATION--> # <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> # *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* # # *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* # <!--NAVIGATION--> # < [Profiling and Timing Code](01.07-Timing-and-Profiling.ipynb) | [Contents](Index.ipynb) | [Introduction to NumPy](02.00-Introduction-to-NumPy.ipynb) > # # More IPython Resources # In this chapter, we've just scratched the surface of using IPython to enable data science tasks. # Much more information is available both in print and on the Web, and here we'll list some other resources that you may find helpful. # ## Web Resources # # - [The IPython website](http://ipython.org): The IPython website links to documentation, examples, tutorials, and a variety of other resources. # - [The nbviewer website](http://nbviewer.jupyter.org/): This site shows static renderings of any IPython notebook available on the internet. The front page features some example notebooks that you can browse to see what other folks are using IPython for! 
# - [A gallery of interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks/): This ever-growing list of notebooks, powered by nbviewer, shows the depth and breadth of numerical analysis you can do with IPython. It includes everything from short examples and tutorials to full-blown courses and books composed in the notebook format!
# - Video Tutorials: searching the Internet, you will find many video-recorded tutorials on IPython. I'd especially recommend seeking tutorials from the PyCon, SciPy, and PyData conferences by <NAME> and <NAME>, two of the primary creators and maintainers of IPython and Jupyter.

# ## Books
#
# - [*Python for Data Analysis*](http://shop.oreilly.com/product/0636920023784.do): <NAME>'s book includes a chapter that covers using IPython as a data scientist. Although much of the material overlaps what we've discussed here, another perspective is always helpful.
# - [*Learning IPython for Interactive Computing and Data Visualization*](https://www.packtpub.com/big-data-and-business-intelligence/learning-ipython-interactive-computing-and-data-visualization): This short book by <NAME> offers a good introduction to using IPython for data analysis.
# - [*IPython Interactive Computing and Visualization Cookbook*](https://www.packtpub.com/big-data-and-business-intelligence/ipython-interactive-computing-and-visualization-cookbook): Also by <NAME>, this book is a longer and more advanced treatment of using IPython for data science. Despite its name, it's not just about IPython–it also goes into some depth on a broad range of data science topics.
#
# Finally, a reminder that you can find help on your own: IPython's ``?``-based help functionality (discussed in [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb)) can be very useful if you use it well and use it often.
# As you go through the examples here and elsewhere, this can be used to familiarize yourself with all the tools that IPython has to offer. # <!--NAVIGATION--> # < [Profiling and Timing Code](01.07-Timing-and-Profiling.ipynb) | [Contents](Index.ipynb) | [Introduction to NumPy](02.00-Introduction-to-NumPy.ipynb) >
PythonDataScienceHandbook/notebooks/01.08-More-IPython-Resources.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory analysis / preprocessing pipeline:
# 1. load the raw train/test CSVs
# 2. balance the TARGET classes by down-sampling the majority (negative) class
# 3. drop zero-variance columns and add a per-row zero count feature
# 4. project onto 6 PCA components and remove |z-score| >= 3 outliers
# 5. min-max scale to [0, 1] and write the processed CSVs

import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from scipy import stats
from sklearn import preprocessing
from sklearn.decomposition import PCA

# +
# Loading train and test data
train1 = pd.read_csv("../Dataset/train.csv")
test1 = pd.read_csv("../Dataset/test.csv")

# +
# Checking number of rows and columns
print(train1.shape)
print(test1.shape)

# +
# Analysing some basic Statistics
train1.describe()

# +
# Checking NAN values
print(train1.isnull().values.sum())
print(test1.isnull().values.sum())

# +
# Checking data balance according to target variable
Positive = train1[train1["TARGET"] == 1]
print(Positive.shape)
Negative = train1[train1["TARGET"] == 0]
print(Negative.shape)

# +
# Balancing data: down-sample the negative class to 3008 rows and keep all
# positives. (pd.concat replaces DataFrame.append, which was removed in
# pandas 2.0.)
train2 = pd.concat([train1[train1["TARGET"] == 0].sample(3008),
                    train1[train1["TARGET"] == 1]])
Positive = train2[train2["TARGET"] == 1]
print(Positive.shape)
Negative = train2[train2["TARGET"] == 0]
print(Negative.shape)

# +
# Removing variables with std = 0 (constant columns carry no information)
train3 = train2.loc[:, train2.std(axis=0) != 0]
test3 = test1.loc[:, train2.std(axis=0) != 0]
train3.shape

# +
# Creating a variable to count the number of zeros for each ID
train4 = train3
test4 = test3
train4["Zeros"] = train3.drop(columns="TARGET").apply(lambda x: (x == 0).sum(), axis=1)
test4["Zeros"] = test3.apply(lambda x: (x == 0).sum(), axis=1)

# +
# Correlation of each feature with the target (computed for inspection)
train_corr = train4.corr(method='pearson')["TARGET"]
train_corr = train_corr.fillna(0)

trainPCA = train4
testPCA = test4
trainPCA = trainPCA.drop(columns="TARGET")

# Applying PCA: fit on the training data only, then project the test data
# with the SAME fitted components. (Previously PCA was refit on the test set,
# which placed train and test into different, incomparable feature spaces.)
pca = PCA(n_components=6)
trainPCA = pd.DataFrame(pca.fit_transform(trainPCA))
testPCA = pd.DataFrame(pca.transform(testPCA))
trainPCA.columns = ["PCA0", "PCA1", "PCA2", "PCA3", "PCA4", "PCA5"]
testPCA.columns = ["PCA0", "PCA1", "PCA2", "PCA3", "PCA4", "PCA5"]
trainPCA["TARGET"] = train4.TARGET.values
print("The PCA explained variance ratio is: " + str(sum(pca.explained_variance_ratio_)*1))
# -

trainPCA

# +
# Removing outliers: keep rows whose every column is within 3 standard
# deviations of its mean
trainPCA2 = trainPCA[(np.abs(stats.zscore(trainPCA)) < 3).all(axis=1)]

# +
# Making boxplot
trainPCA2.boxplot(grid=False, figsize=(18, 5))

# +
# Normalizing Data: fit the scaler on train and reuse it for test so both
# sets share the same [0, 1] mapping. (Previously the scaler was refit on
# the test set.)
scaler = MinMaxScaler(feature_range=(0, 1))
trainPCA3 = trainPCA2.drop(columns=["TARGET"])
trainPCA3 = pd.DataFrame(scaler.fit_transform(trainPCA3), columns=trainPCA3.columns)
trainPCA3["TARGET"] = trainPCA2.TARGET.values
testPCA3 = pd.DataFrame(scaler.transform(testPCA), columns=testPCA.columns)

# +
# Making boxplot
trainPCA3.boxplot(grid=False, figsize=(18, 5))

# +
# Saving data
trainPCA3.to_csv("../Dataset/train_1.csv")
testPCA3.to_csv("../Dataset/test_1.csv")
code/Exploratory-Analysis-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 유방암 진단 데이터 실습 # load_breast_cancer 명령은 유방암(breast cancer) 진단 데이터를 제공한다. 유방암 진단 사진으로부터 측정한 종양(tumar)의 특징값을 사용하여 종양이 양성(benign)인지 악성(malignant)인지를 판별한다. # + from sklearn.datasets import load_breast_cancer cancer = load_breast_cancer() print(cancer.DESCR) # + import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline df = pd.DataFrame(cancer.data, columns=cancer.feature_names) sy = pd.Series(cancer.target, dtype="category") sy = sy.cat.rename_categories(cancer.target_names) df['class'] = sy df.tail() # - sns.pairplot(vars=["mean radius", "mean texture", "mean perimeter", "mean area"], hue="class", data=df) plt.show() # ### import # + from sklearn.datasets import load_breast_cancer from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt # %matplotlib inline # - # ### Data load # 유방암 데이터를 로드한다. # train data와 test data를 분리한다. # + cancer = load_breast_cancer() X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, stratify=cancer.target, random_state=42) # - # ### 실습 : 로지스틱 모형 # # 1. 로지스틱 모형을 생성한다. # 2. 모형의 정확도를 test 데이터를 사용하여 확인한다. # + # TODO # - # ## Regularization: # # - overfitting을 방지하자. # - L1 - 중요한 변수만을 남긴다. # - L2 - 기본값. 중요하지 않은 변수의 weight값을 0에 가깝게 규제한다. # # > regularization 규제강도를 조정하는 C 매개변수가 중요하다. # - C의 값을 감소시키면 규제의 강도가 증가한다. # - lower C => 규제의 강도가 커진다. weight값이 0에 가깝게 조정된다. 과소적합된다. # - higher C => 규제의 강도가 낮아진다. 모든 data point를 최대한 분류하도록 조정된다. 과대적합된다. # ### 실습 : Regularization # # 1. C=100 으로 변경 해서 모형을 평가해 보자. # 2. C=0.01 로 변경해서 모형을 평가해 보자. # + #TODO
03Supervised/02LogisticRegression/LogisticRegression_실습.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="SP6ophbQq5I0" # # ir_datasets - Tutorial - CLI # # **NOTE: This tutorial is for the command-line interface. See the other tutorial for Python.** # + [markdown] id="cl8KYrJTq-g0" # ## Getting Started # # We'll start out by installing the package. The package is available on pypi, # so you can install it with your favorite package manager. # + colab={"base_uri": "https://localhost:8080/"} id="vbGhAIREqw1c" outputId="1d7fcdb3-93a2-4668-fd7d-787d1471f648" # !pip install ir_datasets # + [markdown] id="7v_X6XqlrTan" # ## export # # The `ir_datasets export` command outputs data to stdout as TSV, # JSON, and other formats. # # The command format is: # # ``` # ir_datasets export <dataset-id> <entity-type> # ``` # # with optional other arguments following entity-type. # # `<dataset-id>` is the dataset's identifier, found [in the catalog](https://ir-datasets.com/). `<entity-type>` is one of: `docs`, `queries`, `qrels`, `scoreddocs`. # + [markdown] id="myI4M6OCsJQL" # Let's start by getting the top 10 documents from the `cord19/trec-covid` collection. The first time you run the command, it will automatically download the dataset. # # + colab={"base_uri": "https://localhost:8080/"} id="wt-QU7q1q-Mn" outputId="469d6b3f-4a0f-44db-a42b-ee8fef1232fe" # !ir_datasets export cord19/trec-covid docs | head -n 10 # + [markdown] id="HTQaik0isguS" # You can export in other formats too. Here's an exporting in JSON-Lines. 
# + colab={"base_uri": "https://localhost:8080/"} id="XaYh4lwLrTDZ" outputId="b50827d6-02e6-409c-bdcf-72dfbfdf1529" # !ir_datasets export cord19/trec-covid docs --format jsonl | head -n 10 # + [markdown] id="hhVG2gp6sqdZ" # If you do not want all the fields, you can specify which ones with `--fields`: # + colab={"base_uri": "https://localhost:8080/"} id="kFI8UHbzq6Cu" outputId="a06ac0f5-2248-4c09-e5e6-f8b49f5cf29f" # !ir_datasets export cord19/trec-covid docs --format jsonl --fields doc_id date | head -n 10 # + [markdown] id="WUjwx7i1s5HD" # The export command works the same way for `queries`, `qrels`, and `scoreddocs` (where available). By default, `qrels` and `scoreddocs` output in the TREC format. But you can choose to export as tsv or jsonl as well. # + colab={"base_uri": "https://localhost:8080/"} id="JoeB2aresxAV" outputId="872f8a51-ceb2-4c29-84ba-eb503f58ce1d" # !ir_datasets export cord19/trec-covid queries --fields query_id title | head -n 10 # + colab={"base_uri": "https://localhost:8080/"} id="Me_hppfJtRxG" outputId="b2fdb388-7eea-4e47-f5ea-859e07fe1b74" # !ir_datasets export cord19/trec-covid qrels | head -n 10 # + [markdown] id="2zocHeB1tgKu" # If you're savvy at the command line, piping can let you capture some dataset statistics pretty easily. Here's an example giving the label proportions using `awk`: # + colab={"base_uri": "https://localhost:8080/"} id="vqCnPJOVtaWl" outputId="6b041b9e-9b85-47bc-91c1-1595c9d5968b" # !ir_datasets export cord19/trec-covid qrels | awk '{a[$4]+=1; s+=1}END{for (x in a){print x, a[x], a[x]/s}}' # + [markdown] id="IgE2qowjuZV8" # ## lookup # # You can look up documents by their ID with the `ir_datasets lookup` command. The command format is: # # ``` # ir_datasets lookup <dataset-id> <doc-ids> ... # ``` # # These lookups are generally O(1) and memory-efficient. 
# + colab={"base_uri": "https://localhost:8080/"} id="dBrzCdwbtug4" outputId="cd3522f7-3acf-450b-ae68-ac72ce9f0877"
# !ir_datasets lookup cord19/trec-covid 005b2j4b 00fmeepz 010vptx3

# + [markdown] id="v6leewIGvYKf"
# You can also specify the fields to return.

# + colab={"base_uri": "https://localhost:8080/"} id="GkVpdPsXvFIq" outputId="106acd40-9d69-495e-e9f8-7191b1c81d78"
# !ir_datasets lookup cord19/trec-covid 005b2j4b 00fmeepz 010vptx3 --fields doc_id title

# + [markdown] id="DOm67Sbsvjon"
# And of course, you can do all sorts of fancy piping here as well. Let's find all highly-relevant documents for Query 50:

# + colab={"base_uri": "https://localhost:8080/"} id="yVMHEFRXvLfh" outputId="e4ad1626-5057-40a3-c517-9cc12ad0052a"
# !ir_datasets lookup cord19/trec-covid $(ir_datasets export cord19/trec-covid qrels | awk '$1==50&&$4==2{printf "%s ", $3}') --fields doc_id title

# + [markdown] id="h9dhCjb0y4Rj"
# ## doc_fifos
#
# For indexing using some tools (e.g., Anserini), it is helpful to have multiple concurrent document streams. You can do this with the `ir_datasets doc_fifos` command. Note that this command only works on posix systems (e.g., unix, macos).
#
# This command runs until all the documents are exhausted, so you need to run it in the background or elsewhere. So it's not conducive to show in a Colab setting.

# + id="YGKeucnFzFOW"
# !ir_datasets doc_fifos cord19/trec-covid
examples/ir_datasets_cli.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# +
# Intraday trading experiment: fit a rolling linear "mean trend" model on
# AMZN 1-minute closes, model the fit residuals with Yule-Walker AR
# coefficients, trade on the combined prediction, and compare the resulting
# portfolio against a perfect-foresight portfolio and an SPY benchmark
# (Sharpe ratio at the end).
#
# NOTE(review): the Alpha Vantage API key is hard-coded below — it should be
# moved to an environment variable or a config file before sharing this code.
from datetime import timedelta
from pykalman import KalmanFilter
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy.linalg import inv
from math import sqrt
from sklearn.preprocessing import PolynomialFeatures
from alpha_vantage.timeseries import TimeSeries

# s: train/test split fraction; n: window length (n-1 lagged closes per
# sample); n1: Yule-Walker AR model order.
s = 0.5
n = 4
n1 = 4
y = np.array([])
z = np.array([])
z_bench = np.array([])
predict_ind = np.array([])
predict = np.array([])
res = np.array([])
mean = 0
mean1 = 0
var1 = 0
var = 0
port = np.array([1000000])        # strategy portfolio value, starting at 1,000,000
port_ideal = np.array([1000000])  # perfect-foresight portfolio value
return_bench = np.array([])
return_strat = np.array([])
flag = 0
count = 0
count_ideal = 0

ts = TimeSeries(key='5Z1WW0XDW1MQWHGI', output_format='pandas')
df1, meta_data = ts.get_intraday(symbol='AMZN',interval='1min', outputsize='full')
df2, meta_data2 = ts.get_intraday(symbol='SPY',interval='1min', outputsize='full')

############################-I-N-I-T-I-A-L-I-Z-A-T-I-O-N-#############################################
# Kalman-smooth the close series (plotted for visual inspection only — the
# smoothed series is not used by the model below).
print(df1)
kf = KalmanFilter(transition_matrices = [1], observation_matrices = [1], initial_state_mean = 1400, initial_state_covariance = 1, observation_covariance=1, transition_covariance=.01)
state_means, _ = kf.filter(df1['4. close'])
state_means = pd.DataFrame(state_means.flatten(), index=df1.index)
df1['4. close'].plot()
state_means[0].plot()
plt.show()

# Build the design matrix y (n-1 lagged closes per row), the target z (the
# following close), benchmark closes z_bench and matching timestamps.
for i in range(0,len(df1.index)-n+1):
    x = np.array([])
    for j in range(i,n+i-1):
#        x = np.append(x,np.array(df1['1. open'][j]))
#        x = np.append(x,np.array(df1['3. low'][j]))
        x = np.append(x,np.array(df1['4. close'][j]))
#        x = np.append(x,np.array(df1['2. high'][j]))
#        x = np.append(x,np.array(df1['5. volume'][j]))
    z = np.append(z,np.array(df1['4. close'][i+n-1]))
    z_bench = np.append(z_bench,np.array(df2['4. close'][i+n-1]))
    predict_ind = np.append(predict_ind, np.array(df1.index[i+n-1]))
    y = np.concatenate((y,x))
y = np.reshape(y,(len(df1.index)-n+1,1*(n-1)))
z = np.reshape(z,(len(df1.index)-n+1,1))
# NOTE(review): this reshapes `z`, not `z_bench` — it looks like a copy-paste
# slip that silently replaces the SPY benchmark with AMZN closes; confirm.
z_bench = np.reshape(z,(len(df1.index)-n+1,1))
split = int(s*len(df1))
index_ = predict_ind[split:]
price_test = pd.DataFrame(z[split:])
price_test.index = index_
z_bench = z_bench[split:]

#############################-M-E-A-N--T-R-E-N-D--M-O-D-E-L-#######################
# Walk-forward loop: for each test minute, refit an OLS model (via the normal
# equations) on the previous `split` samples and predict one step ahead.
for i in range(0,len(index_)):
    y_train = y[i:split+i]
    y_test = y[split+i:]
    z_train = z[i:split+i]
    poly = PolynomialFeatures(1, interaction_only=False, include_bias=False)
    y_train = poly.fit_transform(y_train)
    y_test = poly.fit_transform(y_test)
    # OLS coefficients: model = (X'X)^-1 X'z
    model = inv(np.transpose(y_train).dot(y_train)).dot(np.transpose(y_train).dot(z_train))
    predict_ = y_test.dot(model)
    predict = np.append(predict, predict_[0])
###########################-Y-U-L-E--W-A-L-K-E-R-###################################
    # Fit an AR(n1) model to the de-meaned in-sample residuals via the
    # Yule-Walker equations and forecast the next residual.
    # NOTE(review): the nested loops below reuse the loop variable `i`,
    # shadowing (and clobbering) the outer walk-forward index — confirm this
    # is intended; it changes which windows the outer loop visits.
    temp1 = y_train.dot(model)
    residue = temp1 - z_train - np.mean(temp1 - z_train)
    residue = np.reshape(residue, len(temp1))
    x = residue[-n1:]
    auto_corr = np.correlate(residue, residue, mode='full')/np.sum(residue**2)
    ac = auto_corr[int(auto_corr.size/2):n1+int(auto_corr.size/2)]
    cor = np.array([])
    for i in range(0,n1):
        cor = np.append(cor, np.roll(ac,i))
    cor = np.reshape(cor, (n1, n1))
    for i in range(0,n1):
        for j in range(i,n1):
            cor[j][i] = cor[i][j]
    ac1 = auto_corr[1+int(auto_corr.size/2):1+n1+int(auto_corr.size/2)]
    p = np.transpose(ac1[::-1]).dot(inv(cor)).dot(x)
    res = np.append(res, np.array(p+np.mean(temp1 - z_train)))
predict = pd.DataFrame(predict)
residue = pd.DataFrame(res)
predict.index = index_
residue.index = index_

##############################-S-T-A-T-I-S-T-I-C-S-################################
# d1 columns: 0 = model prediction, 1 = actual price, 2 = AR residual forecast
plt.xlabel('Date')
plt.ylabel('Stock_price')
#d = predict - price_test
#temp = d[0] - np.mean(d[0])
d1 = predict
d1[1] = price_test
d1[2] = residue
#for i in range(1,len(index_)):
#    if np.datetime64(index_[i])-np.datetime64(index_[i-1]) != timedelta(seconds = 60):
#        d1.drop(index_[i], inplace=True)
d1[0].plot()
print(d1.index[0], d1.index[-1], len(d1))
d1[1].plot()
plt.show()
# Accumulate mean / mean-square prediction error, without (mean, var) and
# with (mean1, var1) the AR residual correction.
for i in range(0,len(d1)):
    mean = mean + (d1[0][d1.index[i]]-d1[1][d1.index[i]])/len(d1)
    mean1 = mean1 + (d1[0][d1.index[i]]-d1[1][d1.index[i]]+d1[2][d1.index[i]])/len(d1)
    var = var + (d1[0][d1.index[i]]-d1[1][d1.index[i]])*(d1[0][d1.index[i]]-d1[1][d1.index[i]])/len(d1)
    var1 = var1 + (d1[0][d1.index[i]]-d1[1][d1.index[i]]+d1[2][d1.index[i]])*(d1[0][d1.index[i]]-d1[1][d1.index[i]]+d1[2][d1.index[i]])/len(d1)
# NOTE(review): var/var1 are mean squared errors (second raw moments), not
# variances about the mean, so std/std1 are RMS errors.
std = sqrt(var)
std1 = sqrt(var1)
print(model)
print(mean, std, mean1, std1)
(d1[0]-d1[1]).plot()
plt.show()
(d1[0]-d1[1]+d1[2]).plot()
plt.show()

################################-A-U-T-O-C-O-R-R-E-L-A-T-I-O-N-########################################
# Correlation of predictions vs prices, and autocorrelation of the error
# series plotted with 2-sigma (95.45%) horizontal bands.
corr = np.corrcoef(np.array(d1[0]),np.array(d1[1]))
print(corr)
corr = np.corrcoef(np.array(d1[0]-d1[2]),np.array(d1[1]))
print(corr)
temp = d1[0]-d1[1] - np.mean(d1[0]-d1[1])
auto_corr = np.correlate(temp, temp, mode='full')/np.sum(temp**2)
ac = auto_corr[int(auto_corr.size/2):int(auto_corr.size)]
plt.plot(auto_corr[int(auto_corr.size/2):50+int(auto_corr.size/2)], 'ro')
plt.axhline(y=1-0.9545)
plt.axhline(y=-1+0.9545)
plt.show()
temp = d1[0]-d1[1]+d1[2] - np.mean(d1[0]-d1[1]+d1[2])
auto_corr = np.correlate(temp, temp, mode='full')/np.sum(temp**2)
ac = auto_corr[int(auto_corr.size/2):int(auto_corr.size)]
plt.plot(auto_corr[int(auto_corr.size/2):50+int(auto_corr.size/2)], 'ro')
plt.axhline(y=1-0.9545)
plt.axhline(y=-1+0.9545)
plt.show()

###############################-T-I-M-E--S-E-R-I-E-S--A-N-A-L-Y-S-I-S-###################################
#temp = d[0] - np.mean(d[0])
#print(index_[0])
#split1 = int(len(temp)*s1)
#index_ = index_[split1:]
#a = predict[split1:]
#b = price_test[split1:]
#res = np.array([])
#for k in range(0,len(temp)-split1):
#    temp1 = temp[k:k+split1]
#    print(temp1.index[-1])
#    x = np.array(temp[k+split1-n1:k+split1])
#    auto_corr = np.correlate(temp1, temp1, mode='full')/np.sum(temp1**2)
#    ac = auto_corr[int(auto_corr.size/2):n1+int(auto_corr.size/2)]
#    print(ac,"z")
#    cor = np.array([])
#    for i in range(0,n1):
#        cor = np.append(cor, np.roll(ac,i))
#    cor = np.reshape(cor, (n1, n1))
#    for i in range(0,n1):
#        for j in range(i,n1):
#            cor[j][i] = cor[i][j]
#    ac = auto_corr[1+int(auto_corr.size/2):1+n1+int(auto_corr.size/2)]
#    print(ac,'a', ac[::-1], 'b', cor,'c',temp[k+split1-n1:k+split1],'d', x)
#    print("asdf")
#    p = np.transpose(ac[::-1]).dot(inv(cor)).dot(x)
#    print(p)
#    res = np.append(res, np.array(p+mean))
#
#residue = pd.DataFrame(res)
#residue.index = index_
#d2 = a-b-residue
#corr = np.corrcoef(np.array(a[0]-residue[0]),np.array(b[0]))
#print(corr)
#
#d2.plot()
#plt.show()
#
#for i in range(1,len(index_)):
#    if np.datetime64(index_[i])-np.datetime64(index_[i-1]) != timedelta(seconds = 60):
#        d2.drop(index_[i], inplace=True)
#
#
#temp2 = d2[0] - np.mean(d2[0])
#auto_corr = np.correlate(temp2, temp2, mode='full')/np.sum(temp2**2)
#plt.plot(auto_corr[int(auto_corr.size/2):50+int(auto_corr.size/2)], 'ro')
#plt.axhline(y=1-0.9545)
#plt.axhline(y=-1+0.9545)
#plt.show()

##############################-P-O-R-T-F-O-L-I-O--A-N-A-L-Y-S-I-S-######################################
# Strategy: go fully long when the model predicts the next close above the
# current close; otherwise hold cash. Gaps in the 1-minute index (non-60s
# steps) are carried forward without trading.
for i in range(0,len(index_)-1):
    if np.datetime64(index_[i+1])-np.datetime64(index_[i]) == timedelta(seconds = 60):
        if (predict[0][index_[i+1]]+residue[0][i+1]) > price_test[0][index_[i]]:
            if flag == 1:
                port = np.append(port, port[-1]+j*(price_test[0][index_[i+1]] - price_test[0][index_[i]]))
            if flag == 0:
                j = port[-1]/price_test[0][index_[i]]
                port = np.append(port, port[-1]+j*(price_test[0][index_[i+1]] - price_test[0][index_[i]]))
                flag = 1
                count += 1
        else:
            if flag == 0:
                port = np.append(port, port[-1])
            if flag == 1:
                port = np.append(port, port[-1])
                flag = 0
                count += 1
    else :
        port = np.append(port, port[-1])
        if flag == 1:
            count+=1
            flag = 0
# Perfect-foresight ("ideal") portfolio: same rules but using the actual next
# price instead of the prediction.
for i in range(0,len(index_)-1):
    if np.datetime64(index_[i+1])-np.datetime64(index_[i]) == timedelta(seconds = 60):
        if price_test[0][index_[i+1]] > price_test[0][index_[i]]:
            if flag == 1:
                port_ideal = np.append(port_ideal, port_ideal[-1]+j*(price_test[0][index_[i+1]] - price_test[0][index_[i]]))
            if flag == 0:
                j = port_ideal[-1]/price_test[0][index_[i]]
                port_ideal = np.append(port_ideal, port_ideal[-1]+j*(price_test[0][index_[i+1]] - price_test[0][index_[i]]))
                flag = 1
                count_ideal += 1
        else:
            if flag == 0:
                port_ideal = np.append(port_ideal, port_ideal[-1])
            if flag == 1:
                port_ideal = np.append(port_ideal, port_ideal[-1])
                flag = 0
                count_ideal += 1
    else :
        port_ideal = np.append(port_ideal, port_ideal[-1])
        if flag == 1:
            count_ideal += 1
            flag = 0
port_ideal1 = pd.DataFrame(port_ideal)
port1 = pd.DataFrame(port)
port1.index = index_
port_ideal1.index = index_
port1[0].plot()
port_ideal1[0].plot()
plt.show()
print(port_ideal[-1]/(port_ideal[0]), port[-1]/(port[0]), price_test[0][-1]/price_test[0][0], count_ideal, count)

#################################-S-H-A-R-P-E--R-A-T-I-O-################################################
# Sharpe ratio of per-minute strategy returns in excess of the benchmark,
# scaled by sqrt(number of periods).
for i in range(0, len(index_)-1):
    return_strat = np.append(return_strat, (port[i+1]-port[i])/port[i])
    return_bench = np.append(return_bench, (z_bench[i+1]-z_bench[i])/z_bench[i])
SR = sqrt(len(index_)-1) * np.mean(return_strat-return_bench) / np.std(return_strat-return_bench)
print(SR)
#########################################################################################################
# -
finpredlrintra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import tkinter as tk import numpy as np window = tk.Tk() canvas = tk.Canvas(window, width=120, height=100) canvas.pack(fill="both", expand=1) line1 = canvas.create_line(30, 50, 100, 50, fill="red", width=2) count = 0 def update(): """ This function updates the line coordinates and calls itself 62 times """ global count count += 1 canvas.coords(line1, 30, 50, 100, 50 + 30*np.sin(count/5)) if count <= 62: window.after(100, update) # Start the animation update() # Launch the app window.mainloop() # - import tkinter window = tkinter.Tk() window.title("GUI") canvas = tkinter.Canvas(window, width = 500, height = 500) canvas.pack() line1 = canvas.create_line(25, 25, 250, 150) line2 = canvas.create_line(25, 250, 250, 150, fill = "red") rect = canvas.create_rectangle(500, 25, 175, 75, fill = "green") window.mainloop() # + import tkinter def make_menu(w): global the_menu the_menu = tkinter.Menu(w, tearoff=0) the_menu.add_command(label="Cut") the_menu.add_command(label="Copy") the_menu.add_command(label="Paste") def show_menu(e): print(type(e)) w = e.widget the_menu.entryconfigure("Cut", command=lambda: w.event_generate("<<Cut>>")) the_menu.entryconfigure("Copy", command=lambda: w.event_generate("<<Copy>>")) the_menu.entryconfigure("Paste", command=lambda: w.event_generate("<<Paste>>")) the_menu.tk.call("tk_popup", the_menu, e.x_root, e.y_root) t = tkinter.Tk() make_menu(t) e1 = tkinter.Entry(); e1.pack() e2 = tkinter.Entry(); e2.pack() e1.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu) t.mainloop()
tkinter/copy_paste/copying.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Facial Expression Recognition
#
# This is an attempt to detect facial expressions.
#
# See: https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge
#
# The solution is using pre-weighted VGG16 from Keras library
#
# NOTE: this is a Python 2 notebook (reader.next(), old keras fit_generator API).

# %matplotlib inline
import pandas as pd
import os
import json
from glob import glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
from scipy.misc import imresize, imsave

# +
import csv
from tqdm import tqdm
from tqdm import tnrange, tqdm_notebook
from keras.utils import np_utils

source_size = (48,48)    # FER2013 images are 48x48 grayscale
target_size = (224,224)  # VGG16 input size
cmap = plt.get_cmap('hot')

# Data Labels Defined
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

# Read in data in an efficient manner
# We need to convert the black and white image data to an RGB image that the VGG16 model expects
# We accomplish the color conversion with the use of a color map
def load_data(filename, dataset=None):
    """Stream the FER2013 CSV and write each row of the chosen split to disk
    as <dataset>/<emotion>/<i>.png, resized to 224x224 and colorized via the
    'hot' colormap so VGG16 gets 3 channels."""
    assert dataset in ['Training', 'PublicTest', 'PrivateTest']
    with open(filename, 'rb') as csvfile:
        reader = csv.reader(csvfile)
        header = reader.next()
        usages = set()
        i=0
        for row in tqdm_notebook(reader):
            emotion = int(row[0])
            usage = row[2]
            usages.add(usage)
            if usage != dataset:
                continue
            # Row format: pixel values as a space-separated string; reshape to
            # 48x48 then upsample to the VGG16 input size.
            image_data = imresize(pd.DataFrame(row[1].split(), dtype=int).values.reshape(*source_size),target_size)
            # cmap() yields RGBA; drop the alpha channel to keep RGB only.
            image_data = np.delete(cmap(image_data), 3, 2)
            #image_data = np.swapaxes(image_data, 3, 1)
            # NOTE(review): `filename` is rebound here, shadowing the argument.
            filename = "%s/%s/%s.png" % (dataset,emotion, i)
            dirname = os.path.dirname(filename)
            if not os.path.exists(dirname):
                os.makedirs(dirname,)
            imsave(filename, image_data)
            i+=1
# -

# Load the training data set
load_data('fer2013/fer2013.csv', dataset='Training')
load_data('fer2013/fer2013.csv', dataset='PublicTest')
load_data('fer2013/fer2013.csv', dataset='PrivateTest')

# +
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D, Lambda, Input, Flatten, Dropout
from keras.models import Model, load_model
from keras import optimizers
from keras.applications import vgg16

nb_classes = len(labels)
weights_file = "weights.h5"
target_size=(224,224)
# Per-channel ImageNet means used by VGG16, in channels-first layout.
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))

def vgg_preprocess(x):
    """Subtract the ImageNet channel means and flip channel order RGB->BGR."""
    x = x - vgg_mean
    return x[:, ::-1] # reverse axis rgb->bgr

def create_default_model():
    """Build a VGG16-based classifier: frozen convolutional base plus a new
    fully-connected head with `nb_classes` outputs."""
    # build the model from scratch using VGG16 as a base
    model_vgg16 = vgg16.VGG16(weights='imagenet', include_top=False)
    model_vgg16.summary()
    # Freeze the pre-trained convolutional layers; only the head is trained.
    for layer in model_vgg16.layers:
        layer.trainable = False
    input_layer = Input(shape=(3,224,224),name = 'image_input')
    preprocess_layer = Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224))(input_layer)
    #Use the generated model
    output_vgg16 = model_vgg16(preprocess_layer)
    #Add the fully-connected layers
    x = Flatten(name='flatten')(output_vgg16)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dropout(0.5)(x)
    # NOTE(review): 'softmax' on this hidden fc2 layer is unusual — the
    # VGG-style head uses 'relu' here; confirm this is intentional.
    x = Dense(4096, activation='softmax', name='fc2')(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes, activation='softmax', name='predictions')(x)
    model = Model(input=input_layer, output=x)
    return model

def add_compiler(model, lr=0.001):
    """Compile `model` in place with Adam and categorical cross-entropy."""
    model.compile(optimizer=optimizers.Adam(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])

def load_model_with_weights_if_available():
    """Build the default model and report whether saved weights exist on disk.
    NOTE(review): the load_weights call is commented out, so saved weights are
    detected but never actually loaded."""
    # if weights exists on disk, then load it
    model = create_default_model()
    if os.path.exists(weights_file):
        #model.load_weights(weights_file)
        print("Model loaded from file %s" % weights_file)
    else:
        print("Model built from scratch")
    add_compiler(model)
    model.summary()
    return model

def get_batches(path, gen=ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):
    # Directory iterator over <path>/<class>/ image folders, resized to VGG size.
    return gen.flow_from_directory(path, target_size=(224,224), class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)

def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
    """Show a grid of images; transposes channels-first arrays to HWC for imshow."""
    if type(ims[0]) is np.ndarray:
        ims = np.array(ims).astype(np.uint8)
        if (len(ims.shape) == 4 and ims.shape[-1] != 3):
            ims = ims.transpose((0,2,3,1))
    f = plt.figure(figsize=figsize)
    for i in range(len(ims)):
        sp = f.add_subplot(rows, len(ims)//rows, i+1)
        sp.axis('Off')
        if titles is not None:
            sp.set_title(titles[i], fontsize=16)
        plt.imshow(ims[i], interpolation=None if interp else 'none')
# -

model = load_model_with_weights_if_available()

batch_size=32
batches = get_batches("Training", batch_size=batch_size)
samples = get_batches("TrainingSample", batch_size=batch_size)
public_batches = get_batches("PublicTest", batch_size=batch_size)
private_batches = get_batches("PrivateTest", batch_size=batch_size)

# MAIN TRAINING CYCLE
# Fit the model, and then save the weights to disk
nb_epoch = 5
N_test=10
samples = get_batches("TrainingSample", batch_size=10, shuffle=True)
N = samples.N
#N = 20
model.fit_generator(samples, N, nb_epoch, validation_data=public_batches, nb_val_samples=N_test)
#model.save_weights(weights_file)

# Check Accuracy of the test data sets
loss, acc = model.evaluate_generator(public_batches, 100)
print("Public Test Loss: %.4f, Accuracy: %.4f" % (loss, acc))
loss, acc = model.evaluate_generator(private_batches, 100)
print("Private Test Loss: %.4f, Accuracy: %.4f" % (loss, acc))

# predict private set results and save submission file to disk
results = model.predict(private_batches)
values = np.argmax(results, axis=1)
with open("submission_private.csv", "wb") as fp:
    for x in values:
        fp.write("%d\n" % x)
# Redundant: the with-block above has already closed the file.
fp.close()
challenges-in-representation-learning-facial-expression-recognition-challenge/solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Climate analysis of the Hawaii weather SQLite database (hawaii.sqlite):
# precipitation over the final year of data, station activity, and
# temperature statistics for a planned trip window.

import sqlalchemy
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import style
style.use('seaborn')
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect, func
import datetime as dt
import matplotlib.pyplot as plt
from sqlalchemy import Column, Integer, String, Float, Text, ForeignKey

# Reflect the existing SQLite schema into mapped classes; automap discovers
# the `station` and `measurements` tables, so no models are declared by hand.
engine = create_engine("sqlite:///hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()

inspector = inspect(engine)
inspector.get_table_names()

Station_class = Base.classes.station
Measurements_class = Base.classes.measurements

session = Session(engine)

# NOTE(review): this query groups by station while selecting date/prcp, so it
# yields one (arbitrary) row per station capped at 365 rows — presumably the
# intent was the last 365 daily readings; confirm before relying on it.
dates = session.query(Measurements_class.date,Measurements_class.prcp).order_by(Measurements_class.date.desc()).group_by(Measurements_class.station).limit(365).all()
dates

# Print every station id in the database.
a = session.query(Station_class.station).all()
for i in range(len(a)):
    print(a[i][0])

# Most recent measurement date, and the date exactly one year before
# 2017-08-23 (the hard-coded last date of this dataset).
last_date = session.query(Measurements_class.date).order_by(Measurements_class.date.desc()).first()
last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)

# All precipitation readings from the final year of data.
dates1 = session.query(Measurements_class.date,Measurements_class.prcp).filter(Measurements_class.date>=last_year).all()

# +
dates1 = pd.DataFrame(dates1)
# -

# Keep only the first reading per calendar day before plotting.
dates = dates1.drop_duplicates(subset=["date"], keep="first")

dates.plot("date","prcp")
plt.xlabel("Date")
plt.ylabel("Rain in Inches")
plt.title("Precipitation Analysis (8/23/16 to 8/23/17)")
plt.legend(["Precipitation"])
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()

dates1 = dates.set_index("date")

# Number of distinct stations that have measurements.
stations_locations = session.query(Measurements_class).group_by(Measurements_class.station).count()

##observations by station since in the past year
stations1 = session.query(Measurements_class.station,func.count(Measurements_class.tobs)).filter(Measurements_class.date>=last_year).group_by(Measurements_class.station).all()
len(stations1)

# Linear scan for the station with the highest observation count in the
# (station, count) tuples gathered above.
busiest_station = stations1[0]
num = 0
for i in range(len(stations1)):
    if stations1[i][1]>num:
        num = stations1[i][1]
        busiest_station = stations1[i]

# Temperature observations for the busiest station over the final year.
temperature = session.query(Measurements_class.station, Measurements_class.date, Measurements_class.tobs).filter(Measurements_class.date>=last_year).filter(Measurements_class.station == busiest_station[0]).all()
len(temperature)

temperature_df = pd.DataFrame(temperature)
temperature_df.head()

plt.hist(temperature_df["tobs"],12)
plt.xlabel("Temperature")
plt.ylabel("Number of Observations")
plt.title("Station Analysis for busiest station")
plt.show()


def calc_temps(start_date,end_date):
    """Return [(min, avg, max)] of observed temperatures strictly between
    start_date and end_date (note: both endpoints are EXCLUDED by the
    `>` / `<` filters).

    start_date, end_date: datetime.date bounds for the query.
    """
    data_measure = session.query(func.min(Measurements_class.tobs),func.avg(Measurements_class.tobs),func.max(Measurements_class.tobs)).filter(Measurements_class.date>start_date).filter(Measurements_class.date<end_date).all()
    return data_measure


# Temperature summary for the hypothetical trip window.
start = dt.date(2013,4,23)
end = dt.date(2013,5,20)
temps = calc_temps(start,end)

# +
##I guess I do not really need this..
fig,ax = plt.subplots()
x_axis = range(len(temps))
ax.boxplot(temps)
ax.set_title("Trip Average from (4/23/13) to (5/20/13)")
ax.set_xlabel("Trip")
ax.set_ylabel("Temperature")
fig.tight_layout()
plt.show()
# -

last_year

# Min/avg/max temperature over the final year of data.
last_date = dt.date(2017,8,23)
temp_avg_year = calc_temps(last_year,last_date)
range_temp = temp_avg_year[0][2]-temp_avg_year[0][0]
avg_temp = temp_avg_year[0][1]
min_temp = temp_avg_year[0][0]
max_temp = temp_avg_year[0][2]

# +
# Bar chart of the average temperature; the min-to-max spread is drawn as
# the error bar.
fig,ax = plt.subplots()
temp_bar_chart = ax.bar(1,avg_temp,color = "yellow",yerr = range_temp)
ax.set_xlabel("Trip")
ax.set_ylabel("Temperature")
ax.set_title("Trip Average Temperature")


##I took this code...again
def autolabels(rects):
    # Annotate each bar with its height, placed at 60% of the bar height.
    # Relies on the enclosing cell's `ax` for drawing.
    for rect in rects:
        h=rect.get_height()
        ax.text(rect.get_x() + rect.get_width()/2., .6*h,'%.2f' % float(h) ,ha='center', va='bottom', fontsize=10)


autolabels(temp_bar_chart)
plt.ylim(0,100)
plt.xlim(0,2)
fig.tight_layout()
plt.show()
# -
climate_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Generate a Toy dataset for linear regression. Assume that the model follows a line $(Y = mx + c)$ to generate the $1000$ point and Add white noise to all the points generated. Take $70%$ generated points at random and use it to find the model (line in this example). Plot original line and the computed line along with data points. What is the MSE for the training and test data used

import numpy as np
import matplotlib.pyplot as plot

# # Let slope $(M) = 1$ and Y-intercept $(C) = 0$ :

# +
# Ground-truth line y = 1*x + 0 sampled at 1000 random x in [0, 1).
x_points = np.random.rand(1000,1)
m = 1
c = 0
y_points = m*x_points + c
# -

# Design of Gaussian noise:

# +
y_noise = np.random.randn(1000, 1)
plot.hist(y_noise, bins=50)
plot.xlabel('X-Axis')
plot.ylabel('Y-Axis')
plot.title('Gaussian Noise', fontweight ="bold")
plot.show()
# -

# Adding above noise to y:

# +
y_points = y_points + y_noise
plot.figure(figsize=(20,10))
plot.scatter(x_points, y_points, s=10)
# FIX: corrected "Gussian" typo in the chart title.
plot.title('y-axis with Gaussian Noise', fontweight ="bold")
plot.xlabel('X-Axis')
plot.ylabel('Y-Axis')
# -

# Take 70% of data for training and remaining data for testing:

# +
Data_Train = np.hstack((np.expand_dims(x_points,axis=1),np.expand_dims(y_points,axis=1)))
N = len(Data_Train)
Train_Test = int(N * (70/100)) # i,e Train_Test = 700

Train_Data, Test_Data = Data_Train[:Train_Test], Data_Train[Train_Test:]
x_Data, y_Data = Data_Train[:,0], Data_Train[:,1]
Train_x, Train_y = x_Data[:Train_Test], y_Data[:Train_Test]
Test_x, Test_y = x_Data[Train_Test:], y_Data[Train_Test:]

plot.figure(figsize = (20,10))
plot.scatter(Train_x,Train_y, s=10)
plot.title('Train Data', fontweight ="bold")
plot.xlabel('X-Axis')
plot.ylabel('Y-Axis')

plot.figure(figsize = (20,10))
plot.scatter(Test_x,Test_y, s=10)
plot.title('Test Data', fontweight ="bold")
plot.xlabel('X-Axis')
plot.ylabel('Y-Axis')
# -

# Convert X to (X and ones matrix):

x_oneTrain, x_oneTest = np.append(Train_x,np.ones((len(Train_x),1)),axis=1), np.append(Test_x,np.ones((len(Test_x),1)),axis=1)
x_oneTest

# Setting up initial thetta matrix:

thetta = np.array([[0.0], [0.0]])


# Find cost function or MSE:

def cost_function(X, Y, Thetta):
    """Mean squared error of the linear model X @ Thetta against targets Y.

    X: (n, 2) design matrix (feature column + ones column).
    Y: (n, 1) targets.
    Thetta: (2, 1) parameters [[slope], [intercept]].

    FIX: the original body used the stale module-level `thetta` instead of
    the `Thetta` parameter, so the cost never reflected the current iterate.
    """
    J = np.sum((X.dot(Thetta) - Y)**2) / len(Y)
    return J


# Apply Gradient descent:

def gradient_descent(X, Y, thetta, alpha, iteration):
    """Batch gradient descent for linear regression.

    X: (n, 2) design matrix; Y: (n, 1) targets; thetta: (2, 1) start params;
    alpha: learning rate; iteration: number of steps.
    Returns (final thetta, predictions X @ final thetta).
    """
    Cost_history = [0] * iteration
    n = len(Y)
    for step in range(iteration):
        y_predict = X.dot(thetta)
        Gradient = (1/n)*(X.T.dot(y_predict - Y))
        thetta = thetta - (alpha*Gradient)
        Cost = cost_function(X, Y, thetta)
        Cost_history[step] = Cost
        # Shrink the learning rate only when the cost actually increased.
        # FIX: the original compared step 0 against Cost_history[-1] == 0,
        # which always held and collapsed alpha on the very first step.
        if step > 0 and Cost > Cost_history[step - 1]:
            alpha = alpha*0.01
    # FIX: return predictions from the FINAL parameters (the loop variable
    # held the prediction of the second-to-last iterate).
    y_predict = X.dot(thetta)
    return thetta, y_predict


# ### Training:

# +
Thetta_Train, y_predict = gradient_descent(x_oneTrain, Train_y, thetta, 0.5, 10000)
m, c = Thetta_Train
Y_new = y_predict
print("Value of slope, M: ",m ,"\nValue of Y intercept, C: ", c, "\n\n")
# -

# Mean Squared Error or MSE of training:

MSE = np.sum((Train_y - Y_new)**2)/len(Train_y)
print("Mean squared error or MSE of Training: ", MSE, "\n\n")

# Plotting graph of training:

# +
Train_Y = m*Train_x + c
plot.figure(figsize = (20, 10))
plot.scatter(Train_x,Train_y, s=10)
plot.title('Train', fontweight ="bold")
plot.xlabel('X-Axis')
plot.ylabel('Y-Axis')
# FIX: arguments were swapped (y before x), drawing a transposed line.
plot.plot(Train_x, Train_Y, color='g')
# -

# ### Testing:

# +
Thetta_Test, y_predict = gradient_descent(x_oneTest, Test_y, thetta, 0.5, 10000)
Y_New = y_predict
m_Test, c_Test = Thetta_Test
# FIX: report the test-fit parameters, not the training ones.
print("Value of slope, M: ",m_Test ,"\nValue of Y intercept, C: ", c_Test, "\n\n")
# -

# Mean Squared Error or MSE of testing:

MSE = np.sum((Test_y - Y_New)**2)/len(Test_y)
print("Mean squared error or MSE of Testing: ", MSE)

# Plotting graph of testing:

# +
Test_Y = m*Test_x + c
plot.figure(figsize = (20,10))
plot.scatter(Test_x,Test_y, s=10)
plot.title('Test', fontweight ="bold")
plot.xlabel('X-Axis')
plot.ylabel('Y-Axis')
# FIX: arguments were swapped (y before x), drawing a transposed line.
plot.plot(Test_x, Test_Y, color='r')
# -
Machine Learning/Linear Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# RBF network on the UCI Wine dataset: k-means supplies the radial-basis
# centres, a per-centre sigma is derived from the centre-to-centre distance
# matrix, and an Adaline (SGD linear unit) reads the class out of the
# hidden-layer activations.

import numpy as np
from pandas import DataFrame, read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import minmax_scale

# Column 0 is the class label (1/2/3); the remaining columns are features,
# rescaled to [0, 1].
datos = read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",header=None)
X = minmax_scale(datos.iloc[:,1:])
Y = np.array(datos.iloc[:,0])
type(X),type(Y),X.shape,Y.shape

# Stratified 2/3 train, 1/3 test split (fixed seed for reproducibility).
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=1/3,stratify=Y,random_state=0)

from sklearn.cluster import KMeans

# Six RBF centres fitted on the training split only.
kmedias = KMeans(n_clusters=6).fit(X_train)
centros = kmedias.cluster_centers_
centros

# # Distance matrix - SIGMA

from sklearn.metrics.pairwise import euclidean_distances

# One sigma per centre: the average distance from that centre to the others
# (the diagonal of the distance matrix is zero, hence the n-1 divisor).
distancias = euclidean_distances(centros)
sigma = sum(distancias)/(centros.shape[0]-1)
sigma


def salida_capa_oculta(muestras):
    """Hidden-layer output: Gaussian RBF activation of every sample against
    every centre, using the per-centre sigma computed above."""
    activaciones = np.zeros((muestras.shape[0], centros.shape[0]), dtype=float)
    for fila, muestra in enumerate(muestras):
        for col, centro in enumerate(centros):
            activaciones[fila][col] = np.exp(-sum((muestra-centro)**2)/(2.0*(sigma[col]**2)))
    return activaciones


# # Hidden-layer output, training data

X_mlp_train = salida_capa_oculta(X_train)

# # Hidden-layer output, test data

X_mlp_test = salida_capa_oculta(X_test)

# # Binarisation of the outputs

from sklearn.preprocessing import LabelBinarizer

# One-hot encode the three wine classes for the three output neurons.
etiquetas = LabelBinarizer()
etiquetas.fit([1,2,3])
Ymlp = etiquetas.transform(Y)
Y_mlp_train = etiquetas.transform(Y_train)
Y_mlp_test = etiquetas.transform(Y_test)
X_mlp_train.shape, X_mlp_test.shape, Y_mlp_train.shape, Y_mlp_test.shape

# # Adaline with SGD

from sklearn.linear_model import SGDRegressor

# One linear unit per class, each fitted against its one-hot column; the
# same estimator object is refitted for every output neuron.
adaline = SGDRegressor(max_iter=5000)
Yp_test = np.zeros((Y_test.shape[0],3))
for neurona_salida in range(3):
    adaline.fit(X_mlp_train,Y_mlp_train[:,neurona_salida])
    Yp_test[:,neurona_salida] = adaline.predict(X_mlp_test)

# Accuracy: fraction of test samples whose strongest output matches the
# true one-hot label.
aciertos = sum(np.argmax(Yp_test, axis=1) == np.argmax(Y_mlp_test,axis=1)) / Y_mlp_test.shape[0]
print("Tasa de aciertos =",aciertos*100, "%")
RBF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/margaretmz/esrgan-e2e-tflite-tutorial/blob/master/ml/add%20metadata/Add%20metadata%20to%20ESRGAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="gKfbp8hjNX_t" colab_type="text"
# # Add metadata to ESRGAN
#
# This Colab Notebook adds metadata to a tflite model, which enables CodeGen
# in Android Studio with ML Model Binding.
# -

# !pip install tflite-support

import os
import tensorflow as tf
from absl import flags

from tflite_support import flatbuffers
from tflite_support import metadata as _metadata
from tflite_support import metadata_schema_py_generated as _metadata_fb

# Create 2 directories, and manually upload the original .tflite to the /model_without_metadata folder
# !mkdir model_without_metadata
# !mkdir model_with_metadata

# !wget https://github.com/margaretmz/esrgan-e2e-tflite-tutorial/raw/master/ml/add%20metadata/model_without_metadata/esrgan_dr.tflite
# !wget https://github.com/margaretmz/esrgan-e2e-tflite-tutorial/raw/master/ml/add%20metadata/model_without_metadata/esrgan_fp16.tflite
# !wget https://github.com/margaretmz/esrgan-e2e-tflite-tutorial/raw/master/ml/add%20metadata/model_without_metadata/esrgan_int8.tflite
# !mv *.tflite model_without_metadata/

quantization = "int8" #@param ["dr", "fp16", "int8"]

# The original .tflite file without metadata
MODEL_FILE = f"/content/model_without_metadata/esrgan_{quantization}.tflite"
# This is where we will export a new .tflite model file with metadata, and a .json file with metadata info
EXPORT_DIR = "model_with_metadata"


class MetadataPopulatorForGANModel(object):
    """Populates the metadata for the tflite model."""

    def __init__(self, model_file):
        # Path of the .tflite file that will be modified in place.
        self.model_file = model_file
        # Serialized metadata flatbuffer; filled in by _create_metadata().
        self.metadata_buf = None

    def populate(self):
        """Creates metadata and then populates it into the ESRGAN model file."""
        self._create_metadata()
        self._populate_metadata()

    def _create_metadata(self):
        """Creates the metadata flatbuffer for the ESRGAN tflite model."""
        # Creates model info.
        model_meta = _metadata_fb.ModelMetadataT()
        # FIX: removed a stray no-op `model_meta` expression statement here.
        model_meta.name = "ESRGAN"
        model_meta.description = ("Enhanced super-res GAN for improving image quality. Converted by TFLiteConverter from TF 2.2.0")
        model_meta.version = "v-2020-07-30"
        model_meta.author = "TensorFlow"
        model_meta.license = ("Apache License. Version 2.0 "
                              "http://www.apache.org/licenses/LICENSE-2.0.")

        # Creates info for the input, original image.
        input_image_meta = _metadata_fb.TensorMetadataT()
        input_image_meta.name = "original_image"
        input_image_meta.description = (
            "The expected image is 128 x 128, with three channels "
            "(red, blue, and green) per pixel. Each value in the tensor is between"
            " 0.0 and 255.0.")
        input_image_meta.content = _metadata_fb.ContentT()
        input_image_meta.content.contentProperties = (
            _metadata_fb.ImagePropertiesT())
        input_image_meta.content.contentProperties.colorSpace = (
            _metadata_fb.ColorSpaceType.RGB)
        input_image_meta.content.contentPropertiesType = (
            _metadata_fb.ContentProperties.ImageProperties)
        # Identity normalization (mean 0, std 1): pixel values are passed
        # through to the model unchanged.
        input_image_normalization = _metadata_fb.ProcessUnitT()
        input_image_normalization.optionsType = (
            _metadata_fb.ProcessUnitOptions.NormalizationOptions)
        input_image_normalization.options = _metadata_fb.NormalizationOptionsT()
        input_image_normalization.options.mean = [0.0]
        input_image_normalization.options.std = [1.0]
        input_image_meta.processUnits = [input_image_normalization]
        input_image_stats = _metadata_fb.StatsT()
        input_image_stats.max = [255]
        input_image_stats.min = [0]
        input_image_meta.stats = input_image_stats

        # Creates output info, enhanced image.
        output_image_meta = _metadata_fb.TensorMetadataT()
        output_image_meta.name = "enhanced_image"
        output_image_meta.description = "Image enhanced."
        output_image_meta.content = _metadata_fb.ContentT()
        output_image_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
        output_image_meta.content.contentProperties.colorSpace = (
            _metadata_fb.ColorSpaceType.RGB)
        output_image_meta.content.contentPropertiesType = (
            _metadata_fb.ContentProperties.ImageProperties)
        output_image_normalization = _metadata_fb.ProcessUnitT()
        output_image_normalization.optionsType = (
            _metadata_fb.ProcessUnitOptions.NormalizationOptions)
        output_image_normalization.options = _metadata_fb.NormalizationOptionsT()
        output_image_normalization.options.mean = [0.0]
        output_image_normalization.options.std = [1.0]
        output_image_meta.processUnits = [output_image_normalization]
        output_image_stats = _metadata_fb.StatsT()
        output_image_stats.max = [255.0]
        output_image_stats.min = [0.0]
        output_image_meta.stats = output_image_stats

        # Creates subgraph info tying the input/output tensors together.
        subgraph = _metadata_fb.SubGraphMetadataT()
        subgraph.inputTensorMetadata = [input_image_meta]
        subgraph.outputTensorMetadata = [output_image_meta]
        model_meta.subgraphMetadata = [subgraph]

        b = flatbuffers.Builder(0)
        b.Finish(
            model_meta.Pack(b),
            _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
        self.metadata_buf = b.Output()

    def _populate_metadata(self):
        """Populates metadata to the model file."""
        populator = _metadata.MetadataPopulator.with_model_file(self.model_file)
        populator.load_metadata_buffer(self.metadata_buf)
        populator.populate()


def populate_metadata(model_file):
    """Copies the model into EXPORT_DIR, populates metadata into the copy,
    and writes the metadata alongside it as a .json file.

    Args:
        model_file: valid path to the .tflite model file (without metadata).
    """
    # Populates metadata for the model.
    model_file_basename = os.path.basename(model_file)
    export_path = os.path.join(EXPORT_DIR, model_file_basename)
    tf.io.gfile.copy(model_file, export_path, overwrite=True)

    populator = MetadataPopulatorForGANModel(export_path)
    populator.populate()

    # Displays the metadata that was just populated into the tflite model.
    displayer = _metadata.MetadataDisplayer.with_model_file(export_path)
    export_json_file = os.path.join(
        EXPORT_DIR, os.path.splitext(model_file_basename)[0] + ".json")
    json_file = displayer.get_metadata_json()
    with open(export_json_file, "w") as f:
        f.write(json_file)

    print("Finished populating metadata and associated file to the model:")
    print(export_path)
    print("The metadata json file has been saved to:")
    # FIX: reuse the already-computed path instead of rebuilding it.
    print(export_json_file)


populate_metadata(MODEL_FILE)
ml/add metadata/Add metadata to ESRGAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 64-bit (conda)
#     name: python3
# ---

# <a href="https://colab.research.google.com/github/Coldestadam/DynamicHead/blob/master/torch/DyHead_Example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Walkthrough of a DynamicHead (DyHead) forward pass: sample image ->
# ResNet-50 FPN backbone -> concatenated feature pyramid -> scale-, spatial-
# and task-aware attention layers -> a full DyHead stack.

import torch
import torch.nn as nn

# # Grabbing a random Image

import urllib.request
import PIL.Image as Image

# Download a sample image to push through the network.
img_url = "https://s3.amazonaws.com/cdn-origin-etr.akc.org/wp-content/uploads/2017/11/20113314/Carolina-Dog-standing-outdoors.jpg"
urllib.request.urlretrieve(img_url, 'dog.jpg')
img = Image.open('dog.jpg')
print(img.size)
img

# # Transforming image
#
# Resizing image to (224, 224) because of the input size of ResNet Models

import torchvision.transforms as transforms

transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
# unsqueeze adds the leading batch dimension -> (1, 3, 224, 224).
img_tensor = transform(img).unsqueeze(dim=0)
print(img_tensor.shape)

# # Importing Resnet model with FPN Backbone

from torchvision.models.detection.backbone_utils import resnet_fpn_backbone

backbone_name = 'resnet50'
backbone = resnet_fpn_backbone(backbone_name=backbone_name, pretrained=True)

# # Going through each step in a forward pass through each step of Figure 1.
#
# ![img](https://github.com/Coldestadam/DynamicHead/blob/master/imgs/Figure_1.png?raw=true)

# Passing image through backbone
F = backbone(img_tensor)
print(type(F))

# Count only the pyramid levels whose key parses as an integer; non-numeric
# keys raise ValueError and are skipped (presumably the FPN's extra "pool"
# entry — confirm against the torchvision version in use).
num_levels = 0
# F contains all the outputs of the Feature Pyramid Network
for level, level_tensor in F.items():
    print('Level: {} \t Shape: {}'.format(level, level_tensor.shape))
    try:
        if isinstance(int(level), int):
            num_levels+=1
    except ValueError:
        pass
print('{} total levels'.format(num_levels))

# ## Defining concatenation layer
#
# This will upsample or downsample each level to the median Height and Width

import numpy as np
from concat_fpn_output import concat_feature_maps

concat_layer = concat_feature_maps()
F = concat_layer(F)
print('Shape: {}'.format(F.shape))

# Unpack (L levels, S flattened spatial positions, C channels) from the
# concatenated tensor; S is a square, hence the sqrt for height/width.
L, S, C = F.shape[1:]
print('Median HeightxWidth: {}x{}'.format(int(np.sqrt(S)), int(np.sqrt(S))))
print("F Dimensions\nL: {} S: {} C: {}".format(L, S, C))

# ## Defining Scale-aware Attention Layer
#
# ![scale_img](https://github.com/Coldestadam/DynamicHead/blob/master/imgs/scale_attention.png?raw=true)

from attention_layers import Scale_Aware_Layer

scale_layer = Scale_Aware_Layer(s_size=S)
scale_output = scale_layer(F)
print(f'Shape of scale output: {scale_output.shape}')

# ## Defining Spatial-aware Attention Layer
# ![spatial_img](https://github.com/Coldestadam/DynamicHead/blob/master/imgs/spatial_attention.png?raw=true)

from attention_layers import Spatial_Aware_Layer

spatial_layer = Spatial_Aware_Layer(L_size=L)
spatial_output = spatial_layer(scale_output)
# NOTE(review): label text says "scale output" but this prints the SPATIAL
# output shape (copy-paste of the previous cell's message).
print(f'Shape of scale output: {spatial_output.shape}')

# ## Defining Task-aware Attention Layer
#
# ![task_image](https://github.com/Coldestadam/DynamicHead/blob/master/imgs/task_aware.png?raw=true)

from attention_layers import Task_Aware_Layer

task_layer = Task_Aware_Layer(num_channels=C)
task_output = task_layer(spatial_output)
# NOTE(review): label text says "scale output" but this prints the TASK
# output shape (copy-paste of the previous cell's message).
print(f'Shape of scale output: {task_output.shape}')

# # Constructing a single DyHead Block
#
# ![DyHead img](https://github.com/Coldestadam/DynamicHead/blob/master/imgs/DyHead_Block.png?raw=true)

from DyHead import DyHead_Block

dyhead_block = DyHead_Block(L=L, S=S, C=C)
print(dyhead_block)

# # Constructing a DyHead

from DyHead import DyHead

num_blocks = 6 # This is the baseline given in the paper
full_head = DyHead(num_blocks, L, S, C)
print(full_head)
torch/DyHead_Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tutorial of QA_FAST(FMRIB's Automated Segmentation Tool)

# FAST (FMRIB's Automated Segmentation Tool) segments a 3D image of the brain into different tissue types (Grey Matter, White Matter, CSF, etc.), whilst also correcting for spatial intensity variations (also known as bias field or RF inhomogeneities). FAST is based on a hidden Markov random field model and an associated Expectation-Maximization algorithm. The whole process is fully automated and can also produce a bias field-corrected input image and a probabilistic and/or partial volume tissue segmentation. It is robust and reliable, compared to most finite mixture model-based methods, which are sensitive to noise.
# more information about FAST, please visit https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FAST
#
# A quality analysis picture showing the segmentation of a skull-removed brain into white matter, gray matter, and cerebrospinal fluid. Segmentation uses the function of FAST in FSL. You can use the command line below to call the FAST function in FSL for splitting. The file type is nifti image.
#
# fast -t 1 -n 3 -o basename_for_input_skull-removed brain basename_for_outputs

# ## Import packages

# qa_fast depends on matplotlib, numpy, nibabel, scipy and uses functions from m2g's qa_utils.

import matplotlib.pyplot as plt
import numpy as np
import nibabel as nb
from scipy import ndimage
from matplotlib.colors import LinearSegmentedColormap
from m2g.utils.qa_utils import pad_im
from m2g.stats.qa_fast import qa_fast_png

# ## qa_fast_png

# qa_fast_png overlays the WM, GM and CSF volumes in different colors.
# Three slices are cut from each of the sagittal, axial, and coronal
# directions, at fractional positions 0.35, 0.51, and 0.65 of each axis.
# NOTE: this local definition shadows the qa_fast_png imported above.


def qa_fast_png(csf, gm, wm, outdir):
    """
    FAST (FMRIB's Automated Segmentation Tool) segments a 3D image of the brain into different tissue types (Grey Matter, White Matter, CSF, etc.)
    Mark different colors of white matter, gray matter, cerebrospinal fluid in a '3 by 3' picture, i.e. QA for FAST

    Parameters
    ---------------
    csf: str
        the path of csf nifti image
    gm: str
        the path of gm nifti image
    wm: str
        the path of wm nifti image
    outdir: str
        the path to save QA graph (passed directly to savefig)
    """
    # load data
    # NOTE(review): get_data() is deprecated in newer nibabel releases;
    # get_fdata() is the modern replacement — confirm the pinned version.
    gm_data = nb.load(gm).get_data()
    csf_data = nb.load(csf).get_data()
    wm_data = nb.load(wm).get_data()

    # set Color map: white->blue (CSF), white->magenta (GM), white->green (WM)
    cmap1 = LinearSegmentedColormap.from_list('mycmap1', ['white', 'blue'])
    cmap2 = LinearSegmentedColormap.from_list('mycmap2', ['white', 'magenta'])
    # NOTE(review): registered list name 'mycmap2' is reused here (harmless
    # for from_list, but presumably meant to be 'mycmap3').
    cmap3 = LinearSegmentedColormap.from_list('mycmap2', ['white', 'green'])

    overlay = plt.figure()
    overlay.set_size_inches(12.5, 10.5, forward=True)
    plt.title(f'Qa for FAST(segments a 3D image of the brain into different tissue types)\n (scan volume:{gm_data.shape}) \n', fontsize=22)
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')

    # Determine whether the input data types are consistent. If they are inconsistent, an error is reported.
    if gm_data.shape != csf_data.shape:
        raise ValueError("GM and CSF are not the same shape.")
    elif gm_data.shape != wm_data.shape:
        raise ValueError("GM and WM are not the same shape.")
    elif wm_data.shape != csf_data.shape:
        raise ValueError("WM and CSF are not the same shape.")

    # Set the 3D matrix cutting position in three directions
    shape = csf_data.shape
    index = [0.35, 0.51, 0.65]
    x = [int(shape[0] * index[0]), int(shape[0] * index[1]), int(shape[0] * index[2])]
    y = [int(shape[1] * index[0]), int(shape[1] * index[1]), int(shape[1] * index[2])]
    z = [int(shape[2] * index[0]), int(shape[2] * index[1]), int(shape[2] * index[2])]
    coords = (x, y, z)

    # Set labels for the y-axis
    labs = [
        "Sagittal Slice",
        "Coronal Slice",
        "Axial Slice",
    ]
    var = ["X", "Y", "Z"]

    # Generate 3 by 3 picture: one row per anatomical plane, one column per
    # cut position.
    idx = 0
    for i, coord in enumerate(coords):
        for pos in coord:
            idx += 1
            ax = overlay.add_subplot(3, 3, idx)
            ax.set_title(var[i] + " = " + str(pos))
            # Slice along the axis for this row; rotate 90 degrees so the
            # image is displayed upright.
            if i == 0:
                csf_slice = ndimage.rotate(csf_data[pos, :, :], 90)
                gm_slice = ndimage.rotate(gm_data[pos, :, :], 90)
                wm_slice = ndimage.rotate(wm_data[pos, :, :], 90)
            elif i == 1:
                csf_slice = ndimage.rotate(csf_data[:, pos, :], 90)
                gm_slice = ndimage.rotate(gm_data[:, pos, :], 90)
                wm_slice = ndimage.rotate(wm_data[:, pos, :], 90)
            else:
                csf_slice = ndimage.rotate(csf_data[:, :, pos], 90)
                gm_slice = ndimage.rotate(gm_data[:, :, pos], 90)
                wm_slice = ndimage.rotate(wm_data[:, :, pos], 90)

            # set y labels only on the first column of each row
            if idx % 3 == 1:
                plt.ylabel(labs[i])

            # scale probability maps to 0-255 and pad pictures to make them
            # the same size
            csf_slice = (csf_slice*255).astype(np.uint8)
            gm_slice = (gm_slice*255).astype(np.uint8)
            wm_slice = (wm_slice*255).astype(np.uint8)
            csf_slice = pad_im(csf_slice, max(shape), 0, False)
            gm_slice = pad_im(gm_slice, max(shape), 0, False)
            wm_slice = pad_im(wm_slice, max(shape), 0, False)

            # hide axes
            ax.set_xticks([])
            ax.set_yticks([])
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.spines['bottom'].set_visible(False)
            ax.spines['left'].set_visible(False)

            # display image: CSF opaque, GM and WM alpha-blended on top
            ax.imshow(csf_slice, interpolation="none", cmap=cmap1, alpha=1)
            ax.imshow(gm_slice, interpolation="none", cmap=cmap2, alpha=0.5)
            ax.imshow(wm_slice, interpolation="none", cmap=cmap3, alpha=0.3)

            # Legend of white matter(WM), gray matter(GM) and cerebrospinal fluid(csf)
            # NOTE(review): legend marks GM as 'pink' while its colormap is
            # magenta — confirm the intended legend color.
            if idx == 3:
                plt.plot(0, 0, "-", c='green', label='wm')
                plt.plot(0, 0, "-", c='pink', label='gm')
                plt.plot(0, 0, "-", c='blue', label='csf')
                plt.legend(loc='upper right',fontsize=15,bbox_to_anchor=(1.5,1.2))

    # save figure
    overlay.savefig(f"{outdir}", format="png")


# ## Set the input data path

# output_dir is a path of m2g output.
# csf_input, gm_input, wm_input are the paths of the nifti images of csf,
# gm and wm, respectively. The name of the qa picture is qa_fast.png.

# +
# change this path to your own m2g output_dir when you want to run in your own computer
output_dir = 'd:/Downloads/neurodatadesign/output_data/flirt/sub-0025864/ses-1/'

# The following input path will be generated automatically
csf_input = f"{output_dir}anat/preproc/t1w_seg_pve_0.nii.gz"
gm_input = f"{output_dir}anat/preproc/t1w_seg_pve_1.nii.gz"
wm_input = f"{output_dir}anat/preproc/t1w_seg_pve_2.nii.gz"

# The path where the picture is saved, the name is qa_fast.png
save_dir = f"{output_dir}qa/reg/qa_fast.png"
# -

# ## Call function to generate quality analysis picture

# The green part of the picture shows white matter, the magenta part shows
# gray matter, and the blue part shows cerebrospinal fluid.
# The number on each small picture represents the position of the slice,
# and the vertical axis on the left shows the orientation of the slice.
#
# You can import qa_fast_png with 'from m2g.stats.qa_fast import qa_fast_png'
# or use the qa_fast_png defined in this tutorial.

# Generates quality analysis pictures of white matter, gray matter and cerebrospinal fluid
qa_fast_png(csf_input, gm_input, wm_input, save_dir)

# ## Potential Issues

# If you see the colors for the various sections of the brain are incorrect (white matter is pink or blue) check the order of the inputs for qa_fast_png. The order determines the color-scheme used for each tissue type.
#
# The color contrast of white matter, gray matter and cerebrospinal fluid can be adjusted by parameters in the function, and the color can also be adjusted.

# Change the alpha(0-1) parameter to adjust the color depth.
# NOTE: the two cells below are illustrative snippets copied from inside
# qa_fast_png; they reference names local to the function and are not
# runnable on their own.

# +
ax.imshow(csf_slice, interpolation="none", cmap=cmap1, alpha=1)
ax.imshow(gm_slice, interpolation="none", cmap=cmap2, alpha=0.5)
ax.imshow(wm_slice, interpolation="none", cmap=cmap3, alpha=0.3)
# -

# the last parameter can change color

# +
cmap1 = LinearSegmentedColormap.from_list('mycmap1', ['white', 'blue'])
cmap2 = LinearSegmentedColormap.from_list('mycmap2', ['white', 'magenta'])
cmap3 = LinearSegmentedColormap.from_list('mycmap2', ['white', 'green'])
tutorials/Tutorial_of_QA_for_FAST.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Downloading PDFs

import re
import requests
import cssselect
from lxml import html
from lxml.html import parse, fromstring
import urllib
from urllib import request
import concurrent
import glob
import os

# Base URL the per-constituency links on the index page are relative to,
# the index page itself, and the local destination folder.
baseurl = 'http://ceowestbengal.nic.in/UploadFiles/Election/Parliament/2019/'
page = 'http://ceowestbengal.nic.in/UploadFiles/AE2021/Form20/Form20_GE2021.html'
folder = '/home/hennes/Internship/pdfs/'

# One-off cleanup of previously downloaded files: rename two-digit names
# (e.g. "PC17.pdf") to zero-padded three-digit names ("PC017.pdf") so that
# lexicographic order matches numeric order.
# FIX: previously done with two side-effecting list comprehensions, the
# first of which only built a preview list that was discarded.
for entry in glob.glob(folder + '*'):
    tail = os.path.split(entry)[1].split('C')[-1]
    if len(tail.split('.')[0]) == 2:
        os.rename(entry, folder + 'PC0' + tail)

# Fetch the index page and collect the (sorted) absolute PDF links from its
# table cells.
response = requests.get(page)
tree = html.fromstring(response.text)
linkelements = [x for x in tree.cssselect('td > a')]
links = sorted([baseurl + x.attrib['href'] for x in linkelements])


def download_pdf(link):
    """Download one Form-20 PDF into `folder` as PC<NNN>.pdf.

    The constituency number is taken from the file name in the URL (the part
    after 'C' and before the first '_' or '.') and zero-padded to three
    digits so the saved files sort numerically.
    """
    name = link.split('/')[-1].split('.')[0].split('_')[0].split('C')[1]
    # FIX: replaces the chained len()==1 / len()==2 padding with the
    # equivalent zfill(3).
    name = name.zfill(3)
    print(f'downloading {name}')
    request.urlretrieve(link, f'{folder}PC{name}.pdf')


# NOTE(review): the last six links are skipped — presumably non-constituency
# entries at the bottom of the index table; confirm against the live page.
for link in links[:-6]:
    download_pdf(link)
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] colab_type="text" id="MhoQ0WE77laV"
# ##### Copyright 2019 The TensorFlow Authors.

# + cellView="form" colab_type="code" id="_ckMIh7O7s6D" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# + [markdown] colab_type="text" id="jYysdyb-CaWM"
# # Custom training with TPUs

# + [markdown] colab_type="text" id="S5Uhzt6vVIB2"
# <table class="tfo-notebook-buttons" align="left">
#   <td>
#     <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/tutorials/distribute/tpu_custom_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/distribute/tpu_custom_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
#   </td>
# </table>

# + [markdown] colab_type="text" id="FbVhjPpzn6BM"
# This tutorial will take you through using [tf.distribute.experimental.TPUStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy). This is a new strategy, a part of `tf.distribute.Strategy`, that allows users to easily switch their model to using TPUs. As part of this tutorial, you will create a Keras model and take it through a custom training loop (instead of calling `fit` method).
#
# You should be able to understand what is a strategy and why it’s necessary in Tensorflow. This will help you switch between CPU, GPUs, and other device configurations more easily once you understand the strategy framework. To make the introduction easier, you will also make a Keras model that produces a simple convolutional neural network. A Keras model usually is trained in one line of code (by calling its `fit` method), but because some users require additional customization, we showcase how to use custom training loops. Distribution Strategy was originally written by DeepMind -- you can [read the story here](https://deepmind.com/blog/tf-replicator-distributed-machine-learning/).

# + colab_type="code" id="dzLKpmZICaWN" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals

# Import TensorFlow
# NOTE(review): jupytext escaped the Colab magic below into comments, leaving
# the `try` body empty — this cell only runs after round-tripping back into
# Colab/Jupyter, not as a plain .py script.
try:
  # # %tensorflow_version only exists in Colab.
  # %tensorflow_version 2.x
except Exception:
  pass

import tensorflow.compat.v1 as tf

# Helper libraries
import numpy as np
import os

# Fail fast unless this runs on a TPU-enabled Colab runtime with TF >= 1.14.
assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'
assert float('.'.join(tf.__version__.split('.')[:2])) >= 1.14, 'Make sure that Tensorflow version is at least 1.14'

# + colab_type="code" id="5TPCyNk1LjAC" colab={}
# gRPC endpoint of the Colab-provided TPU worker.
TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR']

# + [markdown] colab_type="text" id="MM6W__qraV55"
# ## Create model

# + [markdown] colab_type="text" id="58ff7ew6MK9d"
# Since you will be working with the [MNIST data](https://en.wikipedia.org/wiki/MNIST_database), which is a collection of 70,000 greyscale images representing digits, you want to be using a convolutional neural network to help us with the labeled image data. You will use the Keras API.
# + colab_type="code" id="7MqDQO0KCaWS" colab={} def create_model(input_shape): """Creates a simple convolutional neural network model using the Keras API""" return tf.keras.Sequential([ tf.keras.layers.Conv2D(28, kernel_size=(3, 3), activation='relu', input_shape=input_shape), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation=tf.nn.softmax), ]) # + [markdown] colab_type="text" id="4AXoHhrsbdF3" # ## Loss and gradient # + [markdown] colab_type="text" id="5mVuLZhbem8d" # Since you are preparing to use a custom training loop, you need to explicitly write down the loss and gradient functions. # + colab_type="code" id="F2VeZUWUj5S4" colab={} def loss(model, x, y): """Calculates the loss given an example (x, y)""" logits = model(x) return logits, tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits) def grad(model, x, y): """Calculates the loss and the gradients given an example (x, y)""" logits, loss_value = loss(model, x, y) return logits, loss_value, tf.gradients(loss_value, model.trainable_variables) # + [markdown] colab_type="text" id="k53F5I_IiGyI" # ## Main function # + [markdown] colab_type="text" id="0Qb6nDgxiN_n" # Previous sections highlighted the most important parts of the tutorial. The following code block gives a complete and runnable example of using TPUStrategy with a Keras model and a custom training loop. 
# + colab_type="code" id="jwJtsCQhHK-E" colab={}
tf.keras.backend.clear_session()

# Connect to the TPU worker and build a distribution strategy over it.
resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=TPU_WORKER)
tf.contrib.distribute.initialize_tpu_system(resolver)
strategy = tf.contrib.distribute.TPUStrategy(resolver)

# Load MNIST training and test data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# All MNIST examples are 28x28 pixel greyscale images (hence the 1
# for the number of channels).
input_shape = (28, 28, 1)

# Only specific data types are supported on the TPU, so it is important to
# pay attention to these.
# More information:
# https://cloud.google.com/tpu/docs/troubleshooting#unsupported_data_type
x_train = x_train.reshape(x_train.shape[0], *input_shape).astype(np.float32)
x_test = x_test.reshape(x_test.shape[0], *input_shape).astype(np.float32)
y_train, y_test = y_train.astype(np.int64), y_test.astype(np.int64)

# The batch size must be divisible by the number of workers (8 workers),
# so batch sizes of 8, 16, 24, 32, ... are supported.
BATCH_SIZE = 32

NUM_EPOCHS = 5
train_steps_per_epoch = len(x_train) // BATCH_SIZE
test_steps_per_epoch = len(x_test) // BATCH_SIZE

# + [markdown] colab_type="text" id="GPrDC8IfOgCT"
# ## Start by creating objects within the strategy's scope
#
# Model creation, optimizer creation, etc. must be written in the context of strategy.scope() in order to use TPUStrategy.
#
# Also initialize metrics for the train and test sets. More information: `keras.metrics.Mean` and `keras.metrics.SparseCategoricalAccuracy`

# + colab_type="code" id="s_suB7CZNw5W" colab={}
with strategy.scope():
  model = create_model(input_shape)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

  training_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
  training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
      'training_accuracy', dtype=tf.float32)
  test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
  test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
      'test_accuracy', dtype=tf.float32)

# + [markdown] colab_type="text" id="d3iLK5ZtO1_R"
# ## Define custom train and test steps

# + colab_type="code" id="XAF6xfU0N5ID" colab={}
with strategy.scope():
  def train_step(inputs):
    """Each training step runs this custom function which calculates
    gradients and updates weights.
    """
    x, y = inputs

    logits, loss_value, grads = grad(model, x, y)

    update_loss = training_loss.update_state(loss_value)
    update_accuracy = training_accuracy.update_state(y, logits)

    # Show that this is truly a custom training loop:
    # multiply all gradients by 2.
    # BUG FIX: `grads` is a Python LIST of tensors, so the original
    # `grads = grads * 2` repeated the list (doubling its length) rather
    # than doubling each gradient; `zip` below then silently dropped the
    # extra entries, so the gradients were applied unscaled.  Double each
    # gradient tensor element-wise instead.
    grads = [g * 2 for g in grads]

    update_vars = optimizer.apply_gradients(
        zip(grads, model.trainable_variables))

    with tf.control_dependencies([update_vars, update_loss, update_accuracy]):
      return tf.identity(loss_value)

  def test_step(inputs):
    """Each training step runs this custom function"""
    x, y = inputs

    logits, loss_value = loss(model, x, y)

    update_loss = test_loss.update_state(loss_value)
    update_accuracy = test_accuracy.update_state(y, logits)

    with tf.control_dependencies([update_loss, update_accuracy]):
      return tf.identity(loss_value)

# + [markdown] colab_type="text" id="AhrK1-yEO7Nf"
# ## Do the training
# In order to make the reading a little bit easier, the full training loop calls two helper functions, `run_train()` and `run_test()`.
# + colab_type="code" id="or5osuheouVU" colab={}
def run_train():
  # Train: reinitialize the iterator, run distributed steps until the
  # dataset is exhausted, then report and reset the epoch metrics.
  session.run(train_iterator_init)
  while True:
    try:
      session.run(dist_train)
    except tf.errors.OutOfRangeError:
      break
  print('Train loss: {:0.4f}\t Train accuracy: {:0.4f}%'.format(
      session.run(training_loss_result),
      session.run(training_accuracy_result) * 100))
  training_loss.reset_states()
  training_accuracy.reset_states()

def run_test():
  # Test: same drive loop as run_train, on the evaluation iterator.
  session.run(test_iterator_init)
  while True:
    try:
      session.run(dist_test)
    except tf.errors.OutOfRangeError:
      break
  print('Test loss: {:0.4f}\t Test accuracy: {:0.4f}%'.format(
      session.run(test_loss_result),
      session.run(test_accuracy_result) * 100))
  test_loss.reset_states()
  test_accuracy.reset_states()

# + colab_type="code" id="u5LvzAwjN95j" colab={}
with strategy.scope():
  training_loss_result = training_loss.result()
  training_accuracy_result = training_accuracy.result()
  test_loss_result = test_loss.result()
  test_accuracy_result = test_accuracy.result()

config = tf.ConfigProto()
config.allow_soft_placement = True
cluster_spec = resolver.cluster_spec()
if cluster_spec:
  config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())

print('Starting training...')

# Do all the computations inside a Session (as opposed to doing eager mode)
with tf.Session(target=resolver.master(), config=config) as session:
  all_variables = (
      tf.global_variables() + training_loss.variables +
      training_accuracy.variables + test_loss.variables +
      test_accuracy.variables)

  train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(BATCH_SIZE, drop_remainder=True)
  train_iterator = strategy.make_dataset_iterator(train_dataset)

  test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(BATCH_SIZE, drop_remainder=True)
  # BUG FIX: the test iterator was built from `train_dataset`, so the
  # "test" metrics were actually computed on training data.
  test_iterator = strategy.make_dataset_iterator(test_dataset)

  train_iterator_init = train_iterator.initializer
  # BUG FIX: `initializer` is an attribute (an op), not a callable;
  # `test_iterator.initializer()` raised a TypeError at graph build time.
  test_iterator_init = test_iterator.initializer

  session.run([v.initializer for v in all_variables])

  dist_train = strategy.experimental_run(train_step, train_iterator).values
  dist_test = strategy.experimental_run(test_step, test_iterator).values

  # Custom training loop
  for epoch in range(0, NUM_EPOCHS):
    print('Starting epoch {}'.format(epoch))
    run_train()
    run_test()
site/en/r1/tutorials/distribute/tpu_custom_training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Two-zone synchrotron self-Compton (SSC) model: drives an external C++
# program ("sscmodel") twice with different source parameters, then overlays
# the two model spectra on multi-wavelength data.  Python 2 notebook (note
# the use of `unicode` below).

# +
# %matplotlib inline
import os
import os.path
import shutil
from io import StringIO
import scipy
import numpy as np
import astropy.units as u
import astropy.cosmology as cosmo
from astropy.constants import m_e, c
from astropy.io import ascii
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.interpolate as sint

# Matplotlib
mpl.rcParams['savefig.dpi'] = 180
mpl.rcParams["figure.dpi"] = 180

# FORMAT (for the paper)
mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
mpl.rcParams.update({'figure.autolayout': True})
mpl.rcParams.update({'figure.subplot.bottom' : 0.15})
mpl.rcParams['text.usetex']=True
mpl.rcParams['text.latex.unicode']=True
mpl.rc('font', family='serif', serif='cm10')

# FIGSIZE
WIDTH = 427 # the number latex spits out
FACTOR = 0.45 # the fraction of the width you'd like the figure to occupy
fig_width_pt = WIDTH * FACTOR
inches_per_pt = 1.0 / 72.27
# NOTE(review): the next line overrides the standard pt->inch factor above
# with a dpi-based value — presumably intentional display scaling; confirm.
inches_per_pt = 4.0 / mpl.rcParams['savefig.dpi']
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
fig_dims = [fig_width_in, fig_height_in] # fig dims as a list
mpl.rcParams['figure.figsize'] = fig_width_in, fig_height_in
mpl.rcParams['axes.labelweight'] = 'bold'
mpl.rcParams['font.weight'] = 'bold'

# +
from string import Template
# Input file template read by the external "sscmodel" binary: alternating
# description line / value line pairs.
src = Template(
"""FLAG power-law (PL)=0, broken-powerlaw (BPL) = 1 for electron distribution
$BPL
FLAG_SSA SSA=1, Simple Syn =0
$SSA
MAG magnetic field of source in Gauss
$B
INDEX p1 p2 (for PL, it ignores the second index)
$alpha1 $alpha2
electron density (erg/cc)
$Ke
E1MIN E1MAX Energy of gamma rays in eV
$Egmin $Egmax
Radius Source radius in cm
$R
GammaMin GammaMax (Minimum and maximum values of gamma for the energy of electrons)
$gmin $gmax
GammaBreak (Break in the energy. It will be ingored for PL case)
$gbr
Doppler factor
$delta
Red Shift
$z
theta (The angle between our line of sight and jet direction)
$theta
OmegaM
$OmegaM
Omega_Lambda
$OmegaL
""")

### Compile the SED macro if it does not exist
if not os.path.exists("sscmodel"):
    os.system("g++ -lm ic_spectrum_new.C -o sscmodel")

# +
cosmology = { "BPL":1, "OmegaM":0.3, "OmegaL":0.7, "z":0.601 }

#electron_erg = 0.5109989461*1.60218e-6

#### First component (IN)
source1 = dict(cosmology)
source1['BPL'] = 1
source1['SSA'] = 1
source1['z'] = 0.601
source1['B'] = 0.045 # Gauss
source1['R'] = 4.8e16 # cm
source1['alpha1'] = 2.0
source1['alpha2'] = 4.0
source1['Ke'] = 0.0031
source1['Egmin'] = 1e-5
source1['Egmax'] = 1e13
# Electron Lorentz-factor bounds are passed as natural logs.
source1['gmin'] = np.log(1e4)
source1['gmax'] = np.log(8e5)
source1['gbr'] = np.log(5.4e4)
source1['delta'] = 30
source1['theta'] = 1.9

with open(str("InputICSSC"), 'w+') as f:
    f.write(src.substitute(source1))
exe="./sscmodel > ssc1.dat"; print(exe)
os.system(exe)

### Second component (OUT)
source2 = dict(source1)
source2['B'] = 0.031 # Gauss
source2['R'] = 1.9e18 # cm
source2['alpha1'] = 2.1
source2['alpha2'] = 4.0
source2['Ke'] = 0.00002
source2['gmin'] = np.log(1e3)
source2['gmax'] = np.log(4e4)
source2['gbr'] = np.log(2e4)
source2['delta'] = 9

with open(str("InputICSSC"), 'w+') as f:
    f.write(src.substitute(source2))
exe="./sscmodel > ssc2.dat"; print(exe)
os.system(exe)

# +
# Parse the model output, dropping lines that start with a space
# (non-numeric header/footer text from the C++ program).
with open("ssc1.dat",'r') as f:
    rawdata = f.readlines()
rawdata = '\n'.join([l for l in rawdata if l[0]!=" "])
#data = data.replace(" Number","# Number")
#data = data.replace(" Bolometric","# Bolometric")
#print(rawdata)
model1 = np.loadtxt(StringIO(unicode(rawdata)))

with open("ssc2.dat",'r') as f:
    rawdata = f.readlines()
rawdata = '\n'.join([l for l in rawdata if l[0]!=" "])
#data = data.replace(" Number","# Number")
#data = data.replace(" Bolometric","# Bolometric")
#print(rawdata)
model2 = np.loadtxt(StringIO(unicode(rawdata)))

# +
plt.xscale('log')
plt.yscale('log')

########### archival
archival = np.loadtxt('archival.csv')
# Column 3 is the flux uncertainty; zero marks an upper limit.
archival_points = archival[:,3]>0
archival_uls = archival[:,3]==0
err_pos = \
10**(archival[:,2][~archival_uls]+archival[:,3][~archival_uls])-\
10**(archival[:,2][~archival_uls])
err_neg = \
-10**(archival[:,2][~archival_uls]-archival[:,3][~archival_uls])+\
10**(archival[:,2][~archival_uls])
plt.errorbar(\
    x = 10**archival[:,0][~archival_uls],\
    y = 10**archival[:,2][~archival_uls],\
    yerr = [err_neg,err_pos],\
    ms=1.5,marker='.',mfc='0.6',alpha=1,mec='0.6', ecolor='0.6',ls='',lw=0.5,\
    capsize=0,zorder=-3,label='ASDC')
# Upper limits: drawn as downward arrows (asymmetric error bars at 0.3x flux).
plt.errorbar(\
    x = 10**archival[:,0][archival_uls],\
    y = 0.3*10**archival[:,2][archival_uls],\
    yerr = [0.0*10**archival[:,2][archival_uls], 0.7*10**archival[:,2][archival_uls]],
    ms=0.8,marker='v',mfc='0.75',alpha=1,mec='0.75', ecolor='0.75',ls='',lw=0.5,\
    capsize=0,zorder=-3,mew=1)

# Planck constant in eV*s: dividing an energy in eV by this gives Hz.
eV2Hz = 4.13566553853599E-15

mwl_data = ascii.read('MW_Data.csv',\
    Reader=ascii.sextractor.SExtractor,delimiter=' ')
mwl_data['energy'].convert_unit_to(u.MeV)
mwl_data['energy_edge_lo'].convert_unit_to(u.MeV)
mwl_data['energy_edge_hi'].convert_unit_to(u.MeV)

# Central frequency plus lower/upper bin half-widths, all in Hz.
data = {}
data['E'] = mwl_data['energy'].to('eV')/eV2Hz
data['El'] = mwl_data['energy'].to('eV')/eV2Hz-mwl_data['energy_edge_lo'].to('eV')/eV2Hz
data['Eh'] = mwl_data['energy_edge_hi'].to('eV')/eV2Hz-mwl_data['energy'].to('eV')/eV2Hz
data['F'] = mwl_data['flux']
data['eF'] = mwl_data['flux_error']

# Split into detections (ul==0) and upper limits (ul==1); strip astropy
# Quantity wrappers where present.
uls = dict(data)
for k in uls:
    try:
        uls[k] = (uls[k][mwl_data['ul']==1]).value
    except:
        uls[k] = (uls[k][mwl_data['ul']==1])
for k in data:
    try:
        data[k] = (data[k][mwl_data['ul']==0]).value
    except:
        data[k] = (data[k][mwl_data['ul']==0])
#print(data)

# Frequency-band masks for the three instruments.
optical = (data['E']>5e13)*(data['E']<5e15)
xray = (data['E']>5e15)*(data['E']<5e18)
fermi = (data['E']>1e22)*(data['E']<1e26)*(data['El']/data['E'] > 0.2)

plt.errorbar(
    x=data['E'][optical],
    xerr=[data['El'][optical],data['Eh'][optical]],
    y=data['F'][optical],
    yerr=data['eF'][optical],
    label='UVOT',color='C0',
    ls='',marker='D',lw=0.8,ms=1.75,mew=1,mfc='None')
plt.errorbar(
    x=data['E'][xray],
    xerr=[data['El'][xray],data['Eh'][xray]],
    y=data['F'][xray],
    yerr=data['eF'][xray],
    label='XRT',color='C1',
    ls='',marker='D',lw=0.8,ms=1.75,mew=1,mfc='None')
plt.errorbar(
    x=data['E'][fermi],
    xerr=[data['El'][fermi],data['Eh'][fermi]],
    y=data['F'][fermi],
    yerr=data['eF'][fermi],
    label='LAT',color='C2',
    ls='',marker='D',lw=0.8,ms=1.75,mew=1,mfc='None')

fermi = uls['Eh']/uls['E']>0.5
plt.errorbar(
    x=uls['E'][fermi],
    y=0.3*uls['F'][fermi],
    yerr=[0.0*uls['F'][fermi],0.7*uls['F'][fermi]],
    color='C2',label=None,
    ls='',marker='v',lw=0.8,ms=1.2,mew=1,mfc='None')

# Overlay the two SSC components and their sum (column 6 = nu*F_nu).
plt.plot(model1[:,0],model1[:,6],label='SSC (in)',
    ls='dashed',lw=1,color='black')
plt.plot(model2[:,0],model2[:,6],label='SSC (out)',
    ls='dotted',lw=1,color='black')
plt.plot(model1[:,0],model1[:,6]+model2[:,6],label='SSC (sum)',
    ls='solid',lw=1,color='black')

plt.xlim(1e9,1e27)
plt.ylim(1e-15,1e-10)
plt.yticks(10**np.arange(-15,-9.999,1),
    np.array(np.arange(-15,-9.999,1),dtype=int))
plt.xticks(10**np.arange(10,26.001,2),
    np.array(np.arange(10,26.001,2),dtype=int))
#plt.xticklabels(np.arange(10,26.001,2))
plt.ylabel(r'$\mathbf{\log_{10} \nu F_\nu}\ \ \mathrm{(erg/cm^2/s)}$')
plt.xlabel(r'$\mathbf{\log_{10}\ \nu}\ \ \mathrm{(Hz)}$')
plt.legend(fontsize='small',ncol=3)
plt.savefig("PKS1424_TwoZoneModel.png",bbox_inches='tight')
# -
extragalactic/Example_SSC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import datetime


def get_initials(first_name, middle_name):
    """Return the two-character initials field of the licence number.

    Uses the first letter of the first and middle names; a missing middle
    name is replaced by the filler digit '9'.
    """
    if middle_name:
        return first_name[0] + middle_name[0]
    return first_name[0] + '9'


def get_surname(surname_string):
    """Return the 5-character surname field, upper-cased and '9'-padded.

    BUG FIX: the original took up to SIX characters of long surnames while
    padding short ones to five, so the field (and the whole licence number)
    varied in length.  The field is always five characters: truncate long
    surnames to five and pad short ones with '9'.
    """
    return surname_string[:5].upper().ljust(5, '9')


def get_decade(dob, gender):
    """Return the six-digit date-of-birth field.

    Layout: decade digit of the birth year, two-digit birth month (+50 for
    female holders), two-digit day, final digit of the birth year.
    `dob` is 'DD-Month-YYYY' with a full or three-letter month name.
    """
    day, month_name, year = dob.split('-')
    # Full month names ("September") use %B, abbreviations ("Sep") use %b.
    fmt = "%B" if len(month_name) > 3 else "%b"
    month = datetime.datetime.strptime(month_name, fmt).month
    if gender == 'F':
        # Female holders encode the month offset by 50.
        month += 50
    return year[2] + str(month).zfill(2) + day + year[3]


def driver(data):
    """Assemble the 16-character driving-licence number.

    `data` is [first_name, middle_name, surname, dob, gender].
    """
    surname = get_surname(data[2])
    decade = get_decade(data[3], data[4])
    initials = get_initials(data[0], data[1])
    # '9AA' is the fixed arbitrary-digit + check-letter suffix used here.
    return surname + decade + initials + '9AA'
# -

data = ["Andrew", "Robert", "Lee", "02-September-1981", "M"]
driver(data)
kata-6/Driving_license.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #并发 并行 #生产者和消费者 a=0 def A(): global a #全局变量 a +=100 print(a) A() # flush=true 刷新缓冲区 # end="" 换行 from multiprocessing import Process import time counter=0 def sub_task(string): global counter while counter<10: print(string,end="",flush=True) counter +=1 time.sleep(1) def main(): Process(target=sub_task,args=("Ping",)).start() Process(target=sub_task,args=("Pong",)).start() print("smy") if __name__=="__main__": main() # + import multiprocessing import os def info(title): print(title) print(__name__) print('father', os.getppid()) print('self', os.getpid()) print('--------') if __name__ == "__main__": # 除了创建的子进程和子进程调用函数,其他的都是脚本主进程 # info('hello') # 创建一个子进程调用函数 P = multiprocessing.Process(target=info,args=('hello python',)) P.start() P.join() # 和多线程一样,也是等待的意思 print('hello word') # 若没有join则会独立运行 # + # 当进程进行文件读写操作的时候(关键性操作的时候)需要使用锁.. 
from multiprocessing import Process,RLock
import multiprocessing

def _write(str_,lock):
    """Append `str_` to a fixed file, serialized across processes by `lock`."""
    print(multiprocessing.current_process().name)
    # Write to file
    path = '/Users/joker/Desktop/joker.txt'
    # `with` closes the file for you
    with lock:
        # Mode 'a' appends to the file
        # Encodings: utf-8, gbk, gb12128
        with open(path, mode='a',encoding='utf8') as f:
            f.write(str_)

if __name__ == "__main__":
    ps = []
    # Create one reentrant lock shared by all workers
    lock = RLock()
    for i in range(10):
        p = Process(target=_write,args=('今天天气好晴朗\n',lock))
        p.start()
        ps.append(p)
    for i in ps:
        i.join()

# +
import multiprocessing

def func(conn):
    # Child side of the pipe: send one message, then echo what it receives.
    conn.send(['Joker is a good man'])
    print('{} 发送了..'.format(multiprocessing.current_process().name))
    print('{} 接受了 {}'.format(multiprocessing.current_process().name,conn.recv()))
    conn.close()

if __name__ == "__main__":
    conn_a,conn_b = multiprocessing.Pipe()
    # BUG FIX: Process(...).start() returns None, so the original
    # `p1 = multiprocessing.Process(...).start()` stored None instead of
    # the Process object.  Keep the object so it can be joined/inspected.
    p1 = multiprocessing.Process(target=func,args=(conn_a,))
    p1.start()
    conn_b.send([1, 2, 3, 4, 5, 6, 7]) # send data to conn_a
    print('main',conn_b.recv())
# -

# FIX: the pasted interpreter output below was missing its opening ''' and
# made the file unparseable; restored so it is an inert string literal.
'''
全局变量不共享
1809 ['a', 'b', 'c']
1810 [1, 2, 3]
'''

# Global variables are NOT shared between processes
import multiprocessing
import os

data = []
def List():
    # Mutates (this process's copy of) the module-level `data`.
    global data
    data.append(1)
    data.append(2)
    data.append(3)
    print('p',os.getpid(),data)

if __name__ == '__main__':
    # Child process appends numbers to ITS copy of `data` ...
    p = multiprocessing.Process(target=List,args=())
    p.start()
    # ... while the main script process appends letters to its own copy.
    data.append('a')
    data.append('b')
    data.append('c')
    print('main',os.getpid(),data)

# +
import multiprocessing
import time

############################################################################
# https://docs.python.org/zh-cn/3/library/queue.html#queue.Queue.task_done #
############################################################################
# One side must put while the other gets
# maxsize sets the maximum length of the queue.
queue = multiprocessing.Queue(maxsize=10)

def func1(queue):
    # Producer: keep putting 100 into the queue; put() blocks when the
    # queue is full and gives up after the 3-second timeout.
    while 1:
        print('放入..')
        queue.put(100,timeout=3)

def func2(queue):
    # Consumer: take one item every 3 seconds.
    while 1:
        time.sleep(3)
        res = queue.get()
        print(res)

if __name__ == "__main__":
    p1 = multiprocessing.Process(target=func1,args=(queue,))
    # p2 = multiprocessing.Process(target=func2,args=(queue,))
    p1.start()
    # p2.start()
    p1.join()
    # p2.join()

# +
# Create a shared list/array.
# When several processes must operate on the same array at once, build a
# shared multiprocessing.Array.
import multiprocessing

def func(m,i):
    # Overwrite slot i of the shared array.
    m[i] = 10000

def func1():
    # Array is an object (typecode 'i' = C int), seeded with [1, 2, 3].
    list_ = multiprocessing.Array('i',[1,2,3])
    return list_

if __name__ == "__main__":
    list_ = func1()
    print(list_[:])
    # Two children each rewrite one slot of the SAME shared array.
    p1 = multiprocessing.Process(target=func,args=(list_,0))
    p2 = multiprocessing.Process(target=func,args=(list_,1))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print(list_[:])

# +
import multiprocessing

def func(mydict, mylist):
    # Mutations made through Manager proxies are visible to the parent.
    mydict["胡旺"] = "牛皮"
    mydict["lalal"] = "大美女"
    mylist.append(11)
    mylist.append(22)
    mylist.append(33)

if __name__ == "__main__":
    # with multiprocessing.Manager() as MG:
    #     mydict=MG.dict()
    #     mylist=MG.list(range(5))
    mydict = multiprocessing.Manager().dict()
    # [0,1,2,3,4]
    mylist = multiprocessing.Manager().list(range(5))
    p = multiprocessing.Process(target=func, args=(mydict, mylist))
    p.start()
    p.join()
    print(mylist)
    print(mydict)
    # print(list(range(5)))
# A very capable list
# -

import csv
import re

path="C:/Users/Administrator/Desktop/Homework1/un-general-debates.csv"
with open(path,mode='r',encoding='utf-8') as f:
    csv_reader=csv.reader(f)
    for row in csv_reader:
        q=row[3]
        # NOTE(review): the output file is re-opened in 'w' mode on every
        # row, so only the LAST row's column 3 survives — confirm whether
        # append mode or a single write after the loop was intended.
        path_='C:/Users/Administrator/Desktop/smy.txt'
        with open(path_,'w',encoding='utf-8') as ro:
            ro.write(q)
Untitled6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Higher order: Taylor’s series
# -----------------------------
#
# We can go a step beyond Euler’s method keeping up to second order terms
# in the expansion around $x_0$. Doing so we obtain
# $$y(x+\Delta x)=y(x)+y'(x)\Delta x+\frac{1}{2}y''(x)(\Delta x)^2+O(\Delta x^3)$$
# from the ODE we get
# $$\begin{eqnarray}
# y'(x)&=&f(x,y), \\
# y''(x)&=&\frac{df}{dx}=\frac{\partial f}{\partial x}+\frac{\partial f}{\partial y}\frac{dy}{dx}
# =\frac{\partial f}{\partial x}+\frac{\partial f}{\partial y} f
# \end{eqnarray}$$
#
# Substituting in the Taylor expansion we obtain
#
# $$y_{n+1}=y_n+f\Delta x+\frac{1}{2}(\Delta x)^2[\frac{\partial f}{\partial x}+f\frac{\partial f}{\partial y}]+O(\Delta x^3),$$
#
# where all the functions and derivatives are evaluated in $(x_n,y_n)$.
#
# Multistep or Predictor-Corrector methods
# ----------------------------------------
#
# We can achieve higher accuracy by relating $y_{n+1}$ not only to $y_n$,
# but also to points further in the past $y_{n-1},y_{n-2},...$ To derive
# such formulas we can formally integrate exactly the equation of motion to obtain:
# $$y_{n+1}=y_n+\int_{x_n}^{x_{n+1}}f(x,y)dx$$
#
# The problem is that we don’t know $f(x,y)$ over the interval
# $(x_n,x_{n+1})$. However, we can use the values of $y$ at $x_n$ and
# $x_{n-1}$ to provide a linear extrapolation:
# $$f=\frac{(x-x_{n-1})}{\Delta x}f_n-\frac{(x-x_n)}{\Delta x} f_{n-1}+O(\Delta x^2),$$
# with $f_n=f(x_n,y_n)$. Inserting into the integral we obtain
# $$y_{n+1}=y_n+\Delta x(\frac{3}{2}f_n-\frac{1}{2}f_{n-1})+O(\Delta x^3)$$
# Note that the value of $y_0$ is not sufficient information to get this
# algorithm started. The value of $y_1$ has to be obtained first by some
# other procedure, like the ones described previously. This means that the
# method is not **"self starting"**.
#
# Runge-Kutta methods
# -------------------
#
# ### 2nd order Runge-Kutta
#
# Euler’s method rests on the idea that the slope at one point can be used
# to extrapolate to the next. A plausible idea to make a better estimate
# of the slope is to extrapolate to a point halfway across the interval,
# and then to use the derivative at this point to extrapolate across the
# whole interval. Thus,
#
# $$\begin{eqnarray}
# k&=&\Delta x f(x_n,y_x), \\
# y_{n+1}&=&y_n+\Delta x f(x+\Delta x/2, y_n+k/2) + O(\Delta x^3).
# \end{eqnarray}$$
#
# It has the same accuracy as the Taylor series. It requires
# the evaluation of $f$ twice for each step.
#
# ### 4th order Runge-Kutta
#
# Similar ideas can be used to derive a 3rd or 4th order Runge-Kutta
# method. It has been found by experience that the best balance between
# accuracy and computational effort is given by a fourth-order algorithm.
# Such a method would require evaluating $f$ four times at each step, with
# a local accuracy of $O(\Delta x^5)$. It can be written as follows:
# $$\begin{eqnarray}
# k_1&=&\Delta x f(x_n,y_n), \\
# k_2&=&\Delta x f(x_n+\Delta x/2,y_n+k_1/2), \\
# k_3&=&\Delta x f(x_n+\Delta x/2,y_n+k_2/2), \\
# k_4&=&\Delta x f(x_n+\Delta x,y_n+k_3), \\
# y_{n+1}&=&y_n+\frac{1}{6}(k_1+2k_2+2k_3+k_4)+O(\Delta x^5).
# \end{eqnarray}$$
#
# Runge-Kutta method are self-staring, meaning that they can be used to
# obtain the first few iterations for a non self-starting algorithm.

# ### Challenge 1.2
#
# Repeat the calculation in Challenge 1.1 using 4th order Runge-Kutta

def rk(t, y, f, ys, r, dt):
    """Advance y by one 4th-order Runge-Kutta step of size dt.

    f(t, y, ys, r) is the right-hand side dy/dt; ys and r are passed
    through to f unchanged.  Returns the new value of y.
    """
    k1 = dt * f(t, y, ys, r)
    k2 = dt * f(t + 0.5 * dt, y + k1 * 0.5, ys, r)
    k3 = dt * f(t + 0.5 * dt, y + k2 * 0.5, ys, r)
    k4 = dt * f(t + dt, y + k3, ys, r)
    y = y + 1. / 6. * (k1 + 2 * k2 + 2 * k3 + k4)
    return y

# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot

T0 = 10. # initial temperature
Ts = 83. # temp. of the environment
r = 0.1 # cooling rate
tmax = 60. # maximum time
my_color = ['red', 'green', 'blue', 'black']

# Newton's law of cooling: dT/dt = -r (T - Ts).
# BUG FIX: the lambda must use its argument `y`, not the global `T`.
# rk() evaluates f at intermediate states (y + k1/2, ...); reading the
# stale global made those stage evaluations constant within a step,
# silently degrading the 4th-order method to Euler accuracy.
fun = lambda t, y, Ts, r: -r * (y - Ts)
euler = lambda y, f, dx: y + f*dx

my_temp = np.zeros(10)
my_dt = np.linspace(0.001, 0.01, 10)
my_time = 1.
j = 0
for dt in my_dt:
    nsteps = int(tmax/dt) # number of steps
    T = T0
    # NOTE(review): the time argument is held fixed at my_time — harmless
    # here because `fun` is autonomous (does not depend on t).
    for i in range(1,nsteps):
        T = rk(my_time, T, fun, Ts, r, dt)
        #T = euler(T, -r*(T-Ts), dt)
    my_temp[j] = T
    j += 1

# print() with a single argument behaves identically under Python 2 and 3
# (replaces the old py2-only `print x` statements).
print(my_temp)
print(my_dt)

pyplot.plot(my_dt, my_temp, color=my_color[0], ls='-', lw=3)
pyplot.xlabel('dt');
pyplot.ylabel('temperature');
# -
01_02_higher_order_methods.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Python Parsing with NLTK

# **(C) 2017 by [<NAME>](http://damir.cavar.me/)**

# **License:** [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/))

# This is a tutorial related to the discussion of grammar engineering and parsing in the class *Alternative Syntactic Theories* taught at Indiana University in the Linguistics Department in Spring 2017.

# ## Working with Grammars

# The following examples are taken from the NLTK [parsing HOWTO](http://www.nltk.org/howto/parse.html) page.

from nltk import Nonterminal, nonterminals, Production, CFG

nt1 = Nonterminal('NP')

nt2 = Nonterminal('VP')

nt1.symbol()

nt1 == Nonterminal('NP')

nt1 == nt2

S, NP, VP, PP = nonterminals('S, NP, VP, PP')

N, V, P, DT = nonterminals('N, V, P, DT')

prod1 = Production(S, [NP, VP])

prod2 = Production(NP, [DT, NP])

prod1.lhs()

prod1.rhs()

prod1 == Production(S, [NP, VP])

prod1 == prod2

# NOTE(review): the rule `PP -> P NP` appears twice below — presumably a
# copy-paste duplicate; harmless but worth confirming.
grammar = CFG.fromstring("""
S -> NP VP
PP -> P NP
PP -> P NP
NP -> 'the' N | N PP | 'the' N PP
VP -> V NP | V PP | V NP PP
N -> 'cat'
N -> 'fish'
N -> 'dog'
N -> 'rug'
N -> 'mouse'
V -> 'chased'
V -> 'sat'
P -> 'in'
P -> 'on'
""")

# ## Feature Structures

# One can build complex feature structures using the following strategies:

# +
import nltk
fstr = nltk.FeatStruct("[POS='N', AGR=[PER=3, NUM='pl', GND='fem']]")
print(fstr)
# -

# Creating shared paths is also possible:

fstr2 = nltk.FeatStruct("""[NAME='Lee', ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'], SPOUSE=[NAME='Kim', ADDRESS->(1)]]""")
print(fstr2)

# Let us create feature structures and try out unification:

# +
fs1 = nltk.FeatStruct("[AGR=[PER=3, NUM='pl', GND='fem'], POS='N']")
fs2 = nltk.FeatStruct("[POS='N', AGR=[PER=3, GND='fem']]")
print(fs1.unify(fs2))
# -

# ## Chart Parser

# The following examples are taken from the NLTK [parsing HOWTO](http://www.nltk.org/howto/parse.html) page.

import nltk

nltk.parse.chart.demo(2, print_times=False, trace=1, sent='I saw a dog', numparses=1)

# This is an example how to apply top-down parsing:

nltk.parse.chart.demo(1, print_times=True, trace=0, sent='I saw John with a dog', numparses=2)

# This is how to apply bottom-up parsing:

nltk.parse.chart.demo(2, print_times=False, trace=0, sent='I saw John with a dog', numparses=2)

nltk.parse.featurechart.demo(print_times=False, print_grammar=True, parser=nltk.parse.featurechart.FeatureChartParser, sent='I saw John with a dog')

# ## Loading grammars from files and editing them

# We will need the following NLTK modules in this section:

from nltk import CFG
from nltk.grammar import FeatureGrammar as FCFG

# We can load a grammar from a file, that is located in the same folder as the current Jupyter notebook, in the following way:

cfg = nltk.data.load('spanish1.cfg')

# We instantiate a ChartParser object with this grammar:

cp1 = nltk.parse.ChartParser(cfg)

# The *ChartParser* object has a parse-function that takes a list of tokens as a parameter. The token list can be generated using a language specific tokenizer. In this case we simply tokenize using the Python-function *split* on strings. The output of the parse function is a list of trees. We loop through the list of parse trees and print them out:

for x in cp1.parse("las mujeres adoran el Lucas".split()):
    print(x)

# We can also edit a grammar directly:

cfg2 = CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> 'the' N | N PP | 'the' N PP
VP -> V NP | V PP | V NP PP
N -> 'cat'
N -> 'dog'
N -> 'rug'
V -> 'chased'
V -> 'sat'
V -> 'bit'
P -> 'in'
P -> 'on'
""")

# We parse our example sentences using the same approach as above:

cp2 = nltk.parse.ChartParser(cfg2)
for x in cp2.parse("the cat bit the dog".split()):
    print(x)

# The previous example included a Context-free grammar. In the following example we load a Context-free Grammar with Features, instantiate a *FeatureChartParser*, and loop through the parse trees that are generated by our grammar to print them out:

# NOTE(review): "<NAME>ato" below is an anonymization artifact in the test
# sentence — restore the original Spanish tokens before running.
fcfg = nltk.data.load('spanish1.fcfg')
fcp1 = nltk.parse.FeatureChartParser(fcfg)
for x in fcp1.parse(u"<NAME>ato".split()):
    print(x)

# We can edit a Feature CFG in the same way directly in this notebook and then parse with it:

fcfg2 = FCFG.fromstring("""
% start CP
# ############################
# Grammar Rules
# ############################
CP -> Cbar[stype=decl]
Cbar[stype=decl] -> IP[+TNS]
IP[+TNS] -> DP[num=?n,pers=?p,case=nom] VP[num=?n,pers=?p]
DP[num=?n,pers=?p,case=?k] -> Dbar[num=?n,pers=?p,case=?k]
Dbar[num=?n,pers=?p] -> D[num=?n,DEF=?d,COUNT=?c] NP[num=?n,pers=?p,DEF=?d,COUNT=?c]
Dbar[num=?n,pers=?p] -> NP[num=?n,pers=?p,DEF=?d,COUNT=?c]
Dbar[num=?n,pers=?p,case=?k] -> D[num=?n,pers=?p,+DEF,type=pron,case=?k]
NP[num=?n,pers=?p,COUNT=?c] -> N[num=?n,pers=?p,type=prop,COUNT=?c]
VP[num=?n,pers=?p] -> V[num=?n,pers=?p,val=1]
VP[num=?n,pers=?p] -> V[num=?n,pers=?p,val=2] DP[case=acc]
#
# ############################
# Lexical Rules
# ############################
D[-DEF,+COUNT,num=sg] -> 'a'
D[-DEF,+COUNT,num=sg] -> 'an'
D[+DEF] -> 'the'
D[+DEF,gen=f,num=sg,case=nom,type=pron] -> 'she'
D[+DEF,gen=m,num=sg,case=nom,type=pron] -> 'he'
D[+DEF,gen=n,num=sg,type=pron] -> 'it'
D[+DEF,gen=f,num=sg,case=acc,type=pron] -> 'her'
D[+DEF,gen=m,num=sg,case=acc,type=pron] -> 'him'
N[num=sg,pers=3,type=prop] -> 'John' | 'Sara' | 'Mary'
V[tns=pres,num=sg,pers=3,val=2] -> 'loves' | 'calls' | 'sees' | 'buys'
N[num=sg,pers=3,-COUNT] -> 'furniture' | 'air' | 'justice'
N[num=sg,pers=3] -> 'cat' | 'dog' | 'mouse'
N[num=pl,pers=3] -> 'cats' | 'dogs' | 'mice'
V[tns=pres,num=sg,pers=3,val=1] -> 'sleeps' | 'snores'
V[tns=pres,num=sg,pers=1,val=1] -> 'sleep' | 'snore'
V[tns=pres,num=sg,pers=2,val=1] -> 'sleep' | 'snore'
V[tns=pres,num=pl,val=1] -> 'sleep' | 'snore'
V[tns=past,val=1] -> 'slept' | 'snored'
V[tns=pres,num=sg,pers=3,val=2] -> 'calls' | 'sees' | 'loves'
V[tns=pres,num=sg,pers=1,val=2] -> 'call' | 'see' | 'love'
V[tns=pres,num=sg,pers=2,val=2] -> 'call' | 'see' | 'love'
V[tns=pres,num=pl,val=2] -> 'call' | 'see' | 'love'
V[tns=past,val=2] -> 'called' | 'saw' | 'loved'
""")

# We can now create a parser instance and parse with this grammar:

fcp2 = nltk.parse.FeatureChartParser(fcfg2, trace=1)

sentence = "John buys him"

result = list(fcp2.parse(sentence.split()))
# Print every parse tree, or mark the sentence ungrammatical with '*'.
if result:
    for x in result:
        print(x)
else:
    print("*", sentence)

# Countable nouns and articles in a DP:

# DPs and pronouns

# CP/IP sentence structures

# ## Different Parsers

# This is a list of the different Feature Parsers in NLTK.
#
# - nltk.parse.featurechart.FeatureChartParser
# - nltk.parse.featurechart.FeatureTopDownChartParser
# - nltk.parse.featurechart.FeatureBottomUpChartParser
# - nltk.parse.featurechart.FeatureBottomUpLeftCornerChartParser
# - nltk.parse.earleychart.FeatureIncrementalChartParser
# - nltk.parse.earleychart.FeatureEarleyChartParser
# - nltk.parse.earleychart.FeatureIncrementalTopDownChartParser
# - nltk.parse.earleychart.FeatureIncrementalBottomUpChartParser
# - nltk.parse.earleychart.FeatureIncrementalBottomUpLeftCornerChartParser
#
# I do not know whether this is an exhaustive list.
# (C) 2017 by [<NAME>](http://damir.cavar.me/) - [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/))
notebooks/Python Parsing with NLTK.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from bqplot import *
import numpy as np
import pandas as pd

# +
# Deterministic test fixtures (seeded RNG): two negatively-correlated
# random-walk "security" price series, plus a few auxiliary arrays used by
# other notebooks in this UI-test suite.
np.random.seed(0)

mixing = [[1.0, -0.8], [-0.8, 1.0]]
walks = np.cumsum(np.random.randn(150, 2).dot(mixing), axis=0) + 100
price_data = pd.DataFrame(
    walks,
    columns=['Security 1', 'Security 2'],
    index=pd.date_range(start='01-01-2007', periods=150),
)

size = 100
x_data = range(size)
y_data = np.cumsum(np.random.randn(size) * 100.0)
ord_keys = np.array(['A', 'B', 'C', 'D', 'E', 'F'])
ordinal_data = np.random.randint(5, size=size)

symbols = ['Security 1', 'Security 2']
dates_all = price_data.index.values
dates_all_t = dates_all[1:]

sec1_levels = np.array(price_data[symbols[0]].values.flatten())
log_sec1 = np.log(sec1_levels)
sec1_returns = log_sec1[1:] - log_sec1[:-1]
sec2_levels = np.array(price_data[symbols[1]].values.flatten())

# +
# Build the scatter chart: dates on the x axis, Security 2 levels on y.
sc_x = DateScale()
sc_y = LinearScale()
ax_x = Axis(scale=sc_x, label='Date')
ax_y = Axis(scale=sc_y, orientation='vertical', tick_format='0.0f', label='Security 2')
scatt = Scatter(x=dates_all, y=sec2_levels, scales={'x': sc_x, 'y': sc_y})
fig = Figure(marks=[scatt], axes=[ax_x, ax_y])
fig
# -

# Mutate the mark's style traits in place; the already-displayed figure
# picks the changes up live through the widget sync.
scatt.colors = ['green', 'red', 'blue']
scatt.opacities = [0.3, 0.5, 1.0]
fig
ui-tests/tests/notebooks/scatter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/iotanalytics/IoTTutorial/blob/main/code/prediction/Mini_Batch_Mean.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="75T93yhRC8sW"
# ## Mini Batch Mean

# + [markdown] id="jsYoF-ulC-ba"
# ## Introduction
#
# K-means is one of the most popular clustering algorithms, mainly because of its good time performance. With the increasing size of the datasets being analyzed, the computation time of K-means increases because of its constraint of needing the whole dataset in main memory. For this reason, several methods have been proposed to reduce the temporal and spatial cost of the algorithm. A different approach is the Mini Batch K-means algorithm.
#
# The Mini Batch K-means algorithm's main idea is to use small random batches of data of a fixed size, so that they can be stored in memory. In each iteration, a new random sample from the dataset is obtained and used to update the clusters, and this is repeated until convergence. Each mini batch updates the clusters using a convex combination of the values of the prototypes and the data, applying a learning rate that decreases with the number of iterations. This learning rate is the inverse of the number of data points assigned to a cluster during the process. As the number of iterations increases, the effect of new data is reduced, so convergence can be detected when no changes in the clusters occur in several consecutive iterations.
# The empirical results suggest that it can obtain a substantial saving of computational time at the expense of some loss of cluster quality, but no extensive study of the algorithm has been done to measure how the characteristics of the datasets, such as the number of clusters or their size, affect the partition quality.
#
# ![image.png](attachment:image.png)
#
# Reference: https://upcommons.upc.edu/bitstream/handle/2117/23414/R13-8.pdf

# + [markdown] id="EpibVxZdDJCu"
# ## Code Example

# + id="YelWx0aODck3"
import pandas as pd

## example data importing (fetched over the network)
# NOTE: `drop(columns=...)` is used because the positional `axis` argument to
# DataFrame.drop was deprecated and removed in pandas 2.0.
data = pd.read_csv('https://raw.githubusercontent.com/iotanalytics/IoTTutorial/main/data/SCG_data.csv').drop(columns='Unnamed: 0').to_numpy()

# + colab={"base_uri": "https://localhost:8080/", "height": 600} id="NNXFznkKC7ri" outputId="90cf9426-e1c2-48dd-abee-104a54200521"
import numpy as np
import matplotlib.pyplot as plt
import operator

# train-test-split: first 90% of rows for training, rest for testing.
# Features are the first 1000 columns; the target is the 4th column from the end.
#print(data[0:10])
split_factor = 0.90
split = int(split_factor * data.shape[0])

X_train = data[:split, :1000]
y_train = data[:split, -4].reshape((-1, 1))
X_test = data[split:, :1000]
y_test = data[split:, -4].reshape((-1, 1))

print("Number of examples in training set = % d"%(X_train.shape[0]))
print("Number of examples in testing set = % d"%(X_test.shape[0]))

# linear regression using "mini-batch" gradient descent

def hypothesis(X, theta):
    """Return the linear-model predictions X @ theta (shape (m, 1))."""
    return np.dot(X, theta)

def gradient(X, y, theta):
    """Return the gradient of the squared-error cost w.r.t. theta: X.T @ (h - y)."""
    h = hypothesis(X, theta)
    grad = np.dot(X.transpose(), (h - y))
    return grad

def cost(X, y, theta):
    """Return the squared-error cost (h - y).T @ (h - y) / 2 for the current theta."""
    h = hypothesis(X, theta)
    J = np.dot((h - y).transpose(), (h - y))
    J /= 2
    return J[0]

def create_mini_batches(X, y, batch_size):
    """Jointly shuffle (X, y) and split them into batches of `batch_size`.

    Returns a list of (X_mini, Y_mini) tuples; the last batch holds the
    remainder when the number of rows is not divisible by `batch_size`.

    Bug fix vs. the original: the loop ran `range(n_minibatches + 1)` AND
    then appended the remainder again, which (a) duplicated the final
    partial batch and (b) produced an empty batch whenever the row count
    was an exact multiple of `batch_size`.
    """
    mini_batches = []
    data = np.hstack((X, y))
    np.random.shuffle(data)
    n_minibatches = data.shape[0] // batch_size
    for i in range(n_minibatches):
        mini_batch = data[i * batch_size:(i + 1) * batch_size, :]
        X_mini = mini_batch[:, :-1]
        Y_mini = mini_batch[:, -1].reshape((-1, 1))
        mini_batches.append((X_mini, Y_mini))
    if data.shape[0] % batch_size != 0:
        # Remainder rows that did not fill a whole batch.
        mini_batch = data[n_minibatches * batch_size:, :]
        X_mini = mini_batch[:, :-1]
        Y_mini = mini_batch[:, -1].reshape((-1, 1))
        mini_batches.append((X_mini, Y_mini))
    return mini_batches

def gradientDescent(X, y, learning_rate = 0.001, batch_size = 32):
    """Fit theta by mini-batch gradient descent.

    Returns (theta, error_list) where error_list holds the per-batch cost
    recorded after each update (useful for plotting convergence).
    """
    theta = np.zeros((X.shape[1], 1))
    error_list = []
    max_iters = 3  # number of full passes (epochs) over the shuffled data
    for itr in range(max_iters):
        mini_batches = create_mini_batches(X, y, batch_size)
        for mini_batch in mini_batches:
            X_mini, y_mini = mini_batch
            theta = theta - learning_rate * gradient(X_mini, y_mini, theta)
            error_list.append(cost(X_mini, y_mini, theta))
    return theta, error_list

#parameters get: for influx streaming seismic data; gen: for generating data
# data=create_data('get')
theta, error_list = gradientDescent(X_train, y_train)
# print("Bias = ", theta[0])
# print("Coefficients = ", theta[1:])

# visualising gradient descent
plt.plot(error_list)
plt.xlabel("Number of iterations")
plt.ylabel("Cost")
plt.show()

# predicting output for X_test
y_pred = hypothesis(X_test, theta)
plt.scatter(X_test[:, 1], y_test[:, ], marker = '.')
plt.plot(X_test[:, 1], y_pred, color = 'orange')
plt.show()

# calculating error in predictions (mean absolute error)
error = np.sum(np.abs(y_test - y_pred) / y_test.shape[0])
print("Mean absolute error = ", error)

# + [markdown] id="Eec-oyPHDAcL"
# ## Discussion
# Pros:
# * Easily fits in the memory.
# * It is computationally efficient.
# * If stuck in local minimums, some noisy steps can lead the way out of them
# * Average of the training samples produces stable error gradients and convergence.
#
# Cons
# * Low final accuracy: Many times mini batching may result in low final accuracy since the noise in the gradient is really helpful towards the end to extract that last 0.5%.
code/prediction/Mini_Batch_Mean.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # MNIST digits classification with Keras # # We don't expect you to code anything here because you've already solved it with TensorFlow. # # But you can appreciate how simpler it is with Keras. # # We'll be happy if you play around with the architecture though, there're some tips at the end. # - # <img src="images/mnist_sample.png" style="width:30%"> # + import numpy as np from sklearn.metrics import accuracy_score from matplotlib import pyplot as plt # %matplotlib inline import tensorflow as tf print("We're using TF", tf.__version__) import keras print("We are using Keras", keras.__version__) import sys sys.path.append("../..") import keras_utils from keras_utils import reset_tf_session # - # # Look at the data # # In this task we have 50000 28x28 images of digits from 0 to 9. # We will train a classifier on this data. 
# Project-local loader module (lives next to this notebook, not on PyPI).
import preprocessed_mnist
X_train, y_train, X_val, y_val, X_test, y_test = preprocessed_mnist.load_dataset_from_file()

# X contains rgb values divided by 255
print("X_train [shape %s] sample patch:\n" % (str(X_train.shape)), X_train[1, 15:20, 5:10])
print("A closeup of a sample patch:")
plt.imshow(X_train[1, 15:20, 5:10], cmap="Greys")
plt.show()
print("And the whole sample:")
plt.imshow(X_train[1], cmap="Greys")
plt.show()
print("y_train [shape %s] 10 samples:\n" % (str(y_train.shape)), y_train[:10])

# +
# flatten images: (n, 28, 28) -> (n, 784) so a Dense layer can consume them
X_train_flat = X_train.reshape((X_train.shape[0], -1))
print(X_train_flat.shape)

X_val_flat = X_val.reshape((X_val.shape[0], -1))
print(X_val_flat.shape)

# +
# one-hot encode the target: digit labels 0-9 -> 10-dim indicator vectors
y_train_oh = keras.utils.to_categorical(y_train, 10)
y_val_oh = keras.utils.to_categorical(y_val, 10)

print(y_train_oh.shape)
print(y_train_oh[:3], y_train[:3])

# +
# building a model with keras
from keras.layers import Dense, Activation
from keras.models import Sequential

# we still need to clear a graph though
# (reset_tf_session comes from the course's keras_utils helper, imported above)
s = reset_tf_session()

model = Sequential()  # it is a feed-forward network without loops like in RNN
model.add(Dense(256, input_shape=(784,)))  # the first layer must specify the input shape (replacing placeholders)
model.add(Activation('sigmoid'))
model.add(Dense(256))
model.add(Activation('sigmoid'))
model.add(Dense(10))
model.add(Activation('softmax'))  # softmax output: class probabilities over the 10 digits
# -

# you can look at all layers and parameter count
model.summary()

# now we "compile" the model specifying the loss and optimizer
model.compile(
    loss='categorical_crossentropy', # this is our cross-entropy
    optimizer='adam',
    metrics=['accuracy']  # report accuracy during training
)

# and now we can fit the model with model.fit()
# and we don't have to write loops and batching manually as in TensorFlow
# (verbose=0 because progress is reported by the Tqdm callback instead)
model.fit(
    X_train_flat, y_train_oh,
    batch_size=512,
    epochs=40,
    validation_data=(X_val_flat, y_val_oh),
    callbacks=[keras_utils.TqdmProgressCallback()],
    verbose=0
)

# # Here're the notes for those who want to play around here
#
# Here are some tips on what you could do:
#
# * __Network size__
#   * More neurons,
#   * More layers, ([docs](https://keras.io/))
#
#   * Other nonlinearities in the hidden layers
#     * tanh, relu, leaky relu, etc
#   * Larger networks may take more epochs to train, so don't discard your net just because it didn't beat the baseline in 5 epochs.
#
#
# * __Early Stopping__
#   * Training for 100 epochs regardless of anything is probably a bad idea.
#   * Some networks converge over 5 epochs, others - over 500.
#   * Way to go: stop when validation score is 10 iterations past maximum
#
#
# * __Faster optimization__
#   * rmsprop, nesterov_momentum, adam, adagrad and so on.
#     * Converge faster and sometimes reach better optima
#     * It might make sense to tweak learning rate/momentum, other learning parameters, batch size and number of epochs
#
#
# * __Regularize__ to prevent overfitting
#   * Add some L2 weight norm to the loss function, theano will do the rest
#     * Can be done manually or via - https://keras.io/regularizers/
#
#
# * __Data augmemntation__ - getting 5x as large dataset for free is a great deal
#   * https://keras.io/preprocessing/image/
#   * Zoom-in+slice = move
#   * Rotate+zoom(to remove black stripes)
#   * any other perturbations
#   * Simple way to do that (if you have PIL/Image):
#     * ```from scipy.misc import imrotate,imresize```
#     * and a few slicing
#   * Stay realistic. There's usually no point in flipping dogs upside down as that is not the way you usually see them.
course-1/week-2/v2/mnist_with_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # > **Tip**: Welcome to the Investigate a Dataset project! You will find tips in quoted sections like this to help organize your approach to your investigation. Before submitting your project, it will be a good idea to go back through your report and remove these sections to make the presentation of your work as tidy as possible. First things first, you might want to double-click this Markdown cell and change the title so that it reflects your dataset and investigation. # # # Project: Investigate a Dataset (Replace this with something more specific!) # # ## Table of Contents # <ul> # <li><a href="#intro">Introduction</a></li> # <li><a href="#wrangling">Data Wrangling</a></li> # <li><a href="#eda">Exploratory Data Analysis</a></li> # <li><a href="#conclusions">Conclusions</a></li> # </ul> # <a id='intro'></a> # ## Introduction # # > **Tip**: In this section of the report, provide a brief introduction to the dataset you've selected for analysis. At the end of this section, describe the questions that you plan on exploring over the course of the report. Try to build your report around the analysis of at least one dependent variable and three independent variables. If you're not sure what questions to ask, then make sure you familiarize yourself with the dataset, its variables and the dataset context for ideas of what to explore. # # > If you haven't yet selected and downloaded your data, make sure you do that first before coming back here. In order to work with the data in this workspace, you also need to upload it to the workspace. To do so, click on the jupyter icon in the upper left to be taken back to the workspace directory. There should be an 'Upload' button in the upper right that will let you add your data file(s) to the workspace. 
# You can then click on the .ipynb file name to come back here.

# +
# Use this cell to set up import statements for all of the packages that you
# plan to use.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Remember to include a 'magic word' so that your visualizations are plotted
# inline with the notebook. See this page for more:
# http://ipython.readthedocs.io/en/stable/interactive/magics.html
# NOTE(review): the line below is an IPython magic, not plain Python — this
# file only executes inside a notebook/IPython kernel.
% matplotlib inline
# -

# <a id='wrangling'></a>
# ## Data Wrangling
#
# > **Tip**: In this section of the report, you will load in the data, check for cleanliness, and then trim and clean your dataset for analysis. Make sure that you document your steps carefully and justify your cleaning decisions.
#
# ### General Properties

# Load your data and print out a few lines. Perform operations to inspect data
# types and look for instances of missing or possibly errant data.
df = pd.read_csv('noshowappointments-kagglev2-may-2016.csv')
df.head()

# Column dtypes and non-null counts of the dataset.
df.info()

# Shape of the dataset (rows, columns).
df.shape

# Summary statistics of the numeric columns.
df.describe()

# Number of unique values per column — an estimate of how many values are in
# each category and thus whether any data needs cleaning.
df.nunique()

# Count of missing values per column.
df.isnull().sum()

# Check whether the dataset has any fully duplicated rows.
sum(df.duplicated())

# Make sure there is no negative value in Age (the min reveals a -1 entry).
df.Age.max(), df.Age.min()

# Follow every code cell, or every set of related code cells, with a markdown cell to describe to the reader what was found in the preceding cell(s). Try to make it so that the reader can then understand what they will be seeing in the following cell(s).
#
# ### Data Cleaning (Replace this with more specific notes!)

# After discussing the structure of the data and any problems that need to be
# cleaned, perform those cleaning steps in the second part of this section.

# Locate the row with the invalid (negative) age.
df.query('Age == "-1"')

# Drop the negative-age row from the dataset.
# NOTE(review): this drops by *position* (index 99832), which is fragile if
# the CSV ever changes — confirm it matches the row found by the query above.
df.drop(df.index[99832], inplace=True)
df.info()
df.head(1)

# Rename the columns to a consistent underscore style.
df.rename(columns = {'PatientId':'Patient_Id', 'AppointmentID':'Appointment_ID', 'ScheduledDay':'Scheduled_Day',\
                     'AppointmentDay':'Appointment_Day', 'No-show':'No_show'}, inplace = True)
df.head(1)

# Create dummy (one-hot) indicator columns for gender.
dummy = pd.get_dummies(df['Gender'])
dummy.head() # preview to confirm the change

# Merge the dummy columns into the original df.
df = pd.concat([df, dummy], axis=1)
df.head() # preview to confirm the change

# +
# The Gender column is now redundant with the dummy columns, so drop it.
df.drop(['Gender'], axis = 1, inplace = True)
# -

df.head(3)

# +
# Binning function for cutting the ages column into labelled groups.
def binning(dataframe, cut_points, labels=None):
    """Cut a numeric Series into bins bounded by its own min/max plus cut_points."""
    # Define min and max values
    minimum = dataframe.min()
    maximum = dataframe.max()
    # Create the full list of bin edges by adding min and max to cut_points
    break_points = [minimum] + cut_points + [maximum]
    # Binning using cut function of pandas; include_lowest so the minimum
    # value itself lands in the first bin
    dataframeBin = pd.cut(dataframe, bins = break_points, labels = labels, include_lowest = True)
    return dataframeBin

# Binning age:
# NOTE(review): several labels appear shifted relative to their edges (e.g.
# ages 30-40 are labelled "40s") — confirm the intended decade labels.
cut_points = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
labels = ["child","teen","young_adult","40s", "50s", "retirement", "70s", "80s", "90s", "100s", "centenarian"]
df["Age_Bin"] = binning(df["Age"], cut_points, labels)
print(pd.value_counts(df["Age_Bin"], sort = False))
# -

# <a id='eda'></a>
# ## Exploratory Data Analysis
#
# > **Tip**: Now that you've trimmed and cleaned your data, you're
# ready to move on to exploration. Compute statistics and create visualizations with the goal of addressing the research questions that you posed in the Introduction section. It is recommended that you be systematic with your approach. Look at one variable at a time, and then follow it up by looking at relationships between variables.
#
# ### Research Question 1: Are there more no-shows with alcoholism or vice versa?

# Current shape after cleaning (rows, columns).
df.shape

# Use this, and more code cells, to explore your data. Don't forget to add
# Markdown cells to document your observations and findings.
# Cross-tabulate show/no-show status against whether an SMS was received.
df.groupby(["No_show", "SMS_received"]).size()

# **Answer:**
# * Of the 35422 patients that did receive text messages, 25698 patients did not show up for their appointments and 9784 patients did show up for their appointments.
# * Receiving text messages did not influence patients to show up for their appointments.
#
# ### Research Question 2: How many patients were in each age group?

# +
# Continue to explore the data to address your additional research
# questions. Add more headers as needed if you have more questions to
# investigate.

# Recompute the Age_Bin column to make sure the groupby below can run
# (binning, cut_points and labels are defined in the Data Cleaning section).
df["Age_Bin"] = binning(df["Age"], cut_points, labels)

# Count patients per show/no-show status and age group.
df.groupby(['No_show', 'Age_Bin']).size()
# -

# `By looking at the values that the groupby function gave us, we can deduce that there is approximately an equal number of patients that show up for their appointments from their childhood years to retirement. This means that the varied number of patients in each no-show age group, from childhood to retirement, will show us how many patients that did not show up for an appointment. We can go ahead and plot only the Age variable to determine the number of patients that did not show up for appointments.`

# +
# Plot the histogram and set the xtick properties
ax = df['Age'].plot(kind='hist')
ax.set_xticks(cut_points)
ax.set_xticklabels(labels, rotation= 60, )

# Set histogram labels and titles
plt.xlabel('Age Group', fontsize= 16)
plt.ylabel('Number of Patients', fontsize=16)
plt.title('Number of "No-Show" Patients by Age Group', fontsize=22)
plt.xlim([0, 120])

# Render the histogram.
plt.show()
# -

# **Answer:** By looking at the histogram graph, a great number of patients in their childhood, 50s, and retirement age group did not show up to appointments.

# ### Research Question 3: Does a certain gender influence whether one shows up to appointments or not?

# +
### To answer this question, we are going to create a pie chart to determine which category influences patients to
### show up the most.
## We use the F dummy column so that 1 represents female and 0 represents male.
df.groupby(['No_show','F']).size()

# +
# Set pie chart properties
# NOTE(review): the slice sizes below are hard-coded from the groupby output
# above — re-check them if the cleaning steps change.
labels = 'Female, No show', 'Female, Showed', 'Male, No Show', 'Male, Show'
cut_points = [57245, 14594, 30962, 7725 ]
colors = ['red', 'lightcoral', 'blue', 'lightskyblue']

# Set pie chart title and axis
fig1, ax = plt.subplots()
ax.pie(cut_points, labels=labels, colors=colors, autopct='%1.1f%%')
plt.title('Proportion of Female and Male Patients', fontsize= 18)

# Equal aspect ratio ensures that pie is drawn as a circle.
ax.axis('equal')

# Render the pie chart.
plt.show()
# -

# **Answer:** By looking at the pie chart,
# * approximately 1/5 of female patients- 13.2% out of 51.8% showed up for appointments.
# * while approximately 1/4 of male patients- 7.0% out of 28.0% showed up for appointments.
# * Being a male influences one to show up to appointments more.

# <a id='conclusions'></a>
# ## Conclusions
#
# > **Tip**: Finally, summarize your findings and the results that have been performed.
# Make sure that you are clear with regards to the limitations of your exploration. If you haven't done any statistical tests, do not imply any statistical conclusions. And make sure you avoid implying causation from correlation!
#
# > **Tip**: Once you are satisfied with your work here, check over your report to make sure that it satisfies all the areas of the rubric (found on the project submission page at the end of the lesson). You should also probably remove all of the "Tips" like this one so that the presentation is as polished as possible.
#
# ## Submitting your Project
#
# > Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).
#
# > Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.
#
# > Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!

# ### Conclusions:
#
# * From the first question we are able to find that the Text Message does not influence patients to show up for their appointments. From the data there are `25,698` patients who did not show up for their appointment out of `88,207`, which means about `29.7%` of patients don't show up for their appointment.
# * Whereas `9,784` patients showed up for their appointments out of `22,319`, which means about `43.8%` of patients showed up for their appointment without getting the Text Message. This concludes that the Text Message does not influence the patients to show up for their appointments.
# * From the second question we are able to find that in the `child` age group about `20.2%` showed up for their appointments. Similarly, for the `50s` age group about `19.9%` showed up for their appointments, and for the `retirement` age group about `21.02%` showed up for their appointments.
# * From the last question we are able to find that the female population had approximately `1/5` that showed up for their appointments while males had approximately `1/4` that showed up to appointments.

# Render this notebook to HTML via nbconvert; `call` returns the subprocess
# exit status (0 on success), which the notebook displays as the cell output.
from subprocess import call
call(['python', '-m', 'nbconvert', 'Investigate_a_Dataset.ipynb'])
Investigate_a_Dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # name: python3 # --- # + # Importando los módulos que necesitaremos import pandas as pd from datetime import datetime as DateTime from datetime import timedelta as TimeDelta import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns # Configurando los estílos de los gráficos plt.ioff() sns.set_context('talk') sns.set_style("whitegrid") # - # ## Adquisición/Transformación de Datos # + # Population population = { 'PERÚ': 32625948, 'LIMA': 10628470, 'PIURA': 2047954, 'LA LIBERTAD': 2016771, 'AREQUIPA': 1497438, 'CAJAMARCA': 1453711, 'JUNIN': 1361467, 'CUSCO': 1357075, 'LAMBAYEQUE': 1310785, 'PUNO': 1237997, 'ANCASH': 1180638, 'CALLAO': 1129854, 'LORETO': 1027559, 'ICA': 975182, 'SAN MARTIN': 899648, 'HUANUCO': 760267, 'AYACUCHO': 668213, 'UCAYALI': 589110, 'APURIMAC': 430736, 'AMAZONAS': 426806, 'TACNA': 370974, 'HUANCAVELICA': 365317, 'PASCO': 271904, 'TUMBES': 251521, 'MOQUEGUA': 192740, 'MADRE DE DIOS': 173811 } # - # DATASET df = pd.read_csv('data/vacunas_covid.csv') # + # PRIMERA DOSIS A NIVEL NACIONAL primera_dosis_df = df[df['DOSIS'] == 1][['UUID','FECHA_VACUNACION']] primera_dosis_gb = primera_dosis_df.groupby(['FECHA_VACUNACION']).agg(['count']) primera_dosis_data = [] for item in primera_dosis_gb.iterrows(): primera_dosis_data.append((DateTime.strptime(str(item[0]),'%Y%m%d'), item[1].get(0))) primeradosis_peru_df = pd.DataFrame(primera_dosis_data, columns=('Date','Count')) primeradosis_peru_df['Area'] = 'PERU' primeradosis_peru_df['CountK'] = primeradosis_peru_df['Count'] / 1000 for i in np.arange(len(primeradosis_peru_df)-1): primeradosis_peru_df.loc[i+1,'Count'] = primeradosis_peru_df.iloc[i+1]['Count'] + primeradosis_peru_df.loc[i,'Count'] primeradosis_peru_df.loc[i+1,'CountK'] = 
primeradosis_peru_df.iloc[i+1]['CountK'] + primeradosis_peru_df.loc[i,'CountK'] primeradosis_peru_df = primeradosis_peru_df[primeradosis_peru_df['Date'] > DateTime(2020,8,1)] primeradosis_peru_df.head() # + # SEGUNDA DOSIS A NIVEL NACIONAL segunda_dosis_df = df[df['DOSIS'] == 2][['UUID','FECHA_VACUNACION']] segunda_dosis_gb = segunda_dosis_df.groupby(['FECHA_VACUNACION']).agg(['count']) segunda_dosis_data = [] for item in segunda_dosis_gb.iterrows(): segunda_dosis_data.append((DateTime.strptime(str(item[0]),'%Y%m%d'), item[1].get(0))) segundadosis_peru_df = pd.DataFrame(segunda_dosis_data, columns=('Date','Count')) segundadosis_peru_df['Area'] = 'PERU' segundadosis_peru_df['CountK'] = segundadosis_peru_df['Count'] / 1000 for i in np.arange(len(segundadosis_peru_df)-1): segundadosis_peru_df.loc[i+1,'Count'] = segundadosis_peru_df.iloc[i+1]['Count'] + segundadosis_peru_df.loc[i,'Count'] segundadosis_peru_df.loc[i+1,'CountK'] = segundadosis_peru_df.iloc[i+1]['CountK'] + segundadosis_peru_df.loc[i,'CountK'] segundadosis_peru_df = segundadosis_peru_df[segundadosis_peru_df['Date'] > DateTime(2020,8,1)] segundadosis_peru_df.head() # - # ## Visualización de Datos # + dates_range_min = segundadosis_peru_df['Date'].iat[1] if (segundadosis_peru_df['Date'].iat[0] < primeradosis_peru_df['Date'].iat[0]) else primeradosis_peru_df['Date'].iat[0] dates_range_max = segundadosis_peru_df['Date'].iat[-1] if (segundadosis_peru_df['Date'].iat[-1] > primeradosis_peru_df['Date'].iat[-1]) else primeradosis_peru_df['Date'].iat[-1] dates_range = (dates_range_min, dates_range_max) # + primeradosis_peru_df_len = len(primeradosis_peru_df) segundadosis_peru_df_len = len(segundadosis_peru_df) data1_df = primeradosis_peru_df[(primeradosis_peru_df.index - (primeradosis_peru_df_len-1)) % 14 == 0] data2_df = segundadosis_peru_df[(segundadosis_peru_df.index - (segundadosis_peru_df_len-1)) % 14 == 0] area = 'PERÚ' # Creamos la figura, los ejes y agregamos la atribución plt.clf() fig, axs = 
plt.subplots(figsize=(30,15)) plt.figtext(0.1,0.94, area + ': EVOLUCIÓN DE VACUNADOS CONTRA COVID-19 (' + str(round(data1_df['Count'].iat[-1]*100/population[area],2)) + '%)', ha='left', fontsize=24) plt.figtext(0.1,0.91, 'Rango de fechas de vacunación: del {} al {}'.format(f'{dates_range[0]:%Y-%m-%d}',f'{dates_range[1]:%Y-%m-%d}'), ha='left', fontsize=20, color='#999') plt.figtext(0.9,0.05, """Fuente: https://www.datosabiertos.gob.pe/dataset/vacunaci%C3%B3n-contra-covid-19-ministerio-de-salud-minsa https://malexandersalazar.github.io/, <NAME>, """ + f'{DateTime.now():%Y-%m-%d}', ha='right') formatter = mdates.DateFormatter("%y-%m-%d") locator = mdates.DayLocator(interval=30) axs.xaxis.set_major_formatter(formatter) axs.xaxis.set_major_locator(locator) axs.yaxis.set_major_formatter('{x:,.0f}K') # Dibujamos linea horizontal para señalar la meta axs.hlines(population[area]/1000 * 70/100, dates_range[0], dates_range[1], linestyles= 'dashed', label='Inmunidad de Rebaño (70%)') # Graficamos los datos axs.plot(data1_df['Date'], data1_df['CountK'], 'o-', color='#bf0909', label='% Vacunados (1ra dosis)') axs.plot(data2_df['Date'], data2_df['CountK'], 'o-', color='#00ff00', label='% Vacunados (2da dosis)') # axs.set_xlim(dates_range) axs.set_ylim([0,population[area]/1000]) axs.set_ylabel('POBLACIÓN') DD = TimeDelta(days=3) for i in np.arange(len(data1_df)): axs.text(data1_df['Date'].iat[i] - DD, data1_df['CountK'].iat[i] + population[area]/1000 * 3/100, str(round(data1_df['Count'].iat[i]*100/population[area],2)) + '%', fontsize='small') for i in np.arange(len(data2_df)): axs.text(data2_df['Date'].iat[i] - DD, data2_df['CountK'].iat[i] + population[area]/1000 * 3/100, str(round(data2_df['Count'].iat[i]*100/population[area],2)) + '%', fontsize='small') plt.legend() plt.show() plt.close(fig) # - def generate_plot(area, primeradosis_df, segundadosis_df, dates_range, show_plot=True, save_plot=True): primeradosis_df_len = len(primeradosis_df) segundadosis_df_len = 
len(segundadosis_df) data1_df = primeradosis_df[(primeradosis_df.index - (primeradosis_df_len-1)) % 9 == 0] data2_df = segundadosis_df[(segundadosis_df.index - (segundadosis_df_len-1)) % 9 == 0] # Creamos la figura, los ejes y agregamos la atribución plt.clf() fig, axs = plt.subplots(figsize=(30,15)) plt.figtext(0.1,0.94, area + ': EVOLUCIÓN DE VACUNADOS CONTRA COVID-19 (' + str(round(data1_df['Count'].iat[-1]*100/population[area],2)) + '%)', ha='left', fontsize=24) plt.figtext(0.1,0.91, 'Rango de fechas de vacunación: del {} al {}'.format(f'{dates_range[0]:%Y-%m-%d}',f'{dates_range[1]:%Y-%m-%d}'), ha='left', fontsize=20, color='#999') plt.figtext(0.9,0.05, """Fuente: https://www.datosabiertos.gob.pe/dataset/vacunaci%C3%B3n-contra-covid-19-ministerio-de-salud-minsa https://malexandersalazar.github.io/, <NAME>, """ + f'{DateTime.now():%Y-%m-%d}', ha='right') formatter = mdates.DateFormatter("%y-%m-%d") locator = mdates.DayLocator(interval=30) axs.xaxis.set_major_formatter(formatter) axs.xaxis.set_major_locator(locator) axs.yaxis.set_major_formatter('{x:,.0f}K') # Dibujamos linea horizontal para señalar la meta axs.hlines(population[area]/1000 * 70/100, dates_range[0], dates_range[1], linestyles= 'dashed', label='Inmunidad de Rebaño (70%)') # Graficamos los datos axs.plot(data1_df['Date'], data1_df['CountK'], 'o-', color='#bf0909', label='% Vacunados (1ra dosis)') axs.plot(data2_df['Date'], data2_df['CountK'], 'o-', color='#00ff00', label='% Vacunados (2da dosis)') # axs.set_xlim(dates_range) axs.set_ylim([0,population[area]/1000]) axs.set_ylabel('POBLACIÓN') DD = TimeDelta(days=2) for i in np.arange(len(data1_df)): axs.text(data1_df['Date'].iat[i] - DD, data1_df['CountK'].iat[i] + population[area]/1000 * 3/100, str(round(data1_df['Count'].iat[i]*100/population[area],2)) + '%', fontsize='small') for i in np.arange(len(data2_df)): axs.text(data2_df['Date'].iat[i] - DD, data2_df['CountK'].iat[i] + population[area]/1000 * 3/100, 
str(round(data2_df['Count'].iat[i]*100/population[area],2)) + '%', fontsize='small') plt.legend() ## 3. Mostramos, guardamos y generamos el Markdown para las imágenes sns.despine(left=False, bottom=False) if save_plot: filename = 'dist/{}_{}.png'.format(f'{DateTime.now():%Y%m%d}', area.replace(' ', '_')) plt.savefig("../" + filename, bbox_inches='tight') with open("../dist/images.txt", "a", encoding='utf-8') as f: f.write('![alt text]({} "{}")\n\n'.format(filename, area)) if show_plot: plt.show() plt.close(fig) # Eliminando archivo generado para el Markdown de las imágenes if os.path.exists("../dist/images.txt"): os.remove("../dist/images.txt") # Perú generate_plot('PERÚ', primeradosis_peru_df, segundadosis_peru_df, dates_range, False, True) # Deparmanetos departments_ordered = np.sort(df['DEPARTAMENTO'].unique()) for d in departments_ordered: # PRIMERA DOSIS A NIVEL DEPARTAMENTAL dept_df = df[(df['DOSIS'] == 1) & (df['DEPARTAMENTO'] == d)][['UUID','FECHA_VACUNACION']] primera_dosis_gb = dept_df.groupby(['FECHA_VACUNACION']).agg(['count']) primera_dosis_data = [] for item in primera_dosis_gb.iterrows(): primera_dosis_data.append((DateTime.strptime(str(item[0]),'%Y%m%d'), item[1].get(0))) primeradosis_dept_df = pd.DataFrame(primera_dosis_data, columns=('Date','Count')) # primeradosis_dept_df = primeradosis_dept_df[primeradosis_dept_df['Date'] > DateTime(2020,8,1)] primeradosis_dept_df['CountK'] = primeradosis_dept_df['Count'] / 1000 for i in np.arange(len(primeradosis_dept_df)-1): primeradosis_dept_df.loc[i+1,'Count'] = primeradosis_dept_df.iloc[i+1]['Count'] + primeradosis_dept_df.loc[i,'Count'] primeradosis_dept_df.loc[i+1,'CountK'] = primeradosis_dept_df.iloc[i+1]['CountK'] + primeradosis_dept_df.loc[i,'CountK'] # SEGUNDA DOSIS A NIVEL DEPARTAMENTAL dept_df = df[(df['DOSIS'] == 2) & (df['DEPARTAMENTO'] == d)][['UUID','FECHA_VACUNACION']] segunda_dosis_gb = dept_df.groupby(['FECHA_VACUNACION']).agg(['count']) segunda_dosis_data = [] for item in 
segunda_dosis_gb.iterrows(): segunda_dosis_data.append((DateTime.strptime(str(item[0]),'%Y%m%d'), item[1].get(0))) segundadosis_dept_df = pd.DataFrame(segunda_dosis_data, columns=('Date','Count')) # segundadosis_dept_df = segundadosis_dept_df[segundadosis_dept_df['Date'] > DateTime(2020,8,1)] segundadosis_dept_df['CountK'] = segundadosis_dept_df['Count'] / 1000 for i in np.arange(len(segundadosis_dept_df)-1): segundadosis_dept_df.loc[i+1,'Count'] = segundadosis_dept_df.iloc[i+1]['Count'] + segundadosis_dept_df.loc[i,'Count'] segundadosis_dept_df.loc[i+1,'CountK'] = segundadosis_dept_df.iloc[i+1]['CountK'] + segundadosis_dept_df.loc[i,'CountK'] primeradosis_dept_df = primeradosis_dept_df[primeradosis_dept_df['Date'] > DateTime(2020,8,1)] segundadosis_dept_df = segundadosis_dept_df[segundadosis_dept_df['Date'] > DateTime(2020,8,1)] generate_plot(d, primeradosis_dept_df, segundadosis_dept_df, dates_range, False, True)
src/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # 第五部分:支持向量机 # # 接下来的几个掌机会介绍支持向量机学习算法(SVM),它通常被认为是最好的现成监督学习算法之一(很多人认为它是最好的)。为了详细介绍支持向量机,我们需要先了解间隔(margin)的概念,以及使用大间隙(gap)分割数据的想法。接着我们将介绍优化间隔分类器,进而讨论一下语言歧义(language duality)现象。然后介绍支持向量机的核心——如何在一个极高维特征值向量空间(比如无限维)上高效的应用支持向量机算法。最后我们将介绍序列最小优化算法(SMO: sequential minimal optimization)算法——一个更优化的支持向量机的实现。 # # ## 1. 间隔:直观概念 # # 我们从间隔开始讨论支持向量机算法,本节将给出“间隔”以及算法对预测的“信心”的直观概念,在第三节我们会进一步形式化这些概念。 # # 回顾前面的逻辑回归,假设函数的模型$h_\theta(x)=g\left(\theta^Tx\right)$将给出关于$p(y=1\mid x;\theta)$的概率预测。当且仅当$h_\theta(x)\geq 0.5$即输入满足$\theta^Tx\geq 0$时,模型给出关于输入的预测为$1$。对于一个正训练样本(即$y=1$),$\theta^Tx$越大,则$h_\theta(x)=p(y=1\mid x;w,b)$,也就意味着我们将其预测为$1$的“信心”越高。因此,严谨的说,如果$\theta^Tx\gg 0$,则算法预测其为$1$的信心就会非常大。同样的,对于逻辑回归,如果$\theta^Tx\ll 0$,则算法预测其为$0$的信心就会非常大。对于给定的训练集,我们可以说,如果拟合参数$\theta$能够使得所有$y^{(i)}=1$的样本满足$\theta^Tx^{(i)}\gg0$,使所有$y^{(i)}=0$的样本满足$y^{(i)}=0$,则称这是一个良好的拟合,因为这反映出该拟合对分类结果的“信心”很足。所以,“信心”是一个很好的指标,后面我们将使用函数间隔形式化这个指标。 # # 另一种直观的表达,如下图,其中x代表正训练样本,o代表负训练样本,直线就是判别边界(由$\theta^Tx=0$给出,也叫**分类超平面(separating hyperplane)**)。 # # <img src="./resource/chapter06_image01.png" width="400" alt="" align=center /> # # 图中的三个点A、B、C,点A距离判别边界非常远,如果求关于点A的预测,我们会非常确定其值为$y=1$。相反,点C距离判别边界很近,虽然它在判别边界$y=1$一侧,但是如果判别边界稍有变动,它就有可能被放在$y=0$一侧。因此,我们对点A预测的信心强于点C。而点B则在两者之间,通常来说,如果点距离判别边界越远,模型做出预测的信心就越强。也就是说,如果对于给定训练集,可以找到一条能够准确并可信(即样本距离边界很远)的预测所有训练样本的判别边界,则称这个拟合是良好的。我们在后面将使用几何间隔形式化这个概念。 # # ## 2. 
标记法 # # 为了更加简洁的介绍支持向量机,我们需要先引入一种新的标记。考虑使用线性分类器解决“特征为$x$目标为$y$”的二元分类问题。这次,我们使用$y\in\{-1,1\}$来标记两个分类(而不是之前的$y\in\{0,1\}$),再使用参数向量$w,b$代替之前的参数向量$\theta$,于是我们现在将分类器写为: # # $$h_{w,b}(x)=g\left(w^Tx+b\right)$$ # # 此处,$g(z)=\begin{cases}1 &z\gt 0\\-1 &z\lt 0\end{cases}$,而$w,b$的记法可以将截距项与特征值的系数分开来记(也不再向特征值向量$x$中添加$x_0=1$分量),即$b$用来代替原来的$\theta_0$,而$w$用来代替原来的$\begin{bmatrix}\theta_1&\cdots&\theta_n\end{bmatrix}^T$。 # # 还需要注意的是,根据函数$g$的定义,分类器将直接给出$-1$或$1$的结果(类似感知算法),省略了估计$y=1$的概率的步骤。 # # ## 3. 函数间隔及几何间隔 # # 本节将形式化函数间隔及几何间隔。对于给定的训练样本$\left(x^{(i)},y^{(i)}\right)$,我们定义关于此训练样本函数间隔的超平面$(w,b)$为: # # $$\hat{\gamma}^{(i)}=y^{(i)}\left(w^Tx^{(i)}+b\right)$$ # # 上式可以解释为,当$y^{(i)}=1$时,为了得到一个较大的函数间隔(即为了使预测有较高的的可信度及准确性),我们需要$w^Tx+b$取一个较大的正数($w^Tx+b\gg 0$);反之,当$y^{(i)}=-1$时,我摸需要$w^Tx+b$取一个较大的负数($w^Tx+b\ll 0$)。此外,如果$y^{(i)}\left(w^Tx+b\right)\gt 0$,则说明关于此样本的预测是正确的。因此,较大的函数间隔意味着较高的可信度和准确性。 # # 对于一个在$g\in\{-1,1\}$取值的线性分类器,有一个性质导致其函数间隔不能有效的反映预测的可信度:对于给定的$g$,如果将$(w,b)$替换为$(2w,2b)$,即$g\left(w^Tx+b\right)$变为$g\left(2w^Tx+2b\right)$,我们会发现分类超平面$h_{w,b}(x)$并不会改变,也就是说$h_{w,b}(x)$只关心$w^Tx+b$的正负,而不关心其大小。但是将$(w,b)$变为$(2w,2b)$相当于给函数间隔乘了系数$2$,于是我们发现,如果通过改变$w,b$的取值,我们可以让函数间隔变得很大,然而分类超平面并没有改变,所以单纯的通过这种方式改变函数间隔的大小没有什么实质意义。直觉告诉我们,应该引入一种标准化条件,比如令$\lVert w\rVert_2=1$,即把$(w,b)$变为$\left(\frac{w}{\lVert w\rVert_2},\frac{b}{\lVert w\rVert_2}\right)$,我们在后面会继续讨论这种方法。 # # 对于给定的训练集$S=\left\{\left(x^{(i)},y^{(i)}\right);i=1,\cdots,m\right\}$,关于$S$以$(w,b)$为参数的函数间隔$\hat\gamma$定义为取所以独立的训练样本中最小的那个函数间隔(即取最坏的一个样本的情况): # # $$\hat\gamma=\operatorname*{min}_{i=1,\cdots,m}\hat\gamma^{(i)}$$ # # 接下来我们讨论几何间隔,考虑下图: # # <img src="./resource/chapter06_image02.png" width="400" alt="" align=center /> # # 直线为$(w,b)$确定的判定边界(即超平面$w^Tx+b=0$),向量$w$正交于分类超平面(在超平面上任取两点$P_1=(x_1,\cdots,x_n), P_2==(x'_1,\cdots,x'_n)$,则两点满足平面方程组$\begin{cases}w^TP_1+b=0\\w^TP_2+b=0\end{cases}$,两式相减得$w^T(P_1-P_2)=0$,即$w^T$正交于该超平面上任意向量,所以$w^T$为超平面法向量)。观察点$A$,设点$A$为训练样本$(x^{(i)},y^{(i)}=1)$,它到判定边界的距离$\gamma^{(i)}$即为线段$AB$的长度。 # # 如何计算$\gamma^{(i)}$?注意到$\frac{w}{\lVert 
w\rVert}$是标准化后的$w$向量,点$A$为$x^{(i)}$,则点$B$为$x^{(i)}-\gamma^{(i)}\cdot\frac{w}{\lVert w\rVert}$。点$B$在判定边界上,而判定边界上的所有点都满足方程$w^Tx+b=0$,则: # # $$w^T\left(x^{(i)}-\gamma^{(i)}\frac{w}{\lVert w\rVert}\right)+b=0$$ # # 解出$\gamma^{(i)}$得:(注:$w^Tw=\lVert w\rVert^2$) # # $$\gamma^{(i)}=\frac{w^Tx^{(i)}+b}{\lVert w\rVert}=\left(\frac{w}{\lVert w\rVert}\right)^Tx^{(i)}+\frac{b}{\lVert w\rVert}$$ # # 这就是正训练样本$A$被正确的分在判别边界$y=1$一侧的情形,更一般的,我们定义关于样本$\left(x^{(i)},y^{(i)}\right)$以$(w,b)$为参数的函数间隔为: # # $$\gamma^{(i)}=y^{(i)}\left(\left(\frac{w}{\lVert w\rVert}\right)^Tx^{(i)}+\frac{b}{\lVert w\rVert}\right)$$ # # 可以看出,如果$\lVert w\rVert=1$,则函数间隔等于几何间隔($\hat\gamma^{(i)}=\gamma^{(i)}$),这就是两种间隔的关系($\hat\gamma^{(i)}=\frac{\gamma^{(i)}}{\Vert w\Vert}$)。与函数间隔一样,如果改变参数$(w,b)$为$(2w,2b)$,则几何间隔不会改变。这个性质在后面会很方便,利用改变参数$(w,b)$的大小不影响间隔,我们可以在拟合参数时引入$w$的限制条件,比如令$\lVert w\rVert=1$,或$\lvert w_1\rvert=5$,或$\lvert w+b\rvert+\lvert b\rvert=2$,诸如这种限制条件都可以通过给参数$(w,b)$乘以一个适当的系数得到。 # # 对于给定的训练集$S=\left\{\left(x^{(i)},y^{(i)}\right);i=1,\cdots,m\right\}$,也有关于$S$以$(w,b)$为参数的几何间隔$\gamma$定义为取所以独立的训练样本中最小的那个几何间隔(即取最坏的一个样本的情况): # # $$\gamma=\operatorname*{min}_{i=1,\cdots,m}\gamma^{(i)}$$ # # 另外,几何间隔实际上就是点到超平面的距离,高中时学过的点$\left(x^{(i)},y^{(i)}\right)$到直线$ax+by+c=0$的距离为: # # $$d\left(x^{(i)},y^{(i)}\right)=\frac{\lvert ax^{(i)}+by^{(i)}+c\rvert}{\sqrt{a^2+b^2}}$$ # # 推广到高维就是上面的几何间隔,而函数间隔就是未标准化的几何间隔。 # # 最后,最大间隔分类器(maximum margin classifier,可以被看做是支持向量机的前身),实际上就选择特定的$w,b$使几何间隔最大化: # # $$\begin{align}\displaystyle\operatorname*{max}_{w,b}&\quad\gamma\\\mathrm{s.t.}&\quad y^{(i)}\left(\left(\frac{w}{\lVert w\rVert}\right)^Tx^{(i)}+\frac{b}{\lVert w\rVert}\right)\end{align}$$ # # 注:$\mathrm{s.t.}$是“subject to”的缩写,意为“受限于”。
4-SVM/note/LSJU-SVM-01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center><img src="http://alacip.org/wp-content/uploads/2014/03/logoEscalacip1.png" width="500"></center>
#
# # <center> <h1>Curso: Introducción al Python</h1> </center>
#
# <br></br>
#
# * Profesor: <a href="http://www.pucp.edu.pe/profesor/jose-manuel-magallanes/" target="_blank">Dr. <NAME>, PhD</a> ([<EMAIL>](mailto:<EMAIL>))<br>
#     - Profesor del **Departamento de Ciencias Sociales, Pontificia Universidad Católica del Peru**.<br>
#     - Senior Data Scientist del **eScience Institute** and Visiting Professor at **Evans School of Public Policy and Governance, University of Washington**.<br>
#     - Fellow Catalyst, **Berkeley Initiative for Transparency in Social Sciences, UC Berkeley**.
#
#
# ## Parte 3: Carga de datos en Python
# <a id='beginning'></a>
#
# En esta sección veremos diversas opciones para cargar / colectar datos en Python:
#
# 1. [Propietary software.](#part1)
# 2. [Google Forms.](#part2)
# 3. [Uso de APIs.](#part3)
# 4. [Scrapeo de tablas.](#part4)
#
#
# ____
#
#
# <a id='part1'></a>
# ## Data de propietary / common software
#
# La mejor recomendación es que sus datos esten almacenados en algun lugar de la web. Y si está en su computadora, que esté en la misma carpeta donde estás escribiendo el código. En nuestro caso, todo esta en Github.

import pandas as pd

# Voy a descargar un archivo en stata, del American National Election Studies (ANES). Y me interesan unas variables en particular:

varsOfInterest=["libcpre_self","libcpo_self"]

# Pandas se encargará:

# +
linkToFileDTA="https://github.com/escuela-alacip/introPython/raw/master/data/anes_timeseries_2012.dta"

# read_stata loads only the two columns listed above
dataStata=pd.read_stata(linkToFileDTA,columns=varsOfInterest)
# -

dataStata.head()

# Si tiene el archivo en Excel:

## asegurese de tener instalado la libreria: xlrd
linkToFileXLSX="https://github.com/escuela-alacip/introPython/raw/master/data/ElectricBus.xlsx"
dataExcel=pd.read_excel(linkToFileXLSX,0) # no need for '0'
dataExcel.head()

# Los CSV son muy sencillos de cargar:

# +
linkToFileCSV="https://github.com/escuela-alacip/introPython/raw/master/data/mealSeattle.csv"

dataCSV=pd.read_csv(linkToFileCSV)

dataCSV.head()
# -

# Si tienes varios archivos, con nombres similares, puedes abrirlos asi:

# +
where='https://github.com/escuela-alacip/introPython/raw/master/data/'

# builds interview_1_2_p1.csv ... interview_1_2_p4.csv
allNames=[where+"interview_1_2_p"+str(i)+".csv" for i in range(1,5)]
# -

# solo los links
allNames

# +
# abrir y hacer lista con ellos
allFiles=[]
for filename in allNames:
    allFiles.append(pd.read_csv(filename))
# -

#el primero
allFiles[0]

# Concatenarlos:

pd.concat(allFiles[0:4],ignore_index=True)

# guardando lo hecho:

newOneFile=pd.concat(allFiles[0:4],ignore_index=True)

# Hay otro, pero requiere **merge**:

lastQ_file=pd.read_csv("https://github.com/escuela-alacip/introPython/raw/master/data/interview_finalQ.csv")

lastQ_file

newOneFile=newOneFile.merge(lastQ_file,left_on='interview', right_on='interview') # innecesario aqui

# the result
newOneFile.head()

# [Go to page beginning](#beginning)
#
# _____
#
# <a id='part2'></a>
#
# ## Colectando con GoogleForms
#
# Aqui seguiremos algunos pasos:
#
# * Crearé un GForm.
# * Lo compartiré con Uds.
# * Abriremos las respuestas.

# +
link='https://docs.google.com/spreadsheets/d/e/2PACX-1vQ-WUZKuNEtiW4-IMns8gl_lbf7pJKye2YlDAfPX2Uw_XntJ_dJ_tkGFMjHEG840jTBzefNwOKuiQ_O/pub?gid=1446415049&single=true&output=csv'
myData = pd.read_csv(link)

# here it is:
myData
# -

myData.columns

# short column names replace the long GForm question texts
newNames=['mt','pnombre','apellido','edad','paisN','paisT','cargo','preG','postG','invesT','genero']
myData.columns=newNames

myData.head(2)

# ¿Cómo se llama el/la menor de la clase?

myData[myData.edad==min(myData.edad)]

# NOTE(review): the second entry keeps a trailing space ("México ") on
# purpose to match dirty survey answers -- confirm against the sheet.
places=["Mexico", "México "]
myData[myData.paisN.isin(places)]

100*len(myData[myData.paisN.isin(places)])/len(myData)

myData.paisN.value_counts(normalize=True)

# ### Ejercicios:
#
# Hagamos las siguientes consultas:
#
# 1. ¿Cómo se llama el/la menor de la clase?
# 2. ¿De qué sexo es el/la mayor de la clase?
# 3. ¿Quiénes son de México?
# 4. ¿Qué porcentaje son de México?
# 5. ¿Qué edad tiene el/la mayor de México?
# 6. ¿Quiénes son de Brasil o Argentina?

# [Ir a Inicio](#beginning)
#
# -----
#
# <a id='part3'></a>
#
# ## Collecting data from APIs
#
# Los portales de datos abiertos tienen APIs que permiten descargar datos con facilidad. En general, un buen portal te indica cómo hacerlo. Veamos la data de llamadas 9-1-1 sobre incendios de [Seattle](https://dev.socrata.com/foundry/data.seattle.gov/kzjm-xkqj):

# +
import requests
import pandas as pd

# where is it online?
url = "https://data.seattle.gov/resource/kzjm-xkqj.json"

# Go for the data:
response = requests.get(url)

# If we got the data (HTTP 200 OK):
if response.status_code == 200:
    data911 = response.json()
# -

response.status_code

# +
# You can turn it easily into a pandas data frame:
data911DF=pd.DataFrame(data911)
# -

# here you are...
data911DF.head()

data911DF.shape

# ### Descargando de Twitter
#
# Twitter tiene su propio API. Para obtener acceso , hay que seguir estos pasos:
#
# 1. Necesita una cuenta en Twitter, Si la tiene,vaya a este [link](https://developer.twitter.com/en/apps).
# 2. Seleccione **Create a new App** y complete la información básica que se le pide.
# 3. Cuando haya creado el App, vaya a _Keys y Tokens_.
# 4. En un archivo simple de texto:
#     {"consumer_key": "aaa", "access_token_secret": "bbb", "consumer_secret": "ccc", "access_token": "ddd"}
# 5. Guardelo como keysAPI.txt en el mismo lugar que su Jupyter notebook.
#
# Uselo así:

# +
import json

# get the security info from file
# NOTE(review): the handle from open() is never closed; prefer
# `with open('data/keysAPI.txt') as fh: keysAPI = json.load(fh)`.
keysAPI = json.load(open('data/keysAPI.txt','r'))
# -

# Verify if you have **tweepy**. You may need to install it via **pip**.

# +
import tweepy

# recovering security info
consumer_key = keysAPI['consumer_key']
consumer_secret = keysAPI['consumer_secret']
access_token = keysAPI['access_token']
access_token_secret = keysAPI['access_token_secret']
# -

# using security info:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# NOTE(review): wait_on_rate_limit_notify was removed in tweepy v4;
# this call assumes tweepy 3.x -- confirm the installed version.
api=tweepy.API(auth, wait_on_rate_limit=True,wait_on_rate_limit_notify=True,parser=tweepy.parsers.JSONParser())

# +
# getting the tweets from a user:
tweets = api.user_timeline(screen_name = 'MashiRafael', count = 1000, include_rts = False)
# -

# Let's see what we have:

tweets

type(tweets)

tweets[0]

type(tweets[0])

# +
aTweet=tweets[0]

# list every field the API returned for one tweet
for field in aTweet.keys():
    print (field)
# -

aTweet['text']

aTweet['created_at']

# transform the dict into a DF in pandas
import pandas as pd
correaTweets=pd.DataFrame({'textTweet':[t['text'] for t in tweets]})
correaTweets

# [Ir a Inicio](#beginning)
#
# _____
#
# <a id='part4'></a>
#
# ## "Scrapeo" de tablas
#
# We are going to get the data from a table from this [wikipage](https://en.wikipedia.org/wiki/List_of_freedom_indices)

# +
from requests import get
from bs4 import BeautifulSoup as BS

# Link
wikiLink="https://en.wikipedia.org/wiki/List_of_freedom_indices"

# para evitar rechazo (some sites reject requests without a User-Agent)
identification = {"User-Agent":"Mozilla/5.0"}

# contactando a Wiki
wikiPage =get(wikiLink , headers=identification)

# BS trae html
wikiSoup =BS(wikiPage.content ,"html.parser")

# BS extrae tablaS
wikiTables=wikiSoup.findAll("table",{"class":"wikitable sortable"})

# +
# #Cuántas?
len(wikiTables)
# -

# eligiendo la que necesito
wikiTable=wikiTables[0]

# una mirada:
wikiTable

# Mi tabla está en HTML. Desde HTML se debe leer fila por fila:

allRows=wikiTable.find_all('tr') #'tr' es table row, es un TAG en HTML.

# la primera será?

headersHtml=allRows[0]
headersHtml

# Busquemos los TAGS 'th':

headersHtml.find_all('th')

# Pidamos los textos:

headersList=[header.get_text() for header in headersHtml.find_all('th')]

# tenemos:
headersList

# Sigamos mismo proceso para lo demás:

# +
rowsHtml=allRows[1:] #... [1:] omite el header
# -

# let's see one of these:
rowsHtml[0]

# Para cada fila:

rowsList=[[cell.get_text() for cell in row.find_all('td')] for row in rowsHtml]

rowsList[0:3] # a list of lists

# Aqui está:

pd.DataFrame(data=rowsList , columns=headersList)

# _____
#
# **AUSPICIO**:
#
# * El desarrollo de estos contenidos ha sido posible gracias al grant del Berkeley Initiative for Transparency in the Social Sciences (BITSS) at the Center for Effective Global Action (CEGA) at the University of California, Berkeley
#
#
# <center>
# <img src="https://www.bitss.org/wp-content/uploads/2015/07/bitss-55a55026v1_site_icon.png" style="width: 200px;"/>
# </center>
#
# * Este curso cuenta con el auspicio de:
#
#
# <center>
# <img src="https://www.python.org/static/img/psf-logo@2x.png" style="width: 500px;"/>
# </center>
#
#
#
# **RECONOCIMIENTO**
#
#
# EL Dr. Magallanes agradece a la Pontificia Universidad Católica del Perú, por su apoyo en la participación en la Escuela ALACIP.
#
# <center>
# <img src="https://dci.pucp.edu.pe/wp-content/uploads/2014/02/Logotipo_colores-290x145.jpg" style="width: 400px;"/>
# </center>
#
#
# El autor reconoce el apoyo que el eScience Institute de la Universidad de Washington le ha brindado desde el 2015 para desarrollar su investigación en Ciencia de Datos.
#
# <center>
# <img src="https://escience.washington.edu/wp-content/uploads/2015/10/eScience_Logo_HR.png" style="width: 500px;"/>
# </center>
#
# <br>
# <br>
Parte3_P_CargaDeData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # New Mean and Variance # # Now let's take the formulas from the example below and use them to write a program that takes in two means and variances, and returns a *new*, updated mean and variance for a gaussian. This step is called the parameter or **measurement update** because it is the update that happens when an initial belief (represented by the blue Gaussian, below) is merged with a new piece of information, a measurement with some uncertainty (the orange Gaussian). # # As you've seen in the previous quizzes, the updated Gaussian will be a combination of these two Gaussians with a new mean that is in between both of theirs and a variance that is less than the smallest of the two given variances; this means that after a measurement, our new mean is more certain than that of the initial belief! # <img src='images/mean_var.png' width="50%" height="50%"> # # Below is our usual Gaussian equation and imports. # + # import math functions from math import * import matplotlib.pyplot as plt import numpy as np # gaussian function def f(mu, sigma2, x): ''' f takes in a mean and squared variance, and an input x and returns the gaussian value.''' coefficient = 1.0 / sqrt(2.0 * pi *sigma2) exponential = exp(-0.5 * (x-mu) ** 2 / sigma2) return coefficient * exponential # - # ### QUIZ: Write an `update` function that performs the measurement update. # # This function should combine the given Gaussian parameters and return new values for the mean and squared variance. # # This function does not have to perform any exponential math, it simply has to follow the equations for the measurement update as seen in the image at the top of this notebook. You may assume that the given variances `var1` and `var2` are squared terms. 
# the update function
def update(mean1, var1, mean2, var2):
    '''Perform the Kalman-filter measurement update for 1-D Gaussians.

    Combines a prior belief N(mean1, var1) with a new measurement
    N(mean2, var2) and returns the parameters of the posterior Gaussian.

    mean1, var1 -- mean and (squared) variance of the current belief
    mean2, var2 -- mean and (squared) variance of the measurement
    Returns [new_mean, new_var]; new_var is always <= min(var1, var2),
    i.e. a measurement can only increase certainty.
    '''
    ## Calculate the new parameters
    # Each mean is weighted by the OTHER Gaussian's variance, so the more
    # certain (smaller-variance) estimate pulls the result harder.
    # Bug fix: use true division (/) -- the previous floor division (//)
    # silently truncated the updated mean and variance toward -inf,
    # which is wrong for any non-integer result.
    new_mean = ((var2 * mean1) + (var1 * mean2)) / (var2 + var1)
    # Combined uncertainty: 1 / (1/var1 + 1/var2).
    new_var = 1.0 / ((1.0 / var2) + (1.0 / var1))
    return [new_mean, new_var]


# test your implementation
new_params = update(10, 4, 12, 4)
print(new_params)

# ### Plot a Gaussian
#
# Plot a Gaussian by looping through a range of x values and creating a resulting list of Gaussian values, `g`, as shown below. You're encouraged to see what happens if you change the values of `mu` and `sigma2`.

# +
# display a gaussian over a range of x values

# define the parameters (the updated belief computed above)
mu = new_params[0]
sigma2 = new_params[1]

# define a range of x values
x_axis = np.arange(0, 20, 0.1)

# create a corresponding list of gaussian values
# (f is the gaussian defined in the previous cell)
g = []
for x in x_axis:
    g.append(f(mu, sigma2, x))

# plot the result
plt.plot(x_axis, g)
# -
Object tracking and Localization/Intro_to_Kalman_Filters/2. New Mean and Variance, exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/", "height": 273} id="6l8UyvHfvYq0" executionInfo={"status": "ok", "timestamp": 1611941604037, "user_tz": -120, "elapsed": 1232, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh8WbEU_RjamxH_Q83tw9ND2SKQk9gxqM-d3WDV=s64", "userId": "14036852932086046142"}} outputId="5e1c59fa-a362-4b1c-ae34-5208ea22f2b4"
# Read in the term frequency - inverse document frequency table
import pandas as pd

# widen column display so long document snippets remain readable
pd.set_option('max_colwidth', 150)

X = pd.read_pickle('/content/drive/MyDrive/Colab Notebooks/project/data/data_table.pkl')
X.head()

# + colab={"base_uri": "https://localhost:8080/"} id="EIdntL-7v46d" executionInfo={"status": "ok", "timestamp": 1611941604038, "user_tz": -120, "elapsed": 1223, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh8WbEU_RjamxH_Q83tw9ND2SKQk9gxqM-d3WDV=s64", "userId": "14036852932086046142"}} outputId="96731cbc-bdee-4bb8-d2ad-dacd3fc24cc2"
# Make the labels
import numpy as np

y = np.zeros(200)
# NOTE(review): the slice 0:99 marks only indices 0-98 (99 samples) as
# class 1, leaving index 99 labeled 0.  If the first 100 of the 200 rows
# belong to one author, this is an off-by-one and should be y[0:100] = 1
# -- confirm against how data_table.pkl was built.
y[0:99] = 1
y

# + id="E0MQXOC2bW4w" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611941615913, "user_tz": -120, "elapsed": 13092, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh8WbEU_RjamxH_Q83tw9ND2SKQk9gxqM-d3WDV=s64", "userId": "14036852932086046142"}} outputId="9f6db269-cc04-494f-8b73-f5267c75c08a"
# Compare four classifiers with stratified 10-fold cross-validation;
# each printed value is the mean score across the 10 folds.
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB

strat_k_fold = StratifiedKFold(n_splits=10, shuffle=True)

lr = LogisticRegression()
print(f'LogisticRegression: {cross_val_score(lr, X, y, cv=strat_k_fold).mean()}')

knn = KNeighborsClassifier(n_neighbors = 5)
print(f'KNeighborsClassifier: {cross_val_score(knn, X, y, cv=strat_k_fold).mean()}')

gnb = GaussianNB()
print(f'GaussianNB: {cross_val_score(gnb, X, y, cv=strat_k_fold).mean()}')

mnb = MultinomialNB()
print(f'MultinomialNB: {cross_val_score(mnb, X, y, cv=strat_k_fold).mean()}')

# + id="MVTbtN9iwRq8"
# Split the data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)

# + id="Ge3O96TNwiJ4"
# Perform logistic regression
model = LogisticRegression().fit(X_train, y_train)
# threshold the class-1 probability at 0.5 (equivalent to model.predict)
y_predict = [int(p[1] > 0.5) for p in model.predict_proba(X_test)]

# + colab={"base_uri": "https://localhost:8080/"} id="cwQV0ljRwncq" executionInfo={"status": "ok", "timestamp": 1611941615920, "user_tz": -120, "elapsed": 13085, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh8WbEU_RjamxH_Q83tw9ND2SKQk9gxqM-d3WDV=s64", "userId": "14036852932086046142"}} outputId="a9f696a7-84c2-46ad-f1c0-06de7da1f63c"
# Test model
print(f'Predicted:\n{y_predict}')
print(f'Actual:\n{y_test}')
# comparing a list to an ndarray broadcasts, so this cell displays an
# element-wise boolean array (not a single True/False)
y_predict == y_test

# + colab={"base_uri": "https://localhost:8080/"} id="etg2qvrdwtbE" executionInfo={"status": "ok", "timestamp": 1611941616211, "user_tz": -120, "elapsed": 13371, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh8WbEU_RjamxH_Q83tw9ND2SKQk9gxqM-d3WDV=s64", "userId": "14036852932086046142"}} outputId="d6d214bb-ed17-4aa5-d2b3-6073992a09b6"
# Get the words that were most significant in determining the author.
coef = model.coef_.reshape(-1)
# argsort is ascending: the ten most negative coefficients push the
# prediction toward class 0, the ten most positive toward class 1
word_idx_indicating_jovkov = np.argsort(coef)[:10]
word_idx_indicating_vazov = np.argsort(coef)[-10:]
print('Words indicating Jovkov:')
for i in word_idx_indicating_jovkov:
    print(X.columns[i])
print('\n\nWords indicating Vazov:')
for i in word_idx_indicating_vazov:
    print(X.columns[i])

# + id="W9qY4Py6zGX1"
# Pickle!
import pickle
# NOTE(review): the handle from open() is never closed; prefer
# `with open(..., 'wb') as fh: pickle.dump(model, fh)`.
pickle.dump(model, open('/content/drive/MyDrive/Colab Notebooks/project/data/model.pkl', 'wb'))
3_apply_techniques.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # ## **Vehicle detection and tracking** # # ![Cover](./writeup_imgs/cover.jpg) # # --- # # ## Overview # # In this project, I will write a software pipeline to identify vehicles in a video from a front-facing camera on a car. The test images and project video are available in the [project repository](https://github.com/miguelangel/sdc--vehicle-detection-and-tracking). # # The complete pipeline can be found [here](https://github.com/miguelangel/sdc--vehicle-detection-and-tracking/blob/master/vehicle_detection_and_tracking.ipynb). # # # ## Goals/Steps # # The goals / steps of this project are the following: # * Import and initialize the packages needed in the project. # * Implement a function that computes Histogram of Oriented Gradients (HOG) features from an image. # * Implement a function that computes binned color features from an image. # * Implement a function that computes color histogram features from an image. # * Combine the previous feature extractors on a function # * Extract features from the dataset, and split them in training and testing sets # * Normalize the features # * Train a Linear SVM classifier. # * Implement a sliding-window technique and use your trained classifier to search for vehicles in images. # * Implement Hog Sub-sampling Window Search, a more efficient method for doing the sliding window # * Handle multiple detections and false positives # * Run pipeline in a video. 
# ### Step 0: Import and initialize the packages needed in the project # + import cv2 import glob from IPython.display import HTML import matplotlib.image as mpimg import matplotlib.pyplot as plt from matplotlib.patches import Polygon from moviepy.editor import VideoFileClip import numpy as np from scipy.ndimage.measurements import label from skimage.feature import hog from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC import time # My humble tribute to <NAME>, # the best basketball player ever. np.random.seed(23) # %matplotlib inline # - # ### Step 1: Implement a function that computes Histogram of Oriented Gradients (HOG) features from an image # This function returns HOG features and visualization # Features will always be the first element of the return # Image data will be returned as the second element if visualize= True # Otherwise there is no second return element def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True): """ Function accepts params and returns HOG features (optionally flattened) and an optional matrix for visualization. Features will always be the first return (flattened if feature_vector= True). A visualization matrix will be the second return if visualize = True. 
""" return_list = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell), cells_per_block=(cell_per_block, cell_per_block), block_norm='L2-Hys', transform_sqrt=True, visualise=vis, feature_vector=feature_vec) if vis: hog_features = return_list[0] hog_image = return_list[1] return hog_features, hog_image else: hog_features = return_list return hog_features # Helper method to plot two images side by side def plt_images(img_1, title_1, img_2, title_2, cmap='gray'): f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) ax1.set_title(title_1, fontsize=16) ax1.imshow(img_1) ax2.set_title(title_2, fontsize=16) ax2.imshow(img_2, cmap=cmap) # + # Run the function on car images #car_images = glob.glob('./data/vehicles_smallset/*/*.jpeg') car_images = glob.glob('./data/vehicles/*/*.png') # Display a random car image and its HOG visualization, ind = np.random.randint(0, len(car_images)) # Read in the image and convert it to grayscale car_img = mpimg.imread(car_images[ind]) gray_img = cv2.cvtColor(car_img, cv2.COLOR_RGB2GRAY) # Call our function with vis=True to see an image output features, hog_image = get_hog_features(gray_img, orient=9, pix_per_cell=8, cell_per_block=2, vis=True, feature_vec=False) # Plot results plt_images(car_img, 'Example of car image', hog_image, 'HOG Visualization') # + # Run the function also on non-car images #non_car_images = glob.glob('./data/non-vehicles_smallset/*/*.jpeg') non_car_images = glob.glob('./data/non-vehicles/*/*.png') # Display a random car image and its HOG visualization, ind = np.random.randint(0, len(non_car_images)) # Read in the image and convert it to grayscale non_car_img = mpimg.imread(non_car_images[ind]) gray_img = cv2.cvtColor(non_car_img, cv2.COLOR_RGB2GRAY) # Call our function with vis=True to see an image output features, hog_image = get_hog_features(gray_img, orient= 9, pix_per_cell= 8, cell_per_block= 2, vis=True, feature_vec=False) hog_features = get_hog_features(gray_img, orient=9, pix_per_cell=8, 
cell_per_block=2, vis=False, feature_vec=True)

# Plot results
plt_images(non_car_img, 'Example of non-car image', hog_image, 'HOG Visualization')
# -

# ### Step 2: Implement a function that computes binned color features from an image

# This function computes binned color features
def bin_spatial(img, size=(32, 32)):
    """Resize `img` to `size` and flatten it into a 1-D spatial-color feature vector."""
    features = cv2.resize(img, size).ravel()
    # Return the feature vector
    return features


# +
# Run the function
spatial_features = bin_spatial(car_img, size=(32, 32))

# Plot results
fig = plt.figure(figsize=(12,4))
plt.subplot(131)
plt.title('Original Image')
plt.imshow(car_img)
plt.subplot(132)
plt.title('Binned color features')
plt.plot(spatial_features)
# -

# ### Step 3: Implement a function that computes color histogram features from an image

# This function computes color histogram features
def color_hist(img, nbins=32):
    """Histogram each color channel of `img` with `nbins` bins and
    concatenate the three bin-count vectors into one feature vector."""
    # Compute the histogram of the color channels separately
    channel1_hist = np.histogram(img[:,:,0], bins=nbins)
    channel2_hist = np.histogram(img[:,:,1], bins=nbins)
    channel3_hist = np.histogram(img[:,:,2], bins=nbins)
    # Concatenate the histograms into a single feature vector
    # (np.histogram returns (counts, bin_edges); only the counts are used)
    hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
    # Return the individual histograms, bin_centers and feature vector
    return hist_features


# +
# Run the function
hist_features = color_hist(car_img, nbins=32)

# Plot results
fig = plt.figure(figsize=(12,4))
plt.subplot(131)
plt.title('Original Image')
plt.imshow(car_img)
plt.subplot(132)
plt.title('Color histogram features')
plt.plot(hist_features)
# -

# ### Step 4: Combine the previous feature extractors on a function

# +
# This function converts an image to a different color space
def convert_color(img, conv=''):
    """Convert an RGB image to the color space named by `conv`;
    return an unmodified copy when `conv` is empty or unrecognized."""
    if conv == 'RGB2HSV':
        return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    if conv == 'RGB2HLS':
        return cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    if conv == 'RGB2LUV':
        return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
    if conv == 'RGB2YUV':
        return cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    if conv == 'RGB2YCrCb':
        return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    return np.copy(img)

# This function extracts features from an image
def single_img_features(img, conv='', orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_size=(32, 32), hist_bins=32,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Build one feature vector for a single image.

    Concatenates (in this order, for each enabled flag):
    HOG features (`hog_feat`), binned spatial color features
    (`spatial_feat`) and per-channel color histograms (`hist_feat`).
    `hog_channel` may be 0, 1, 2 or 'ALL'.
    """
    # Define an empty list to receive features
    img_features = []
    # Apply color conversion
    feature_image = convert_color(img, conv)
    # Compute HOG features if flag is set
    if hog_feat == True:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:,:,channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
                                            pix_per_cell, cell_per_block,
                                            vis=False, feature_vec=True)
        # Append features to list
        img_features.append(hog_features)
    # Compute spatial features if flag is set
    if spatial_feat == True:
        spatial_features = bin_spatial(feature_image, size=spatial_size)
        # Append features to list
        img_features.append(spatial_features)
    # Compute histogram features if flag is set
    if hist_feat == True:
        hist_features = color_hist(feature_image, nbins=hist_bins)
        # Append features to list
        img_features.append(hist_features)
    # Return concatenated array of features
    return np.concatenate(img_features)
# -

# This function extracts features from a list of paths to images
def extract_features(imgs, conv='', orient=9,
                     pix_per_cell=8, cell_per_block=2, hog_channel=0,
                     spatial_size=(32, 32), hist_bins=32,
                     spatial_feat=True, hist_feat=True, hog_feat=True):
    """Read every image path in `imgs` and return a list of feature
    vectors, one per image, built by single_img_features()."""
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        # Read in each one by one
        img = mpimg.imread(file)
        features.append(single_img_features(img, conv=conv, orient=orient,
                                            pix_per_cell=pix_per_cell,
                                            cell_per_block=cell_per_block,
                                            hog_channel=hog_channel,
                                            spatial_size=spatial_size,
                                            hist_bins=hist_bins,
                                            spatial_feat=spatial_feat,
                                            hist_feat=hist_feat,
                                            hog_feat=hog_feat))
    # Return list of feature vectors
    return features

# ### Step 5: Extract features from the dataset, and split them in training and testing sets

# Parameters
conv = 'RGB2YCrCb' # Can be '', 'RGB2HSV', 'RGB2HLS','RGB2LUV', 'RGB2YUV', 'RGB2YCrCb'
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32)
hist_bins = 32
spatial_feat=True
hist_feat=True
hog_feat=True
y_start_stop=[350, 720]
scale=1.5

# +
# Extract features from car and non_car datasets
car_features = extract_features(car_images, conv=conv, orient=orient,
                                pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                hog_channel=hog_channel, spatial_size=spatial_size,
                                hist_bins=hist_bins, spatial_feat=spatial_feat,
                                hist_feat=hist_feat, hog_feat=hog_feat)
non_car_features = extract_features(non_car_images, conv=conv, orient=orient,
                                    pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                    hog_channel=hog_channel, spatial_size=spatial_size,
                                    hist_bins=hist_bins, spatial_feat=spatial_feat,
                                    hist_feat=hist_feat, hog_feat=hog_feat)

# Create features and labels dataset
features = np.vstack((car_features, non_car_features)).astype(np.float64)
car_labels = np.ones(len(car_features))
non_car_labels = np.zeros(len(non_car_features))
labels = np.concatenate((car_labels, non_car_labels))

# Split our dataset in training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=23)

# Print the results
print ("The dataset is balanced:")
print ("- Number of car_images:", len(car_images))
print ("- Number of non_car images:", len(non_car_images))
print ()
print ("Number of samples:")
print ("- In training set:", X_train.shape[0])
print ("- In testing set:", X_test.shape[0])
print ()
print ("X_train before normalization:")
print (X_train)
# -

# ### Step 6: Normalize the features

# +
# Fit a per-column scaler only on the training data
# (fitting on the full dataset would leak test statistics into training)
X_scaler = StandardScaler().fit(X_train)

# Apply the scaler to both X_train and X_test
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)

# Print the results
print("X_train after normalization:")
print(X_train)
print()
scaled_features = np.vstack((X_train, X_test))
print("Features mean after normalization: {:.2f}".format(np.mean(scaled_features)))
print("Features variance after normalization: {:.2f}".format(np.var(scaled_features)))
# -

# ### Step 7: Train a Linear SVM classifier

# +
# Use a linear SVC
svc = LinearSVC()

# Check the training time for the SVC
t=time.time()
clf = svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train LinearSVC...')

# Check the score of the SVC
print('Test Accuracy of LinearSVC = ', round(clf.score(X_test, y_test), 4))

# Check the prediction time for a single sample
t=time.time()
n_predict = 10
print('My LinearSVC predicts: ', clf.predict(X_test[0:n_predict]))
print('For these',n_predict, 'labels: ', y_test[0:n_predict])
t2 = time.time()
print(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with LinearSVC')
# -

# ### Step 8: Implement a sliding-window technique and use your trained classifier to search for vehicles in images

# This function takes an image, start and stop positions in both x and y,
# window size (x and y dimensions), and overlap fraction (for both x and y).
# It returns a list of windows.
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Generate a list of sliding-window coordinates over a region of `img`.

    Each window is returned as ((startx, starty), (endx, endy)).
    `None` entries in the start/stop lists default to the full image extent.
    """
    # Work on copies: the original assigned into the parameter lists, which
    # silently mutated the *shared* default argument (and any caller list).
    x_start_stop = list(x_start_stop)
    y_start_stop = list(y_start_stop)
    # If x and/or y start/stop positions not defined, set to image size
    if x_start_stop[0] == None:
        x_start_stop[0] = 0
    if x_start_stop[1] == None:
        x_start_stop[1] = img.shape[1]
    if y_start_stop[0] == None:
        y_start_stop[0] = 0
    if y_start_stop[1] == None:
        y_start_stop[1] = img.shape[0]
    # Compute the span of the region to be searched
    xspan = x_start_stop[1] - x_start_stop[0]
    yspan = y_start_stop[1] - y_start_stop[0]
    # Compute the number of pixels per step in x/y
    # (builtin int replaces np.int, which was removed in NumPy 1.24)
    nx_pix_per_step = int(xy_window[0]*(1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1]*(1 - xy_overlap[1]))
    # Compute the number of windows in x/y
    nx_buffer = int(xy_window[0]*(xy_overlap[0]))
    ny_buffer = int(xy_window[1]*(xy_overlap[1]))
    nx_windows = int((xspan-nx_buffer)/nx_pix_per_step)
    ny_windows = int((yspan-ny_buffer)/ny_pix_per_step)
    # Initialize a list to append window positions to
    window_list = []
    # Loop through finding x and y window positions
    # Note: you could vectorize this step, but in practice
    # you'll be considering windows one by one with your
    # classifier, so looping makes sense
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            # Calculate window position
            startx = xs*nx_pix_per_step + x_start_stop[0]
            endx = startx + xy_window[0]
            starty = ys*ny_pix_per_step + y_start_stop[0]
            endy = starty + xy_window[1]
            # Append window position to list
            window_list.append(((startx, starty), (endx, endy)))
    # Return the list of windows
    return window_list


# This function receives an image and the list of windows to be searched,
# and returns a list of windows for positve detections
def search_windows(img, windows, clf, X_scaler, conv='', orient=9,
                   pix_per_cell=8, cell_per_block=2, hog_channel=0,
                   spatial_size=(32, 32), hist_bins=32,
                   spatial_feat=True, hist_feat=True, hog_feat=True):
    """Classify each candidate window of `img` with `clf` and return
    only the windows predicted positive (cars)."""
    # Create an empty list to receive positive detection windows
    on_windows = []
    # Iterate over all windows in the list
    for window in windows:
        # Extract the test window from original image (resized to the
        # 64x64 patch size the classifier was trained on)
        test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
        # Extract features for that window using single_img_features()
        features = single_img_features(test_img, conv=conv, orient=orient,
                                       pix_per_cell=pix_per_cell,
                                       cell_per_block=cell_per_block,
                                       hog_channel=hog_channel,
                                       spatial_size=spatial_size,
                                       hist_bins=hist_bins,
                                       spatial_feat=spatial_feat,
                                       hist_feat=hist_feat, hog_feat=hog_feat)
        # Scale extracted features to be fed to classifier
        test_features = X_scaler.transform(np.array(features).reshape(1, -1))
        # Predict using your classifier
        prediction = clf.predict(test_features)
        # If positive (prediction == 1) then save the window
        if prediction == 1:
            on_windows.append(window)
    # Return windows for positive detections
    return on_windows


# This function draws boxes in an image
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with every bounding box in `bboxes` drawn."""
    # Make a copy of the image
    imcopy = np.copy(img)
    # Iterate through the bounding boxes
    for bbox in bboxes:
        # Draw a rectangle given bbox coordinates
        cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
    # Return the image copy with boxes drawn
    return imcopy


# +
# Test our sliding-window technique on a sample image
img = mpimg.imread('./test_images/test1.jpg')
draw_img = np.copy(img)

# The following line is only needed when training
# is performed with .png images (scaled 0 to 1 by mpimg)
# and the test image is a .jpg (scaled 0 to 255)
img = img.astype(np.float32)/255

# Create windows where to search
windows = slide_window(img, x_start_stop=[None, None], y_start_stop=y_start_stop,
                       xy_window=(96, 96), xy_overlap=(0.7, 0.7))

# Search windows where cars have been found
hot_windows = search_windows(img, windows, clf, X_scaler, conv=conv, orient=orient,
                             pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                             hog_channel=hog_channel, spatial_size=spatial_size,
                             hist_bins=hist_bins, spatial_feat=spatial_feat,
                             hist_feat=hist_feat, hog_feat=hog_feat)

# Draw boxes on those windows
window_img = draw_boxes(draw_img, hot_windows, color=(0, 0, 255), thick=6)

# Plot results
plt_images(img, 'Test image', window_img, 'Test image boxing potential cars')
# -

# ### Step 9: Implement Hog Sub-sampling Window Search, a more efficient method for doing the sliding window

# This function extracts features using hog sub-sampling and make predictions
# It returns both, an image where potential cars are surrounded by boxes,
# and also the list of boxes
def find_cars(img, y_start_stop, scale, clf, X_scaler, conv='', orient=9,
              pix_per_cell=8, cell_per_block=2, hog_channel=0,
              spatial_size=(32, 32), hist_bins=32,
              spatial_feat=True, hist_feat=True, hog_feat=True):
    """HOG sub-sampling search: compute HOG once for the whole search strip,
    then slide a 64x64 window in HOG-cell steps. Returns (annotated image,
    list of positive-detection boxes in original image coordinates)."""
    draw_img = np.copy(img)

    img_tosearch = img[y_start_stop[0]:y_start_stop[1],:,:]
    ctrans_tosearch = convert_color(img_tosearch, conv=conv)
    if scale != 1:
        # Shrinking the strip by `scale` is equivalent to searching with a
        # window `scale` times larger
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch,
                                     (int(imshape[1]/scale), int(imshape[0]/scale)))

    ch1 = ctrans_tosearch[:,:,0]
    ch2 = ctrans_tosearch[:,:,1]
    ch3 = ctrans_tosearch[:,:,2]

    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    nfeat_per_block = orient*cell_per_block**2

    # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1

    # Compute HOG features if flag is set
    if hog_feat == True:
        if hog_channel == 'ALL':
            # Compute individual channel HOG features for the entire image
            hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
            hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
            hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
        elif hog_channel == 0:
            hog = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
        elif hog_channel == 1:
            hog = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
        elif hog_channel == 2:
            hog = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)

    box_list = []
    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb*cells_per_step
            xpos = xb*cells_per_step

            # Define an empty list to receive features
            img_features = []

            # Extract HOG for this patch
            if hog_feat == True:
                if hog_channel == 'ALL':
                    hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                    hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                    hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                    hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
                else:
                    hog_features = hog[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                # Append features to list
                img_features.append(hog_features)

            # Extract the image patch
            xleft = xpos*pix_per_cell
            ytop = ypos*pix_per_cell
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))

            # Get color features
            if spatial_feat == True:
                spatial_features = bin_spatial(subimg, size=spatial_size)
                # Append features to list
                img_features.append(spatial_features)

            # Compute histogram features if flag is set
            if hist_feat == True:
                hist_features = color_hist(subimg, nbins=hist_bins)
                # Append features to list
                img_features.append(hist_features)

            # Scale features and make a prediction
            test_features = X_scaler.transform(np.hstack(img_features).reshape(1, -1))
            test_prediction = clf.predict(test_features)

            if test_prediction == 1:
                # Map window coordinates back to the original (unscaled) image
                # (builtin int replaces the removed np.int alias)
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
                cv2.rectangle(draw_img, (xbox_left, ytop_draw+y_start_stop[0]),
                              (xbox_left+win_draw,ytop_draw+win_draw+y_start_stop[0]),(0,0,255),6)
                box_list.append(((xbox_left, ytop_draw+y_start_stop[0]),
                                 (xbox_left+win_draw,ytop_draw+win_draw+y_start_stop[0])))

    return draw_img, box_list


# +
out_img, box_list = find_cars(img, y_start_stop, scale, clf, X_scaler, conv=conv,
                              orient=orient, pix_per_cell=pix_per_cell,
                              cell_per_block=cell_per_block, hog_channel=hog_channel,
                              spatial_size=(spatial_size), hist_bins=hist_bins,
                              spatial_feat=spatial_feat, hist_feat=hist_feat,
                              hog_feat=hog_feat)

# Plot results
plt_images(img, 'Test image', out_img, 'Test image after find_cars')
# -

# ### Step 10: Handle multiple detections and false positives

# +
# This function adds "heat" to a map for a list of bounding boxes
def add_heat(heatmap, bbox_list):
    """Increment `heatmap` (in place) by 1 inside every box of `bbox_list`."""
    # Iterate through list of bboxes
    for box in bbox_list:
        # Add += 1 for all pixels inside each bbox
        # Assuming each "box" takes the form ((x1, y1), (x2, y2))
        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
    # Return updated heatmap
    return heatmap

# This function zero outs heatmap pixels which are below a threshold
def apply_threshold(heatmap, threshold):
    """Zero out (in place) every heatmap pixel at or below `threshold`."""
    # Zero out pixels below the threshold
    heatmap[heatmap <= threshold] = 0
    # Return thresholded map
    return heatmap

def draw_labeled_bboxes(img, labels):
    """Draw one bounding box per labeled region from scipy.ndimage `label` output."""
    # Iterate through all detected cars
    for car_number in range(1, labels[1] + 1):
        # Find pixels with each car_number label value
        nonzero = (labels[0] == car_number).nonzero()
        # Identify x and y values of those pixels
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Define a bounding box based on min/max x and y
        bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
        # Draw the box on the image
        cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
    # Return the image
    return img
# -

# +
# Add heat to each box in box list
# (builtin float replaces np.float, which was removed in NumPy 1.24)
heat = np.zeros_like(img[:,:,0]).astype(float)
heat = add_heat(heat, box_list)

# Apply threshold to help remove false positives
heat = apply_threshold(heat, 1.2)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)

# Find final boxes from heatmap using label function
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(img), labels)

plt_images(draw_img, 'Car positions', heatmap, 'Heat map', cmap='hot')
# -

# ### Step 11: Run pipeline in a video

class ProcessImage:
    """Per-frame video processor.

    Accumulates detection boxes over `heat_max_history` frames, then builds
    a heat map from the accumulated boxes, thresholds it to suppress false
    positives, and draws one box per labeled region. Between heat-map
    refreshes it reuses the last computed heat map.
    """
    def __init__(self, y_start_stop, scale, clf, X_scaler, conv='', orient=9,
                 pix_per_cell=8, cell_per_block=2, hog_channel='ALL',
                 spatial_size=(32, 32), hist_bins=32,
                 spatial_feat=False, hist_feat=False, hog_feat=True,
                 heat_max_history=20):
        # Save init parameters
        self.y_start_stop=y_start_stop
        self.scale=scale
        self.clf=clf
        self.X_scaler=X_scaler
        self.conv=conv
        self.orient=orient
        self.pix_per_cell=pix_per_cell
        self.cell_per_block=cell_per_block
        self.hog_channel=hog_channel
        self.spatial_size=spatial_size
        self.hist_bins=hist_bins
        self.spatial_feat=spatial_feat
        self.hist_feat=hist_feat
        self.hog_feat=hog_feat
        self.heat_max_history = heat_max_history
        # Rolling state: per-frame box lists and the last computed heat map
        self.box_list_history = []
        self.heat_history = None

    def __call__(self, img):
        # Scaled from [0, 255] to [0,1]
        img_scaled = np.copy(img).astype(np.float32)/255

        # Find cars in the image
        _ , box_list = find_cars(img_scaled, self.y_start_stop, self.scale,
                                 self.clf, self.X_scaler, conv=self.conv,
                                 orient=self.orient, pix_per_cell=self.pix_per_cell,
                                 cell_per_block=self.cell_per_block,
                                 hog_channel=self.hog_channel,
                                 spatial_size=self.spatial_size,
                                 hist_bins=self.hist_bins,
                                 spatial_feat=self.spatial_feat,
                                 hist_feat=self.hist_feat, hog_feat=self.hog_feat)

        # Add box_list to history
        if len(self.box_list_history) < self.heat_max_history:
            self.box_list_history.append(box_list)
            if self.heat_history is None:
                # Not enough history yet: return the frame unannotated
                return img
            else:
                heat = self.heat_history
        else:
            # Add heat to each box in box list
            # (builtin float replaces np.float, removed in NumPy 1.24)
            heat = np.zeros_like(img[:,:,0]).astype(float)
            # Flatten the per-frame box lists with a comprehension: the
            # original np.concatenate(np.array(...)) raises on ragged
            # histories (frames with different numbers of boxes) in
            # modern NumPy.
            all_boxes = [box for frame_boxes in self.box_list_history
                         for box in frame_boxes]
            heat = add_heat(heat, all_boxes)

            # Apply threshold to help remove false positives
            heat = apply_threshold(heat, 45)

            # Save heat in the history
            self.heat_history = heat
            self.box_list_history = []

        # Visualize the heatmap when displaying
        heatmap = np.clip(heat, 0, 255)

        # Find final boxes from heatmap using label function
        labels = label(heatmap)
        out_img = draw_labeled_bboxes(np.copy(img), labels)

        # Return image
        return out_img


# +
# Parameters
conv = 'RGB2YCrCb' # Can be '', 'RGB2HSV', 'RGB2HLS','RGB2LUV', 'RGB2YUV', 'RGB2YCrCb'
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32)
hist_bins = 32
spatial_feat=True
hist_feat=True
hog_feat=True
y_start_stop=[350, 720]
scale=1.5

input_video = './project_video.mp4'
output_video = './project_video_solution.mp4'

## You may uncomment the following line for a subclip of the first 5 seconds
#clip1 = VideoFileClip(input_video).subclip(25,29)
clip1 = VideoFileClip(input_video)

# Process video frames with our 'process_image' function
process_image = ProcessImage(y_start_stop, scale, clf, X_scaler, conv=conv,
                             orient=orient, pix_per_cell=pix_per_cell,
                             cell_per_block=cell_per_block, hog_channel=hog_channel,
                             spatial_size=(spatial_size), hist_bins=hist_bins,
                             spatial_feat=spatial_feat, hist_feat=hist_feat,
                             hog_feat=hog_feat)

white_clip = clip1.fl_image(process_image)
# %time white_clip.write_videofile(output_video, audio=False)
# -

HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
</video>
""".format(output_video))
vehicle_detection_and_tracking.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys, os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import bayes_mvs as bayesest
import time

from PyEcoLib.simulator import Simulator
# %matplotlib inline
# -

# Simulation parameters
mean_size = 1 # femto liter
doubling_time = 18 #min
tmax = 180 #min
sample_time = 2 #min
div_steps = 10
ncells = 5000

# Exponential growth rate corresponding to the doubling time
gr = np.log(2)/doubling_time

if not os.path.exists('./data'):
    os.makedirs('./data') #data path
if not os.path.exists('./figures'):
    os.makedirs('./figures') #Figures path

# Division-strategy simulations: default lamb=1 is the adder strategy,
# lamb=2 approximates a sizer and lamb=0.5 a timer.
# (builtin int replaces np.int, which was removed in NumPy 1.24)
start = time.time()
sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps)
sim.divstrat(tmax = tmax, nameDSM = "./data/dataDSMadder.csv")
print('It took', int(time.time()-start), 'seconds.')

start = time.time()
sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,lamb = 2)
sim.divstrat(tmax = tmax, nameDSM = "./data/dataDSMsizer.csv")
print('It took', int(time.time()-start), 'seconds.')

start = time.time()
sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,lamb = 0.5)
sim.divstrat(tmax = tmax, nameDSM = "./data/dataDSMtimer.csv")
print('It took', int(time.time()-start), 'seconds.')

# Size-dynamics simulations (stochastic SSA trajectories)
start = time.time()
sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps)
sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM1.csv")
print('It took', int(time.time()-start), 'seconds.')

# Same dynamics but starting from a finite (gamma) initial size distribution
CV2sz = 0.02
v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells)
start = time.time()
sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,V0array=v0)
sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM2.csv")
print('It took', int(time.time()-start), 'seconds.')

# Noise parameters for the noisy-splitting simulation
CV2div = 0.002
CV2gr = 0.02

# +
start = time.time()
sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps, CV2div = CV2div, CV2gr = CV2gr)
sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM3.csv")
print('It took', int(time.time()-start), 'seconds.')
# -

# +
# Per-time-point mean and CV^2 of cell size (with Bayesian 95% error bars)
# for the baseline SSA data
data1=pd.read_csv("./data/dataCRM1.csv")
timearray1=data1.time.unique()
mnszarray1=[]
cvszarray1=[]
errcv2szarray1=[]
errmnszarray1=[]
df=data1
del df['time']
for m in range(len(df)):
    szs=df.loc[m, :].values.tolist()
    mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
    mnszarray1.append(np.mean(szs))
    errmnszarray1.append(mean_cntr[1][1]-mean_cntr[0])
    cvszarray1.append(np.var(szs)/np.mean(szs)**2)
    # Error propagation of var/mean^2 through the mean and variance intervals
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2szarray1.append(errv)
# -

# +
# Same statistics for the finite-initial-distribution data
data1=pd.read_csv("./data/dataCRM2.csv")
timearray2=data1.time.unique()
mnszarray2=[]
cvszarray2=[]
errcv2szarray2=[]
errmnszarray2=[]
df=data1
del df['time']
for m in range(len(df)):
    szs=df.loc[m, :].values.tolist()
    mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
    mnszarray2.append(np.mean(szs))
    errmnszarray2.append(mean_cntr[1][1]-mean_cntr[0])
    cvszarray2.append(np.var(szs)/np.mean(szs)**2)
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2szarray2.append(errv)
# -

# +
# Same statistics for the noisy-splitting data
data1=pd.read_csv("./data/dataCRM3.csv")
timearray3=data1.time.unique()
mnszarray3=[]
cvszarray3=[]
errcv2szarray3=[]
errmnszarray3=[]
df=data1
del df['time']
for m in range(len(df)):
    szs=df.loc[m, :].values.tolist()
    mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
    mnszarray3.append(np.mean(szs))
    errmnszarray3.append(mean_cntr[1][1]-mean_cntr[0])
    cvszarray3.append(np.var(szs)/np.mean(szs)**2)
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2szarray3.append(errv)
# -

start = time.time()
sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps)
sim.szdynFSP(tmax = tmax, sample_time= 0.1*doubling_time, nameFSP = "./data/dataFSP0.csv")
# (builtin int replaces np.int, which was removed in NumPy 1.24)
print('It took', int(time.time()-start), 'seconds.')

# Numerical (FSP) solution with a finite initial size distribution
start = time.time()
CV2sz = 0.02
sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps)
sim.szdynFSP(tmax = tmax, sample_time= 0.1*doubling_time, nameFSP = "./data/dataFSP.csv",CV2sz=CV2sz)
print('It took', int(time.time()-start), 'seconds.')

# +
# Compare SSA statistics against the numerical (FSP) solution:
# top row = mean size, bottom row = CV^2, one column per scenario.
fig, ax = plt.subplots(2,3, figsize=(16,6),sharex=True)

data=pd.read_csv("./data/dataCRM1.csv")
tt=data.time
del data['time']
mmar=data.columns
# NOTE(review): the original iterated `df.columns` (a leftover global from a
# previous analysis cell) while plotting `data[column]`; iterating
# `data.columns` guarantees the plotted columns exist in `data`.
for column in data.columns[0:10]:
    ax[0,0].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')

data=pd.read_csv("./data/dataCRM2.csv")
tt=data.time
del data['time']
mmar=data.columns
for column in data.columns[0:10]:
    ax[0,1].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')

data=pd.read_csv("./data/dataCRM3.csv")
tt=data.time
del data['time']
mmar=data.columns
for column in data.columns[0:10]:
    ax[0,2].plot(tt/doubling_time,data[column],c="#B9B9B9")

ax[0,0].plot(np.array(timearray1)/doubling_time,mnszarray1,lw=2)
ax[0,0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray1)-np.array(errmnszarray1),np.array(mnszarray1)
                     +np.array(errmnszarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,0].plot(np.array(timearray1)/doubling_time,cvszarray1,lw=2)
ax[1,0].fill_between(np.array(timearray1)/doubling_time,np.array(cvszarray1)-np.array(errcv2szarray1),np.array(cvszarray1)
                     +np.array(errcv2szarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,1].plot(np.array(timearray2)/doubling_time,mnszarray2,lw=2)
ax[0,1].fill_between(np.array(timearray2)/doubling_time,np.array(mnszarray2)-np.array(errmnszarray2),np.array(mnszarray2)
                     +np.array(errmnszarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,1].plot(np.array(timearray2)/doubling_time,cvszarray2,lw=2)
ax[1,1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2szarray2),np.array(cvszarray2)
                     +np.array(errcv2szarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,2].plot(np.array(timearray3)/doubling_time,mnszarray3,lw=2)
ax[0,2].fill_between(np.array(timearray3)/doubling_time,np.array(mnszarray3)-np.array(errmnszarray3),np.array(mnszarray3)
                     +np.array(errmnszarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,2].plot(np.array(timearray3)/doubling_time,cvszarray3,lw=2)
ax[1,2].fill_between(np.array(timearray3)/doubling_time,np.array(cvszarray3)-np.array(errcv2szarray3),np.array(cvszarray3)
                     +np.array(errcv2szarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)

ax[0,0].set_title("Stochastic division",fontsize=15)
ax[0,1].set_title("Finite Initial Distribution",fontsize=15)
ax[0,2].set_title("Noisy Splitting",fontsize=15)

data=pd.read_csv("./data/dataFSP.csv")
ax[0,1].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,1].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')
data=pd.read_csv("./data/dataFSP0.csv")
ax[0,0].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,0].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')

ax[0,0].legend(fontsize=15)
ax[0,1].legend(fontsize=15)
ax[0,0].set_ylabel(r"$\langle s\rangle$ $(\mu m)$",size=15)
ax[1,0].set_ylabel("$C_V^2(s)$",size=15)
ax[1,0].set_xlabel(r"$t/\tau$",size=15)
ax[1,1].set_xlabel(r"$t/\tau$",size=15)
ax[1,2].set_xlabel(r"$t/\tau$",size=15)

# Uniform cosmetic formatting for all six panels
for l in [0,1]:
    for m in [0,1,2]:
        ax[l,m].set_xlim([0,6])
        taqui=np.arange(0,7,step=1)
        ax[l,m].set_xticks(np.array(taqui))
        ax[l,m].grid()
        ax[l,m].tick_params(axis='x', labelsize=12)
        ax[l,m].tick_params(axis='y', labelsize=12)
        for axis in ['bottom','left']:
            ax[l,m].spines[axis].set_linewidth(2)
            ax[l,m].tick_params(axis='both', width=2,length=6)
        for axis in ['top','right']:
            ax[l,m].spines[axis].set_linewidth(0)
            ax[l,m].tick_params(axis='both', width=0,length=6)
        taqui=np.arange(0,0.13,step=0.02)
        ax[1,m].set_yticks(np.array(taqui))
        taqui=np.arange(0.5,3,step=.5)
        ax[0,m].set_yticks(np.array(taqui))
        ax[1,m].set_ylim([0,0.13])
        ax[0,m].set_ylim([0.5,3])
plt.subplots_adjust(hspace=0.15,wspace=0.2)
#ax[1].plot(time4,np.array(allvarsz4),c='r')
#ax[0].plot(time4,mean_size*np.array(allmeansz4),c='r',label="Numeric")
plt.savefig('./figures/size_statistics_comp1.eps',bbox_inches='tight')
plt.savefig('./figures/size_statistics_comp1.svg',bbox_inches='tight')
plt.savefig('./figures/size_statistics_comp1.png',bbox_inches='tight')
# -

# +
# Adder strategy: binned statistics of added size vs. size at birth
data2=pd.read_csv("./data/dataDSMadder.csv")
data2=data2[data2.time>5*doubling_time]
quantnumber=5
pvadd2=data2
CV2darr1=[]
deltarr1=[]
sbarr1=[]
errcv2darr1=[]
errdeltarr1=[]
errsbarr1=[]
for i in range(quantnumber):
    lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber)
    hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber)
    quanta1=pvadd2[pvadd2.S_b>lperv0]
    quanta2=quanta1[quanta1.S_b<hperv0]
    mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95)
    meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95)
    CV2darr1.append(var_cntr[0]/mean_cntr[0]**2)
    deltarr1.append(mean_cntr[0])
    sbarr1.append(meanv0_cntr[0])
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2darr1.append(errv)
    errdeltarr1.append(mean_cntr[1][1]-mean_cntr[0])
    errsbarr1.append(meanv0_cntr[1][1]-meanv0_cntr[0])
# -

# +
# Sizer strategy: same binned statistics
data3=pd.read_csv("./data/dataDSMsizer.csv")
data3=data3[data3.time>5*doubling_time]
quantnumber=5
pvadd2=data3
CV2darr2=[]
deltarr2=[]
sbarr2=[]
errcv2darr2=[]
errdeltarr2=[]
errsbarr2=[]
for i in range(quantnumber):
    lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber)
    hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber)
    quanta1=pvadd2[pvadd2.S_b>lperv0]
    quanta2=quanta1[quanta1.S_b<hperv0]
    mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95)
    meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95)
    CV2darr2.append(var_cntr[0]/mean_cntr[0]**2)
    deltarr2.append(mean_cntr[0])
    sbarr2.append(meanv0_cntr[0])
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2darr2.append(errv)
    errdeltarr2.append(mean_cntr[1][1]-mean_cntr[0])
    errsbarr2.append(meanv0_cntr[1][1]-meanv0_cntr[0])
# -

# +
# Timer strategy: same binned statistics
data4=pd.read_csv("./data/dataDSMtimer.csv")
data4=data4[data4.time>5*doubling_time]
quantnumber=5
pvadd2=data4
CV2darr3=[]
deltarr3=[]
sbarr3=[]
errcv2darr3=[]
errdeltarr3=[]
errsbarr3=[]
for i in range(quantnumber):
    lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber)
    hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber)
    quanta1=pvadd2[pvadd2.S_b>lperv0]
    quanta2=quanta1[quanta1.S_b<hperv0]
    mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95)
    meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95)
    CV2darr3.append(var_cntr[0]/mean_cntr[0]**2)
    deltarr3.append(mean_cntr[0])
    sbarr3.append(meanv0_cntr[0])
    errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
    errcv2darr3.append(errv)
    errdeltarr3.append(mean_cntr[1][1]-mean_cntr[0])
    errsbarr3.append(meanv0_cntr[1][1]-meanv0_cntr[0])
print(np.mean(pvadd2.S_b))
print(np.mean(pvadd2.S_d-pvadd2.S_b))
# -

# +
# Theoretical added-size statistics vs. birth size for the three strategies
sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps,lamb=0.5)
sbar=np.linspace(0.5,1.5,100)*mean_size
cv2tim=[]
delttim=[]
for i in sbar:
    Added,cv2=sim.SdStat(i)
    cv2tim.append(cv2)
    delttim.append(Added)

sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps)
sbar=np.linspace(0.5,1.5,100)*mean_size
cv2ad=[]
deltad=[]
for i in sbar:
    Added,cv2=sim.SdStat(i)
    cv2ad.append(cv2)
    deltad.append(Added)

sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps,lamb=2)
sbar=np.linspace(0.5,1.5,100)*mean_size
cv2sz=[]
deltsz=[]
for i in sbar:
    Added,cv2=sim.SdStat(i)
    cv2sz.append(cv2)
    deltsz.append(Added)
# -

# +
# Division-strategy comparison: simulated points (errorbars) vs. theory (lines)
fig, ax = plt.subplots(1,2, figsize=(12,4))

#ax[0].scatter(data2.S_b/np.mean(data2.S_b),(data2.S_d-data2.S_b)/np.mean(data2.S_b),s=2)
#ax[0].scatter(data3.S_b/np.mean(data3.S_b),(data2.S_d-data3.S_b)/np.mean(data3.S_b),s=2)
#ax[0].scatter(data4.S_b/np.mean(data4.S_b),(data4.S_d-data2.S_b)/np.mean(data4.S_b),s=2)

ax[0].errorbar(np.array(sbarr1),np.array(deltarr1),xerr=errsbarr1,yerr=errdeltarr1, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='k')
ax[1].errorbar(np.array(sbarr1),CV2darr1,xerr=errsbarr1,yerr=errcv2darr1, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='k')
ax[0].errorbar(np.array(sbarr2),np.array(deltarr2),xerr=errsbarr2,yerr=errdeltarr2, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='r')
ax[1].errorbar(np.array(sbarr2),CV2darr2,xerr=errsbarr2,yerr=errcv2darr2, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='r')
ax[0].errorbar(np.array(sbarr3),np.array(deltarr3),xerr=errsbarr3,yerr=errdeltarr3, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='g')
ax[1].errorbar(np.array(sbarr3),CV2darr3,xerr=errsbarr3,yerr=errcv2darr3, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='g')
ax[1].set_ylim([0,0.3])
ax[0].set_xlabel("$s_b/\overline{s_b}$",size=20)
ax[1].set_xlabel("$s_b/\overline{s_b}$",size=20)
ax[0].set_ylabel("$\Delta/\overline{s_b}$",size=15)
ax[1].set_ylabel("$C_V^2(\Delta)$",size=15)
#ax[0].set_xlim([0.5,1.5])
for l in [0,1]:
    #ax[l].set_xlim([0.2,2])
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
ax[0].plot(np.array(sbar)/mean_size, np.array(delttim)/mean_size, lw=2,c='g',label="$\lambda=0.5$")
ax[1].plot(np.array(sbar)/mean_size, cv2tim, lw=2,c='g')
ax[0].plot(np.array(sbar)/mean_size, np.array(deltad)/mean_size, lw=2,c='k',label="$\lambda=1$")
ax[1].plot(np.array(sbar)/mean_size, cv2ad, lw=2,c='k')
ax[0].plot(np.array(sbar)/mean_size, np.array(deltsz)/mean_size, lw=2,c='r',label="$\lambda=2$")
ax[1].plot(np.array(sbar)/mean_size, cv2sz, lw=2,c='r')
ax[0].set_ylim(0.75,1.35)
ax[1].set_ylim(0.03,0.17)
ax[0].text(0.55,1.27,"$\lambda = 2$",rotation=-35,fontsize=10)
ax[0].text(0.55,1.01,"$\lambda = 1$",fontsize=10)
ax[0].text(0.55,0.87,"$\lambda = 0.5$",rotation=35,fontsize=10)
ax[1].text(0.5,0.05,"$\lambda = 2$",rotation=15,fontsize=10)
ax[1].text(0.5,0.11,"$\lambda = 1$",fontsize=10)
ax[1].text(0.5,0.155,"$\lambda = 0.5$",rotation=-10,fontsize=10)
#ax[0].set_ylim([0.7,1.5])
plt.savefig('./figures/div_strategy.eps',bbox_inches='tight')
plt.savefig('./figures/div_strategy.svg',bbox_inches='tight')
plt.savefig('./figures/div_strategy.png',bbox_inches='tight')
# -

# +
# Combined 2x4 summary figure of the size-dynamics scenarios
fig, ax = plt.subplots(2,4, figsize=(16,5))

data=pd.read_csv("./data/dataCRM1.csv")
tt=data.time
del data['time']
for column in data.columns[0:10]:
    ax[0,0].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
data=pd.read_csv("./data/dataCRM2.csv")
tt=data.time
del data['time']
for column in data.columns[0:10]:
    ax[0,1].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')
data=pd.read_csv("./data/dataCRM3.csv")
tt=data.time
del data['time']
for column in data.columns[0:10]:
    ax[0,2].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_')

ax[0,0].plot(np.array(timearray1)/doubling_time,mnszarray1,lw=2)
ax[0,0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray1)-np.array(errmnszarray1),np.array(mnszarray1)
                     +np.array(errmnszarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,0].plot(np.array(timearray1)/doubling_time,cvszarray1,lw=2)
ax[1,0].fill_between(np.array(timearray1)/doubling_time,np.array(cvszarray1)-np.array(errcv2szarray1),np.array(cvszarray1)
                     +np.array(errcv2szarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,1].plot(np.array(timearray2)/doubling_time,mnszarray2,lw=2)
ax[0,1].fill_between(np.array(timearray2)/doubling_time,np.array(mnszarray2)-np.array(errmnszarray2),np.array(mnszarray2)
                     +np.array(errmnszarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,1].plot(np.array(timearray2)/doubling_time,cvszarray2,lw=2)
ax[1,1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2szarray2),np.array(cvszarray2)
                     +np.array(errcv2szarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0,2].plot(np.array(timearray3)/doubling_time,mnszarray3,lw=2)
ax[0,2].fill_between(np.array(timearray3)/doubling_time,np.array(mnszarray3)-np.array(errmnszarray3),np.array(mnszarray3)
                     +np.array(errmnszarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1,2].plot(np.array(timearray3)/doubling_time,cvszarray3,lw=2)
ax[1,2].fill_between(np.array(timearray3)/doubling_time,np.array(cvszarray3)-np.array(errcv2szarray3),np.array(cvszarray3)
                     +np.array(errcv2szarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)

#ax[0].set_ylim([1,1.7])
#ax[1].set_ylim([0,0.15])
ax[0,0].set_title("Stochastic division",fontsize=15)
ax[0,1].set_title("Finite Initial Distribution",fontsize=15)
ax[0,2].set_title("Noisy Splitting",fontsize=15)

data=pd.read_csv("./data/dataFSP.csv")
ax[0,1].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,1].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')
data=pd.read_csv("./data/dataFSP0.csv")
ax[0,0].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric")
ax[1,0].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g')

ax[0,0].legend(fontsize=10)
ax[0,1].legend(fontsize=10) ax[0,2].legend(fontsize=10) #ax[0,1].legend(fontsize=10) ax[0,3].errorbar(np.array(sbarr1),np.array(deltarr1),xerr=errsbarr1,yerr=errdeltarr1, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='k') ax[1,3].errorbar(np.array(sbarr1),CV2darr1,xerr=errsbarr1,yerr=errcv2darr1, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='k') ax[0,3].errorbar(np.array(sbarr2),np.array(deltarr2),xerr=errsbarr2,yerr=errdeltarr2, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='r') ax[1,3].errorbar(np.array(sbarr2),CV2darr2,xerr=errsbarr2,yerr=errcv2darr2, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='r') ax[0,3].errorbar(np.array(sbarr3),np.array(deltarr3),xerr=errsbarr3,yerr=errdeltarr3, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='g') ax[1,3].errorbar(np.array(sbarr3),CV2darr3,xerr=errsbarr3,yerr=errcv2darr3, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='g') ax[0,3].plot(np.array(sbar)/mean_size, np.array(delttim)/mean_size, lw=2,c='g',label="$\lambda=0.5$") ax[1,3].plot(np.array(sbar)/mean_size, cv2tim, lw=2,c='g') ax[0,3].plot(np.array(sbar)/mean_size, np.array(deltad)/mean_size, lw=2,c='k',label="$\lambda=1$") ax[1,3].plot(np.array(sbar)/mean_size, cv2ad, lw=2,c='k') ax[0,3].plot(np.array(sbar)/mean_size, np.array(deltsz)/mean_size, lw=2,c='r',label="$\lambda=2$") ax[1,3].plot(np.array(sbar)/mean_size, cv2sz, lw=2,c='r') ax[0,0].set_ylabel(r"$\langle s\rangle$ $(fl)$",size=15) ax[1,0].set_ylabel("$C_V^2(s)$",size=15) ax[1,0].set_xlabel(r"$t/\tau$",size=15) ax[1,1].set_xlabel(r"$t/\tau$",size=15) ax[1,2].set_xlabel(r"$t/\tau$",size=15) ax[1,3].set_xlabel(r"$s_b/\overline{s_b}$",size=15) #ax[0].set_ylim([1,1.7]) #ax[1].set_ylim([0,0.15]) for l in [0,1]: for m in [0,1,2,3]: ax[l,m].grid() ax[l,m].tick_params(axis='x', labelsize=12) ax[l,m].tick_params(axis='y', labelsize=12) for axis in ['bottom','left']: ax[l,m].spines[axis].set_linewidth(2) ax[l,m].tick_params(axis='both', width=2,length=6) for 
axis in ['top','right']: ax[l,m].spines[axis].set_linewidth(0) ax[l,m].tick_params(axis='both', width=0,length=6) if m !=3: ax[l,m].set_xlim([0,6]) taqui=np.arange(0,7,step=1) ax[l,m].set_xticks(np.array(taqui)) taqui=np.arange(0,0.13,step=0.02) ax[1,m].set_yticks(np.array(taqui)) taqui=np.arange(0.5,3.5,step=0.5) ax[0,m].set_yticks(np.array(taqui)) ax[1,m].set_ylim([0,0.13]) ax[0,m].set_ylim([0.5,2.9]) plt.subplots_adjust(hspace=0.3,wspace=0.35) if not os.path.exists('./figures'): os.makedirs('./figures') ax[0,0].set_title("Stochastic division",fontsize=15) ax[0,1].set_title("Finite Initial Distribution",fontsize=15) ax[0,2].set_title("Noisy Splitting",fontsize=15) ax[0,3].set_title("Division Strategy",fontsize=15) #ax[0,3].legend(fontsize = 10) ax[0,3].set_ylim(0.75,1.35) ax[1,3].set_ylim(0.03,0.17) ax[0,3].text(0.5,1.31,"$\lambda = 2$",rotation=-35,fontsize=10) ax[0,3].text(0.5,1.01,"$\lambda = 1$",fontsize=10) ax[0,3].text(0.5,0.9,"$\lambda = 0.5$",rotation=35,fontsize=10) ax[1,3].text(0.5,0.055,"$\lambda = 2$",rotation=12,fontsize=10) ax[1,3].text(0.5,0.11,"$\lambda = 1$",fontsize=10) ax[1,3].text(0.5,0.16,"$\lambda = 0.5$",rotation=-10,fontsize=10) ax[0,3].set_ylabel(r"$\Delta/\overline{s_b}$",size=15) ax[1,3].set_ylabel(r"$C_v^2(\Delta)$",size=15) #ax[0].legend(fontsize=15) #ax[1].plot(time4,np.array(allvarsz4),c='r') #ax[0].plot(time4,mean_size*np.array(allmeansz4),c='r',label="Numeric") ax[0,0].text(-1,3,"a)",fontsize=15) ax[0,1].text(-1,3.,"b)",fontsize=15) ax[0,2].text(-1,3.,"c)",fontsize=15) ax[1,0].text(-1,0.13,"e)",fontsize=15) ax[1,1].text(-1,0.13,"f)",fontsize=15) ax[1,2].text(-1,0.13,"g)",fontsize=15) ax[0,3].text(0.25,1.35,"d)",fontsize=15) ax[1,3].text(0.25,0.17,"h)",fontsize=15) plt.savefig('./figures/size_statistics_comparison.svg',bbox_inches='tight') plt.savefig('./figures/size_statistics_comparison.png',bbox_inches='tight') plt.savefig('./figures/size_statistics_comparison.eps',bbox_inches='tight') # + data=pd.read_csv("./data/dataCRM1.csv") 
# --- Size autocorrelation analysis -------------------------------------------
# Each lineage file written by Simulator.szdyn has a 'time' column plus one
# size column per simulated cell, sampled on a uniform time grid.  For each
# lag tau (in sample steps) we pool sizes at t and t+tau over all cells and
# compute their Pearson correlation.

def _size_autocorrelation(data, taumax=50):
    """Return (lags, correlations) for the cell-size series in ``data``.

    Parameters
    ----------
    data : pandas.DataFrame
        A 'time' column plus one size column per cell (Simulator.szdyn output).
    taumax : int, optional
        Number of lags (in sample steps) to evaluate.

    Returns
    -------
    lags : range
        ``range(taumax)``, the evaluated lags.
    corr : list of float
        Pearson correlation between sizes at t and t + tau, pooled over all
        cells, one value per lag.
    """
    tarr = data.time.tolist()
    corr = []
    for tau in range(taumax):
        xx = []  # sizes at time t
        yy = []  # sizes at time t + tau
        for i in range(len(tarr) - tau):
            # .drop(columns=...) instead of `del df['time']` on a filtered
            # slice, which is chained assignment on a copy (pandas warning).
            row_now = data[data.time == tarr[i]].drop(columns='time')
            row_lag = data[data.time == tarr[i + tau]].drop(columns='time')
            xx.extend(row_now.iloc[0].tolist())
            yy.extend(row_lag.iloc[0].tolist())
        corr.append(np.corrcoef(xx, yy)[0][1])
    return range(taumax), corr


taumax = 50
# `data` here is dataCRM1.csv, loaded in the previous cell.
tauarr1, corarr1 = _size_autocorrelation(data, taumax)
# -

data = pd.read_csv("./data/dataCRM2.csv")
tauarr2, corarr2 = _size_autocorrelation(data, taumax)


def _simulate_and_correlate(steps, name, **noise):
    """Run one Simulator realization and return its size autocorrelation.

    Runs ``ncells`` cells with ``steps`` division steps (initial sizes drawn
    from a gamma distribution with CV^2 = 0.02), writes the size dynamics to
    ``name``, prints the wall-clock time, and returns
    ``_size_autocorrelation`` of the written file.

    ``noise`` forwards optional Simulator kwargs (e.g. CV2div, CV2gr).
    """
    start = time.time()
    CV2sz = 0.02
    v0 = mean_size * np.random.gamma(shape=1 / CV2sz, scale=CV2sz, size=ncells)
    sim = Simulator(ncells=ncells, gr=gr, sb=mean_size, steps=steps,
                    V0array=v0, **noise)
    sim.szdyn(tmax=tmax, sample_time=0.1 * doubling_time, nameCRM=name)
    # Plain int(): np.int was deprecated in NumPy 1.20 and removed in 1.24,
    # so the original np.int(...) crashes on modern NumPy.
    print('It took', int(time.time() - start), 'seconds.')
    return _size_autocorrelation(pd.read_csv(name))


tauarr10, corarr10 = _simulate_and_correlate(10, "./data/dataCRM10stp.csv")
tauarr50, corarr50 = _simulate_and_correlate(50, "./data/dataCRM50stp.csv")
tauarr1stp, corarr1stp = _simulate_and_correlate(1, "./data/dataCRM1stp.csv")
tauarr50ns, corarr50ns = _simulate_and_correlate(
    50, "./data/dataCRM50stpns.csv", CV2div=0.005, CV2gr=0.02)

# +
fig, ax = plt.subplots(1, 1, figsize=(6, 4))

# Lags are converted to units of the doubling time (samples every 0.1*tau,
# 18 samples per doubling in these runs).
plt.plot(1.8 * np.array(tauarr1stp) / 18, corarr1stp, lw=3, ls="--", label="1 steps")
# plt.plot(1.8*np.array(tauarr2)/18, corarr2, lw=3, ls=":", label="Finite initial variance")
plt.plot(1.8 * np.array(tauarr10) / 18, corarr10, lw=3, label="10 steps", ls=":")
plt.plot(1.8 * np.array(tauarr50) / 18, corarr50, lw=3, label="50 steps", ls="-.")
plt.plot(1.8 * np.array(tauarr50ns) / 18, corarr50ns, lw=3, label="50 steps + Noise")
plt.grid()
ax.set_ylabel(r"$\rho(t')$", fontsize=15)
ax.set_xlabel(r"$t'/\tau$", fontsize=15)
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=15)
# Thick bottom/left spines, hidden top/right spines.
for axis in ['bottom', 'left']:
    ax.spines[axis].set_linewidth(2)
    ax.tick_params(axis='both', width=2, length=6)
for axis in ['top', 'right']:
    ax.spines[axis].set_linewidth(0)
    ax.tick_params(axis='both', width=0, length=6)
plt.legend(fontsize=15)
# x = np.linspace(0, 5, 30)
# plt.plot(x, np.exp(-x*np.log(2)))
plt.savefig('./figures/size_autocorrelation.svg', bbox_inches='tight')
plt.savefig('./figures/size_autocorrelation.png', bbox_inches='tight')
plt.savefig('./figures/size_autocorrelation.eps', bbox_inches='tight')
examples/AdvancedSizeStatistics/AdvancedSizeStatistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="KKCS48bkMj8K" # # Comandos básicos de bash en Jupyter # # > Bloque con sangría # # # + colab={"base_uri": "https://localhost:8080/"} id="85IPKLkgHNPD" outputId="eabba630-2a33-47f0-88fc-ec8387748fff" # !ls # + colab={"base_uri": "https://localhost:8080/"} id="GKr0b9z_MDV2" outputId="0e0b589f-7f96-4931-f521-db2c8192bd4c" language="sh" # ls # + colab={"base_uri": "https://localhost:8080/"} id="OCtzk46WMUE2" outputId="03a92d97-38b3-4c4a-831f-7f5fa9e2f650" # !ls sample_data/ # + colab={"base_uri": "https://localhost:8080/"} id="sqv15CN2M2eX" outputId="4fcce225-0c56-4ad1-d6fa-e25b83786979" # !python --version # + colab={"base_uri": "https://localhost:8080/"} id="OM8c_qo7M_Xb" outputId="7d2d5316-64f3-4e8e-c4af-88b5de656868" import this # + [markdown] id="fC-G0LUjPFIk" # ## Comando básicos de Python, Tipos de datos y operaciones # + [markdown] id="Q2Zb93A-Paoi" # ## Integers # + colab={"base_uri": "https://localhost:8080/"} id="JVq2fvkSPchI" outputId="d242817c-a01c-4397-8431-ceaf647fa385" # declaramos una variable n n = 3 type(n) # + [markdown] id="7N6cNhAFPoLJ" # ## Floats # + colab={"base_uri": "https://localhost:8080/"} id="n22-O0ycPpth" outputId="fcd585b0-b391-4bba-e182-4326defed51c" # si evaluamos directamente el tipo de dato o evaluación al "vuelo" type(0.1) # + colab={"base_uri": "https://localhost:8080/"} id="1-fpkejnPyTA" outputId="7f1a89e2-0d23-4b8c-8ce2-77b0a2140f0a" n # a diferencia de llamar nuevamente una variable # + colab={"base_uri": "https://localhost:8080/"} id="xI02mSO4P6UC" outputId="b1c43757-0c46-4fab-f493-071a9d1c39a8" x = .5 type(x) # + colab={"base_uri": "https://localhost:8080/"} id="kkFsRsbmP_Qp" outputId="af2f35e7-c173-4c85-8fd8-bcee419f7bdc" x # + colab={"base_uri": "https://localhost:8080/"} id="0UlpnPhXPmbl" 
outputId="8626fd29-908e-4ecb-b2c7-7d5b09954a16" n + x # + colab={"base_uri": "https://localhost:8080/"} id="tVDZ8hDrQOrf" outputId="7d0915e6-7140-411c-a061-d3ec867e20c6" type(n + x) # + colab={"base_uri": "https://localhost:8080/"} id="6g4c7ynXPVTP" outputId="ee6287bb-61f1-401b-cab5-b2d185dd8db7" suma = n + x print(suma) # + colab={"base_uri": "https://localhost:8080/"} id="y-bIfx8rOY7v" outputId="ea711c82-d18c-4733-c748-95ab32080ddc" sum = x + n sum # + id="NHQh7OwwQmqU" dif = x - n # + colab={"base_uri": "https://localhost:8080/"} id="GPNlk17vRBRq" outputId="0afa2887-5b9d-4a22-f39b-e760db27c301" dif # + colab={"base_uri": "https://localhost:8080/"} id="M96FQFBARCR8" outputId="307e9e45-e8d8-436f-ad72-4ef810278c53" prod = x * n prod # + colab={"base_uri": "https://localhost:8080/"} id="4UyA8HRuRFKk" outputId="e24b2e78-ba51-4c65-e44c-b1f2908ef62f" potencia = x ** 3 potencia # + colab={"base_uri": "https://localhost:8080/"} id="muChnwpxRLSm" outputId="a62a7908-1fe1-448b-f083-be71fdaff063" dividir = 22 / 5 dividir # + colab={"base_uri": "https://localhost:8080/"} id="t91YTH1pRQQg" outputId="97fc9ba9-b626-459b-f27b-762d6e652e99" 22 // 5 # + colab={"base_uri": "https://localhost:8080/"} id="G4s7BQLNRUKG" outputId="dfb09c61-8dc2-4d25-8744-fba8ba49abce" 20 % 5 # + colab={"base_uri": "https://localhost:8080/"} id="jyjIYjCLRWJ1" outputId="a76db230-7cef-4189-f593-a4ca6d9bdd34" abs(-5.5) # + colab={"base_uri": "https://localhost:8080/"} id="2_Pf2dTzRo2M" outputId="cc6a2264-aa0a-4505-e103-8a254de89267" int(5.2) # + colab={"base_uri": "https://localhost:8080/"} id="vdB4qlvSRvAS" outputId="9b8c3bd6-e3f6-475d-8131-e28f237cd2d4" float(.3) # + colab={"base_uri": "https://localhost:8080/"} id="cF1-OHSeRyG4" outputId="f9600632-080e-4152-a540-bbecc96b27d7" float(3.0) # + colab={"base_uri": "https://localhost:8080/"} id="MQ_9qyG4R0cH" outputId="d9966fb9-d767-4355-b4e3-0aac3dac0696" int(3.0) # + colab={"base_uri": "https://localhost:8080/"} id="13RDmLfnROXL" 
outputId="9ced0564-d8ab-4ee3-e308-f8f0d5b14e46" char = "3.5" type(char) # + colab={"base_uri": "https://localhost:8080/"} id="OfCk-eblRKFn" outputId="a64b22e3-0efb-4c24-f67b-94e79592e022" float(char) # podemos convertir un numero en formato string a float o int # + colab={"base_uri": "https://localhost:8080/", "height": 174} id="YE8wm9mMQhG0" outputId="5c51d15e-0c85-4ad7-cbd0-c7d8383a1368" float("Hola") # no podemos convertir un texto a número # + colab={"base_uri": "https://localhost:8080/"} id="yN8gPK5mSrMW" outputId="84bf9329-95ba-428e-e4f6-5626c799e2f7" # Para convertir una cadena de texto (decimal) a un entero int(float(char)) # podemos realizar una operación mixta de dos funciones # + colab={"base_uri": "https://localhost:8080/"} id="r8uXKndqS_Gf" outputId="5f7edbde-4d88-4afb-82f5-648ae8eb3b39" # también podemos convertir a float la cadena de texto y luego convertirla a entero con redondedo round(float(char)) # + colab={"base_uri": "https://localhost:8080/"} id="lwewXK7yTYLs" outputId="ce8d1fda-6172-47b0-99da-e41ecb673fbb" round(float(char), 3) # + id="neGdboc0T3qB" # Para utilizar otros métodos para poder realizar un redondeo a la alza o a la baja import math # importamos una librería con el comando import # + [markdown] id="LafU71CjUIaD" # Cuando importamos una librería, generalemente instalará todas sus dependencias y funciones. Lo que ocurre es que consumirá mucha memoría. Lo idea sería importar solo los métodos (funciones) que necesitemos. # + id="kT3dsrY-Ucf8" from math import floor, ceil # + colab={"base_uri": "https://localhost:8080/"} id="xh8rYye7Uuwg" outputId="ff5971f9-5ed4-482c-b1a6-e86b42e59eb1" floor(6.24) # redondeo al suelo. También podemos utilizar math.floor # + colab={"base_uri": "https://localhost:8080/"} id="PFPOL-uFU0fa" outputId="f6f4fe07-cebf-4e87-edb7-8e3e579fec1f" ceil(6.24) # redondeo al techo. 
También podemos utilizar math.ceil # + [markdown] id="tf0ZqmdPVWJa" # ## Importamos librerías # + id="Kzjt2ZaKVXws" import numpy as np import seaborn as sns #import marco COMENTAMOS LA LINEA DE CÓDIGO QUE NO NECESITEMOS # + id="AFF-n3rWXizN" # declaramos la variable salarios salarios = 10 ** np.random.uniform(2, 5, 1000) # + colab={"base_uri": "https://localhost:8080/"} id="R8hVxe3dWtVU" outputId="95a3bfb7-2365-4186-c597-05057f0bc5b6" salarios # + colab={"base_uri": "https://localhost:8080/"} id="A58vdFA-WkPw" outputId="abb2f898-d0d3-4921-f5d0-21ecf45da9e0" salarios[:10] # esta operación limita hasta 10 valores de mi array que tenemos # + colab={"base_uri": "https://localhost:8080/"} id="amy8BigqYq5v" outputId="c700eafd-07e8-4881-ecfd-f5f8f318d14f" salarios[:10].astype(int) # + colab={"base_uri": "https://localhost:8080/"} id="AmtpP_7WZkxq" outputId="d30ff9ed-f11e-403c-f058-a1fa5221fa57" salarios[:5].astype(str) # + colab={"base_uri": "https://localhost:8080/"} id="sT1mTyNhZsGc" outputId="3f6efec8-cda4-48f0-9513-8b1bf51b0996" type(salarios[0:5].astype(str)) # indica el tipo de objeto # + colab={"base_uri": "https://localhost:8080/"} id="9pD3lh0naB7_" outputId="fde109d3-9757-4def-ab68-3a796a235c24" salarios.shape # indica la forma o dimensión del objeto # + colab={"base_uri": "https://localhost:8080/", "height": 341} id="BvNZOkTrUHr-" outputId="656913d3-8fc1-49e3-e450-4befb5936815" sns.distplot(salarios) # + colab={"base_uri": "https://localhost:8080/", "height": 341} id="eceql__3UBg1" outputId="a46eb700-fde5-4eeb-bc5c-5b62b4995706" sns.distplot(np.log10(salarios)) # + [markdown] id="tqpbJ0ekhVXW" # ## Caneda de Texto o String # + id="Qbnlp3OBhX_4" nombre = "Marco" # + id="qA2D9jt0hmtn" nombre_2 = 'Marco' # + id="dVxrllxchyOJ" edad = 40 # + colab={"base_uri": "https://localhost:8080/"} id="wfrPT12Yh0Gx" outputId="5312c366-6ffc-4780-a825-fd41e5a879fb" print("hola") # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="AOj-PS-1h2BZ" 
outputId="6178659d-c7d7-47ff-8d5a-6cd795b78226" nombre # + colab={"base_uri": "https://localhost:8080/"} id="R5TPo4rQh87Q" outputId="735443e0-4462-4abc-9bc4-bbce31f9edb6" print("me llamo", nombre, "y tengo", edad, "años.") # + colab={"base_uri": "https://localhost:8080/", "height": 174} id="53K0N4FgiIB5" outputId="851285af-d4df-4471-bf17-7c264048db2c" print("Me llamo " + nombre + " y tengo " + edad + " años.") # + [markdown] id="FMSLCO2cilqU" # Si utilizamos el primer método no hace falta convertir a número o str la sentencia a cadena de texto. # + colab={"base_uri": "https://localhost:8080/"} id="ej-vchO_bBcL" outputId="d1ce71ca-33b5-494f-a676-2e4e0da9dbe6" print("Me llamo " + nombre + " y tengo " + str(edad) + " años.") # + colab={"base_uri": "https://localhost:8080/"} id="SfSOSuP2i6zk" outputId="af3aa94f-890d-4b15-a2ab-73379eb0049e" print(f"Me llamo {nombre} y tengo {edad} años.") # + colab={"base_uri": "https://localhost:8080/"} id="gy0FWHQ_jPi6" outputId="52d29443-69e8-4da0-9e5e-1ce765a34387" print("Me llamo {0} y tengo {1} años".format(nombre, edad)) # + colab={"base_uri": "https://localhost:8080/"} id="u7Rua-c-jvXm" outputId="9526d7eb-97eb-493a-a254-de91c2fdd4fb" print("Me llamo %s y tengo %s años" % (nombre, edad)) # + id="sp-zrSQ6lBTQ" frase = "Estoy animado esta mañana" # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="NczB7D_GlIem" outputId="c78945bb-f61c-4381-dcdf-c6d127ab6b87" frase # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="U7-AHK5llNLF" outputId="5b938e1f-9d72-41c7-8a9d-1fc3599eb856" frase + nombre # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="Ht2NzpQUlW8k" outputId="a9b7cdf0-7670-4233-8137-a301e3696c2a" frase + " " + nombre # + colab={"base_uri": "https://localhost:8080/"} id="lj-S1DXtlamR" outputId="4a33b28b-5910-40c4-e126-e70e58ce3c97" frase, nombre # no devuelve el resultado esperado como el anterior. 
# + colab={"base_uri": "https://localhost:8080/"} id="_BXv7aNllgbd" outputId="a24e87e5-be4c-49dc-9191-22297c8c91c3" x = frase, nombre type(x) # una tupla es otro tipo de objeto, tipo lista o array pero tiene una particularidad.... # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="jeoUUHU9l0Fi" outputId="76da1b97-64c9-4d28-8b2e-b4ef2f455521" nombre * 5 # + colab={"base_uri": "https://localhost:8080/", "height": 193} id="ox9tPZWzl3hN" outputId="e5a28d55-1be4-4ae8-e14e-4e79b2b6673d" nombre * 1.5 # sí es posible realizar operaciones con enteros pero no con float # + colab={"base_uri": "https://localhost:8080/"} id="VrTI76Hql-rJ" outputId="d137837e-2eed-48d6-b22a-204c75bd9b41" # Podemos realizar check "a" in nombre # + colab={"base_uri": "https://localhost:8080/"} id="sM1Nh4krmFTf" outputId="e69ea3cb-ad62-4f16-a421-6f1c7f160012" "A" in nombre # + colab={"base_uri": "https://localhost:8080/"} id="bnS84I3mmHen" outputId="b501d3b9-33dd-4698-d771-a8ab763d5da8" "co" in nombre # + colab={"base_uri": "https://localhost:8080/"} id="gb0S-X0OmMBB" outputId="afbdbc49-5ef1-4a94-c36a-b2277d61f204" len(nombre) # + colab={"base_uri": "https://localhost:8080/"} id="v02frIHymdFs" outputId="80cfd20b-adba-4567-dbd7-bd36970e04b2" len(frase) # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="PSiipGBPmexm" outputId="ee89920b-5baf-4df4-942e-11ac6cefb226" nombre.upper() # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="tjKLJekQmhmf" outputId="c2e7f7a7-1890-4304-e250-8439b9c76d42" nombre.lower() # + colab={"base_uri": "https://localhost:8080/"} id="dT2xdpW-mjJg" outputId="b80fb076-cd2f-4b65-dae1-c5ea9dc61927" nombre.endswith('o') # + colab={"base_uri": "https://localhost:8080/"} id="6JydD_2Xmsv4" outputId="61fe1157-eba1-4457-e765-13be7809efc4" nombre.isupper() # + colab={"base_uri": "https://localhost:8080/"} id="q8uiWfO4mzlt" outputId="ce7c7fc7-4220-45b2-d8a5-8d773c23a76b" nombre.isdigit() # + colab={"base_uri": "https://localhost:8080/", 
"height": 174} id="zJVggvd6m1wI" outputId="894f3468-c7a2-44fc-8459-dbce8255792d" edad.isdigit() # + id="9XTWIXtfm3re" # + colab={"base_uri": "https://localhost:8080/", "height": 174} id="fbMtLLdtm8Jn" outputId="546a60f7-464c-41dc-f68a-9d493a760fac" edad.isint() # + id="box7_hDvm-aS" # + [markdown] id="i4Tkasj_nE4O" # Para comprobar los diferentes métodos que podemos aplicar a esta cadena de texto, utilizaremos **dir()** # + colab={"base_uri": "https://localhost:8080/"} id="inYm01OZnMHN" outputId="1d567b20-76cf-45b9-8383-2a83dd80b937" dir(nombre) # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="xxhhu4eamqHy" outputId="27993a09-3b71-414a-8c05-6908d0f73084" nombre.capitalize() # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="PkwJ_oE8nf0m" outputId="b10329c5-c42d-4495-8ba2-38a43bfc5600" nombre # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="kiqfOrY1nj3P" outputId="b9571ba5-56f9-40ba-fe06-e90b4a11a879" nombre.upper() # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="UMuBntU9nl6u" outputId="31cf655b-2501-4082-b191-1cefe919f323" nombre # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="Zt33D33bnuzW" outputId="62cc63c2-e62a-4dfb-930f-e08bf921538c" nombre = nombre.upper() # para poder transformar el objeto instanciamos a la misma variable nombre # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="45kcSBRTnzyO" outputId="d1fd0a4c-662c-4fff-acb2-4dc4bc5e9969" nombre # + colab={"base_uri": "https://localhost:8080/"} id="ReaHT9JsoDIt" outputId="b73a7805-8195-4a8d-e63d-c8694fc43117" [method for method in dir(nombre) if not method.startswith("__")] # + id="3rCGeBGgoPeT" # esta opción nos permite visualizar la ayuda # nombre.*low*? # + id="RdpEJX8kooBX" # nombre.*ac*? 
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="ee5I8pHuohZj" outputId="b832ccda-729d-4709-c7f3-6f32058e09cc" nombre.replace("R", "S") # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="I28io73LpU2b" outputId="9b3c2ebd-865f-4806-9938-27f25441c1dd" # SLICING frase # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="Gk0ltWjepXeB" outputId="3e2335a8-5a48-4941-890f-27e5b50d1f32" frase[3:10] # start posición 1 o índice CERO hasta índice DIEZ (-1) # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="xvM69hD2qkOB" outputId="6b1e87c0-ad82-4c0a-fd75-a47825d06574" frase[:5] # desde el índice 0 hasta el 5 -1 # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="l7CsN9qDql9M" outputId="10f0eeab-bb08-4124-ad0c-b515ba092e7c" frase[12:] # desde el índice 12 hasta el final # + colab={"base_uri": "https://localhost:8080/"} id="JEvCH1p-qrxx" outputId="5c34b2e9-9c5a-4a34-8f57-7b11aad8fa90" palabras = frase.split(" ") palabras # + colab={"base_uri": "https://localhost:8080/"} id="qDiXv8MZq-Tj" outputId="d1d1292b-8b24-4081-d8b5-becebabbac1e" type(frase) # + colab={"base_uri": "https://localhost:8080/"} id="DEsjL_XSq__s" outputId="93f3255e-da27-44be-8dde-878a46945954" type(palabras) # + colab={"base_uri": "https://localhost:8080/"} id="kcB4OcE_rBh4" outputId="9c2fd5e5-17e3-40f8-f886-d9ad6ab2b849" len(palabras) # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="wZNP-YslrHLp" outputId="d2d05c93-7434-4049-9b24-83e8a397a54e" palabras[1] # + id="y8n8J2TdriXu" chunks = frase.split("i") # + colab={"base_uri": "https://localhost:8080/"} id="ZEdHV6qnryBo" outputId="90494da8-d6fd-4e7d-94e9-8c6c67b448d5" chunks # + [markdown] id="dTHG_H1ysTYH" # ## Booleans # + colab={"base_uri": "https://localhost:8080/"} id="eFuEJgVhsVCW" outputId="a51cffb5-9fa0-4f78-d461-07917d35acf1" True # + colab={"base_uri": "https://localhost:8080/"} id="MoaU13nLsW6P" outputId="d2f57508-4c7b-4cea-eb6c-a557dd8d39ec" False # + 
colab={"base_uri": "https://localhost:8080/"} id="fVO2YC_RsXX6" outputId="770ae2c5-993c-4ff0-cf12-c05afe68aede" type(True) # + id="SLsWS-04sa-_" tiene_entrada = False # + colab={"base_uri": "https://localhost:8080/"} id="n70ZbBogsh6e" outputId="172068be-5006-4d27-8b5d-4f60e99665a9" tiene_entrada # + colab={"base_uri": "https://localhost:8080/"} id="opa7ocl3sjAq" outputId="dceb8749-d17c-4a4d-f872-956e8b81f193" not tiene_entrada # + colab={"base_uri": "https://localhost:8080/"} id="qneNB1O6slkz" outputId="63de55c0-2e12-4bc6-ebc8-726cf8a0e7e2" if tiene_entrada: print("pasa") else: print("vete") # + colab={"base_uri": "https://localhost:8080/"} id="sY3MCtTyryqA" outputId="809cb7d4-fe36-4705-89dc-26976cfe05f5" if not tiene_entrada: print("pasa") else: print("vete") # + colab={"base_uri": "https://localhost:8080/"} id="2pGE97NGpZ44" outputId="3fa02f47-10fe-4ff0-a34a-1a0d999daf79" n # + colab={"base_uri": "https://localhost:8080/"} id="_S-Xetp1tTbb" outputId="7cc906e3-4f16-4461-de33-f600851cd1e3" n > 5 # + colab={"base_uri": "https://localhost:8080/"} id="xbCmxf5vtYF-" outputId="9839d40b-fb1f-4c30-b4b8-b337dad9f288" n < 5 # + colab={"base_uri": "https://localhost:8080/"} id="PRyjhtm7taPN" outputId="3223364e-9c40-4831-e413-3da2855684c8" 4 == 3 + 1 # utilizamos == y no el = (esta es para declarar variables) # + colab={"base_uri": "https://localhost:8080/"} id="ifdgD6vbtUDD" outputId="664eded1-e3d5-4750-8428-2ddae9310f26" 4 == 4 - 1 # + id="IWA1xsPjtuUu" edad = 15 # + colab={"base_uri": "https://localhost:8080/"} id="WnInsQ2itxqX" outputId="7efa0717-b4f2-409a-f239-9b0220d843b8" edad > 18 # + colab={"base_uri": "https://localhost:8080/"} id="bnmMOTKMtynC" outputId="28bf177a-0700-488c-b82b-6b0f98adde4a" if edad > 18: print('mayor') else: print('menor') # + colab={"base_uri": "https://localhost:8080/"} id="DvKieXkWt2qE" outputId="d843364c-e191-468c-ce7e-59a596862ba7" edad # + colab={"base_uri": "https://localhost:8080/"} id="LGff8dcHt3Mx" 
outputId="1815393b-4572-471d-d89d-da1cce9d0e0d" (edad > 16) or (edad % 3 == 0) # + colab={"base_uri": "https://localhost:8080/"} id="K_it6Ucst_Ya" outputId="324235f3-6494-47a3-86d7-aff06cfdb377" (edad > 16) and (edad % 4 == 0) # + [markdown] id="Jo12Qf77uIyD" # ## None # + id="WBZ5KzSsuJ3x" None # + colab={"base_uri": "https://localhost:8080/"} id="ZlCZq6ERuLsA" outputId="9f4de56f-a22c-407e-fbf5-79af641b60c7" type(None) # + [markdown] id="YL4BDl2nuR97" # ## Null # + colab={"base_uri": "https://localhost:8080/", "height": 174} id="tt8DtKpLuS7k" outputId="d73cd349-ce29-4822-af26-b3794f224acf" null # + colab={"base_uri": "https://localhost:8080/", "height": 174} id="vzsE92eFuTjw" outputId="87bcf630-0dbc-406b-e244-9619f1486e11" b = null # + id="jU8y9N_huXFw" # + id="rNyeC7PRuM-N" # + id="s4a05n3NuDs-"
01_RampUp/week2/.ipynb_checkpoints/01_Python_Tipo_de_datos-checkpoint.ipynb
# +
# #!/usr/bin/env python
# as shared on mcpipy.com
import mcpi.minecraft as minecraft
import mcpi.block as block
import time

# If you are running this script with the bukkit mod, then use a diamond block
# as the magic center block for teleporting.
# comment/uncomment below as appropriate
magic_block = block.DIAMOND_BLOCK  # for bukkit server
#magic_block = block.NETHER_REACTOR_CORE  # for raspberry pi

if __name__ == "__main__":
    # Connect to the running game, then lay a 3x3 cobblestone pad just below
    # the player, clear the layer of air directly above the pad, and finally
    # drop the "magic" teleport block at the pad's centre.
    mc = minecraft.Minecraft.create()
    pos = mc.player.getPos()
    base_x = pos.x
    base_y = pos.y - 1
    base_z = pos.z
    for col_z in range(int(base_z - 1), int(base_z + 2)):
        for col_x in range(int(base_x - 1), int(base_x + 2)):
            mc.setBlock(col_x, base_y, col_z, block.COBBLESTONE)
            mc.setBlock(col_x, base_y + 1, col_z, block.AIR)
    mc.setBlock(base_x, base_y, base_z, magic_block)
classroom-code/examples/brooksc_teleport_pad.ipynb