code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # Demo: Clarify and dense format JSONLines # + # !curl -OL https://w.amazon.com/bin/download/Deep-engine/Thundera/Onboarding/MinimumBringYourOwnContainer/WebHome/byoc.tar.gz # !cat byoc.tar.gz # + from sagemaker import Session session = Session() bucket = session.default_bucket() prefix = 'sagemaker/DEMO-sagemaker-clarify' region = session.boto_region_name # Define IAM role from sagemaker import get_execution_role from sagemaker.s3 import S3Uploader import pandas as pd import numpy as np import urllib import os role = get_execution_role() # - # # Model # # Please make sure that you have the [simple-star-rating image](https://w.amazon.com/bin/view/Deep-engine/Thundera/Onboarding/MinimumBringYourOwnContainer/#HBYOC) built & pushed to the same region/account that runs this notebook. 
# Train a dummy model, # + import sagemaker role = sagemaker.get_execution_role() session = sagemaker.Session() account = session.boto_session.client("sts").get_caller_identity()["Account"] region = session.boto_session.region_name image = f"{account}.dkr.ecr.{region}.amazonaws.com/simple-star-rating:latest" estimator = sagemaker.estimator.Estimator( image_uri=image, role=role, instance_count=1, instance_type="ml.c4.xlarge", sagemaker_session=session, ) estimator.fit() # - # Create SageMaker model model_name = 'simple-start-rating-model' model = estimator.create_model(name=model_name) container_def = model.prepare_container_def() session.create_model(model_name, role, container_def) # ## Bias analysis from sagemaker import clarify clarify_processor = clarify.SageMakerClarifyProcessor(role=role, instance_count=1, instance_type='ml.c4.xlarge', sagemaker_session=session) # This is a workaround. TODO: Remove this cell and next cell when [the JSONLines fix](https://sim.amazon.com/issues/P43801935) is released (tracking SIM: https://sim.amazon.com/issues/P43584479) # + # patch image in Thundera devo account. 
It is the initial launch image plus the JSONLines bug fix clarify_processor.image_uri = "678264136642.dkr.ecr.us-east-1.amazonaws.com/sagemaker-clarify-processing:1.0_jsonlines_patch" # new image with the JSONLines bug fix, to be released soon #clarify_processor.image_uri = "678264136642.dkr.ecr.us-east-1.amazonaws.com/sagemaker-clarify-processing:f988900c" # - bias_dataset_uri = S3Uploader.upload('test_data_for_bias_analysis.jsonl', 's3://{}/{}'.format(bucket, prefix)) bias_dataset_uri # NOTE: The order of headers is important # * The header of label must be the last one # * The order of feature headers should be the same as the order of features # + bias_report_output_path = 's3://{}/{}/clarify-bias'.format(bucket, prefix) bias_data_config = clarify.DataConfig(s3_data_input_path=bias_dataset_uri, s3_output_path=bias_report_output_path, label='star_rating', features='features', headers=['review_body','product_category','star_rating'], dataset_type='application/jsonlines') model_config = clarify.ModelConfig(model_name=model_name, instance_type='ml.c5.xlarge', instance_count=1, accept_type='application/jsonlines', content_type='application/jsonlines', content_template='{"features":$features}') predictions_config = clarify.ModelPredictedLabelConfig( label="predicted_label") # - bias_config = clarify.BiasConfig(label_values_or_threshold=[3], facet_name='product_category', facet_values_or_threshold=['Digital_Software']) clarify_processor.run_bias(data_config=bias_data_config, bias_config=bias_config, model_config=model_config, model_predicted_label_config=predictions_config, pre_training_methods='all', post_training_methods='all') # ## Explainability analysis explainability_dataset_uri = S3Uploader.upload('test_data_for_explainability_analysis.jsonl', 's3://{}/{}'.format(bucket, prefix)) explainability_dataset_uri # NOTE: The format of baseline samples should be the same as dataset samples. 
By definition, baseline should either be a S3 URI to a data file that includes a list of samples, or an in-place list of samples. In this case we chose the latter, and only put a single sample to the list. # + shap_config = clarify.SHAPConfig(baseline=[{"features":["ok", "Digital_Software"]}], num_samples=10, agg_method='mean_abs') explainability_output_path = 's3://{}/{}/clarify-explainability'.format(bucket, prefix) explainability_data_config = clarify.DataConfig(s3_data_input_path=explainability_dataset_uri, s3_output_path=explainability_output_path, features='features', headers=['review_body','product_category'], dataset_type='application/jsonlines') # - clarify_processor.run_explainability(data_config=explainability_data_config, model_config=model_config, explainability_config=shap_config, model_scores="predicted_label")
07_train/archive/Clarify-JSONlines.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Detecting Fake News # Importing Libraries #import - necessary libraries import numpy as np import pandas as pd import itertools import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.metrics import accuracy_score, confusion_matrix df = pd.read_csv('News.csv') df.head(5) df.shape df.info() df.describe() df.isnull().sum() labels =df['label'] labels.head() plt.hist(labels) df.columns # Split the dataset x_train,x_test,y_train,y_test=train_test_split(df['text'], labels, test_size=0.2, random_state=7) # Let’s initialize a TfidfVectorizer with stop words from the English language and a maximum document frequency of 0.7 (terms with a higher document frequency will be discarded). Stop words are the most common words in a language that are to be filtered out before processing the natural language data. And a TfidfVectorizer turns a collection of raw documents into a matrix of TF-IDF features. # # Now, fit and transform the vectorizer on the train set, and transform the vectorizer on the test set. # + # Initialize a TfidfVectorizer tfidf_vectorizer=TfidfVectorizer(stop_words='english', max_df=0.7) #Fit and transform train set, transform test set tfidf_train=tfidf_vectorizer.fit_transform(x_train) tfidf_test=tfidf_vectorizer.transform(x_test) # - # Next, we’ll initialize a PassiveAggressiveClassifier. This is. We’ll fit this on tfidf_train and y_train. # # Then, we’ll predict on the test set from the TfidfVectorizer and calculate the accuracy with accuracy_score() from sklearn.metrics. 
# + #Initialize a PassiveAggressiveClassifier pac=PassiveAggressiveClassifier(max_iter=50) pac.fit(tfidf_train,y_train) #Predict on the test set and calculate accuracy y_pred=pac.predict(tfidf_test) score=accuracy_score(y_test,y_pred) print(f'Accuracy: {round(score*100,2)}%') # - # We got an accuracy of 92.74% with this model. # Finally, let’s print out a confusion matrix to gain insight into the number of false and true negatives and positives # Build confusion matrix df2 = pd.DataFrame(confusion_matrix(y_test,y_pred, labels=['FAKE','REAL']), index = ["Actual Real",'Actual Fake'], columns =['Predicted Real','Predicted Fake']) df2
Detecting Fake news.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Calculating common metrics using sci-kit # Examples from: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html, # and https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html # and https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html # - # # With Numeric Data from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score # + y_true = [0, 1, 2, 0, 1] y_pred = [0, 2, 1, 0, 0] ## 'micro': # Calculate metrics globally by counting the total true positives, false negatives and false positives. ## 'macro': # Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ## 'weighted': # Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters ‘macro’ to account for label imbalance; it can result in an F-score that is not between precision and recall. # - # tp / (tp + fp) precision_score(y_true, y_pred, average='micro') # tp / (tp + fn) recall_score(y_true, y_pred, average='micro') # harmonic mean f1_score(y_true, y_pred, average='micro') f1_score(y_true, y_pred, average='macro') # # Categorical Data # Import import numpy as np from sklearn.metrics import precision_recall_fscore_support y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) # + # Using single call # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html # - The F-beta score weights recall more than precision by a factor of beta. 
beta == 1.0 means recall and precision are equally important. # - The support is the number of occurrences of each class in y_true. # - precision_recall_fscore_support(y_true, y_pred, average='micro') precision_recall_fscore_support(y_true, y_pred, average='macro') # Gives value per label precision_recall_fscore_support(y_true, y_pred, average=None, labels=['pig', 'dog', 'cat']) # # Misclassification Report from sklearn.metrics import classification_report y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) target_names = ['cat', 'dog', 'pig'] print(classification_report(y_true, y_pred, target_names=target_names))
l4-tasks/Metric Calculations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.1 64-bit ('.env') # metadata: # interpreter: # hash: 75ce031803909c87d5abb7ecf93b814a68744b1af50690fb2477d0a013902920 # name: Python 3.8.1 64-bit ('.env') # --- # An interesting problem came up recently, there was a piece of code absolutely full of the same function calls over and over again, meaning if anything ever need to change, that would have to be changed in over 500 places, not ideal. Thoughts go back to single responsbility, and don't repeat yourself principles for software engineering. So research & thinking begun on the best way to manage this issue. The first thing that came to mind, how could we define these functions and their combinations iteratively. # # Before we dive into this could be implemented, we need to really understand the problem. # # The use case for this repeated code, was to check the variables being passed to an endpoint were what they were expected to be. For example, if an endpoint is awaiting for a string, and an optional number, we want to check these before the operation goes through and potentially breaks something else down the line (bringing us back to the crash early principle). # # We'll start by defining two functions which will check that a variable is the type it's expected to be, and another to ensure it exists (not None in Python). # + def check_type(value, variable_type, variable_name): if type(value) != variable_type: raise Exception(f"Variable '{variable_name}' is invalid type! Expected: {variable_type}.") return value def check_exists(value,variable_name): if value is None: raise Exception(f"Variable '{variable_name}' is None! Check variable exists.") return value # - # Now that we've defined these functions, let's test that they work as expected and raise Exceptions when a problem statement comes up. 
# + check_type(24,int,'lucky_number') check_type('Hello world', float, 'I thought this was a number') # - x = 55 y = None check_exists(x,'Fifty five') check_exists(y, 'Fifty six') # ## Defining Functions Iteratively # # Now let's make use of the beauty that is looping to create all the combinations for us to use! We're going to encapsulate all these functions inside a dictionary to encapsulate them and provide a common interface for developers to use. # + def log_and_raise(exception_text): # Add logging here raise Exception(exception_text) def create_validators(types): validators = {} for variable_type in types: validators[f"{variable_type.__name__}"] = lambda value, variable_type=variable_type: value if type(value) == variable_type else log_and_raise(f"Variable isn't of type '{variable_type.__name__}'! D:") return validators validate = create_validators([str,float, int]) # - # Now in a handful lines of code, we've created a dictionary with a way to easily generate functions to check variable types, and then log out the error (eg, write to a file) and raise an exception. # # Before we deconstruct what's happening here, let's see it in action. # + validate['str']('This is a string!') validate['int'](42) validate['float'](42.42) x = 'The number forty two' validate['str'](x) # - # Fantastic, as we can see, it's not throwing any errors and continuing through our validations, now let's ensure our exception is raised (and subsequently any logging would be completed). validate['str'](42) # Even better, we get raise an exception when our validation fails ensuring to alert the developers with information about why it failed. Now let's deconstruct how we created it in depth. # # ### Deconstruction of How # # Admittedly, there's a lot going on in those handful of lines which isn't obvious as to whats happening. 
# # First we define the overarching functions which contains the creation of all these functions, and thereafter initialise a dictionary to store all the following functions within. Next we loop over each of the types provided as a list to the function to create an entry in the dictionary using the `__name__` dunder function (eg, `str` has a dunder `__name__` of 'str'), this let's our developers use the type they want as the key of the dictionary when wanting to validate a variables type. # # ### Lambdas! # # The trickiest part here is how we are actually defining the functions. We make use of the lambda operator in Python to create **anonymous functions**. The structure of a lambda function definition follows: # # ``` Python # lambda arguments: true_statement if conditional_statement else false_statement # ``` # # We make use of a keyword argument of the `variable_type` in our loop otherwise the `variable_type` from the list passed in won't be correctly passed into the lambda function (which we won't discuss in this post). # # Finally we make use of an external function to centralise how we handle errors (making it easy to keep a consistent logging approach), and raise an Exception within that function to ensure any logging occurs before the program ultimately exits. # # ## Conclusion # # There are pros and cons to this approach to this problem. # # **Pros**: # # - Concise way of creating lots of functions # - Consistent interface to use # - Stores all similar functions inside one object (dictionary) # # **Cons**: # # - Not straightforward as to how it works # - Not straightforward to change functionality
content/2021/define-functions-iteratively/notebooks/define-functions-iteratively.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ### Use pyhomogenize to check netCDF file(s) time axis; `time_control` # Now, we want to use pyhomogenize's `time_control` class. We open a test netCDF file. This will be done automatically by calling the class. import pyhomogenize as pyh time_control = pyh.time_control(pyh.test_netcdf[0]) time_control.ds # Let's have a look on the datasets's time axis time_control.time # We can check whether the time axis contains duplicated, missing or redundant time steps. A redundant time step is a time steps that does not math with the dataset's calendar and/or frequency. duplicates = time_control.get_duplicates() redundants = time_control.get_redundants() missings = time_control.get_missings() duplicates, redundants, missings # We see the time axis doesn't contain any incorrect time steps and no time steps are missing. Not really a auspicious example. We can combine the three above requests by using the function `check_timestamps`. timechecker1 = time_control.check_timestamps() timechecker1 # As we can see the functions returns a `time_control` object again but with three new attributes. timechecker1.duplicated_timesteps, timechecker1.missing_timesteps, timechecker1.redundant_timesteps # We want to test the time axis only for duplicated time steps. # timechecker2 = time_control.check_timestamps(selection='duplicates') # timechecker2.duplicated_timesteps # By setting the parameter correct to the boolean value `True` we can delete the duplicated and redundant time steps if exisitng. Of course, in our great example this is not the case. timechecker3 = time_control.check_timestamps(correct=True) timechecker3.time # We can set the parameter `output` to select the dataset's output file name on disk. 
If so the parameter `correct` is automatically set to `True`. timechecker4 = time_control.check_timestamps(output='output.nc') # Now, we want to sleect a specific time range. We copy out `time_control` object to keep the original object. from copy import copy time_control1 = copy(time_control) selected1 = time_control1.select_time_range(['2007-02-01','2007-03-30']) selected1 # Here again, we get a `time_control` object. But now with a different time axis. selected1.time # Of course, we can write the result as netCDF file on disk. time_control2 = copy(time_control) selected2 = time_control2.select_time_range(['2007-02-01','2007-03-30'], output='output.nc') selected2.ds # If we want to crop or limit the time axis to a user-specified start and end month values as shown in the above example `basics.date_range_to_frequency_limits` we can do this with netCDF files as well. The time axis should start with the start of an arbitrary season and end with the end of an arbitrary season. time_control3 = copy(time_control) selected3 = time_control3.select_limited_time_range(smonth=[3,6,9,12], emonth=[2,5,8,11], output='output.nc') selected3.time # Now, we want to check whether the time axis is within certain left and right bounds. time_control.within_time_range(['2007-02-01','2007-03-30']) time_control.within_time_range(['2007-02-01','2008-03-30']) time_control.within_time_range(['20070201','20070330'], fmt='%Y%m%d')
notebooks/time_control_class.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CuPy Acceleration # # Before we begin, let's execute the cell below to display information about the CUDA driver and GPUs running on the server by running the `nvidia-smi` command. To do this, execute the cell block below by clicking on it with your mouse and pressing Ctrl-Enter, or pressing the play button in the toolbar above. You should see some output returned below the grey cell. # !nvidia-smi # ## Copy the Serial code # # Before start modifying the serial code, let's copy the serial code to cupy folder by running the cell below. # !cp ../source_code/serial/* ../source_code/cupy # ## Run the Serial code # %run ../source_code/cupy/cfd.py 64 500 # --- # # # Start Adding CuPy Constructs # # Now, you can start modifying the Python code: # # [cfd.py](../source_code/cupy/cfd.py) # # Remember to **SAVE** your code after changes, before running below cells. # # #### Some Hints # The serial code consists of the `main, jacobi, and write_data` functions. Focus more the jacobi and main functions. Remember to import the cupy library as: ```import cupy as cp ``` at the top of your code. Check if there is any data race in your code. # # ## Run and Profile the CuPy code # # !cd ../source_code/cupy && nsys profile -t nvtx --stats=true --force-overwrite true -o minicfdcupy_profile python3 cfd.py 64 500 # You can examine the output on the terminal or you can download the file and view the timeline by opening the output with the NVIDIA Nsight Systems. # # Download and save the profiler report file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../source_code/cupy/minicfdcupy_profile.qdrep). 
# # # ## Validating the Output # # Make sure the error value printed as output matches that of the serial code # # # # Recommendations for adding CuPy Constructs # # After finding the hotspot function take an incremental approach: # # 1) Add `@cp.fuse()` decorator at the top of the function or rewrite the function as a raw kernel(this is rather tedious) # # 2) Ignore the I/O function # # 3) Ensure that only required data moves from `host (CPU function)` to `device (GPU function)` and vice versa # # 4) Cross check the output after incremental changes to check algorithmic scalability # # 5) Start with a small problem size that reduces the execution time. # # # **General tip:** Be aware of *Data Race* situation in which at least two threads access a shared variable at the same time. At least on thread tries to modify the variable. If data race happened, an incorrect result will be returned. So, make sure to validate your output against the serial version. # # # # Links and Resources # # [Introduction to CuPy](https://github.com/gpuhackathons-org/gpubootcamp/blob/master/hpc/nways/nways_labs/nways_MD/English/Python/jupyter_notebook/cupy/cupy_guide.ipynb) # # [NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/) # # [NVIDIA CUDA Toolkit](https://developer.nvidia.com/cuda-downloads) # # **NOTE**: To be able to see the Nsight System profiler output, please download latest version of Nsight System from [here](https://developer.nvidia.com/nsight-systems). # # Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community. # # # --- # ## Licensing # # This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
CFD/nways-challenge-labs/Python/jupyter_notebook/minicfd_cupy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Extracting date from jpeg of payment slips using pytesseract & regular expression # Importing relevant packages import pytesseract pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe" from PIL import Image from pytesseract import image_to_string # + image = Image.open('FilePath/0e0dd005.jpeg') text = image_to_string(image,lang='eng') # languages can be changed based on requirement, by default it will be english # type(text) --> above converts text to strings #strip is used to take out spaces if any in other images provided text = text.replace(" ","") print(text) # - # Importing regular expression & Date time import re as r from datetime import datetime # + # Image 1 id = r.search(r'\d{2}-\w{3}-\d{4}',text) print(id) # - date = datetime.strptime(id.group(),'%d-%b-%Y').date() print(date) # + #Image 2 image1 = Image.open('FilePath/0a0ebd53.jpeg') text1 = image_to_string(image1,lang='eng') # languages can be changed based on requirement, by default it will be english text1 = text1.replace(" ","") print(text1) # - id1 = r.search(r'\d{2}-\w{3}-\d{4}',text1) print(id1) date1 = datetime.strptime(id1.group(),'%d-%b-%Y').date() print(date1)
ImageProcessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Example 2: - Analyzing dim6 operators in WBF # ## Preparations # Let us first load all the python libraries again # + import sys import os madminer_src_path = "/Users/felixkling/Documents/GitHub/madminer" sys.path.append(madminer_src_path) from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import math import matplotlib from matplotlib import pyplot as plt from scipy.optimize import curve_fit % matplotlib inline from madminer.fisherinformation import FisherInformation from madminer.fisherinformation import project_information,profile_information from madminer.plotting import plot_fisher_information_contours_2d from madminer.sampling import SampleAugmenter from madminer.sampling import constant_benchmark_theta, multiple_benchmark_thetas from madminer.sampling import constant_morphing_theta, multiple_morphing_thetas, random_morphing_thetas from madminer.ml import MLForge, EnsembleForge from sklearn.metrics import mean_squared_error # - # To not forget anything later, let us globaly define the number of events in the MG sample # + inputfile_sb = 'data/madminer_lhedata.h5' nsamples_sb = 20000 inputfile_s = 'data/madminer_lhedata_wbf_signal.h5' nsamples_s = 10000 # - # ## 5. 
Obtaining the Fisher Information using Machine Learning # ### 5a) Make (unweighted) training and test samples with augmented data # Let us make event samples, both for signal only and signal + background # + #Define n_estimator n_estimators = 5 #Initialize sa_sb = SampleAugmenter(inputfile_sb, debug=False) sa_s = SampleAugmenter(inputfile_s , debug=False) #augment train sample for i in range(n_estimators): _ , _ , _ = sa_sb.extract_samples_train_local( theta=constant_benchmark_theta('sm'), n_samples=int(nsamples_sb/2), folder='./data/samples_ensemble_sb/', filename='train{}'.format(i) ) _ , _ , _ = sa_s.extract_samples_train_local( theta=constant_benchmark_theta('sm'), n_samples=int(nsamples_s/2), folder='./data/samples_ensemble_s/', filename='train{}'.format(i) ) #augment test sample _ , _ = sa_sb.extract_samples_test( theta=constant_benchmark_theta('sm'), n_samples=int(nsamples_sb/2), folder='./data/samples_ensemble_sb/', filename='test' ) _ , _ = sa_s.extract_samples_test( theta=constant_benchmark_theta('sm'), n_samples=int(nsamples_s/2), folder='./data/samples_ensemble_s/', filename='test' ) # - # ### 5b) Train a neural network to estimate the score # + ensemble_sb = EnsembleForge(estimators=n_estimators) ensemble_sb.train_all( method='sally', x_filename=['data/samples_ensemble_sb/x_train{}.npy'.format(i) for i in range(n_estimators)], t_xz0_filename=['data/samples_ensemble_sb/t_xz_train{}.npy'.format(i) for i in range(n_estimators)] ) ensemble_sb.save('models/samples_ensemble_sb') ensemble_s = EnsembleForge(estimators=n_estimators) ensemble_s.train_all( method='sally', x_filename=['data/samples_ensemble_s/x_train{}.npy'.format(i) for i in range(n_estimators)], t_xz0_filename=['data/samples_ensemble_s/t_xz_train{}.npy'.format(i) for i in range(n_estimators)] ) ensemble_s.save('models/samples_ensemble_s') # - # ### 5c) Evaluate Fisher Information # + fisher_sb = FisherInformation(inputfile_sb, debug=False) fisher_s = FisherInformation(inputfile_s , debug=False) 
fi_ml_mean_sb, fi_ml_covariance_sb = fisher_sb.calculate_fisher_information_full_detector( theta=[0.,0.], model_file='models/samples_ensemble_sb', unweighted_x_sample_file='data/samples_ensemble_sb/x_test.npy', luminosity=300*1000. ) fi_ml_mean_s, fi_ml_covariance_s = fisher_s.calculate_fisher_information_full_detector( theta=[0.,0.], model_file='models/samples_ensemble_s', unweighted_x_sample_file='data/samples_ensemble_s/x_test.npy', luminosity=300*1000. ) fi_truth_mean_s, fi_truth_covariance_s = fisher_s.calculate_fisher_information_full_truth( theta=[0.,0.], luminosity=300*1000. ) _ = plot_fisher_information_contours_2d( [fi_ml_mean_sb, fi_ml_mean_s, fi_truth_mean_s ], [fi_ml_covariance_s, fi_ml_covariance_s, fi_truth_covariance_s], colors=[u'C0',u'C1',"black"], linestyles=["solid","solid","dashed"], inline_labels=["S+B:ML","S:ML","S:truth"], xrange=(-2,2), yrange=(-2,2) ) # - # ## 6. Cross-Check: Validating score estimation # Let us now validate that the Machine Learning works. The central object consideres here is the score, so we will compare the truth level score with the estimated scores. # ### 6a) Run the Data Augmentation and Machine Learning part # First, we once again augment the dataand machine learning part again. # + sa = SampleAugmenter(inputfile_sb, debug=False) x, theta, t_xz = sa.extract_samples_train_local( theta=constant_benchmark_theta('sm'), n_samples=int(nsamples_sb/2), folder='./data/samples_scores/', test_split=None, filename='train' ) x, theta, t_xz = sa.extract_samples_train_local( theta=constant_benchmark_theta('sm'), n_samples=int(nsamples_sb/2), folder='./data/samples_scores/', test_split=None, filename='test', switch_train_test_events=True ) # - # Next, we use the perform the ML part, but with only one sample. 
We therefore use `MLForge` instead of `EnsembleForge` forge = MLForge() forge.train( method='sally', x_filename='./data/samples_scores/x_train.npy', t_xz0_filename='./data/samples_scores/t_xz_train.npy' ) forge.save('models/samples_scores') # ### 6b) Obtain scores # + #True score t_truth_train=np.load('./data/samples_scores/t_xz_train.npy') t_truth_test=np.load('./data/samples_scores/t_xz_test.npy') #ML score t_ml_train=forge.evaluate('./data/samples_scores/x_train.npy') t_ml_test=forge.evaluate('./data/samples_scores/x_test.npy') # + ####### #Figure myrange=(-.1,.1) myrangex=[-.1,.1] f, ((ax11, ax12),(ax21, ax22),(ax31, ax32)) = plt.subplots(3, 2) f.set_size_inches(8,12) ####### #Distributions mynbins=20 #t0 ax11.set_xlabel('t0') ax11.hist(t_truth_train[:,0], range=myrange, bins=mynbins, normed=True, histtype='step', color='Black', linestyle='solid', label='PL-train' ) ax11.hist(t_truth_test[:,0], range=myrange, bins=mynbins, normed=True, histtype='step', color='Gray', linestyle='dashed', label='PL-test' ) ax11.hist(t_ml_train[:,0], range=myrange, bins=mynbins, normed=True, histtype='step', color='Red', linestyle='dotted',label='ML-train' ) ax11.hist(t_ml_test[:,0], range=myrange, bins=mynbins, normed=True, histtype='step', color='Blue', linestyle='dotted',label='ML-test' ) ax11.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.) 
#t1 ax12.set_xlabel('t1') ax12.hist(t_truth_train[:,1], range=myrange, bins=mynbins, normed=True, histtype='step', color='Black', linestyle='solid' ) ax12.hist(t_truth_test[:,1], range=myrange, bins=mynbins, normed=True, histtype='step', color='Gray', linestyle='dashed', ) ax12.hist(t_ml_train[:,1], range=myrange, bins=mynbins, normed=True, histtype='step', color='Red', linestyle='dotted' ) ax12.hist(t_ml_test[:,1], range=myrange, bins=mynbins, normed=True, histtype='step', color='Blue', linestyle='dotted' ) ####### #Density ax21.hist2d(t_truth_test[:,0], t_ml_test[:,0], bins=(40, 40), range=(myrange,myrange) ,cmap='jet') ax21.set_xlabel('t0_pl_test') ax21.set_ylabel('t0_ml_test') ax22.hist2d(t_truth_test[:,1], t_ml_test[:,1], bins=(40, 40), range=(myrange,myrange) ,cmap='jet') ax22.set_xlabel('t1_pl_test') ax22.set_ylabel('t1_ml_test') ####### #MSE mse_0 = mean_squared_error(t_truth_test[:,0],t_ml_test[:,0]) mse_1 = mean_squared_error(t_truth_test[:,1],t_ml_test[:,1]) mse_x = mean_squared_error(t_truth_test,t_ml_test) ####### #Scatter ax31.scatter(t_truth_test[:,0], t_ml_test[:,0],s=3,alpha=0.05) ax31.set_xlabel('t1_pl_test') ax31.set_ylabel('t1_ml_test') ax31.set_xlim(myrangex) ax31.set_ylim(myrangex) ax32.scatter(t_truth_test[:,1], t_ml_test[:,1],s=3,alpha=0.05) ax32.set_xlabel('t1_pl_test') ax32.set_ylabel('t1_ml_test') ax32.set_xlim(myrangex) ax32.set_ylim(myrangex) plt.text(myrange[0]+0.025*(myrange[1]-myrange[0]), myrange[0]+0.95*(myrange[1]-myrange[0]), 'MSE(t)='+str(mse_x), fontsize=10) plt.text(myrange[0]+0.025*(myrange[1]-myrange[0]), myrange[0]+0.90*(myrange[1]-myrange[0]), 'MSE(t0)='+str(mse_0), fontsize=10) plt.text(myrange[0]+0.025*(myrange[1]-myrange[0]), myrange[0]+0.85*(myrange[1]-myrange[0]), 'MSE(t1)='+str(mse_1), fontsize=10) ####### #Plot plt.tight_layout() plt.show() # - # ### 6c) Kinematic distribution of scores # We can also look at kinematic distribution of scores # + #Which pbservables to show 
x_test=np.load('./data/samples_scores/x_test.npy')[:,0] y_test=np.load('./data/samples_scores/x_test.npy')[:,2] x_name='$p_{T,j1}\;\;[GeV]$' y_name='$|\Delta\phi_{jj}|$' x_range=[0,500] y_range=[0,3.2] v_range=[-.1,.1] t_pl_test=np.load('./data/samples_scores/t_xz_test.npy') t_ml_test=forge.evaluate('./data/samples_scores/x_test.npy') ####### #Figure marker_size=5 f, ((ax11, ax12),(ax21, ax22)) = plt.subplots(2, 2) f.set_size_inches(10,8) # t0 PL test im11=ax11.scatter(x_test, y_test, marker_size, c=t_pl_test[:,0],vmin=v_range[0],vmax=v_range[1]) ax11.set_title("PL truth - Test Sample") ax11.set_xlabel(x_name) ax11.set_ylabel(y_name) ax11.set_xlim(x_range) ax11.set_ylim(y_range) cb11=f.colorbar(im11, ax=ax11) cb11.set_label("$t_0$") # t1 PL test im12=ax12.scatter(x_test, y_test, marker_size, c=t_pl_test[:,1],vmin=v_range[0],vmax=v_range[1]) ax12.set_title("PL truth - Test Sample") ax12.set_xlabel(x_name) ax12.set_ylabel(y_name) ax12.set_xlim(x_range) ax12.set_ylim(y_range) cb12=f.colorbar(im12, ax=ax12) cb12.set_label("$t_1$") # t0 ML test im21=ax21.scatter(x_test, y_test, marker_size, c=t_ml_test[:,0],vmin=v_range[0],vmax=v_range[1]) ax21.set_title("ML estimate - Test Sample") ax21.set_xlabel(x_name) ax21.set_ylabel(y_name) ax21.set_xlim(x_range) ax21.set_ylim(y_range) cb21=f.colorbar(im21, ax=ax21) cb21.set_label("$t_0$") # t1 ML test im22=ax22.scatter(x_test, y_test, marker_size, c=t_ml_test[:,1],vmin=v_range[0],vmax=v_range[1]) ax22.set_title("ML estimate - Test Sample") ax22.set_xlabel(x_name) ax22.set_ylabel(y_name) ax22.set_xlim(x_range) ax22.set_ylim(y_range) cb22=f.colorbar(im22, ax=ax22) cb22.set_label("$t_1$") ####### #Plot plt.tight_layout() plt.show() # -
examples/example2_wbf/Part3-ML.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf

# %matplotlib inline
plt.style.use('ggplot')


# +
def read_data(file_path):
    """Load the raw accelerometer CSV (no header row) into a DataFrame."""
    column_names = ['user-id', 'activity', 'timestamp',
                    'x-axis', 'y-axis', 'z-axis']
    data = pd.read_csv(file_path, header=None, names=column_names)
    return data

def feature_normalize(dataset):
    """Standardize a series to zero mean and unit variance."""
    mu = np.mean(dataset, axis=0)
    sigma = np.std(dataset, axis=0)
    return (dataset - mu) / sigma

def plot_axis(ax, x, y, title):
    """Plot one accelerometer axis with tight limits and hidden x ticks."""
    ax.plot(x, y)
    ax.set_title(title)
    ax.xaxis.set_visible(False)
    ax.set_ylim([min(y) - np.std(y), max(y) + np.std(y)])
    ax.set_xlim([min(x), max(x)])
    ax.grid(True)

def plot_activity(activity, data):
    """Stacked x/y/z time-series plot for a single activity label."""
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, figsize=(15, 10), sharex=True)
    plot_axis(ax0, data['timestamp'], data['x-axis'], 'x-axis')
    plot_axis(ax1, data['timestamp'], data['y-axis'], 'y-axis')
    plot_axis(ax2, data['timestamp'], data['z-axis'], 'z-axis')
    plt.subplots_adjust(hspace=0.2)
    fig.suptitle(activity)
    plt.subplots_adjust(top=0.90)
    plt.show()

def windows(data, size):
    """Yield half-overlapping (start, end) index pairs over *data*."""
    start = 0
    while start < data.count():
        yield int(start), int(start + size)
        start += (size / 2)  # 50% overlap between consecutive windows

def segment_signal(data, window_size=90):
    """Cut *data* into (window_size x 3) segments with majority-vote labels.

    Returns (segments, labels) where segments has shape
    (n_windows, window_size, 3) and each label is the modal activity
    within its window. Windows shorter than window_size are dropped.
    """
    segments = np.empty((0, window_size, 3))
    labels = np.empty((0))
    for (start, end) in windows(data['timestamp'], window_size):
        x = data["x-axis"][start:end]
        y = data["y-axis"][start:end]
        z = data["z-axis"][start:end]
        # BUG FIX: the original checked the module-level `dataset` here
        # instead of the `data` argument, silently coupling this helper
        # to a global and breaking it for any other input frame.
        if len(data['timestamp'][start:end]) == window_size:
            segments = np.vstack([segments, np.dstack([x, y, z])])
            labels = np.append(labels,
                               stats.mode(data["activity"][start:end])[0][0])
    return segments, labels

def weight_variable(shape):
    """Truncated-normal-initialized weight variable."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """Zero-initialized bias variable."""
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial)

def depthwise_conv2d(x, W):
    """Depthwise convolution with unit strides and VALID padding."""
    return tf.nn.depthwise_conv2d(x, W, [1, 1, 1, 1], padding='VALID')

def apply_depthwise_conv(x, kernel_size, num_channels, depth):
    """(1 x kernel_size) depthwise conv layer followed by ReLU."""
    weights = weight_variable([1, kernel_size, num_channels, depth])
    biases = bias_variable([depth * num_channels])
    return tf.nn.relu(tf.add(depthwise_conv2d(x, weights), biases))

def apply_max_pool(x, kernel_size, stride_size):
    """Max-pool along the time dimension only."""
    return tf.nn.max_pool(x, ksize=[1, 1, kernel_size, 1],
                          strides=[1, 1, stride_size, 1], padding='VALID')
# -

dataset = read_data('actitracker_raw.txt')
dataset['x-axis'] = feature_normalize(dataset['x-axis'])
dataset['y-axis'] = feature_normalize(dataset['y-axis'])
dataset['z-axis'] = feature_normalize(dataset['z-axis'])

for activity in np.unique(dataset["activity"]):
    subset = dataset[dataset["activity"] == activity][:180]
    plot_activity(activity, subset)

segments, labels = segment_signal(dataset)
labels = np.asarray(pd.get_dummies(labels), dtype=np.int8)
reshaped_segments = segments.reshape(len(segments), 1, 90, 3)

# NOTE(review): this random split is re-drawn on every run — presumably
# intentional for a tutorial, but not reproducible; confirm if a seed is wanted.
train_test_split = np.random.rand(len(reshaped_segments)) < 0.70
train_x = reshaped_segments[train_test_split]
train_y = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]

# +
# Hyperparameters for the depthwise-CNN classifier.
input_height = 1
input_width = 90
num_labels = 6
num_channels = 3

batch_size = 10
kernel_size = 60
depth = 60
num_hidden = 1000

learning_rate = 0.0001
training_epochs = 8

total_batches = train_x.shape[0] // batch_size

# +
X = tf.placeholder(tf.float32, shape=[None, input_height, input_width, num_channels])
Y = tf.placeholder(tf.float32, shape=[None, num_labels])

c = apply_depthwise_conv(X, kernel_size, num_channels, depth)
p = apply_max_pool(c, 20, 2)
c = apply_depthwise_conv(p, 6, depth * num_channels, depth // 10)

shape = c.get_shape().as_list()
c_flat = tf.reshape(c, [-1, shape[1] * shape[2] * shape[3]])

f_weights_l1 = weight_variable([shape[1] * shape[2] * depth * num_channels *
                                (depth // 10), num_hidden])
f_biases_l1 = bias_variable([num_hidden])
f = tf.nn.tanh(tf.add(tf.matmul(c_flat, f_weights_l1), f_biases_l1))

out_weights = weight_variable([num_hidden, num_labels])
out_biases = bias_variable([num_labels])
y_ = tf.nn.softmax(tf.matmul(f, out_weights) + out_biases)

# +
# Cross-entropy loss, vanilla SGD, and accuracy metric.
loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)

correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# +
cost_history = np.empty(shape=[1], dtype=float)

with tf.Session() as session:
    tf.global_variables_initializer().run()
    for epoch in range(training_epochs):
        for b in range(total_batches):
            offset = (b * batch_size) % (train_y.shape[0] - batch_size)
            batch_x = train_x[offset:(offset + batch_size), :, :, :]
            batch_y = train_y[offset:(offset + batch_size), :]
            _, c = session.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
            cost_history = np.append(cost_history, c)
        # FIX: the original used Python 2 print statements; converted to
        # the print() function so the file runs on Python 3.
        print("Epoch: ", epoch, " Training Loss: ", c, " Training Accuracy: ",
              session.run(accuracy, feed_dict={X: train_x, Y: train_y}))
    print("Testing Accuracy:",
          session.run(accuracy, feed_dict={X: test_x, Y: test_y}))
Activity Detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: complex-systems
#     language: python
#     name: complex-systems
# ---

# # Phase correlations
#
# In this notebook we play around with linear phase correlations of white noise.

# +
import numpy as np  # FIX: np is used throughout this notebook but was never imported

from plotly import tools
from plotly import offline as py
from plotly import graph_objs as go
from plotly import figure_factory as ff

py.init_notebook_mode(connected=True)
# -

# In order to create linear phase correlated white noise we start with normal white
# noise $(x_1,\dots,x_n)$. The real fourier transform is given through
# $(X_0,\dots,X_{n/2})$. $X_0$ equals the signal mean and should not be modified.
# We can write $X_i=|X_i|e^{i\phi_i}$. Now we apply the transform,
# $$\phi_i=\phi_{i+j}+\phi+\eta_i$$,
# wherein $\phi\in[-\pi,+\pi[$ is a constant, $\eta_i$ a noise term. With regard to
# the index shift $i+j$ we apply the circular condition $i+j = i+j-n/2$ if $i+j>n/2-1$.

def correlate_phases(x, offset, phase, noise):
    """Return *x* with linearly correlated Fourier phases.

    Every phase except that of X_0 (the signal mean) is replaced by the
    phase *offset* bins further on (circularly, via np.roll), shifted by
    the constant *phase* and perturbed by *noise*; magnitudes are kept.
    """
    X = np.fft.rfft(x)
    phi = np.arctan2(X.imag, X.real)
    # np.roll implements the circular index condition described above;
    # phi[0] is deliberately left untouched.
    phi[1:] = np.roll(phi[1:], -offset) + phase + noise
    X = np.abs(X) * np.exp(1j * phi)
    return np.fft.irfft(X)

# +
x = np.random.normal(0, 1, 100)
n = np.random.normal(0, 0.5, 50)

y1 = correlate_phases(x, 1, 0, n)
y2 = correlate_phases(x, 1, np.pi, n)

# +
layout = go.Layout(
    legend=dict(orientation='h')
)

figure = go.Figure([
    go.Scatter(y=x, name='white noise'),
    go.Scatter(y=y1, name='phase correlated (δ=0)'),
    # go.Scatter(y=y2, name='phase correlated (δ=π)'),
], layout)

py.iplot(figure)
# -

# If we apply the linear phase correlation without a constant phase offset, we find
# that there are areas where the signal has large overlap with the original
# uncorrelated white noise.

# +
layout = go.Layout(
    legend=dict(orientation='h')
)

figure = go.Figure([
    go.Scatter(y=x, name='white noise'),
    go.Scatter(y=y2, name='phase correlated (δ=π)'),
], layout)

py.iplot(figure)
# -

# When using a constant phase offset to obtain the linear phase correlated white
# noise the local similarity is reduced compared to the previous time series.
# However, global structural features are still similar.

z1 = correlate_phases(x, 1, 0, n)
z2 = correlate_phases(x, 2, 0, n)

# +
layout = go.Layout(
    legend=dict(orientation='h')
)

figure = go.Figure([
    go.Scatter(y=x, name='white noise'),
    go.Scatter(y=z1, name='phase correlated (i=1)'),
    go.Scatter(y=z2, name='phase correlated (i=2)'),
], layout)

py.iplot(figure)
# -
notebooks/noise/Phase Correlations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Homework 23.10.2021

# №1
#
# A string of characters is read via input() (example: "1183241abc00120+4 +10445 ").
# Sort every maximal run of digits inside the string (so the result here is
# "1112348abc00012+4 +01445 "). Any of the implemented sorting algorithms may be used.

# +
def isNum(s):
    """Return True when *s* is a single decimal digit character."""
    return s in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}

# '123a1' -> [1, 2, 3, '-', 1]
def lineToList(line, symbol='-'):
    """Map *line* to a list: digits become ints, everything else *symbol*."""
    return [int(ch) if isNum(ch) else symbol for ch in line]

# [1, 2, 3, '-', '-'] -> [3, 4]
def getIndexSymbols(arg_list, symbol='-'):
    """Indices at which *symbol* placeholders occur in *arg_list*."""
    return [i for i, el in enumerate(arg_list) if el == symbol]

# [1, 2, 3, '-', '-', 2] & '---ab-' -> '123ab2'
def unificationListNumAndLine(arg_list_num, line, symbol='-'):
    """Merge the digit list back with the non-digit characters of *line*."""
    return ''.join(line[i] if el == symbol else str(el)
                   for i, el in enumerate(arg_list_num))

def lineShortNum(line):
    """Sort every maximal digit run of *line*; leave other characters in place."""
    list_num_list = lineToList(line)
    indexSymbols = getIndexSymbols(list_num_list)
    # Sentinel boundaries so the first and last digit runs are covered.
    # FIX: the original indexed indexSymbols[0] unconditionally and crashed
    # with IndexError on all-digit input; guard the empty case.
    if not indexSymbols or indexSymbols[0] != 0:
        indexSymbols = [-1] + indexSymbols
    if indexSymbols[-1] != len(line):
        indexSymbols += [len(line)]
    for i in range(len(indexSymbols) - 1):
        lo, hi = indexSymbols[i] + 1, indexSymbols[i + 1]
        if hi - lo > 1:  # single digits need no sorting
            list_num_list[lo:hi] = BubbleSort(list_num_list[lo:hi])
    return unificationListNumAndLine(list_num_list, line)
# -

if __name__ == "__main__":
    # Interactive demo; guarded so importing this file does not block on stdin
    # (in a notebook __name__ is "__main__", so the cell still runs).
    print(lineShortNum(input()))

# Helper functions
def BubbleSort(arg_list):
    """Optimized in-place bubble sort; returns the (same) sorted list."""
    list_len = len(arg_list)
    swapped = True
    numberOfOperations = 0  # comparison counter kept from the original
    right_border = 0
    border = 0
    while swapped:
        swapped = False
        # Everything right of the last swap position is already sorted.
        for i in range(0, list_len - 1 - right_border):
            numberOfOperations += 1
            if arg_list[i] > arg_list[i + 1]:
                arg_list[i], arg_list[i + 1] = arg_list[i + 1], arg_list[i]
                swapped = True
                border = i
        right_border = list_len - 1 - border
    return arg_list

# №2
#
# Implement QuickSort choosing the median-index element of the list as the pivot,
# instead of a randomly chosen value.
#
# The median of a number series is the element in the middle of the series ordered
# ascendingly when the count is odd; when the count is even, the median is the
# half-sum of the two middle elements.

# +
def median(arg_list):
    """Median of an ascending-sorted sequence (half-sum of middle pair if even)."""
    n = len(arg_list)
    index = n // 2
    if n % 2:
        return arg_list[index]
    a, b = arg_list[index - 1:index + 1]
    return (a + b) / 2

def QuickSort(arg_list):
    """Quicksort using the middle-index element of the list as the pivot."""
    if len(arg_list) < 2:
        return arg_list
    # FIX: the original built range(0, len(arg_list) - 1), which drops the
    # last index and biases the pivot choice; use the full index range.
    median_value = arg_list[int(median(list(range(len(arg_list)))))]
    lesser_values, same_values, larger_values = [], [], []
    for elem in arg_list:
        if elem < median_value:
            lesser_values += [elem]
        elif elem == median_value:
            same_values += [elem]
        else:
            larger_values += [elem]
    lesser_values_sorted = QuickSort(lesser_values)
    larger_values_sorted = QuickSort(larger_values)
    return lesser_values_sorted + same_values + larger_values_sorted
# -

print(QuickSort([4, 3, 5, 3, 6, 43, 0, 32]))

# №3
#
# Draw flowcharts of the sorting and searching algorithms.
# + active=""
# см файлы 07_10_2021 сортировка.png
# 23_10_2021 поиск элемента.png
# -

# №4
#
# Insertion-style sort that searches for the position of the maximum element.

# +
def max_el_index(arg_list):
    """Index of the first occurrence of the largest element of *arg_list*."""
    best = 0
    for candidate in range(1, len(arg_list)):
        # Strict comparison keeps the earliest index among equal maxima.
        if arg_list[candidate] > arg_list[best]:
            best = candidate
    return best

def sortInsertMax(arg_list):
    """Sort *arg_list* in place and return it.

    Walks the unsorted boundary right-to-left; each pass parks the prefix
    maximum at the boundary position whenever it is out of order.
    """
    for boundary in range(len(arg_list) - 1, 0, -1):
        top = max_el_index(arg_list[:boundary])
        if arg_list[top] > arg_list[boundary]:
            arg_list[boundary], arg_list[top] = arg_list[top], arg_list[boundary]
    return arg_list
# -

print(sortInsertMax([1, 4, 43, 23, 1, 0, 8, 12]))
hw 23_10_2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cv101 # language: python # name: cv101 # --- # ## Import libraries # + # Path tools import sys,os sys.path.append(os.path.join("..")) # adding home directory to sys path so we can import the utility function # Neural networks with numpy from utils.neuralnetwork import NeuralNetwork # Sklearn - machine learning tools from sklearn.preprocessing import LabelBinarizer from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from sklearn import datasets # - # The ```utils.neuralnetwork``` is a python class which includes a neural network function called ```NeuralNetwork```. Programming neural networks from the ground up takes a lot of knowledge and many hours, which is why we use this function. We could have used existing Python libraries (e.g. ```tensorflow keras```), but these have something more complex we would need to learn first. Later om we will built our own networks. # ## Load sample data # We are going to use the neural network function form utils to study the handwriting dataset. Rather than working with the full dataset, we are going to use the ```load_digits``` function to only use part of it. This function returns around 180 samples per class from the data. digits = datasets.load_digits() # __Some preprocessing__ # # We need to make sure the data is floats rather than ints. # Convert to floats data = digits.data.astype("float") # __Remember min-max regularization?__ # # This is instead of dividing everything by 255. It's a bit of a smarter way to normalize. # We use max-min regurlization to normalize each data point, which gives us a more compressed and regular dataset to work with. # # What happens if you try the other way? 
# MinMax regularization data = (data - data.min())/(data.max() - data.min()) # Check the shape of the data: # Print dimensions print(f"[INFO] samples: {data.shape[0]}, dim: {data.shape[1]}") # The data has around 1800 samples, and the dimensions are 64. # ## Split data # We split the data into training and test: # split data X_train, X_test, y_train, y_test = train_test_split(data, # original data digits.target, # labels test_size=0.2) # 20%% of the data goes into the test set # We're converting labels to binary representations. # # Why am I doing this? Why don't I do it with LogisticRegression() for example? # convert labels from integers to vectors y_train = LabelBinarizer().fit_transform(y_train) # initializing the binarizer and fit it to the training data y_test = LabelBinarizer().fit_transform(y_test) # doing the same for the test data # ^We convert the labels to binary representations using the ```LabelBinarizer``` function. If we had three labels (e.g. cat, dog, mouse), when we binarize them we get only 2 labels (dog = 1, cat = 0, mouse = 0). Hence by binarizing we take the labels and turn them into a binary representation. We do this because classifiers work with 0s and 1s and not string labels. Hence, the output needs to be either a 0 or a 1 even if we have more than two labels. # Because we are dealing with multple labels, we need to convert the labels into a binary representaion, to enable the computer to be able to map the data. # Let's look at the first ten labels now after having binarized them y_train[:10] # Now the labels have become binarized. Each label is either a 0 or 1. Hence, we have created a binary representation of the labels. This can be done for any kind of labels. # ## Training the network! # ```NeuralNetwork(input_layer, hidden_layer, output_layer)``` # The neural network function takes a list of numbers in which each number represent a layer in the neural network. 
The first layer is the input layer which corresponds to the size of the data. Since the data is 8x8 the input layer should be 64. The output layer is 10 (ten classes that we want to predict). The hidden layer is up to us to specify. We put in 32 for the first hidden layer and 16 for the second hidden layer. These numbers are arbitrary - we decide them. The only real limitation we need to think about is that the sum of the nodes in the hidden layers should be less than the sum of the nodes in the input and output layers. In this case the number of nodes in our hidden layers should not exceed 64 + 10 = 74 nodes. # This is a very simple networks because we are only defining the number of nodes in the layers and the number of epochs (iterations). # NB! This NeuralNetwork function has a default bias term included. The bias is based on the shape of the data. # train network print("[INFO] training network...") nn = NeuralNetwork([X_train.shape[1], 32, 16, 10]) # we specify the input layer, hidden layers, and output layers. The input layer is the size of the data. # Why can we use X_train.shape[1] to indicate the number of nodes in the input layer? # [1] is the number of columns in the data (the number of pixel in each image). For each individual entry in the data, we have an array of 64 values (8x8 - 8 rows and 8 columns) which represent one image. The whole X_train object is a collection of many images that are each 64 pixels. Hence, when we take the entry number 1 we take the number of pixels per image, which is the number of nodes in the input layer. # Fit network to data - training the network for 1000 epochs (iterations) print(f"[INFO] {nn}") nn.fit(X_train, y_train, epochs=1000) # epoch = iteration - a full pass through the entire dataset # We can see that for every epoch we get a lower and lower loss. Hence, each epoch minimizes the loss which is done by learning the weights better and better. For each epoch the model becomes slightly better. 
# __How many epochs should one use?__ # In many ways it is through trial-and-error - seeing which model performs best. We could also perform a cross-validation and plot the training score next to the cross-validation score, which allows us to see how well the model converges and whether it underfits or overfits. The number of epochs is dependent on whetehr it is over- or underfitting the data. If the model is underfitting, then you would increase the number of epochs, while if the model is overfitting, you would decrease the number of epochs in order to stop the model because it learns too much from the data. # # Generally speaking it is either: # - Trial-and-error # - Educated guess # - Computational, algorithm calculation of optimal parameters # Now we have our trained network. Now we can evaluate it. # ## Evaluating the network # We use the model to predict the test class and use the classfication report to produce an output in which we can interpret how well the model performs. # evaluate network print(["[INFO] evaluating network..."]) predictions = nn.predict(X_test) predictions = predictions.argmax(axis=1) print(classification_report(y_test.argmax(axis=1), predictions)) # From this evaluation we can get up to 98% accuracy for these handwritten digits. We can go back to training the model, and adjust the number of epochs for instance and see what that does to the accuracy. # With a very small amount of epochs the model might not converge. # With this neural network class we can tweek different parameters and get different results and try to get the best model possible. # Often you would start out with a logistric regression classifier to get some simple benchmarks, and then you would create a simple neural network and see how these results compare to the benchmark results created by the logistic regression classifier.
notebooks/session7_NeuralNetworks_au617836.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd df=pd.read_csv('heart_failure_clinical_records_dataset.csv') df.head() df.describe() df.head() df.columns df.info() # # Splitting and Scaling X=df.drop(['DEATH_EVENT'], axis = 1) y=df['DEATH_EVENT'] X X.shape df.shape # ## Scalling and Splitting from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=3) from sklearn.preprocessing import MinMaxScaler s = MinMaxScaler() X_train_s = s.fit_transform(X_train) X_test_s = s.transform(X_test) # # Model # ## Random Forest from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier() n_estimators = [100,300,500] max_depth = [2,5,10,20] min_samples_split = [2,5,10] min_samples_leaf = [1,2,5,10] parameters = dict(n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf) grid = GridSearchCV(estimator=forest, param_grid=parameters, cv=6, scoring='accuracy') grid.fit(X_train_s, y_train) print('Best Parameters: \n',grid.best_params_) print('Best Score: ',grid.best_score_) forest_final = RandomForestClassifier(max_depth=10, min_samples_leaf= 2, min_samples_split= 2, n_estimators= 100) forest_final.fit(X_train_s, y_train) filename = 'finalized_model_forest.pkl' import pickle pickle.dump(forest_final, open(filename, 'wb')) y_pred =forest_final.predict(X_test_s) from sklearn.metrics import accuracy_score print(accuracy_score(y_test,y_pred)) importance_rf = grid.best_estimator_.feature_importances_ cols = df.columns[:-1] df_imp_rf = pd.Series(index = cols ,data =importance_rf).to_frame() df_imp_rf = df_imp_rf.rename(columns={0 :'Score'}) print('Importances:') df_imp_rf
Help_for_creation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 1 Vectors import numpy as np vector_as_list = [1.0, 2.3, 4.0] #first it is possible to consider a vector as a list print(vector_as_list) vector = np.array(vector_as_list) #It is better to use Numpy array for math operations print(vector) type(vector) np.shape(vector) #vector dimensions # ## 1.1 Vector products a = np.array([1,2,3]) b = np.array([1,2,3]) c = np.dot(a,b) #scalar product print(c) c = np.cross(a,b) #vector product print(c) a = a.reshape(3,1) #You can reshape numpy arrays. "a" will be a column vector, "b" will be a row vector b = b.reshape(1,3) print("a = {}".format(a)) print("b = {}".format(b)) c = np.dot(a,b) #"c" will be a matrix according to linear algebra rules print(c) # ## 1.2 Vector manipulation b = b.T # If you only want to transpose a vector, you can use ".T" method instead of reshape. print(b) d = np.concatenate((a, b), axis=0) #concatenation of vectors. Notice the differences in "axis" parameters print(d) d = np.concatenate((a, b), axis=1) print(d) d = np.concatenate((a, b), axis=None) print(d) d = np.append(d,[1,1,3]) #you can add elements to a vector using np.append print(d) # ## 2 Matrices matrix = np.array([[1, 2, 3.5], [3.2, 4.8, 2.5], [1, 2, 3]]) #initialization is similar to vector init. print(matrix) matrix = np.arange(9.0) # Another way how to init a matrix is a reshaping a vector print(matrix) matrix = matrix.reshape(3,3) matrix identity_mat = np.identity(3) #identity matrix print(identity_mat) ones_mat = np.ones((3,3)) #all elements are ones. np.zeros((n,m)) exists too print(ones_mat) # ## 2.1 Matrix product matrix_product = np.matmul(matrix,matrix) #you can use np.matmul and also np.dot. 
# (np.matmul and np.dot differ for stacked / higher-dimensional operands.)
print(matrix_product)
matrix_product = np.dot(matrix, matrix)
print(matrix_product)

matrix * 2   # every element multiplied by 2
matrix + 2   # similar for addition and power
matrix ** 2

# ## 2.2 Accessing elements in a matrix
first_row = matrix[0, :]
print(first_row)
first_column = matrix[:, 0]
first_column
np.shape(first_column)  # 1-D result
first_column = matrix[:, [0]]  # note the difference: [0] keeps a 2-D column shape
first_column
np.shape(first_column)
matrix[0, 2]     # one way to access an element: row 0, column 2
matrix[0][2]     # chained indexing, same element
matrix.item(2)   # item() takes the flat (row-major) element order
last_column = matrix[:, -1]  # index from the end with -1 (same [-1] vs -1 caveat)
last_column

# ## 2.3 Matrix manipulation
matrix.T               # transposition, same for vectors and matrices
np.transpose(matrix)   # another way to transpose

# ## 2.4 Linear algebra using linalg
matrix = np.array([[1, 2, 3.5], [3.2, 4.8, 2.5], [1, 2, 3]])
matrix
np.linalg.det(matrix)          # determinant
np.linalg.matrix_rank(matrix)  # rank
np.linalg.inv(matrix)          # inverse
eig_val, eig_vectors = np.linalg.eig(matrix)  # eigenvalues and eigenvectors
eig_val
eig_vectors

# # Matplotlib
import matplotlib.pyplot as plt

time = np.arange(0.0, 10.0, 0.1)  # time vector 0.0, 0.1, 0.2, ..., 9.9
# (np.linspace would also work for the time vector.)
np.shape(time)
signal = 0.8*np.sin(time) + 1.2*np.cos(2*time + 1)  # a function to visualise
signal2 = 0.2*np.sin(time) + 0.5                    # a second one

plt.plot(time, signal)  # x axis = time, y axis = signal
plt.show()

plt.plot(time, signal)   # both curves in one figure
plt.plot(time, signal2)
plt.show()

plt.plot(time, signal)
plt.plot(time, signal2)
plt.grid(True)                  # add a grid
plt.title("Title of the plot")  # add a title
plt.xlabel('time')
plt.ylabel('value')
plt.show()

# ## Subplots
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)  # 2 rows x 1 column
ax1.plot(time, signal)
ax2.plot(time, signal2, "r+")  # r = red, + = marker style
plt.show()

fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
ax1.plot(time, signal)
ax1.grid(True)
ax1.set_title("signal 1", fontsize=20)
ax1.set_xlabel("time")
ax2.plot(time, signal2, "go", ms=10)  # g = green, o = marker, ms = marker size
ax2.set_title("signal 2", fontsize=20)
fig.set_figheight(10)  # figure height [inches]
fig.set_figwidth(10)   # figure width [inches]
fig.subplots_adjust(hspace=0.3)  # vertical space between subplots (fraction of axis height)
plt.show()

# ## $\LaTeX$ expressions
noise = 0.2*np.random.randn(100)  # standard-normal noise samples
# (np.random.randn draws standard-normal samples.)
signal_noise = signal + noise

fig = plt.figure(figsize=(8, 4), dpi=100)  # set figure size and dpi directly
plt.plot(time, signal)
plt.plot(time, signal_noise)
plt.grid(True)
plt.title(r'$s(t) = A \cdot \sin(\omega t)+ B \cdot \cos(2\omega t + \varphi)$')  # LaTeX title
plt.xlabel('time [s]')
plt.ylabel("value [-]")
plt.savefig("foo.png")  # export the figure
plt.show()

# # Mass-spring-damper ODE using Scipy
# See: https://en.wikipedia.org/wiki/Mass-spring-damper_model
#   m*x'' + c*x' + k*x = F,  with F = -m*g
# Rewritten as a first-order system via x1 = x, x2 = x':
#   x1' = x2
#   x2' = -(k/m)*x1 - (c/m)*x2 - g
from scipy import integrate

def func(t, z, k, c, m, g):
    """Right-hand side of the mass-spring-damper system for solve_ivp.

    t: time (unused; required by the solve_ivp signature)
    z: state vector [position x1, speed x2]
    k, c, m, g: stiffness, damping, mass, gravity
    Returns [x1', x2'].
    """
    x1, x2 = z
    x1_dot = x2
    x2_dot = -k/m*x1 - c/m*x2 - g
    return [x1_dot, x2_dot]

# parameters of the system
k = 100    # spring stiffness
c = 1.0    # damping parameter
m = 100    # mass
g = 9.81   # gravity

# simulation time
t_start = 0
t_stop = 20

# initial conditions
x1_0 = 0
x2_0 = 0

# solve the system
sol = integrate.solve_ivp(func, [t_start, t_stop], [x1_0, x2_0], args=(k, c, m, g))

# Visualization
fig = plt.figure(figsize=(8, 4), dpi=100)
plt.plot(sol.t, sol.y[0], label="position")
plt.plot(sol.t, sol.y[1], label="speed")
plt.grid(True)
plt.title("Mass-spring-damper model")
plt.xlabel('time [s]')
plt.ylabel('position [m], speed [m/s]')
plt.legend(loc='upper right')
plt.show()

# ## Simulation options
# Same system, but capping the solver step size.
sol_2 = integrate.solve_ivp(func, [t_start, t_stop], [x1_0, x2_0],
                            args=(k, c, m, g), max_step=0.01)

# Visualization
fig = plt.figure(figsize=(8, 4), dpi=100)
plt.plot(sol.t, sol.y[1], label="speed")
plt.plot(sol_2.t, sol_2.y[1], label="speed max_step = 0.01")
plt.grid(True)
plt.title("Mass-spring-damper model - solver comparison")  # typo "comaprison" fixed
plt.xlabel('time [s]')
plt.ylabel('speed [m/s]')
plt.legend(loc='upper right')
plt.show()

# Solve again with an explicit Runge-Kutta 2(3) method.
sol_3 = integrate.solve_ivp(func, [t_start, t_stop], [x1_0, x2_0],
                            args=(k, c, m, g), method="RK23")

fig = plt.figure(figsize=(8, 4), dpi=100)
plt.plot(sol.t, sol.y[1], label="speed")
plt.plot(sol_3.t, sol_3.y[1], label="speed method = RK23")
plt.grid(True)
plt.title("Mass-spring-damper model - solver comparison")
plt.xlabel('time [s]')
plt.ylabel('speed [m/s]')
plt.legend(loc='upper right')
plt.show()

# ## Dense output
# dense_output=True lets us evaluate the solution at arbitrary times.
dense_sol = integrate.solve_ivp(func, [t_start, t_stop], [x1_0, x2_0],
                                args=(k, c, m, g), dense_output=True)
t = np.arange(t_start, t_stop, 0.01)  # desired sample times
y_dense = dense_sol.sol(t)            # interpolated solution at those times

fig = plt.figure(figsize=(8, 4), dpi=100)
plt.plot(sol.t, sol.y[1], label="speed")
plt.plot(t, y_dense[1], label="speed - dense output")
plt.grid(True)
plt.title("Mass-spring-damper model - solver comparison")
plt.xlabel('time [s]')
plt.ylabel('speed [m/s]')
plt.legend(loc='upper right')
plt.show()

# ## Few more examples
# ### Image loading to numpy array
import matplotlib.image as mpimg

img = mpimg.imread("img/cat.png")  # image -> numpy array of pixels
impgplot = plt.imshow(img)
np.shape(img)   # height x width x channels (R, G, B[, A])
img[0, 0, :]    # channel values of the first pixel
# NOTE(review): this assumes the image is exactly 1200 px wide with 3 channels
# — confirm for img/cat.png before relying on it.
img[100:200, :, :] = np.zeros((100, 1200, 3))  # blank out a horizontal band
impgplot = plt.imshow(img)

# ### Example - Simple moving average
# See: https://en.wikipedia.org/wiki/Moving_average
# The goal is to filter noisy data using a simple moving average filter.
def moving_average(time, signal, filter_size):
    """Filter noisy data using a simple moving average.

    Basic formula (n = filter_size):
        y_filtered(j) = (y(k) + y(k+1) + ... + y(k+n-1)) / n

    A bigger filter size causes a bigger time shift of the filtered data;
    this implementation compensates by shifting the returned time vector to
    the centre of the averaging window.

    Returns (filtered_time, filtered_signal) as NumPy arrays, each of length
    time.size - filter_size.
    """
    i = int(np.ceil((filter_size - 1) / 2))  # left offset; odd sizes round up
    j = int(time.size - (filter_size - i))   # right offset (see slice below)
    filtered_time = time[i:j]                # time shifted to the window centre

    # Accumulate into a Python list, then convert once — avoids the quadratic
    # cost of repeated np.append while returning the same float array.
    averaged = []
    for k in range(signal.size - filter_size):
        window_end = k + filter_size
        averaged.append(np.sum(signal[k:window_end]) / filter_size)  # basic formula
    return filtered_time, np.asarray(averaged)

time = np.arange(0, 10, 0.01)                    # time vector
signal = 0.6*np.cos(time) + np.sin(2*time + np.pi)  # signal without noise
noise = np.random.rand(signal.size)              # uniform noise in [0, 1)
signal_noise = signal + (noise - 0.5)            # noise centred around zero

filt_time, filtered_sig = moving_average(time, signal_noise, 50)

fig = plt.figure(figsize=(10, 5), dpi=100)
plt.plot(time, signal_noise, linewidth=1.0, label="signal with noise", color="c")
plt.plot(time, signal, linewidth=3.0, label="original signal", color="r")
plt.plot(filt_time, filtered_sig, linewidth=3.0, label="filtered signal", color="b")
plt.title("Moving average filter")
plt.xlabel("time[s]")
plt.legend(loc='upper right')
plt.annotate('see the time shift between \noriginal and filtered signal',
             xy=(0.2, 0.5), xytext=(1, 1.3),
             arrowprops=dict(facecolor='black', shrink=0.05),)
plt.show()
courses/numpy_matplotlib.ipynb
# --- jupytext notebook (light format): Yelp ratings presentation ---
# Humans vs. model: who is better at classifying Yelp reviews by star rating?

import nltk
import unicodedata
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import re
from nltk.corpus import wordnet
from nltk.tokenize import RegexpTokenizer
wnl = nltk.WordNetLemmatizer()
nltk.download('averaged_perceptron_tagger')
from collections import Counter
from sklearn.metrics import confusion_matrix

# ## Load data from survey
csv_path = '../data/new_review_examples.csv'

# Row 0 of columns 3..7 holds the five premade review texts; the remaining
# rows of the same columns hold respondents' star guesses for each review.
review_1 = pd.read_csv(csv_path, usecols=[3], names=['reviews'], nrows=1)
review_2 = pd.read_csv(csv_path, usecols=[4], names=['reviews'], nrows=1)
review_3 = pd.read_csv(csv_path, usecols=[5], names=['reviews'], nrows=1)
review_4 = pd.read_csv(csv_path, usecols=[6], names=['reviews'], nrows=1)
review_5 = pd.read_csv(csv_path, usecols=[7], names=['reviews'], nrows=1)

review_1_stars = pd.read_csv(csv_path, usecols=[3], names=['stars'], skiprows=1)
review_2_stars = pd.read_csv(csv_path, usecols=[4], names=['stars'], skiprows=1)
review_3_stars = pd.read_csv(csv_path, usecols=[5], names=['stars'], skiprows=1)
review_4_stars = pd.read_csv(csv_path, usecols=[6], names=['stars'], skiprows=1)
review_5_stars = pd.read_csv(csv_path, usecols=[7], names=['stars'], skiprows=1)

premade_reviews = pd.concat([review_1, review_2, review_3, review_4, review_5],
                            ignore_index=True)
premade_reviews['actual_stars'] = [2, 4, 1, 3, 5]

# Columns 1-2 hold reviews respondents wrote themselves, with their own rating.
submitted_reviews = pd.read_csv(csv_path, usecols=[1, 2], names=['reviews', 'stars'],
                                skiprows=1, encoding='ISO-8859-1')

def _star_counts(stars):
    """Return [count of 1-star guesses, ..., count of 5-star guesses]."""
    counts = Counter(stars.tolist())
    return [counts.get(star, 0) for star in range(1, 6)]

# One count vector per premade review (was five copies of the same code).
review_1_counts = _star_counts(review_1_stars.stars)
review_2_counts = _star_counts(review_2_stars.stars)
review_3_counts = _star_counts(review_3_stars.stars)
review_4_counts = _star_counts(review_4_stars.stars)
review_5_counts = _star_counts(review_5_stars.stars)

# ## Visualize survey data
plt.figure(figsize=(14, 6))
barWidth = 0.2
bars1 = review_1_counts
bars2 = review_2_counts
bars3 = review_3_counts
bars4 = review_4_counts
bars5 = review_5_counts

# One bar group per star value, one offset series per review.
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
r5 = [x + barWidth for x in r4]
plt.bar(r1, bars1, color='#101357', width=barWidth, edgecolor='white', label='review_1')
plt.bar(r2, bars2, color='#fea49f', width=barWidth, edgecolor='white', label='review_2')
plt.bar(r3, bars3, color='#fbaf08', width=barWidth, edgecolor='white', label='review_3')
plt.bar(r4, bars4, color='#00a0a0', width=barWidth, edgecolor='white', label='review_4')
plt.bar(r5, bars5, color='#007f4f', width=barWidth, edgecolor='white', label='review_5')
plt.xlabel('Star', fontsize=22)
plt.xticks([r + barWidth for r in range(len(bars1))], ['1', '2', '3', '4', '5'], fontsize=20)
axes = plt.gca()
axes.set_ylim([0, max(max(bars1), max(bars2), max(bars3), max(bars4), max(bars5))])
plt.title('Star Count per Review', fontweight='bold', fontsize=25)
plt.legend(fontsize=12)
plt.show()

# Compare the guesses against the actual star ratings of these reviews.
print('The five reviews for which we solicited guesses from you all:\n')
for i in range(premade_reviews.shape[0]):
    print('Review {}:\n{}\n\nActual Star Rating: {}\n\n'.format(
        i + 1, premade_reviews.loc[i, 'reviews'], premade_reviews.loc[i, 'actual_stars']))

def make_pie(review_counts):
    """Pie chart of star-guess counts, omitting stars nobody chose."""
    def get_labels():
        # Star values (1..5) with a non-zero guess count.
        return [i + 1 for i in range(len(review_counts)) if review_counts[i] != 0]

    labels = get_labels()
    sizes = [review_counts[i - 1] for i in labels]
    colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'pink']
    plt.pie(sizes, labels=labels, colors=colors, shadow=True, startangle=140,
            autopct='%1.1f%%')
    plt.axis('equal')
    plt.show()

for i, counts in enumerate([review_1_counts, review_2_counts, review_3_counts,
                            review_4_counts, review_5_counts]):
    print('Review {}'.format(i + 1))
    print('Actual star rating: {}'.format(premade_reviews.loc[i, 'actual_stars']))
    make_pie(counts)

# Reviews submitted through the circulated survey.
print('Total number of submitted reviews: {}'.format(submitted_reviews.shape[0]))
print('Submitted reviews dataframe:')
submitted_reviews
for i in range(0, 3):
    print('{}\n\nStar Rating: {}\n\n'.format(submitted_reviews.loc[i, 'reviews'],
                                             submitted_reviews.loc[i, 'stars']))

# Distribution of the star ratings respondents assigned to their own reviews.
submitted_reviews_counts = dict(Counter(submitted_reviews.stars.tolist()))
submitted_reviews_counts = [submitted_reviews_counts.get(1, 0),
                            submitted_reviews_counts.get(2, 0),
                            submitted_reviews_counts.get(3, 0),
                            submitted_reviews_counts.get(4, 0),
                            submitted_reviews_counts.get(5, 0)]

bins = np.arange(7) - 0.5  # centre integer-valued bins on 0..6
plt.hist(submitted_reviews.stars, bins, edgecolor='black')
plt.xticks(range(6))
plt.xlim([0, 6])
plt.yticks(range(max(submitted_reviews_counts) + 1))
plt.ylim([0, max(submitted_reviews_counts) + 1])
plt.title('Star Counts for Submitted Reviews', fontsize=15, fontweight='bold')
plt.xlabel('Star', fontsize=13)
plt.ylabel('Count', fontsize=13)
plt.show()

# ## Clean reviews for modeling ##
# Before feeding reviews to the model we clean and vectorize them:
#  - lowercase; remove non-ASCII characters
#  - tokenize on \w+'?\w+ (keeps internal apostrophes)
#  - replace non-alphabetic characters (digits, %, *, &, $) with ''
#  - remove stopwords (customized list that KEEPS negations: not, won't, no, ...)
#  - lemmatize (canonical/dictionary form of a word)
# (e.g. babies -> baby); finally, rare tokens (<= 30 corpus occurrences) are removed.

# Negation words to KEEP (they carry sentiment) ...
neg_stops = ['no', 'nor', 'not', 'don', "don't", 'ain', 'aren', "aren't", 'couldn',
             "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't",
             'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'mightn',
             "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't",
             'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", "won'",
             "won't", 'wouldn', "wouldn't", 'but', "don'", "ain't"]

# ... and non-negative contractions to REMOVE.
common_nonneg_contr = ["could've", "he'd", "he'd've", "he'll", "he's", "how'd",
                       "how'll", "how's", "i'd", "i'd've", "i'll", "i'm", "i've",
                       "it'd", "it'd've", "it'll", "it's", "let's", "ma'am",
                       "might've", "must've", "o'clock", "'ow's'at", "she'd",
                       "she'd've", "she'll", "she's", "should've", "somebody'd",
                       "somebody'd've", "somebody'll", "somebody's", "someone'd",
                       "someone'd've", "someone'll", "someone's", "something'd",
                       "something'd've", "something'll", "something's", "that'll",
                       "that's", "there'd", "there'd've", "there're", "there's",
                       "they'd", "they'd've", "they'll", "they're", "they've",
                       "'twas", "we'd", "we'd've", "we'll", "we're", "we've",
                       "what'll", "what're", "what's", "what've", "when's",
                       "where'd", "where's", "where've", "who'd", "who'd've",
                       "who'll", "who're", "who's", "who've", "why'll", "why're",
                       "why's", "would've", "y'all", "y'all'll", "y'all'd've",
                       "you'd", "you'd've", "you'll", "you're", "you've"]

letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
           'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ranks = ['st', 'nd', 'rd', 'th']  # ordinal suffixes left behind by digit stripping

def create_stopword_list(nltk_english=True, contractions=True, single_letters=True,
                         rank_suffixes=True, remove_negs=True):
    """Assemble the customized stopword list.

    Each flag toggles one source of stopwords; remove_negs subtracts the
    negation words (and deduplicates as a side effect of the set operation).
    Returns a list of stopword strings.
    """
    # Make sure the stopwords corpus is present.
    # NOTE(review): relying on dir() raising AttributeError for a missing
    # corpus is fragile — confirm this guard actually fires when absent.
    try:
        dir(nltk.corpus.stopwords)
    except AttributeError:
        nltk.download('stopwords')

    stops = []
    if nltk_english:
        stops += nltk.corpus.stopwords.words('english')
    if contractions:
        stops += common_nonneg_contr
    if single_letters:
        stops += letters
    if rank_suffixes:
        stops += ranks
    stops += [""] + ['us'] + [''] + ["'"]

    # Remove all negative stopwords and any duplicates.
    if remove_negs:
        stops = list(set(stops) - set(neg_stops))
    return stops

def _process_review(df):
    """Add a 'review_tokens' column with cleaned, lemmatized tokens per review."""
    def get_wordnet_pos(word):
        # Map an NLTK POS tag onto the WordNet tag set (default: noun).
        tag = nltk.pos_tag([word])[0][1][0].lower()
        tag_dict = {"a": wordnet.ADJ, "n": wordnet.NOUN,
                    "v": wordnet.VERB, "r": wordnet.ADV}
        return tag_dict.get(tag, wordnet.NOUN)

    # Build the stopword list once — it is invariant across reviews
    # (previously rebuilt inside every _clean_review call).
    stops = create_stopword_list()

    def _clean_review(text):
        text = text.lower()
        # Strip non-ASCII characters.
        text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
        # Raw string fixes the invalid "\w" escape; same pattern value.
        tokenizer = nltk.RegexpTokenizer(r"\w+'?\w+")
        filtered_tokens = [re.sub(r"[^A-Za-z\s']", '', token)
                           for token in tokenizer.tokenize(text)]
        tokens = [token for token in filtered_tokens if token not in stops]
        tokens = [re.sub("'s", '', token) for token in tokens
                  if re.sub("'s", '', token) != '']
        for i, token in enumerate(tokens):
            tokens[i] = wnl.lemmatize(token, pos=get_wordnet_pos(token))
        tokens = [token for token in tokens if token not in stops]
        return tokens

    df['review_tokens'] = df['reviews'].apply(lambda x: _clean_review(x))
    return df

# Clean/preprocess the reviews submitted in the survey.
submitted_reviews = _process_review(submitted_reviews)

# Examples of the tokenized form after preprocessing.
for i in range(0, 3):
    print('Full review:\n{}'.format(submitted_reviews.loc[i, 'reviews']))
    print('\nTokenized review: \n{}\n\n'.format(submitted_reviews.loc[i, 'review_tokens']))

# Next: remove rare tokens using the previously pickled list built from the
# corpus of over 6.6 million Yelp reviews (583595 rare tokens).
# Tokens appearing <= 30 times in the training corpus, pickled earlier.
with open('../data/rare_tokens_threshold30_copy.pkl', 'rb') as rare_file:
    rare_tokens_30 = pickle.load(rare_file)

def _remove_rare_tokens(df):
    """Drop corpus-rare tokens from the 'review_tokens' column in place."""
    def _filter_rare_tokens(tokens):
        tokens_to_remove = list(set(tokens) & set(rare_tokens_30))
        return [token for token in tokens if token not in tokens_to_remove]

    df['review_tokens'] = df['review_tokens'].apply(lambda x: _filter_rare_tokens(x))
    return df

submitted_reviews = _remove_rare_tokens(submitted_reviews)

# Apply the same preprocessing to the five premade reviews.
premade_reviews = _process_review(premade_reviews)
premade_reviews = _remove_rare_tokens(premade_reviews)

# ## Use model to predict ratings
# Load the pickled TF-IDF vectorizer and SGD classifier fit on the corpus
# (with-blocks close the file handles).
with open('../data/pickled_TfidfVectorizer.pkl', 'rb') as tfidf_file:
    tfidfer = pickle.load(tfidf_file)
with open('../data/SGDC.pkl', 'rb') as sgdc_file:
    SGDC = pickle.load(sgdc_file)

# The vectorizer expects space-joined token strings.
premade_reviews['review_tokens'] = premade_reviews['review_tokens'].apply(' '.join)
submitted_reviews['review_tokens'] = submitted_reviews['review_tokens'].apply(' '.join)

tfidf_premade_reviews = tfidfer.transform(premade_reviews['review_tokens'])
tfidf_submitted_reviews = tfidfer.transform(submitted_reviews['review_tokens'])

# Model predictions for both sets of reviews.
submitted_reviews_predictions = SGDC.predict(tfidf_submitted_reviews)
premade_reviews_predictions = SGDC.predict(tfidf_premade_reviews)

def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None,
                          cmap=plt.cm.Blues):
    """Print and plot the confusion matrix of y_true vs. y_pred.

    classes labels the axes ticks; normalize=True scales each true-label
    row to sum to 1. Returns the matplotlib Axes.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute (and optionally row-normalize) the confusion matrix.
    # (The no-op `classes = classes` assignment from the original was removed.)
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # Show all ticks and label them with the class list.
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # Annotate every cell, flipping text colour on dark cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax

# How did the classifier do on the reviews you submitted?
plot_confusion_matrix(submitted_reviews.stars, submitted_reviews_predictions,
                      [1, 2, 3, 4, 5], title='Confusion matrix for submitted reviews',
                      cmap=plt.cm.Blues, normalize=True);
# Humans vs. model on the five premade reviews.
plot_confusion_matrix(premade_reviews.actual_stars, premade_reviews_predictions,
                      [1, 2, 3, 4, 5], title='Confusion matrix for premade reviews',
                      cmap=plt.cm.Blues, normalize=True);

# Revisit the survey pie charts generated earlier for comparison.
for i, counts in enumerate([review_1_counts, review_2_counts, review_3_counts,
                            review_4_counts, review_5_counts]):
    print('Review {}'.format(i + 1))
    print('Actual star rating: {}'.format(premade_reviews.loc[i, 'actual_stars']))
    make_pie(counts)

# So what do you think? Would you trust yourselves more than the model for
# this classification task?
notebooks/presentation_draft.ipynb
# --- jupytext notebook (light format): Enron email folder classification ---
# Code used in part 1 of "How I used machine learning to classify emails and
# turn them into insights".

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, ENGLISH_STOP_WORDS
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
import nltk
from nltk.corpus import stopwords
from IPython.display import display, clear_output
from helpers import *
# %matplotlib inline

from sklearn.metrics.pairwise import linear_kernel
from query import EmailDataset
from sklearn.svm import LinearSVC

# emails.csv was previously loaded twice back-to-back; reading it once is enough.
# email_df = pd.read_csv('split_emails.csv')
emails = pd.read_csv('emails.csv')

# Build a frame with just the fields we need.
email_df = pd.DataFrame(parse_into_emails(emails.message))

# Drop emails with empty body, to or from_ columns.
email_df.drop(email_df.query("body == '' | to == '' | from_ == ''").index, inplace=True)

# Flag an email as important if the subject starts with re, fw or fwd.
email_df.loc[:, 'isImportant'] = email_df.subject.str.lower().str.split(':').apply(
    lambda x: x[0] in ['re', 'fw', 'fwd'])

# Folder (file path minus the file name) and owning user for every email.
email_df["folder"] = emails.file.str.split('/').apply(
    lambda x: '/'.join(x[:-1]) if type(x) == list and len(x) else None)
email_df["user"] = email_df.folder.str.split('/').apply(
    lambda x: x[0] if type(x) == list and len(x) else None)

# Bucket users by how many folders they keep.
folder_num = email_df.groupby('user').apply(lambda x: x.folder.unique().shape[0])
email_low = email_df.loc[email_df.user.isin(folder_num.loc[folder_num < 15].index)]
email_mid = email_df.loc[email_df.user.isin(
    folder_num.loc[(folder_num <= 50) & (folder_num >= 15)].index)]
email_high = email_df.loc[email_df.user.isin(folder_num.loc[(folder_num > 50)].index)]

def model_eval(train, test, email, label='subject'):
    """Train one LinearSVC per user on the `label` text column (target: folder)
    and print accuracy over `test`.

    BUG FIX: the original file re-defined model_eval WITHOUT the `label`
    parameter, shadowing this generalized version and making the later
    model_eval(train, test, e, 'body') call a TypeError. Only the
    generalized definition is kept; the default label='subject' preserves
    the behaviour of the 3-argument calls.
    """
    stop_words = ENGLISH_STOP_WORDS.union(['ect', 'hou', 'com', 'recipient'])
    X, model, features, trans = {}, {}, {}, {}
    for i in email.user.unique():
        view = train.loc[train.user == i]
        vect = TfidfVectorizer(analyzer='word', stop_words=stop_words,
                               max_df=0.3, min_df=2)
        X[i] = vect.fit_transform(view.loc[:, label])
        features[i] = vect.get_feature_names()
        trans[i] = vect
        lsvc = LinearSVC(multi_class='ovr', C=1.0)
        lsvc.fit(X[i], view.folder)
        model[i] = lsvc
    count = 0
    for i in range(test.shape[0]):
        tmp = model[test.iloc[i].user].predict(
            trans[test.iloc[i].user].transform([test.iloc[i].loc[label]]))
        if test.iloc[i].folder == tmp[0]:
            count += 1
    # NOTE(review): "{:2f}" is width-2 fixed-point; "{:.2f}" was likely
    # intended — kept as-is to preserve output.
    print("Accuracy {:2f}%".format(count / test.shape[0] * 100))

def _evaluate_buckets(label='subject'):
    """Split each folder-count bucket by folder sparsity and run model_eval on
    an 80/20 shuffled split of each sub-bucket (deduplicates the two original
    copy-pasted evaluation loops)."""
    for email in [email_low, email_mid, email_high]:
        # sparsity score: inverse participation ratio of per-folder email counts
        sparsity = email.groupby(['user', 'folder']).subject.count().groupby(['user']).apply(
            lambda x: 1 / (x ** 2).sum() * x.sum() ** 2)
        names = sparsity.loc[sparsity < sparsity.quantile(1 / 3)].index
        email_dense = email.loc[email.user.isin(names)]
        names = sparsity.loc[sparsity > sparsity.quantile(2 / 3)].index
        email_sparse = email.loc[email.user.isin(names)]
        names = sparsity.loc[(sparsity >= sparsity.quantile(1 / 3)) &
                             (sparsity <= sparsity.quantile(2 / 3))].index
        email_normal = email.loc[email.user.isin(names)]
        for e in [email_dense, email_sparse, email_normal]:
            check = e.copy()
            reorder = np.arange(check.shape[0])
            check.index = reorder
            np.random.shuffle(reorder)
            check = check.iloc[reorder, :]
            check = check.dropna()
            train = check.iloc[:int(e.shape[0] / 5 * 4)]
            test = check.iloc[int(e.shape[0] / 5 * 4):]
            model_eval(train, test, e, label)

_evaluate_buckets('subject')  # evaluate classification on subject lines
_evaluate_buckets('body')     # evaluate classification on message bodies
SVM_email_classifier.ipynb
# --- jupytext notebook (light format): 10 Minutes to pandas ---
# # 10 Minutes to pandas
import numpy as np
import pandas as pd

# ## Object creation
# Passing a list of values creates a Series with a default integer index.
s = pd.Series([1, 3, 4, np.nan, 6, 8])
s

# date_range creates a DatetimeIndex, used below as labelled row index.
dates = pd.date_range('20130101', periods=6)
dates

# A DataFrame is tabular data with a labelled index and labelled columns.
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
df
# A DataFrame can also be built from a dict of column -> (broadcastable) values.
df2 = pd.DataFrame({'A': 1.,
                    'B': pd.Timestamp('20130102'),
                    'C': pd.Series(1, index=list(range(4)), dtype='float32'),
                    'D': np.array([3] * 4, dtype='int32'),
                    'E': pd.Categorical(["test", "train", "test", "train"]),
                    'F': 'foo'})
df2

df2.dtypes  # inspect the per-column data types

# ## Viewing data
df.head()     # first 5 rows of the DataFrame
df.index      # row index
df.columns    # column labels
df.to_numpy() # convert the DataFrame to a NumPy array (one common dtype)
df2.to_numpy()
df.describe() # summary statistics of the data
df.T # untuk mentranspose data artinya menukar kolom jadi baris # + id="EWQb-Ykwl0OZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="c361758b-447b-4999-ead1-4c2697c4f895" df.sort_index(axis=1, ascending=False) # mengurutkan berdasarkan axis # + id="vpN42CyWmHph" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="9f733981-015c-416c-b7cb-3b69d6f62b23" df.sort_values(by='B') # mengurutkan berdasarkan nilai # + [markdown] id="fnz8jvpnmmol" colab_type="text" # ## Selection # + [markdown] id="m6ATFEw8muRV" colab_type="text" # ### Getting # + id="r0GHAEj4mkdw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="109eccad-33d3-4223-fba9-16145267df5d" df['A'] # memilih satu kolom # + id="Pu5Hf3nlnD9i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="63c167a0-79b9-4938-be90-431cb584fb49" df[0:3] # memilih dengan [] yang mengiris baris # yaitu menampilkan data index 0 sampai 3(index 3 tidak termasuk) # + id="SHhdyqk2nmEt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="4d623c69-788d-4e93-b80f-10a8a9eaf83d" df['20130102':'20130104'] # + [markdown] id="Gw7XvAewn54k" colab_type="text" # ### Selection by label # + id="31p6QaZCn78y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="a5af79b1-adcd-4135-c5b2-049ac546c7c4" df.loc[dates[0]] # untuk menampilkan data berdasarkan label pada dates index ke 0 # + id="jJ9xQXyFoTUP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="9a8f4d8b-bd80-427e-e9d5-18ea4e9843a9" df.loc[:, ['A', 'B']] # memilih pada multi-axis dengan label # + id="BPrJsqtKopoF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="50fcac47-08ac-4e1e-c0ff-99fa5fd33f88" df.loc['20130102':'20130104', ['A', 'B']] # memilih berdasarkan label dengan menentukan data yang 
ditampilkan # + id="vEDosVkKpFkg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="73e4fe3a-22fc-4899-892c-fa4b5dc63b54" df.loc['20130102', ['A', 'B']] # memilih menampilkan data pada label dengan data pada baris yang dimaksud # + id="LsSRqMO2pa3s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6abbe6c3-f502-426b-c888-553f97629a4f" df.loc[dates[0], 'A'] # untuk mendapatkan nilai skalar # + id="aZDKKUw9pjn1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e7f6fa6b-800d-4d83-f0a3-b006c8ac6ff5" df.at[dates[0], 'A'] # untuk mendapatkan nilai akses ke skalar # + [markdown] id="zbmaA8TSpwz9" colab_type="text" # ### Selection by position # + id="FqU7Jgpopvj1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="f7780eb2-758f-49c5-aaa0-54852613e108" df.iloc[3] # memilih melalui posisi bilangan bulat yang dilewati # + id="2oP3Dr3gqQQd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="969595f1-ae3b-4224-837e-d7ffb0d2ca3a" df.iloc[3:5, 0:2] # memilih baris index dari dan batas kemudian kolom ke dan batas # + id="X3vJ8-2eukS1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="4246e3eb-c33d-43ec-b063-1d46fe066a2c" df.iloc[1:3, :] # untuk mengiris baris secara eksplisit # + id="M4FWOv7Auu3l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="a340fffe-bde9-454f-dfac-9e88635b0797" df.iloc[:, 1:3] # untuk mengiris kolom secara eksplisit # + id="tmLTiZMVu9pI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4df63753-92ea-499e-a404-0ba006e917b9" df.iloc[1, 1] # untuk mendapatkan nilai secara eksplisit # + id="P3ctl9N9vEBV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1ab49641-f6e8-4aa5-e340-62831b2e4274" df.iat[1, 1] # untuk 
mendapatkan akses cepat ke skalar # + [markdown] id="Wk4BFkFQvkV5" colab_type="text" # ### Boolean indexing # + id="HCuZoDe1vqbk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="49613fad-de33-4660-ec12-105b0a2a9aca" df[df['A'] > 0] # menggunakan nilai satu kolom untuk memilih data # + id="jvtDxJ9pv57z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="78fc9d7c-fc21-476f-c8ff-91adfd65824d" df[df > 0] # memilih nilai dari dataframe tempat kondisi boolean terpenuhi # + id="_Hb65QkowHK5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="0d058976-0f80-4342-a88e-fc6aa61d7dfc" df2 = df.copy() df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three'] df2 # + id="E5b0dke0wh8m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="2e58bad2-59e4-4541-eae8-02e79ef3ee5d" # method isin() digunakan untuk memeriksa apakah nilai terkandung dalam data df2[df2['E'].isin(['two', 'four'])] # + [markdown] id="u2UZP1GtvmwU" colab_type="text" # ### Setting # + id="WWsqocbPw7p4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="c21f920f-34cc-42b9-d591-341dd472c555" # Mengatur kolom baru secara otomatis menyelaraskan data dengan indeks. 
s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6)) s1 # + id="ptNrhck8xPcv" colab_type="code" colab={} # Menetapkan nilai berdasarkan label df.at[dates[0], 'A'] = 0 # + id="0vX-ZmD3xZoA" colab_type="code" colab={} # Menetapkan nilai berdasarkan posisi df.iat[0, 1] = 0 # + id="aWm3gYv1xpAS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="3562d766-5869-4abd-ca61-fa152df208b7" # menetapkan dengan array NumPy df.loc[:, 'D'] = np.array([5] * len(df)) df # + id="C8L0cGhryCVH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="ab01e020-f6d5-4f66-d458-64f150337765" df2 = df.copy() df2[df2 > 0] = -df2 df2 # + [markdown] id="Tdszet0fyP-m" colab_type="text" # ## Missing data # + id="cJFeLnjo9YIY" colab_type="code" colab={} df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E']) # + [markdown] id="NWbbTeEK9xRv" colab_type="text" # Reindexing memungkinkan Anda untuk mengubah / menambah / menghapus indeks pada sumbu yang ditentukan. Ini mengembalikan salinan data. # + id="GdGBX4AQ92f7" colab_type="code" colab={} df1.loc[dates[0]:dates[1], 'E'] = 1 # + id="S5hSSqrV99xR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="ee2a4013-f1ab-4e50-ae82-c9dccd0d27a3" df1 # + id="XL5jF_rQ-Bv8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="36c843b7-9bf9-480e-b180-045acb2ec429" # Menghapus setiap baris yang memiliki missing data. df1.dropna(how='any') # + id="Y7JjwqH8-MB1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="2c9673fd-fc02-4a07-b235-38faff5444a4" # Mengisi data yang hilang. 
df1.fillna(value=5) # + id="4VHp5C-j-Yv6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="d52b0ce8-4aa7-4f3e-cb11-c61afb80364d" # mendapatkan boolean mask dimana nilainya adalah nan pd.isna(df1) # + [markdown] id="ytEZolyW-sn7" colab_type="text" # ## Operations # + [markdown] id="sRGv6aow-vxp" colab_type="text" # ### Stats # + id="TKIrPHLt-1fv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="68ecb7e5-9509-44b5-96a6-24f0dab5f386" # Melakukan statistik deskriptif # Mencari rata-rata df.mean() # + id="FbfHf1OkA2xy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="5abcca74-4b8c-4b53-eb83-2ea0b2c6f052" # Operasi yang sama pada sumbu lainnya df.mean(1) # + id="KuhT4sFIA-kw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="716d683f-09d4-4ed2-81f3-23daa659cd52" s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2) s # + id="UKAZ3NjnBKBc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="bb054ab6-12a5-4f84-eab8-a8c3c61ff7f9" df.sub(s, axis='index') # + [markdown] id="LFZ3cGNHBqVU" colab_type="text" # ### Apply # + id="ylxeOQxtB0XF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="0a5373f2-b2ec-4a3c-afd0-e610349e5448" df.apply(np.cumsum) # + id="wi0wV7VxB73h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="e3dcb35e-d2c0-4667-d192-62638d2b70db" df.apply(lambda x: x.max() - x.min()) # + [markdown] id="WWFIEpJrC2y1" colab_type="text" # ### Histogramming # + id="zHR-II2xC1zI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 212} outputId="941baedf-6012-47cf-be64-2245a8e0400f" s = pd.Series(np.random.randint(0, 7, size=10)) s # + id="EE-dh8zYDJWd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="18a99404-462f-4fff-f9f2-ac8cbf39b487" 
s.value_counts() # + [markdown] id="OJ0dprbsDT6h" colab_type="text" # ### String Methods # + id="ChATayOjDyRu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="4c0ef081-a97e-4141-f7c6-b728730513a3" s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) s.str.lower() # + [markdown] id="nhCFJOauD86p" colab_type="text" # ## Merge # + [markdown] id="gzpf6LplEAc-" colab_type="text" # ### Concat # + id="wgE296UeD7kq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="e9fdd140-8ab7-477d-c57a-63af648bb25f" df = pd.DataFrame(np.random.randn(10, 4)) df # + id="HPCwBpcIEMj5" colab_type="code" colab={} # memecahnya menjadi beberapa bagian pieces = [df[:3], df[3:7], df[7:]] # + id="FPTXs9GkEcox" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="3e353ebb-a8ef-4ca0-b641-e78f6858940a" pd.concat(pieces) # + [markdown] id="VAdHBjIkEiFL" colab_type="text" # ### Join # + id="U8xrzd5YEhpv" colab_type="code" colab={} left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]}) # + id="hGFxCNodEoMV" colab_type="code" colab={} right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]}) # + id="O6YYwwvrErpi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="5f8eb511-14d0-4663-e96e-0406e2807028" left # + id="AITgfeN3EuOb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="1e9461cb-c07f-43de-8fdd-c30d90596a8b" right # + id="ZZDAG31cEw5m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="abaf7681-85bc-447f-825d-77238f031abc" pd.merge(left, right, on='key') # + id="DboYvSGhE_c9" colab_type="code" colab={} left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]}) # + id="QKNnyPVZFBZZ" colab_type="code" colab={} right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]}) # + id="bpQyxrQTFFeC" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 107} outputId="d6223f17-b8ee-415d-c838-9c20263d51ff" left # + id="H5nOiVlqFHam" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="d1843dbc-b6c5-4dc5-9d9e-81e6d3e324b0" right # + id="7tvYQFmpFIwv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="72fd6a9c-407b-4980-d30d-4fad1573145e" pd.merge(left, right, on='key') # + [markdown] id="BWgalbnEFVDW" colab_type="text" # ## Grouping # + id="2gSbpbNfFTQL" colab_type="code" colab={} df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.random.randn(8)}) # + id="b5Nq_CNqFuqa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 287} outputId="d1839c84-7d24-4c9b-9e53-3217b115c6fa" df # + id="81vW-yOAF0ob" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="0aa80afe-34c2-4223-973a-f8105a88dcb4" # Mengelompokkan dan kemudian menerapkan fungsi jumlah () ke grup yang dihasilkan. 
df.groupby('A').sum() # + id="ZvN13ZHnGCPl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="51b002f3-90e2-44e8-c947-5d25dd2a37dd" # Mengelompokkan berdasarkan beberapa kolom membentuk indeks hierarkis, dan menerapkan fungsi sum df.groupby(['A', 'B']).sum() # + [markdown] id="YbUPgwovHh75" colab_type="text" # ## Reshaping # + [markdown] id="Ne4yrFJVHnrA" colab_type="text" # ### Stack # + id="p38kznCAHmge" colab_type="code" colab={} tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])) # + id="W9-CmRdXH7Yq" colab_type="code" colab={} index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) # + id="x0d0jjEfH808" colab_type="code" colab={} df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B']) # + id="jYivEFxFIEiA" colab_type="code" colab={} df2 = df[:4] # + id="c2QGlsgwIGCZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="a59fd390-dd9b-486c-baf9-b39a261424f7" df2 # + id="CtdUt7AyIRSu" colab_type="code" colab={} # Method stack() "compress" level di kolom DataFrame stacked = df2.stack() # + id="uldZ82BqIb5Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="a5edc625-e410-49e8-edac-c47718b3d616" stacked # + id="f0YtRne6IfOt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="3685a7e4-73b8-40ee-e44a-f7600c5ccae4" stacked.unstack() # + id="TmmEcYm3Iot5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="78f6d89f-fd7c-4c21-e5a5-933bb77f789c" stacked.unstack(1) # + id="SMAQmJR7Itcn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="b8a04a37-5e69-4f2d-a24d-0d4c0eb46e74" stacked.unstack(0) # + [markdown] id="5K_HPM8kIzQO" colab_type="text" # ### Pivot tables # + id="b3P1Gd2kIyf7" colab_type="code" colab={} df = pd.DataFrame({'A': 
['one', 'one', 'two', 'three'] * 3, 'B': ['A', 'B', 'C'] * 4, 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2, 'D': np.random.randn(12), 'E': np.random.randn(12)}) # + id="Ap60nGVLJCZb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 407} outputId="f4fec8fb-dc2b-468f-9231-6a3b3793ebb2" df # + id="4Kc6mBkjJJF9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="1347cba6-6136-49ad-92ba-1de59866adab" # Membuat tabel pivot pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C']) # + [markdown] id="WxtlRVmKJO_g" colab_type="text" # ## Time series # + id="-dUopY9sJSbV" colab_type="code" colab={} rng = pd.date_range('1/1/2012', periods=100, freq='S') # + id="ggsXfpIPJVRW" colab_type="code" colab={} ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng) # + id="alLk4L1QJZMG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="fd09b4da-883b-4561-fb53-07b533068eaf" ts.resample('5Min').sum() # + id="CYeVKll-Jdmx" colab_type="code" colab={} # Representasi zona waktu rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D') # + id="A1BuF7IIJlmM" colab_type="code" colab={} ts = pd.Series(np.random.randn(len(rng)), rng) # + id="33vMvcAhJpYG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="116de5a6-27b0-4571-cce9-6698e8bcec1e" ts # + id="zSqzwQSRJuWm" colab_type="code" colab={} ts_utc = ts.tz_localize('UTC') # + id="2Q6eS-B7JxqH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="be5ae264-653c-4c90-d6bd-950416f03a87" ts_utc # + id="ciCWSRoXJ3Cc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="baabdbc4-b5af-4fe2-8fec-e3df72da7277" # Konversi ke zona waktu lain ts_utc.tz_convert('US/Eastern') # + id="VTJqyXh7J7uq" colab_type="code" colab={} # Konversi antara representasi rentang waktu rng = pd.date_range('1/1/2012', periods=5, freq='M') # + 
id="HWN7pEYfKFyt" colab_type="code" colab={} ts = pd.Series(np.random.randn(len(rng)), index=rng) # + id="He6aEpDvKJcT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="dfaebe29-8136-4f83-9456-63860a0a31d7" ts # + id="mTxskYEXKMvl" colab_type="code" colab={} ps = ts.to_period() # + id="ntl01tqVKOHX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="1cbe644a-e1f3-4d40-f000-f744ddfc6d44" ps # + id="DS5dLqxKKfo3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="ac9a2a22-5c5c-4276-ef2d-b4b9c40cd118" ps.to_timestamp() # + [markdown] id="Z0Xd_7XcKvW4" colab_type="text" # Mengonversi frekuensi triwulanan dengan tahun yang berakhir pada November hingga 9 pagi di akhir bulan setelah akhir kuartal # + id="czm_e_rUKyVe" colab_type="code" colab={} prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV') # + id="BlZoRCsRK_6m" colab_type="code" colab={} ts = pd.Series(np.random.randn(len(prng)), prng) # + id="HYsACgU0LCzL" colab_type="code" colab={} ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9 # + id="rjsWg4cmLFxn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="0106e31a-cfdb-488f-c417-2297179815c1" ts.head() # + [markdown] id="sGp2jNjfLIXc" colab_type="text" # ## Categoricals # + id="DHadStWnMfOr" colab_type="code" colab={} df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']}) # + id="g2w_fHEwMlmm" colab_type="code" colab={} # Ubah nilai mentah menjadi tipe data kategorikal. 
df["grade"] = df["raw_grade"].astype("category")

# + id="O5BZLdv_OYxE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 158} outputId="5df86a3d-1282-4827-f623-c3aebb13bd91"
df["grade"]

# + id="Osrtt38fOiK9" colab_type="code" colab={}
# Rename the categories to more meaningful names.
# Fix: assigning to `Series.cat.categories` was deprecated and removed in
# pandas 2.0; `rename_categories` is the supported, equivalent API.
df["grade"] = df["grade"].cat.rename_categories(["very good", "good", "very bad"])

# + id="THRBNpGmPjAp" colab_type="code" colab={}
# Reorder the categories and simultaneously add the missing ones.
df["grade"] = df["grade"].cat.set_categories(["very bad", "bad", "medium", "good", "very good"])

# + id="gKGEC3lIP4U9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 158} outputId="49aa107b-6b77-4034-9710-6e4d2699d47e"
df["grade"]

# + [markdown] id="xm2L04pBQEuo" colab_type="text"
# Sorting is by the order defined in the categories, not lexical order.

# + id="IlxZEbXlQGhD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="8dc5525d-57eb-49ca-929c-15a12925bf81"
df.sort_values(by="grade")

# + [markdown] id="1qw8Rn2KQN7g" colab_type="text"
# Grouping by a categorical column also shows empty categories.
# + id="SuY9JIheQPJk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="444fac1d-005f-44d6-9e42-9db4cdfc0f07" df.groupby("grade").size() # + [markdown] id="lfo-tRSSQVZb" colab_type="text" # ## Plotting # + id="QlI3MaPbQbdm" colab_type="code" colab={} import matplotlib.pyplot as plt # + id="HKlmqsyHQeBg" colab_type="code" colab={} plt.close('all') # + id="TYaFnGOTQhWw" colab_type="code" colab={} ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) # + id="d2YnruQGQksH" colab_type="code" colab={} ts = ts.cumsum() # + id="Ro7Eh60PQpCF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="ca5291d5-9e2c-4586-d8b6-e9df815d1c4a" ts.plot() # + id="LAwN64J-Q-sJ" colab_type="code" colab={} df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=['A', 'B', 'C', 'D']) # + id="y2ZocQQkRFlY" colab_type="code" colab={} df = df.cumsum() # + id="4kAFb7EQRIDt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="a07392e6-0281-40e0-baa2-6b0cf6c891d7" plt.figure() # + id="YDy1vcksRNe_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="aefc81e8-fd94-4447-b71b-19a7a08ded2f" df.plot() # + [markdown] id="VMHqKgfERoDq" colab_type="text" # ## Getting data in/out # + [markdown] id="1oPGsA6NRpxf" colab_type="text" # ### CSV # + id="kFK84KelRu_d" colab_type="code" colab={} # Menulis ke file csv df.to_csv('foo.csv') # + id="mF5Vu_GyR7Dv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="d38934c1-2254-4235-a1a8-d4e2392c70d9" # Membaca file csv pd.read_csv('foo.csv') # + [markdown] id="xkIsETB3RtkT" colab_type="text" # ### HDF5 # + id="udIozvksSF8T" colab_type="code" colab={} # Menulis ke HDF5 Store df.to_hdf('foo.h5', 'df') # + id="sYbJOMrPSMLp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} 
outputId="0f4d0d40-1f11-49cc-997b-10c11c754391" # Membaca dari HDF5 Store pd.read_hdf('foo.h5', 'df') # + [markdown] id="QxEnGA7hSbX7" colab_type="text" # ### Excel # + id="dtSG0EiLSdpE" colab_type="code" colab={} # Menulis file excel df.to_excel('foo.xlsx', sheet_name='Sheet1') # + id="5mSrQUc6Sjw2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="644ba379-22f4-47c4-c763-8415783791ca" # Membaca file excel pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA']) # + [markdown] id="4FfVNMGFSp3x" colab_type="text" # ## Gotchas # + id="apM3ZUFXSspn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 330} outputId="f373428b-751e-427a-ab1e-ba622b8d1c67" if pd.Series([False, True, False]): print("I was true")
minggu-13/src.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Realizing Velocity Prediction with CNN # # This part of the notebook attempts to realize the velocity prediction with CNN as opposed to the LSTM models used in the original [paper](https://arxiv.org/pdf/1708.03535.pdf). import os import mido import keras import numpy as np import sklearn.model_selection as ms import matplotlib.pyplot as plt # + import tensorflow as tf from keras.backend.tensorflow_backend import set_session config = tf.ConfigProto() config.gpu_options.allow_growth = True session = tf.Session(config=config) set_session(session) # - from keras import backend as K K.tensorflow_backend._get_available_gpus() # ### Modeling # Now we have represented our data, we would like to see if we can build a model that predicts the velocities through the 3D matrix we generated X = np.load('matricies/notes.npy') Y = np.load('matricies/velocities.npy') labels = np.load('matricies/labels.npy') X_classical = X[(labels == 1).ravel()] Y_classical = Y[(labels == 1).ravel()] # X_classical = X_classical[:,:,:,1] X_train, X_test, Y_train, Y_test = ms.train_test_split(X_classical, Y_classical, test_size=0.1, random_state=43) print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape) # #### 2.1 Attempt to use a CNN # # The code block below will split the dataset into train and test datasets. 
def model1(input_shape):
    """Build the dilated-CNN velocity model.

    The network reads the sustain channel of the piano roll, pushes it through
    three dilated Conv1D + BatchNorm stages, applies a ReLU, and finally masks
    the result with the note-on/off channel so velocities are only emitted
    where a note actually sounds.

    input_shape -- (timesteps, n_notes, 2); channel 0 = on/off, 1 = sustain.
    Returns an uncompiled keras Model named 'basic'.
    """
    X_input = keras.layers.Input(input_shape)

    # Split the two channels of the input tensor.
    X_on_off = keras.layers.Lambda(lambda X: X[:, :, :, 0])(X_input)
    X_sustain = keras.layers.Lambda(lambda X: X[:, :, :, 1])(X_input)

    glorot = keras.initializers.glorot_normal

    # Left-pad in time so the stack of 'valid' convolutions lines back up
    # with the original number of timesteps.
    net = keras.layers.ZeroPadding1D((185, 0))(X_sustain)

    # Three dilated convolution stages: (conv name, bn name, kernel, dilation).
    for conv_name, bn_name, kernel, dilation in (('Conv0', 'bn0', 64, 1),
                                                 ('Conv1', 'bn1', 32, 2),
                                                 ('Conv2', 'bn2', 16, 4)):
        net = keras.layers.Conv1D(filters=input_shape[-2],
                                  kernel_size=kernel,
                                  dilation_rate=dilation,
                                  padding='valid',
                                  name=conv_name,
                                  kernel_initializer=glorot(seed=None),
                                  bias_initializer=glorot(seed=None),
                                  data_format="channels_last")(net)
        net = keras.layers.BatchNormalization(axis=2, name=bn_name)(net)

    net = keras.layers.Activation('relu')(net)

    print(net.shape)
    print(X_on_off.shape)

    # Zero out predictions wherever no note is playing.
    net = keras.layers.Multiply()([net, X_on_off])

    return keras.models.Model(inputs=X_input, outputs=net, name='basic')


m1 = model1(input_shape=X_train.shape[1:])
m1.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
m1.summary()

# Run multiple times to train further!
m1.fit(X_train, Y_train, epochs=20, batch_size=8)
m1.save('classical.h5')

m1 = keras.models.load_model('m1_conv.h5')

preds = m1.evaluate(X_test, Y_test)
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))

# Predict one held-out sample and flatten to (timesteps, n_notes) for display.
Y_0_hat = m1.predict(np.expand_dims(X_test[0], axis=0))
Y_0_hat = Y_0_hat.reshape(-1, X_test.shape[-2])
Y_0_hat.shape

plt.figure(figsize=(200, 10))
plt.imshow(Y_0_hat)

plt.figure(figsize=(200, 10))
plt.imshow(Y_test[0].reshape(-1, X_test.shape[-2]))

# It doesn't look bad!!! IM HAPPY
#
# # Jazz
#
# Now we need to move on to train another velocity generator for jazz. We will be using the same architecture

X_jazz = X[(labels == 0).ravel()]
Y_jazz = Y[(labels == 0).ravel()]
# X_classical = X_classical[:,:,:,1]

X_train, X_test, Y_train, Y_test = ms.train_test_split(X_jazz, Y_jazz, test_size=0.1, random_state=43)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)

m2 = model1(input_shape=X_train.shape[1:])
m2.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
m2.summary()

# Run multiple times to train further!
m2.fit(X_train, Y_train, epochs=50, batch_size=16)

m2.save('jazz.h5')

# Fix: `load_mode` is not a Keras API and raised AttributeError; the intended
# call is `load_model`.
m2 = keras.models.load_model('jazz.h5')

# NOTE(review): this predicts the jazz test sample with the *classical* model
# m1; if the intent is to evaluate the jazz model it should probably be m2.
# Confirm before changing -- the later "Cross Generate" section mixes models
# on purpose.
Y_0_hat = m1.predict(np.expand_dims(X_test[0], axis=0))
Y_0_hat = Y_0_hat.reshape(-1, X_test.shape[-2])
Y_0_hat.shape

plt.figure(figsize=(200, 10))
plt.imshow(Y_0_hat)

plt.figure(figsize=(200, 10))
plt.imshow(Y_test[0].reshape(-1, X_test.shape[-2]))

# # Generate
#
# Now let us generate some music with this.
target_ticks_per_beat = 8
notes_to_keep_down = 32


def generate(notes_mat, velocity_mat, bpm, seconds):
    """Render a piano-roll matrix into a type-0 MIDI file.

    notes_mat    -- (T, N, 2) array; channel 0 marks note-on events, channel 1
                    marks the note still being held.  The last two "note"
                    columns are pedals: N-2 -> sustain (CC 64),
                    N-1 -> sostenuto (CC 67).
    velocity_mat -- (T, N) array of velocities for the note-on events.
    bpm          -- tempo of the output file.
    seconds      -- length of music to render.

    Returns a mido.MidiFile (caller saves it).
    """
    new_midi = mido.MidiFile(type=0)
    new_midi.ticks_per_beat = target_ticks_per_beat
    track = mido.MidiTrack()
    new_midi.tracks.append(track)
    track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm), time=0))
    track.append(mido.MetaMessage('time_signature', numerator=4, denominator=4,
                                  clocks_per_click=24, notated_32nd_notes_per_beat=8,
                                  time=0))
    track.append(mido.MetaMessage('track_name', name='Test Track', time=0))

    T, N, D = notes_mat.shape
    notes_on = [False] * N  # which notes/pedals are currently sounding
    prev_event_t = 0        # tick of the previous emitted event (MIDI times are deltas)
    Ty = seconds * bpm // 60 * target_ticks_per_beat
    print('generating {0} ticks of music'.format(Ty))

    for t in range(Ty):
        # Ordinary notes; the last two columns are pedals, handled below.
        for n in range(N - 2):
            if notes_mat[t, n, 0] > 0:
                velocity = velocity_mat[t, n]
                track.append(mido.Message('note_on', note=n + notes_to_keep_down,
                                          velocity=velocity, time=t - prev_event_t))
                prev_event_t = t
                notes_on[n] = True
            if notes_on[n] and notes_mat[t, n, 1] == 0:
                # A velocity-0 note_on is the conventional note-off.
                track.append(mido.Message('note_on', note=n + notes_to_keep_down,
                                          velocity=0, time=t - prev_event_t))
                prev_event_t = t
                notes_on[n] = False

        # Sustain pedal (CC 64) down.
        if notes_mat[t, N - 2, 0] == 1:
            track.append(mido.Message('control_change', control=64, value=127,
                                      time=t - prev_event_t))
            prev_event_t = t
            notes_on[N - 2] = True

        # Sostenuto pedal (CC 67) down.
        if notes_mat[t, N - 1, 0] == 1:
            track.append(mido.Message('control_change', control=67, value=127,
                                      time=t - prev_event_t))
            prev_event_t = t
            # Fix: this previously set notes_on[N - 2], clobbering the sustain
            # pedal's state and never marking the sostenuto pedal as down, so
            # its release branch below could never fire.
            notes_on[N - 1] = True

        # Pedal releases.
        if notes_on[N - 2] and notes_mat[t, N - 2, 1] == 0:
            track.append(mido.Message('control_change', control=64, value=0,
                                      time=t - prev_event_t))
            prev_event_t = t
            notes_on[N - 2] = False
        if notes_on[N - 1] and notes_mat[t, N - 1, 1] == 0:
            track.append(mido.Message('control_change', control=67, value=0,
                                      time=t - prev_event_t))
            prev_event_t = t
            notes_on[N - 1] = False

    return new_midi


new_midi = generate(X_test[0], np.round(Y_0_hat).astype(np.int32), 120, 30)
old_midi = generate(X_test[0], Y_test[0], 120, 30)

new_midi.save('vel_generate.midi')
old_midi.save('vel_original.midi')

# # Cross
Generate # # Now let us see if music play style transfer will work by generating a velocity matrix from a jazz music with the classical music model. # X_classical.shape Y_0_hat = m2.predict(np.expand_dims(X_classical[100], axis=0)) Y_0_hat = Y_0_hat.reshape(-1, X_test.shape[-2]) Y_0_hat.shape # plt.figure(figsize = (200,10)) # plt.imshow(Y_0_hat) # + # plt.figure(figsize = (200,10)) # plt.imshow(Y_classical[100]) # - new_midi = generate(X_classical[100], np.round(Y_0_hat).astype(np.int32), 110, 30) old_midi = generate(X_classical[100], Y_classical[100], 110, 30) new_midi.save('vel_generate_c2j.midi') old_midi.save('vel_original_c2j.midi') Y_0_hat = m1.predict(np.expand_dims(X_jazz[64], axis=0)) Y_0_hat = Y_0_hat.reshape(-1, X_test.shape[-2]) Y_0_hat.shape new_midi = generate(X_jazz[64], np.round(Y_0_hat).astype(np.int32), 110, 30) old_midi = generate(X_jazz[64], Y_jazz[64], 110, 30) new_midi.save('vel_generate_j2c.midi') old_midi.save('vel_original_j2c.midi') plt.figure(figsize = (200,10)) plt.imshow(Y_0_hat) plt.figure(figsize = (200,10)) plt.imshow(Y_jazz[64])
Haojun Dialated CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ZbyszekMa/dw_matrix/blob/master/Matrix1_05.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="dHTmdRgMi-tj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="ad821294-74c6-4f29-aa07-de102eda01ef" # !pip install eli5 # + id="DQWgi9z2jGgU" colab_type="code" colab={} import pandas as pd import numpy as np from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error from sklearn.model_selection import cross_val_score import eli5 from eli5.sklearn import PermutationImportance from ast import literal_eval from tqdm import tqdm_notebook # + id="M0YqUtmikk3p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6e732e4b-7237-4642-c441-13796ad2a9ec" # cd "/content/drive/My Drive/Colab Notebooks/dw_matrix" # + id="AXd83FQkk20r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="543a9d02-4c19-4677-962c-a1eafdaf2a43" # ls # + id="cXKmZddak3tv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="36d0f36b-9589-4d41-b2ec-97168014e0ea" # ls data # + id="979ZdUVRlErb" colab_type="code" colab={} df = pd.read_csv('data/men_shoes.csv', low_memory=False) # + id="V7wd1O6glkPf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="638c2204-dd46-4bf3-8846-99b99ef22c3b" df.columns # + id="llJMj7IpmL6M" colab_type="code" colab={} def run_model(feats, model = DecisionTreeRegressor(max_depth=5)): X = df[ feats ].values y = df[ 'prices_amountmin' ].values 
scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error') return np.mean(scores), np.std(scores) # + id="ydHiCUqjmwkq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="508ea295-3f5c-4ab5-ed92-52d4cdffe815" df['brand_cat'] = df['brand'].factorize()[0] run_model(['brand_cat']) # + id="uKwZJsL6nGFI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="30e13f1f-44ae-4ffe-cc6e-60a795cd160e" model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) run_model(['brand_cat'], model) # + id="rvLhO53spEvL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 521} outputId="5520639d-e974-4805-9b56-410ff936e06b" df.head() # + id="v9o62UbopWGb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="77dd49a6-9ab7-49cc-dea6-e303abbe24d9" df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0] run_model(['brand_cat']) # + id="UxER-CG9p-bo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="79d7602e-d15e-4ab0-96fb-a73d6ed2cf5a" model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) run_model(['brand_cat'], model) # + id="v0VAdkTDqgbq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="a21fe106-c7f6-421c-f6cc-cf5fdc768792" df.features.head().values # + id="B6y6AykIqxft" colab_type="code" colab={} def parse_features(x): if str(x) == 'nan': return [] return literal_eval(x.replace('\\"', '"')) df['features_parsed'] = df['features'].map(parse_features) # + id="2DWdNa6NtVC6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 121} outputId="cfe1c470-fad8-40dd-85c8-692dfa4c3bd6" df['features_parsed'].head() # + id="tDQLjiidtfL7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="f1c265ce-a876-407f-8b51-87e9763dfaee" df['features_parsed'].head().values # + 
id="CAA3PNESt1ns" colab_type="code" colab={} def parse_features(x): output_dict = {} if str(x) == 'nan': return output_dict features = literal_eval(x.replace('\\"', '"')) for item in features: key = item['key'].lower().strip() value = item['value'][0].lower().strip() output_dict[key] = value return output_dict df['features_parsed'] = df['features'].map(parse_features) # + id="3g8LQD12vqVL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="547c5d71-6dea-4e13-f075-595368986282" df['features_parsed'].head().values # + id="2Qw2Q39UwIOs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2e6df471-b6d7-4d05-e73a-e691e764c71f" keys = set() df['features_parsed'].map( lambda x: keys.update(x.keys()) ) len(keys) # + id="B1vYqKL0w4x6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["df17f72538fb44a18127b0bf1a961ad5", "2ced3af6fdf14661a63576e731dcc08d", "<KEY>", "0134956f0c454191a5685edebfea070e", "<KEY>", "56fd99ca8d83440bb28a75e023dde9dc", "5eaf5bb9e4db4021a0816fa2ec65d32f", "<KEY>"]} outputId="ef258b95-3247-45b4-e4dd-7209d6a8ff3c" def get_name_feat(key): return 'feat_' + key for key in tqdm_notebook(keys): df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan) # + id="izBcMfDB0cMa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="4ca9a901-64df-4458-fd90-05b321af94c0" df.columns # + id="KXU67E8I0o0K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fd0c7053-33f1-4b13-e464-21c8c4081450" df[ df['feat_athlete'].isnull() ].shape # + id="zww8NMaj1Fi6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8c2532bf-8c51-4eea-8aac-f00521d33d9c" df.shape[0] # + id="aoCveKs81MVs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3a39cb78-<KEY>" df[ False == 
df['feat_athlete'].isnull() ].shape[0] / df.shape[0] * 100 # + id="7PZfqigO1XH7" colab_type="code" colab={} keys_stat = {} for key in keys: keys_stat[key] = df[ False == df[get_name_feat(key)].isnull() ].shape[0] / df.shape[0] * 100 # + id="zDbbrOXW2kT7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="26bd1562-5346-4dc0-ea2e-8c8c41cd1088" {k:v for k,v in keys_stat.items() if v > 30} # + id="vbbZPAcE3Gta" colab_type="code" colab={} df['feat_brand_cat'] = df['feat_brand'].factorize()[0] df['feat_color_cat'] = df['feat_color'].factorize()[0] df['feat_gender_cat'] = df['feat_gender'].factorize()[0] df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0] df['feat_material_cat'] = df['feat_material'].factorize()[0] # + id="eOTH3mjW6Fli" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="97289a2b-2fb2-41de-9ee4-1a727548c4db" df[ df.brand == df.feat_brand ].shape # + id="9kGC2BBQ6Zbt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="e878b38c-050b-40fe-bf01-2d1416800383" df[ df.brand == df.feat_brand ][ ['brand', 'feat_brand'] ].head() # + id="4WP9WLpt62Xc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="2d9afc1b-47eb-4bf8-d369-8955c45ff235" df['brand'] = df['brand'].map(lambda x: str(x).lower() ) df[ df.brand != df.feat_brand ][ ['brand', 'feat_brand'] ].head() # + id="XoJJy6uO7FNa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d1962e3a-fabc-408a-c819-231fbf85afa5" df['brand'] = df['brand'].map(lambda x: str(x).lower() ) df[ df.brand == df.feat_brand ].shape # + id="Zt5pPfhN74Ts" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fa3adf4d-cac3-467a-e4e3-368902373cd3" model = RandomForestRegressor(max_depth=5, n_estimators=100) run_model(['brand_cat'], model) # + id="MvsqiK6J8kU7" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="236b9013-dc1b-46f1-96b1-6b6dd0763f7e" feats = ['brand_cat', 'feat_brand_cat', 'feat_color_cat', 'feat_gender_cat', 'feat_manufacturer part number_cat', 'feat_material_cat'] model = RandomForestRegressor(max_depth=5, n_estimators=100) run_model(feats, model) # + id="_4M_h8ni-EB8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="96430d62-b598-4aeb-bc00-d8949a531efe" X = df[ feats ].values y = df['prices_amountmin'].values m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) m.fit(X, y) perm = PermutationImportance(m, random_state=1).fit(X, y); eli5.show_weights(perm, feature_names=feats) # + id="c5mGNi2m_yDL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0c951d7f-6bad-47b9-ad77-618128706fb4" feats = ['brand_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat'] model = RandomForestRegressor(max_depth=5, n_estimators=100) run_model(feats, model) # + id="5tW6vew2AFhO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="8df96b68-3713-49a6-c49c-c36f80cb65cd" X = df[ feats ].values y = df['prices_amountmin'].values m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) m.fit(X, y) perm = PermutationImportance(m, random_state=1).fit(X, y); eli5.show_weights(perm, feature_names=feats) # + id="y2aa6TnHAKYe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="0c78312f-f5b2-4b1d-d3a1-520f13374a0d" df['brand'].value_counts() # + id="K04-zSdKAsCb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="7e7ee5e5-cfdb-40dc-e12b-9f610f811603" df['brand'].value_counts(normalize=True) # + id="8XqQJbSoA6bb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="b90aa6b7-9d16-48ac-8a3d-02c2058866e3" df[ df['brand'] == 
'nike'].features_parsed.head().values # + id="TXXhfF2-BIDL" colab_type="code" colab={} df['feat_brand_cat'] = df['feat_brand'].factorize()[0] df['feat_color_cat'] = df['feat_color'].factorize()[0] df['feat_gender_cat'] = df['feat_gender'].factorize()[0] df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0] df['feat_material_cat'] = df['feat_material'].factorize()[0] df['feat_sport_cat'] = df['feat_sport'].factorize()[0] df['feat_style_cat'] = df['feat_style'].factorize()[0] # + id="YYncaqFAB88p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9e4f862d-b1e6-44e0-966b-69aee5b4b130" feats = ['brand_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat', 'feat_style_cat', 'feat_sport_cat'] model = RandomForestRegressor(max_depth=5, n_estimators=100) run_model(feats, model) # + id="9SPbeYnvCjof" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="9d0fd584-5b23-4fb7-f3ad-76a17df636f7" X = df[ feats ].values y = df['prices_amountmin'].values m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) m.fit(X, y) perm = PermutationImportance(m, random_state=1).fit(X, y); eli5.show_weights(perm, feature_names=feats) # + id="u08X2EKpCsaN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="4d4eb34c-2397-4667-d715-2ef59067b2f5" df[ df['brand'] == 'nike'].features_parsed.sample(5).values # + id="cwFVb0EEDSVs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="d57c81d7-619d-4aa6-c378-24529b600496" df['feat_age group'].value_counts() # + id="EtuSf6XPD0Sr" colab_type="code" colab={} df['feat_brand_cat'] = df['feat_brand'].factorize()[0] df['feat_color_cat'] = df['feat_color'].factorize()[0] df['feat_gender_cat'] = df['feat_gender'].factorize()[0] df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0] df['feat_material_cat'] = 
df['feat_material'].factorize()[0] df['feat_sport_cat'] = df['feat_sport'].factorize()[0] df['feat_style_cat'] = df['feat_style'].factorize()[0] for key in keys: df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0] # + id="Ybylc3SsE2F7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e7312455-80c6-4bed-8da1-fcfd94884dcd" feats_cat = [x for x in df.columns if 'cat' in x] feats_cat # + id="JaL0B1Q2FTM8" colab_type="code" colab={} feats = ['brand_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat', 'feat_style_cat', 'feat_sport_cat', 'feat_metal type_cat', 'feat_shape_cat'] #feats += feats_cat #feats = list(set(feats)) model = RandomForestRegressor(max_depth=5, n_estimators=100) result = run_model(feats, model) # + id="6dj3XivbF_ks" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 191} outputId="315eda38-d42a-491f-da16-9401a13c6849" X = df[ feats ].values y = df['prices_amountmin'].values m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) m.fit(X, y) print(result) perm = PermutationImportance(m, random_state=1).fit(X, y); eli5.show_weights(perm, feature_names=feats) # + id="c1X1TOq8G0ct" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="ffeba715-c7b7-48f8-dd80-0c3bbcc79630" df ['weight'].unique() # + id="Hedd3lj5Hx6M" colab_type="code" colab={}
Matrix1_05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Hierarchical Clustering

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering

# Importing the dataset: cluster on annual income (column 3)
# and spending score (column 4).
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values

# Using the dendrogram to find the optimal number of clusters.
# Ward linkage merges the pair of clusters that minimises the
# increase in total within-cluster variance.
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distances')
plt.show()

# Fitting Hierarchical Clustering to the dataset.
# FIX: dropped the deprecated ``affinity='euclidean'`` argument — Euclidean
# distance is already the default, and ``affinity`` was removed in
# scikit-learn 1.4, so the old call crashes on current versions.
hc = AgglomerativeClustering(n_clusters=5, linkage='ward')
y_hc = hc.fit_predict(X)

# Visualising the clusters: one scatter per label, same colours and
# legend labels as before.
cluster_colors = ['red', 'blue', 'green', 'cyan', 'magenta']
for cluster_idx, color in enumerate(cluster_colors):
    plt.scatter(X[y_hc == cluster_idx, 0], X[y_hc == cluster_idx, 1],
                s=100, c=color, label='Cluster ' + str(cluster_idx + 1))
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
Day 15 - Hierarchical Clustering/Hierarchical Clustering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [anaconda2]
#     language: python
#     name: Python [anaconda2]
# ---

# <H2>MANDATORY PYTHON LIBRARIES</H2>

import pandas as pd
import ftputil
import numpy as np
import os

# <h2> PROVIDER SELECTION CRITERIA</h2>

# If you had a look at the notebook dedicated to the In Situ netCDF selection
# criteria, selecting by provider is available for files in all directories:

data = {'Selection criteria': ['provider'], 'index_history.txt': [' ✔'],
        'index_monthly.txt': [' ✔'], 'index_latest.txt': [' ✔ ']}
pd.DataFrame(data=data)

# <H2>SET OUTPUT DIRECTORY</H2>

output_directory = os.getcwd()  # default to current working directory

# <h2> SET YOUR CREDENTIALS</h2>

user = ''      # type CMEMS user name
password = ''  # type CMEMS password

# <h2> TARGET A PRODUCT, HOST AND INDEX FILE</h2>

product_name = 'INSITU_MED_NRT_OBSERVATIONS_013_035'  # type aimed In Situ product
host = 'nrt.cmems-du.eu'         # type aimed host (nrt.cmems-du.eu or my.cmems-du)
index_file = 'index_latest.txt'  # type aimed index file

# <h2> SPECIFY PROVIDER </h2>

aimed_provider = 'SOCIB'

# <h2> DOWNLOAD THE FILES MATCHING THE ABOVE PROVIDER</h2>

# Connect to the CMEMS FTP server.
with ftputil.FTPHost(host, user, password) as ftp_host:
    # Open the index file and read it as a comma-separated-value table.
    with ftp_host.open("Core" + '/' + product_name + '/' + index_file, "r") as indexfile:
        index = np.genfromtxt(indexfile, skip_header=6, unpack=False,
                              delimiter=',', dtype=None,
                              names=['catalog_id', 'file_name',
                                     'geospatial_lat_min', 'geospatial_lat_max',
                                     'geospatial_lon_min', 'geospatial_lon_max',
                                     'time_coverage_start', 'time_coverage_end',
                                     'provider', 'date_update', 'data_mode',
                                     'parameters'])
    # Loop over the lines/netCDFs and download the most suitable ones for you.
    for netCDF in index:
        # genfromtxt with dtype=None yields byte strings; decode them to text.
        ftplink = netCDF['file_name'].decode('utf-8')
        # Drop the "ftp://host/" prefix to get the server-side path.
        filepath = '/'.join(ftplink.split('/')[3:])
        ncdf_file_name = ftplink[ftplink.rfind('/') + 1:]
        # Download the netCDF only if it meets the selection criteria.
        provider = netCDF['provider'].decode('utf-8')
        if aimed_provider in provider and ftp_host.path.isfile(filepath):
            # FIX: download straight into the output directory instead of the
            # previous os.chdir()/chdir-back dance — os.chdir is process-global
            # and was not restored if ftp_host.download raised an exception.
            ftp_host.download(filepath, os.path.join(output_directory, ncdf_file_name))
PythonNotebooks/In_Situ_data_download_by_provider.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)
#
# [Open In Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/TRANSLATION_MARIAN.ipynb)

# # **Translate text**

# Spark NLP documentation and instructions:
# https://nlp.johnsnowlabs.com/docs/en/quickstart
#
# Details about Spark NLP annotators:
# https://nlp.johnsnowlabs.com/docs/en/annotators
#
# Details about Spark NLP models:
# https://nlp.johnsnowlabs.com/models

# ## 1. Colab Setup

# +
# Install java
# ! apt-get update -qq
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# ! java -version

# Install pyspark
# ! pip install --ignore-installed -q pyspark==2.4.4

# Install Spark NLP
# ! pip install --ignore-installed spark-nlp==2.7.0

# Install Spark NLP Display for visualization
# ! pip install --upgrade -q spark-nlp-display
# -

# ## 2. Start the Spark session

# Import dependencies and start Spark session.

# +
import os
import json

os.environ['JAVA_HOME'] = "/usr/lib/jvm/java-8-openjdk-amd64"

import pandas as pd
import numpy as np

from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline

spark = sparknlp.start()
# -

# ## 3. Select the DL model
#
# Complete model list: https://nlp.johnsnowlabs.com/models
# `Translation` models: https://nlp.johnsnowlabs.com/models?tag=translation

# ## 4. A sample text in Italian for demo - we'll translate Italian text to English

text = """La Gioconda è un dipinto ad olio del XVI secolo creato da Leonardo. Si tiene al Louvre di Parigi."""

# ## 5. Define Spark NLP pipeline

# +
# Stage 1: wrap raw text into Spark NLP documents.
doc_assembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# Stage 2: deep-learning sentence detector (multilingual "xx" model).
sentence_detector = SentenceDetectorDLModel() \
    .pretrained("sentence_detector_dl", "xx") \
    .setInputCols(["document"]) \
    .setOutputCol("sentences")

# Stage 3: Marian transformer translating Italian to English.
translator = MarianTransformer.pretrained("opus_mt_it_en", "xx") \
    .setInputCols(["sentences"]) \
    .setOutputCol("translation")

translation_pipeline = Pipeline(stages=[doc_assembler, sentence_detector, translator])
# -

# ## 6. Run the pipeline

# +
# Fit on an empty frame (all stages are pretrained), then use a
# LightPipeline for fast single-document annotation.
blank_df = spark.createDataFrame([['']]).toDF('text')
fitted_pipeline = translation_pipeline.fit(blank_df)
light_model = LightPipeline(fitted_pipeline)
annotations = light_model.fullAnnotate(text)
# -

# ## 7. Visualize results

# +
print('Original:', text, '\n\n')
print('Translated:\n')
for sentence in annotations[0]['translation']:
    print(sentence.result)
# -
tutorials/streamlit_notebooks/TRANSLATION_MARIAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
# %pwd
# -

# Load the four shop-listing CSV files (Korean open data, cp949-encoded).
data1 = pd.read_csv("/Users/jinyoungpark/Desktop/Projects/open-data-shop/shop1.csv", encoding="cp949").copy()
data2 = pd.read_csv("/Users/jinyoungpark/Desktop/Projects/open-data-shop/shop2.csv", encoding="cp949").copy()
data3 = pd.read_csv("/Users/jinyoungpark/Desktop/Projects/open-data-shop/shop3.csv", encoding="cp949").copy()
data4 = pd.read_csv("/Users/jinyoungpark/Desktop/Projects/open-data-shop/shop4.csv", encoding="cp949").copy()

data1.head()
data2.head()
data3.head()
data4.head()

# Keep only shop name (상호명), longitude (경도), latitude (위도)
# and road address (도로명주소).
coffee1 = data1[["상호명", "경도", "위도", "도로명주소"]]
coffee2 = data2[["상호명", "경도", "위도", "도로명주소"]]
coffee3 = data3[["상호명", "경도", "위도", "도로명주소"]]
coffee4 = data4[["상호명", "경도", "위도", "도로명주소"]]

# Filter rows whose shop name contains Starbucks (스타벅스) or Ediya (이디야).
coffee1[coffee1["상호명"].str.contains("(스타벅스|이디야)", regex=True)]
coffee2 = coffee2[coffee2["상호명"].str.contains("(스타벅스|이디야)", regex=True)]
coffee3[coffee3["상호명"].str.contains("(스타벅스|이디야)", regex=True)]
coffee4 = coffee4[coffee4["상호명"].str.contains("(스타벅스|이디야)", regex=True)]

# FIX: DataFrame.append was removed in pandas 2.0; pd.concat produces the
# same stacked frame.
coffee = pd.concat([coffee2, coffee4])
coffee

# Label each row with its brand name (브랜드명).
coffee['브랜드명'] = ''
coffee.loc[coffee['상호명'].str.contains('스타벅스'), '브랜드명'] = '스타벅스'
coffee.loc[coffee['상호명'].str.contains('이디야'), '브랜드명'] = '이디야'

# FIX: these per-brand frames were commented out but are used below for the
# astype conversions, which raised NameError. .copy() avoids
# SettingWithCopyWarning when the columns are converted in place.
starbucks = coffee.loc[coffee["상호명"].str.contains("스타벅스")].copy()
ediya = coffee.loc[coffee["상호명"].str.contains("이디야")].copy()

# +
import folium

# Center the map on South Korea; uses lat then lon.
# The bigger the zoom number, the closer in you get.
map_korea = folium.Map(location=[35.9078, 127.7669], zoom_start=7)
map_korea  # calls the map to display
# source: https://www.kaggle.com/daveianhickey/how-to-folium-for-maps-heatmaps-time-analysis
# -

# Make sure the coordinate columns are numeric before plotting.
starbucks['위도'] = starbucks['위도'].astype(float)
starbucks['경도'] = starbucks['경도'].astype(float)
ediya['위도'] = ediya['위도'].astype(float)
ediya['경도'] = ediya['경도'].astype(float)

# +
import warnings
warnings.filterwarnings('ignore')

from plotnine import *

(ggplot(coffee)
 + aes(x='경도', y='위도', color='브랜드명')
 + geom_point()
 + theme(text=element_text(family='NanumBarunGothic'))
 + scale_fill_gradient(low='blue', high='green')
 )

# +
# Add one marker per shop, colored by brand.
for n in coffee.index:
    # Popup text: brand name plus road address.
    popup_name = coffee['브랜드명'][n] + ' - ' + coffee['도로명주소'][n]
    # FIX: this was two independent ``if`` statements whose ``else: pass``
    # only covered the second one, so icon_color could be stale from a
    # previous iteration (or undefined on the first) for unmatched rows.
    if coffee['브랜드명'][n] == '스타벅스':
        icon_color = 'pink'
    elif coffee['브랜드명'][n] == '이디야':
        icon_color = 'blue'
    else:
        icon_color = 'gray'
    folium.Marker([coffee['위도'][n], coffee['경도'][n]],
                  popup=popup_name,
                  icon=folium.Icon(color=icon_color)).add_to(map_korea)

# FIX: removed a second marker loop that called the long-removed folium
# ``simple_marker`` API on non-existent 'Y'/'X' columns; the markers are
# already added by the loop above.
# -

display(map_korea)
shop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PyCharm (CONV19Xray)
#     language: python
#     name: pycharm-db6f0221
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # CONV2019 Model

# + pycharm={"name": "#%%\n"}
# %matplotlib inline
import matplotlib.pylab as plt
import numpy as np
import torch
from torch.utils import data
import torchxrayvision as xrv
import math
import cv2
import uuid
import os
from git import Repo
import importlib.util
# -

# ## Util Functions

# +
def show_images(images, ncols=1):
    """Render *images* in a grid that is *ncols* wide (rows grow as needed)."""
    nrows = math.ceil(len(images) / ncols)
    figure = plt.figure(figsize=(10, 5))
    for position, image in enumerate(images, start=1):
        axes = figure.add_subplot(nrows, ncols, position)
        axes.imshow(image)
        axes.grid('on', linestyle='--')
        axes.axis('off')
        axes.set_aspect('equal')
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.2, hspace=0.2)
    plt.show()
# -

# ## Load dataset

# +
# torchxrayvision expects the covid-chestxray-dataset checkout to live inside
# its own install folder; clone it there on first use.
torchxrayvision_folder = importlib.util.find_spec("torchxrayvision").submodule_search_locations[0]
repo_uri = "https://github.com/ieee8023/covid-chestxray-dataset.git"
repo_name = os.path.splitext(os.path.basename(repo_uri))[0]
repo_folder = os.path.join(torchxrayvision_folder, repo_name)
if not os.path.isdir(repo_folder):
    Repo.clone_from(repo_uri, repo_folder)

ds_covid19 = xrv.datasets.COVID19_Dataset()  # use loader
# -

# # Visualization

# +
# Split the PA images into COVID-19 positive and negative groups in a single
# pass over the dataset.
covid19_images_list = []
normal_images_list = []
for sample_index in range(len(ds_covid19)):
    sample = ds_covid19[sample_index]
    label_map = dict(zip(ds_covid19.pathologies, sample["lab"]))
    if label_map["COVID-19"] == 1.0:
        covid19_images_list.append(sample["PA"][0])
    elif label_map["COVID-19"] == 0.0:
        normal_images_list.append(sample["PA"][0])

# Convert the images to np.uint8 in the 0-255 range.
covid19_images_list = [cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
                       for img in covid19_images_list]
normal_images_list = [cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
                      for img in normal_images_list]
# -

# ### 1. Infected Patients

show_images(covid19_images_list[:10], ncols=5)

# #### 2. Not Infected Patients

show_images(normal_images_list[:10], ncols=5)
dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Challenge 1 - Combining Dataframes with pandas # Create a new DataFrame by joining the contents of the surveys.csv and species.csv tables. Then calculate and plot the distribution of: # # taxa by plot # taxa by sex by plot # # Import files import pandas as pd import numpy as np surveys_df = pd.read_csv("data/surveys.csv") species_df = pd.read_csv("data/species.csv") # Explore headers to define join keys print(surveys_df.columns) print(species_df.columns) # Merge both files using species_id # If there species_id that do not have taxa, identify that taxa as Unidentified # merged_data = pd.merge(left=surveys_df,right=species_df, how='inner', left_on='species_id', right_on='species_id') #merged_left['taxa'] = merged_left['taxa'].fillna('Unidentified') merged_data.head() # Grouping by plot grouped_by_plot = merged_data.groupby('plot_id') # I am not sure if your want to count the "unique" taxa found in each plot. The next code reflects that: taxa_count=grouped_by_plot['taxa'].nunique() taxa_count.head() # %matplotlib inline taxa_count.plot(kind='bar') # The previous plot shows how many taxas were found per plot. # # However, you might wanted us to show a stacked bar plot to show the number of animals per taxa found in each plot. The next code reflects that. # Checking what values are in taxa field. 
Creating a list with those values taxa_labels=list(set(merged_data.taxa)) plot_labels=list(set(merged_data.plot_id)) print(taxa_labels) print(plot_labels) # Creation of lists that contain information of the sum of animals per taxa per plot grouped_by_plot = merged_data.groupby(['plot_id','taxa'])['record_id'].count() grouped_by_plot.head() per_plot=[] for animal in taxa_labels: sum_list=[] for plot in plot_labels: try: sum_list.append(grouped_by_plot[plot][animal]) except: sum_list.append(0) per_plot.append(list(sum_list)) print(per_plot) # Creation of the Data Frame d={taxa_labels[0]:pd.Series(per_plot[0], index=plot_labels), taxa_labels[1]:pd.Series(per_plot[1], index=plot_labels), taxa_labels[2]:pd.Series(per_plot[2], index=plot_labels), taxa_labels[3]:pd.Series(per_plot[3], index=plot_labels), taxa_labels[4]:pd.Series(per_plot[4], index=plot_labels)} final_dataframe=pd.DataFrame(d) final_dataframe.head # Plot final_dataframe.plot(kind='bar',stacked=True) import matplotlib.pyplot as plt plt.legend(bbox_to_anchor=(1.04,1),loc='upper left') grouped_by_plot_and_sex = merged_data.groupby(['plot_id','sex','taxa'])['record_id'].count() grouped_by_plot_and_sex sex = ['M','F'] per_plot_and_sex=[] for animal in taxa_labels: per_sex=[] for s in sex: sum_list=[] for plot in plot_labels: try: sum_list.append(grouped_by_plot_and_sex[plot][s][animal]) except: sum_list.append(0) per_sex.append(list(sum_list)) per_plot_and_sex.append(per_sex) print(per_plot_and_sex) d={taxa_labels[0]+sex[0]:pd.Series(per_plot_and_sex[0][0], index=plot_labels), taxa_labels[0]+sex[1]:pd.Series(per_plot_and_sex[0][1], index=plot_labels), taxa_labels[1]+sex[0]:pd.Series(per_plot_and_sex[1][0], index=plot_labels), taxa_labels[1]+sex[1]:pd.Series(per_plot_and_sex[1][1], index=plot_labels), taxa_labels[2]+sex[0]:pd.Series(per_plot_and_sex[2][0], index=plot_labels), taxa_labels[2]+sex[1]:pd.Series(per_plot_and_sex[2][1], index=plot_labels), taxa_labels[3]+sex[0]:pd.Series(per_plot_and_sex[3][0], 
index=plot_labels), taxa_labels[3]+sex[1]:pd.Series(per_plot_and_sex[3][1], index=plot_labels), taxa_labels[4]+sex[0]:pd.Series(per_plot_and_sex[4][0], index=plot_labels), taxa_labels[4]+sex[1]:pd.Series(per_plot_and_sex[4][1], index=plot_labels) } final_dataframe=pd.DataFrame(d) final_dataframe # There is a problem in the data set. # # The sex of the animal was not properly recorded. In consequence, many registers have nothing in that cell. That is the reason why for most of the "taxa" the cells shows "0" (for example, there are neither male nor female birds). Before plotting the data, it would be interesting to use some technique to include either "M" of "F". However, this is not the purpose of this excercise, and the data will be plotted as it is. final_dataframe.plot(kind='bar',stacked=True) import matplotlib.pyplot as plt plt.legend(bbox_to_anchor=(1.04,1),loc='upper left') # ## Challenge 2 # 1. Rewrite the one_year_csv_writer and yearly_data_csv_writer functions to have keyword arguments with default values # # 2. Modify the functions so that they don’t create yearly files if there is no data for a given year and display an alert to the user (Hint: use conditional statements to do this. For an extra challenge, use try statements!) # # 3. Add some code to your function that writes out the CSV files, to check for a directory to write to. # # # # To work,before running the functions, the expresion "import pandas as pd" MUST be run. # + def one_year_csv_writer(all_data, this_year = 1977, counter = 0): """ Writes a csv file for data from a given year. this_year --- year for which data is extracted all_data --- DataFrame with multi-year data previously run: import pandas as pd. 
Otherwise, the fuction will not work """ #Creation of the directory if counter == 0: import os if 'yearly_files' in os.listdir('.'): print('Processed directory exists') else: os.mkdir('data/yearly_files') print('Processed directory created') # Select data for the year if min(surveys_df['year'].unique()) <= this_year and max(surveys_df['year'].unique()) >= this_year: surveys_year = all_data[all_data.year == this_year] # Write the new DataFrame to a csv file filename = 'data/yearly_files/function_surveys_workshop' + str(this_year) + '.csv' surveys_year.to_csv(filename) else: print('File for year '+ str(this_year)+ ' not created') def yearly_data_csv_writer(all_data, start_year = 1977, end_year = 1977): """ Writes separate CSV files for each year of data. start_year --- the first year of data we want end_year --- the last year of data we want all_data --- DataFrame with multi-year data previously run: import pandas as pd. Otherwise, the fuction will not work """ # "end_year" is the last year of data we want to pull, so we loop to end_year+1 for counter, year in enumerate(range(start_year, end_year+1)): one_year_csv_writer(all_data, year, counter) # - # Run the next lines to test the functions with default values # # The created files have the name 'function_surveys_workshop_year' surveys_df = pd.read_csv('data/surveys.csv') one_year_csv_writer(surveys_df) # Run the next lines to test the functions with given values. # # # yearly_data_csv_writer(surveys_df,1980,1990) yearly_data_csv_writer(surveys_df,1900,1905) # ## Challenge 3 # # Make a variety of line plots from your data. If you are using the streamgage data, these could include (1) a hydrograph of the entire month of September 2013, (2) the discharge record for the week of the 2013 Front Range flood (September 9 through 15), (3) discharge vs. 
# time of day, for every day in the record in one figure (Hint: use loops to combine strings and give every line a different style and color), and (4) minimum, maximum, and mean daily discharge values. Add axis labels, titles, and legends to your figures. Make at least one figure with multiple plots using the function subplot().

# Importing data
#
# The first step is to remove the header of the file that is not part of the dataset.
#
# 1. Read line by line the text file and select the data I am interested in.

# +
import pandas as pd
import re

# Open the file and read the lines
# NOTE(review): a `with open(...)` block would guarantee the close on error.
f=open('data/bouldercreek_09_2013.txt','r')
lines = f.readlines()
f.close()

# create a list with the data: keep only lines that start with a 'USGS' record
modified_lines=[]
for line in lines:
    text=re.findall(r'USGS\t.*',line)
    for element in text:
        modified_lines.append(element)

# Select the data we are interested in (tab-separated fields)
modified_split=[]
for line in modified_lines:
    line_split=line.split('\t')
    modified_split.append(line_split)

# Field 2 holds "date time"; split it into its two parts.
date_time=[]
for line in modified_split:
    d_t=line[2].split(" ")
    date_time.append(d_t)

# creation of individual lists with the data
date=[]
time=[]
discharge=[]
for line in date_time:
    date.append(line[0])
    time.append(line[1])
for line in modified_split:
    discharge.append(float(line[4])*0.0283168)  # transformed to m3/s (from ft3/s)
# -

# 2. Creation of the Dataframe

discharge_df=pd.DataFrame({'date':date,'time':time,'discharge':discharge},columns=['date','time','discharge'])
discharge_df.head()

# Creation of plots
# (1) a hydrograph of the entire month of September 2013,
# NOTE(review): relies on matplotlib's pyplot being importable as `plt`; the
# import is not in this chunk — presumably `import matplotlib.pyplot as plt`
# appears earlier in the notebook.

y=discharge_df.discharge
x=list(range(0,len(y)))
plt.plot(x,y)
plt.title('Hydrograp for September')
plt.xlabel('September 2013 data points')
plt.ylabel('Discharge [$m^3$/s]')
# NOTE(review): no artist above carries a label, so this legend is empty.
plt.legend()

# (2) The discharge record for the week of the 2013 Front Range flood (September 9 through 15)
#

# +
import re

# Collect date strings matching Sept 9 and Sept 11-15.
# NOTE(review): the second pattern 2013-09-1[1-5] skips 2013-09-10 — confirm
# whether the 10th was meant to be included.
discharge_record=[]
for date in discharge_df.date:
    dr=re.findall(r'2013-09-09',date)
    for tx in dr:
        discharge_record.append(tx)
for date in discharge_df.date:
    dr=re.findall(r'2013-09-1[1-5]',date)
    for tx in dr:
        discharge_record.append(tx)

record=discharge_df[discharge_df.date.isin(discharge_record)]
record.date=pd.to_datetime(record.date)
record.head()

plt.plot(record.date,record.discharge)
plt.xticks(rotation='vertical')
plt.title('Discharge values for the Front Range flood')
plt.xlabel('Date')
plt.ylabel('Discharge [$m^3$/s]')
plt.legend()
# -

# (3) Discharge vs. time of day, for every day in the record in one figure
#
# Unfortunately, I could not write the time in the x ticks

# +
days=list(set(discharge_df.date))
days=sorted(days)
hours=list(set(discharge_df.time))
hours=sorted(hours)
plt.figure(figsize=(15,8.5))
# One line per day, indexed by sample number within the day.
for day in days:
    y=discharge_df.discharge[discharge_df.date==day]
    x=list(range(1,len(y)+1))
    plt.plot(x,y)
plt.legend(days,bbox_to_anchor=(1.04,1),loc='upper left')
ax=plt.gca()
ax.set_xlim([1,96]);
plt.title('Discharge values vs. time of day, for every day')
plt.xlabel('Time')
plt.ylabel('Discharge [$m^3$/s]')
# -

# (4) minimum, maximum, and mean daily discharge values.

# +
mean_values=[]
max_values=[]
min_values=[]
for day in days:
    y=discharge_df.discharge[discharge_df.date==day]
    y_max=y.max()
    y_min=y.min()
    y_mean=y.mean()
    mean_values.append(y_mean)
    max_values.append(y_max)
    min_values.append(y_min)

days_plot=pd.to_datetime(days)

# Three stacked panels sharing both axes: mean / max / min per day.
fig,axs = plt.subplots(3,sharex=True,sharey=True,figsize=(8,8))
# Mean
ax=axs[0]
ax.plot(days_plot,mean_values)
ax.set_title('Mean Discharge')
# Maximum
ax=axs[1]
ax.plot(days_plot,max_values)
ax.set_title('Maximum Discharge')
# Minimum
ax=axs[2]
ax.plot(days_plot,min_values)
ax.set_title('Minimum Discharge')
plt.xlabel('Date')
fig.text(0.06,0.5,'Discharge [$m^3$/s]',ha='center',va='center',rotation='vertical')
# -
Week03/Assignment/dvaca.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import pdb import numpy as np import pandas as pd import os import pylab as plt from utils import clean_args from utils import clean_nans from utils import fast_sed from utils import fast_sed_fitter from utils import fast_Lir from utils import stagger_x from utils import subset_averages_from_ids from utils import main_sequence_s15 from bincatalogs import Field_catalogs from astropy.cosmology import Planck15 as cosmo import astropy.units as u try: from simstack import PickledStacksReader, measure_cib except: from simstack.simstack import PickledStacksReader, measure_cib # %matplotlib inline # - conv_luv_to_sfr = 2.17e-10 conv_lir_to_sfr = 1.72e-10 L_sun = 3.839e26 # W c = 299792458.0 # m/s a_nu_flux_to_mass = 6.7e19 h = 6.62607004e-34 #m2 kg / s #4.13e-15 #eV/s k = 1.38064852e-23 #m2 kg s-2 K-1 8.617e-5 #eV/K popcolor=['red','blue','green','orange','black','grey','chocolate','darkviolet','pink','magenta','dodgerblue','lavender','blue','red','green','orange','black','grey','chocolate','darkviolet','pink','magenta','dodgerblue','lavender'] path_pickles = os.environ['PICKLESPATH'] path_maps = os.environ['MAPSPATH'] path_catalogs= os.environ['CATSPATH'] # + #Location of the stacked parameter file shortnmae = 'test' path_config = path_pickles + '/output/simstack_fluxes/test/' #file_config = 'uvista__DR2__2pop__7_maps_s15_binning.cfg' #shortname = 'uVista_Laigle_v1.1__2pop__7bands__s15_bins_in_slices' shortname = 'IRAC_test_all_z' file_config = 'example.cfg' if os.path.exists(path_config+file_config) == True: print path_config+file_config # - #PickledStacksReader will look at the config file defined in previous cell # to determine if bootstrap or not, and read and organize it print path_config print '-----' print file_config stacked_flux_densities = 
PickledStacksReader(path_config,file_config) #Instance of PickledStacksReader class contains the following print dir(stacked_flux_densities) #For example, params contains: print stacked_flux_densities.params.keys() #And the params['bins'] key contains: stacked_flux_densities.params['bins'] # + print stacked_flux_densities.params['catalogs'] print stacked_flux_densities.params['populations'] print stacked_flux_densities.params['boot0'] print stacked_flux_densities.nw print stacked_flux_densities.nz print stacked_flux_densities.nm print stacked_flux_densities.npops print stacked_flux_densities.simstack_nuInu_array stacked_object = stacked_flux_densities for iwv in range(stacked_object.nw): for i in range(stacked_object.nz): zn = stacked_object.z_nodes[i:i+2] z_suf = '{:.2f}'.format(zn[0])+'-'+'{:.2f}'.format(zn[1]) for j in range(stacked_object.nm): mn = stacked_object.m_nodes[j:j+2] m_suf = '{:.2f}'.format(mn[0])+'-'+'{:.2f}'.format(mn[1]) for p in range(stacked_object.npops): arg = clean_args('z_'+z_suf+'__m_'+m_suf+'_'+stacked_object.pops[p+1]) print stacked_object.bin_ids print 'helo', arg print stacked_object.bin_ids ng = len(stacked_object.bin_ids[arg]) cib[iwv,i,j,p] += 1e-9 * float(ng) / area_sr * stacked_object.simstack_nuInu_array[iwv,i,j,p] # - #simstack contains a function to estimate the total CIB of the objects in the stacked_flux_densities object. area_deg defaults to COSMOS, which is 1.62deg2 measure_cib(stacked_flux_densities,tcib = True, area_deg= 1.62) #Location of the bootstrap parameter file shortname = 'uVista_Laigle_v1.1__2pop__7bands__s15_bins_in_slices' path_config = path_pickles + 'simstack/stacked_flux_densities/bootstrapped_fluxes/'+shortname+'/' file_config = 'uvista__DR2__2pop__7_maps_s15_binning.cfg' if os.path.exists(path_config+file_config) == True: print path_config+file_config #Here PickledStacksReader is pointed to bootstraps, which it reads, estimates the variance, and stores. 
boot_errs = PickledStacksReader(path_config,file_config)

print np.shape(boot_errs.bootstrap_flux_array)
print np.shape(boot_errs.boot_error_bars)

boot_errs.params['bins']

print boot_errs.params['bins']['z_nodes']
print boot_errs.params['number_of_boots']

# Bin geometry and derived quantities used by both plots below.
wvs = stacked_flux_densities.wvs
m_nodes = np.array(stacked_flux_densities.m_nodes)
z_nodes = np.array(stacked_flux_densities.z_nodes)
z_mid = (z_nodes[1:]+z_nodes[:-1])/2.
m_mid = (m_nodes[1:]+m_nodes[:-1])/2.
nwv = len(wvs)
nz = stacked_flux_densities.nz
nm = stacked_flux_densities.nm
npop = stacked_flux_densities.npops
pop = stacked_flux_densities.pops
all_stacked_fluxes = stacked_flux_densities.simstack_flux_array
boot_norm = clean_nans(np.mean(boot_errs.bootstrap_flux_array,axis=4)/stacked_flux_densities.simstack_flux_array)
# Total error: bootstrap scatter and stacking error added in quadrature.
all_stacked_errors = np.sqrt(boot_errs.boot_error_bars**2 + stacked_flux_densities.simstack_error_array**2)
bootstrap_means = np.mean(boot_errs.bootstrap_flux_array,axis=4)

catalog_used = stacked_flux_densities.params['catalogs']['catalog_path']+stacked_flux_densities.params['catalogs']['catalog_file']
tbl = pd.read_table(catalog_used,sep=',')
#uVista = Field_catalogs(tbl)
#uVista.separate_sf_qt()

tbl.keys()

# +
# Plot stacked results Flux vs Z, M, SF/QT — one figure per wavelength.
stagger_z = stagger_x(z_mid, nm, wid=0.025, log=True)
wv_mod = np.linspace(20,1000,100)
for iwv in range(nwv):
    wv = wvs[iwv]
    plt.figure(figsize=(9,6))
    plt.title(str(int(wv))+' um')
    plt.yscale('log')
    #plt.xscale('log')
    plt.xlim([0.0,4.5])
    plt.ylabel('Flux Density [mJy]',fontsize=14)
    plt.xlabel('Redshift',fontsize=14)
    for k in range(npop)[:]:
        for j in range(nm)[::-1]:
            mn = m_nodes[j:j+2]
            xplot= [z[j] for z in stagger_z]
            plt.plot(z_mid,1e3*stacked_flux_densities.simstack_flux_array[iwv,:,j,k],'o',color=popcolor[k],label=pop[k]+':,M='+str(mn[0])+'-'+str(mn[1]),markersize=2.4*(j+1))
            plt.plot(z_mid,1e3*stacked_flux_densities.simstack_flux_array[iwv,:,j,k],color=popcolor[k],linewidth=0.75*(j+1))
            # Colored bars: total (stack + bootstrap) error; black bars: bootstrap only.
            plt.errorbar(z_mid,1e3*stacked_flux_densities.simstack_flux_array[iwv,:,j,k],yerr=1e3*np.sqrt(stacked_flux_densities.simstack_error_array[iwv,:,j,k]**2+boot_errs.boot_error_bars[iwv,:,j,k]**2),fmt=None,ecolor=popcolor[k],elinewidth=3)
            plt.errorbar(z_mid,1e3*stacked_flux_densities.simstack_flux_array[iwv,:,j,k],yerr=1e3*boot_errs.boot_error_bars[iwv,:,j,k],fmt=None,ecolor='black',elinewidth=2)#,marker='o',markersize=12)
    plt.legend()
    plt.show()
# -

# Plot stacked SEDs (flux vs wavelength) — one figure per redshift slice.
stagger_wvs = stagger_x(wvs, nm, wid=0.025, log=True)  # a handy function to avoid overlapping error bars
wv_mod = np.linspace(20,1000,100)
for iz in range(nz):
    zn = z_nodes[iz:iz+2]
    z_suf = '{:.2f}'.format(zn[0])+'-'+'{:.2f}'.format(zn[1])
    plt.figure(figsize=(11,7))
    plt.title('z =' +'{:.2f}'.format(zn[0])+'-'+'{:.2f}'.format(zn[1]))
    plt.yscale('log',fontsize=20)
    plt.xscale('log',fontsize=20)
    plt.ylim([1e-2,9e1])
    plt.xlim([10,1000])
    plt.ylabel('Flux Density [mJy]',fontsize=20)
    plt.xlabel('Wavelength [microns]',fontsize=20)
    for k in range(npop):
        for j in range(nm)[::-1]:
            mn = m_nodes[j:j+2]
            m_suf = '{:.2f}'.format(mn[0])+'-'+'{:.2f}'.format(mn[1])
            # Number of galaxies in each bin is estimated by looking up the bin_ids of all the objects in the bin
            # key is the identifier for each bin (in redshift, stellar mass, and population name)
            key = clean_args('z_'+z_suf+'__m_'+m_suf+'_'+pop[k])
            ngal_bin = len(stacked_flux_densities.bin_ids[key])
            if ngal_bin > 0:
                flux_wv = (all_stacked_fluxes[:,iz,j,k])
                flux_err= all_stacked_errors[:,iz,j,k]
                # Fit a modified-blackbody SED to the stacked photometry and
                # overplot the model curve.
                m = fast_sed_fitter(np.array(wvs),flux_wv,covar=flux_err)
                LIR = fast_Lir(m, np.mean(zn))
                ymod = fast_sed(m,wv_mod)
                xplot= [wv[j] for wv in stagger_wvs]
                plt.plot(xplot,1e3*flux_wv,'o',color=popcolor[k],label=[str(ngal_bin)+' '+pop[k],str((m_nodes[j]+m_nodes[j+1])/2.0)],markersize=2*(j+1))
                plt.plot(wv_mod,1e3*ymod[0],color=popcolor[k],linewidth=j)#,label=str((1.0+z_mid[iz])*m['T_observed'].value))
                plt.errorbar(xplot,1e3*flux_wv,yerr=1e3*flux_err,fmt=None,ecolor=popcolor[k],elinewidth=3)#,marker='o',markersize=12)
    plt.legend()
    plt.show()
notebooks/plot_simstack_output.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys sys.path.append("/usr/lib/spark/python") sys.path.append("/usr/lib/spark/python/lib/py4j-0.10.4-src.zip") sys.path.append("/usr/lib/python3/dist-packages") import os os.environ["HADOOP_CONF_DIR"] = "/etc/hadoop/conf" import os os.environ["PYSPARK_PYTHON"] = "python3" os.environ["PYSPARK_DRIVER_PYTHON"] = "ipython" from pyspark.mllib.clustering import KMeans, KMeansModel from pyspark import SparkConf, SparkContext from osgeo import gdal from io import BytesIO import matplotlib.pyplot as plt import rasterio from rasterio import plot from rasterio.io import MemoryFile # + appName = "kmeans_mlib_hdfs" masterURL="spark://pheno0.phenovari-utwente.surf-hosted.nl:7077" try: sc.stop() except NameError: print("A new Spark Context will be created.") sc = SparkContext(conf = SparkConf().setAppName(appName).setMaster(masterURL)) # + file_path = "hdfs:///user/hadoop/spring-index/LastFreeze/1980.tif" clusters_path = "hdfs:///user/pheno/spring-index/BloomFinal/clusters_5_35.tif" file_data = sc.binaryFiles(file_path).take(1) file_dataByteArray = bytearray(file_data[0][1]) clusters_data = sc.binaryFiles(clusters_path).take(1) clusters_dataByteArray = bytearray(clusters_data[0][1]) # + file_dataset = MemoryFile(file_dataByteArray).open() print(file_dataset.profile) clusters_dataset = MemoryFile(clusters_dataByteArray).open() print(clusters_dataset.profile) # - plot.show((file_dataset,1)) plot.show((clusters_dataset,1))
applications/notebooks/raul/plot_GeoTiff_clusters_python2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### 1. Load molecule

from qdk.chemistry import Molecule

# Load caffeine from an XYZ geometry file and inspect it.
caffeine = Molecule.from_xyz("data/xyz/caffeine.xyz")
caffeine

# cat "data/xyz/caffeine.xyz"

caffeine.mol

type(caffeine.mol)

caffeine.smiles

caffeine.num_electrons

caffeine.atoms

# ### 2. Load Broombridge and simulate in Q#

# The next cell writes a Q# operation to disk: phase estimation of the
# ground-state energy via Trotterized time evolution.
# %%writefile GetEnergyByTrot.qs
namespace Microsoft.Quantum.Chemistry.Trotterization {
    open Microsoft.Quantum.Core;
    open Microsoft.Quantum.Intrinsic;
    open Microsoft.Quantum.Canon;
    open Microsoft.Quantum.Chemistry;
    open Microsoft.Quantum.Chemistry.JordanWigner;
    open Microsoft.Quantum.Simulation;
    open Microsoft.Quantum.Characterization;
    open Microsoft.Quantum.Convert;
    open Microsoft.Quantum.Math;

    operation GetEnergyByTrotterization (
        JWEncodedData: JordanWignerEncodingData,
        nBitsPrecision : Int,
        trotterStepSize : Double,
        trotterOrder : Int
    ) : (Double, Double) {
        let (nSpinOrbitals, fermionTermData, inputState, energyOffset) = JWEncodedData!;
        let (nQubits, (rescaleFactor, oracle)) = TrotterStepOracle(JWEncodedData, trotterStepSize, trotterOrder);
        let statePrep = PrepareTrialState(inputState, _);
        let phaseEstAlgorithm = RobustPhaseEstimation(nBitsPrecision, _, _);
        let estPhase = EstimateEnergy(nQubits, statePrep, oracle, phaseEstAlgorithm);
        // Convert the estimated phase back into an absolute energy.
        let estEnergy = estPhase * rescaleFactor + energyOffset;
        return (estPhase, estEnergy);
    }
}

# Project file so the Q# above compiles against the chemistry library.
# %%writefile TrotterizationExample.csproj
<Project Sdk="Microsoft.Quantum.Sdk/0.14.2011120240">
    <PropertyGroup>
        <OutputType>Exe</OutputType>
        <TargetFramework>netcoreapp3.1</TargetFramework>
    </PropertyGroup>
    <ItemGroup>
        <PackageReference Include="Microsoft.Quantum.Chemistry" Version="0.14.2011120240" />
    </ItemGroup>
</Project>

import qsharp
from qdk.chemistry.broombridge import load_and_encode

# +
# Recompile the workspace so the operation written above becomes importable.
qsharp.reload()
from Microsoft.Quantum.Chemistry.Trotterization import GetEnergyByTrotterization
# -

# ### Caffeine

encoded_data_caffeine = load_and_encode("data/broombridge/caffeine.yaml")

# %%time
GetEnergyByTrotterization.estimate_resources(
    JWEncodedData=encoded_data_caffeine,
    nBitsPrecision=7,
    trotterStepSize=0.4,
    trotterOrder=1)

# ### Pyridine

encoded_data_pyridine = load_and_encode("data/broombridge/pyridine.yaml")

# %%time
GetEnergyByTrotterization.estimate_resources(
    JWEncodedData=encoded_data_pyridine,
    nBitsPrecision=7,
    trotterStepSize=0.4,
    trotterOrder=1)
examples/chemistry/Molecule.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Classification of MNIST data set with KNN Algorithm

# Import the required libraries.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets

# Load MNIST data set from sci-kit Learn Library.

mnist_data = datasets.load_digits()
mnist_data

# Show the first image from the data set.

plt.subplot()
plt.imshow(mnist_data.images[0])

# Show the first image gray-scale.

plt.subplot()
plt.imshow(mnist_data.images[0], cmap=plt.cm.gray_r)

# Identify the target value for the first image.

mnist_data.target[0]

# Show the second image gray-scale.

plt.subplot()
plt.imshow(mnist_data.images[1], cmap=plt.cm.gray_r)

# Identify the target value for the second image.

mnist_data.target[1]

# Show the data for the first image.

mnist_data.images[0]

# Show the type of the data.

type(mnist_data)

# Show the type of the data.

type(mnist_data.images)

# Show the type of the data.

type(mnist_data.images[0])

# Show the first row of the data.

mnist_data.images[0][0]

# Show the shape of mnist_data.images as a numpy.ndarray.

np.shape(mnist_data.images)

# Show the shape of mnist_data.images[0] as a numpy.ndarray.

np.shape(mnist_data.images[0])

# Count of the samples.

len(mnist_data.images)

# The data set of mnist_data.images has 1797 samples of 8*8 images. For classification purposes,
# we have to reshape this 8*8 images to 64-element lists.
# To put it in another way, we must transform an 8*8 matrix to a single list (with 64 elements).

X = mnist_data.images.reshape(len(mnist_data.images), -1)

# Now we have 1797 samples, which each sample has 64 features.

np.shape(X)

# The content of this data set.

X

# Specify the mnist_data targets.

y = mnist_data.target
type(y), y, len(y)

# Import KNN classifier from sci-kit library.

from sklearn.neighbors import KNeighborsClassifier

# Create a KNN classifier.

knn_classifier = KNeighborsClassifier()

# Learn from the data.

knn_classifier.fit(X,y)

# Simple test of the model. The model predict the 200th image to be 1.

knn_classifier.predict([X[200]])

# The actual value of the 200th image is 1 either.

y[200]

# Now notice that using all of the data for learning phase is not the correct method for ML.
# We should split our data to train/test data sets.
# We usually use 75% of the data for training and 25% of the data for testing.
# In this example, we use 1000 samples for training and the other for testing.
# NOTE(review): this is a positional split, not a shuffled one — fine for the
# digits set, but train_test_split would be the usual approach.

knn_classifier_new = KNeighborsClassifier()
knn_classifier_new.fit(X[:1000],y[:1000])

# Now let's evaluate our model with specific metrics.

from sklearn import metrics

# We must give our test data set to metrics.
# With metrics.classification_report, the accuracy, precision and recall of our model are found out.

predicted_values = knn_classifier_new.predict(X[1000:])
expected_values = y[1000:]
print(metrics.classification_report(expected_values, predicted_values))

# Show the confusion matrix of our model.

print(metrics.confusion_matrix(expected_values, predicted_values))

# Now notice the performance of this model with hyper-parameter changing.
# At first, let's change the number of neighbors to 3 (default is 5).

knn_classifier_new2 = KNeighborsClassifier(n_neighbors=3)
knn_classifier_new2.fit(X[:1000],y[:1000])

# With metrics.classification_report, the accuracy, precision and recall of our model are found out.

predicted_values2 = knn_classifier_new2.predict(X[1000:])
expected_values = y[1000:]
print(metrics.classification_report(expected_values, predicted_values2))

# Now let's change the number of neighbors to 10 (default is 5).

knn_classifier_new3 = KNeighborsClassifier(n_neighbors=10)
knn_classifier_new3.fit(X[:1000],y[:1000])

# With metrics.classification_report, the accuracy, precision and recall of our model are found out.

predicted_values3 = knn_classifier_new3.predict(X[1000:])
expected_values = y[1000:]
print(metrics.classification_report(expected_values, predicted_values3))

# Now let's change the number of neighbors to 1 (default is 5).

knn_classifier_new4 = KNeighborsClassifier(n_neighbors=1)
knn_classifier_new4.fit(X[:1000],y[:1000])

# With metrics.classification_report, the accuracy, precision and recall of our model are found out.

predicted_values4 = knn_classifier_new4.predict(X[1000:])
expected_values = y[1000:]
print(metrics.classification_report(expected_values, predicted_values4))
Classification-KNN-MNIST.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # get all the imports to a requirements.txt file
#
# run on shell:
#
#     grep -r import * > imports.txt
#
# This notebook parses the grep output, extracts the first token after each
# `import`, strips noise (paths, escapes, quotes, backticks), dedupes, and
# writes the sorted module names to requirements.txt.

# +
import re
import numpy as np
import pandas as pd
# -

# Read the raw grep output, one matched line per element.
with open("imports.txt") as fp:
    raw = fp.readlines()

df = pd.Series(raw)

len(df)

df.head()

df[0]

# Drop leading/trailing whitespace (including the trailing newlines).
df = df.str.strip()

df.head()

# Quick sanity check on the first line.
re.findall(r'import\ {.*}\ ', df[0])

# Preview the extraction on the first ten rows.
# BUG FIX: this line was `df[0:10].apply(split('import')[1].split().head())`,
# which raises NameError (`split` is not defined) and evaluates the expression
# eagerly instead of applying it per element. Rewritten as a lambda that
# mirrors the full extraction in the comprehension below.
df[0:10].apply(lambda v: v.split('import')[1].split()[0]).head()

# Extract the first token after 'import' from every line, then scrub escaped
# newlines, backticks and quotes, keeping only the top-level package name.
ans = [v.split('import')[1].split()[0].strip().split('.')[0].replace('\\n', '').replace('`', '').replace('"', '') for v in df]

ans = pd.Series(ans)
len(ans)

# Deduplicate while keeping a Series for the .str filters below.
ans = pd.Series(ans.unique())
ans

print(len(ans))
# Drop grep artifacts containing a path separator.
ans = ans[ans.str.contains("/") == False]
len(ans)

print(len(ans))
# Drop entries still containing a backslash (escape residue).
ans = ans[ans.str.contains(r"\\") == False]
len(ans)

ans = pd.Series(ans.unique())
ans

ans = pd.Series(ans.unique())
ans

# Remove stray commas from multi-import lines ("import a, b").
ans = ans.apply(lambda x: x.replace(',', ''))

# Write one module name per line, sorted.
with open('requirements.txt', 'w') as fp:
    for f in sorted(ans.values):
        fp.write('{}\n'.format(f))
Get imports.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Colab setup notebook for DAIN (Depth-Aware video frame INterpolation):
# installs a CUDA-matched PyTorch, builds the custom CUDA extensions, downloads
# weights and the MiddleBury dataset, then runs the interpolation demos.

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/stalgiag/DAIN/blob/master/DAIN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="VFvWTMpQsd_M" colab_type="text"
# Check for CUDA

# + id="cEmh8QZar2hp" colab_type="code" colab={}
# !nvcc --version

# + [markdown] id="ZOWhwi4rtDS8" colab_type="text"
# Install Pytorch

# + id="engciW6zshoc" colab_type="code" colab={}
# different torch install needed
# # !pip3 uninstall torch
# !pip3 install -U https://download.pytorch.org/whl/cu100/torch-1.0.0-cp36-cp36m-linux_x86_64.whl

# + [markdown] id="NEduSAUYtJMd" colab_type="text"
# Check Pytorch Install Works with Cuda

# + id="IvoIKI7ktNPs" colab_type="code" colab={}
# !python -c "import torch; print(torch.__version__); print(torch.cuda.is_available())"

# + id="XLx_9yLJun-M" colab_type="code" colab={}
# we will verify that GPU is enabled for this notebook
# following should print: CUDA is available! Training on GPU ...
#
# if it prints otherwise, then you need to enable GPU:
#   from Menu > Runtime > Change Runtime Type > Hardware Accelerator > GPU
import torch
import numpy as np

# check if CUDA is available
train_on_gpu = torch.cuda.is_available()

if not train_on_gpu:
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available! Training on GPU ...')

# + id="Lz-NtXA7uqTc" colab_type="code" colab={}
# check that gcc is installed
# should print version number along bottom
# !gcc -v

# + [markdown] id="7EsEBnGgHXc9" colab_type="text"
# Clone my modified fork

# + id="JubU-aj7vdd8" colab_type="code" colab={}
#clone repo - my fork has a few simple mods that make this work with colab
# !git clone https://github.com/stalgiag/DAIN.git

# + [markdown] id="3IqSiw6RHbp8" colab_type="text"
# Setup

# + id="Sx5bWsHMvnaM" colab_type="code" colab={}
#move into repo and setup
#may take awhile
# %cd DAIN
# %cd my_package
# !./build.sh
# %cd ../PWCNet/correlation_package_pytorch1_0
# !./build.sh

# + id="_95lsUHoyW_k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3564d75b-6753-47d3-f475-0dc470e8c496"
# %cd ../../

# + id="3Z6rYeR7yq4V" colab_type="code" colab={}
# !mkdir model_weights
# !mkdir MiddleBurySet

# + id="L0pwdZbIy4T9" colab_type="code" colab={}
# %cd model_weights
# !wget http://vllab1.ucmerced.edu/~wenbobao/DAIN/best.pth

# + id="mkLiYLKUy7mV" colab_type="code" colab={}
# %cd ../MiddleBurySet
# !wget http://vision.middlebury.edu/flow/data/comp/zip/other-color-allframes.zip
# !unzip other-color-allframes.zip
# !wget http://vision.middlebury.edu/flow/data/comp/zip/other-gt-interp.zip
# !unzip other-gt-interp.zip
# %cd ..

# + id="aVBs35qX7Jcc" colab_type="code" colab={}
# !nvidia-smi

# + id="mV4TmBPYzNS8" colab_type="code" colab={}
#test that it works
# !CUDA_VISIBLE_DEVICES=0 python demo_MiddleBury.py

# + id="_Sdlbu8Q_Shk" colab_type="code" colab={}
# slow motion fun
# get rid of the uid flag if you don't want this to overwrite
# !CUDA_VISIBLE_DEVICES=0 python demo_MiddleBury_slowmotion.py --netName DAIN_slowmotion --time_step 0.1 --uid 111

# + id="RuED4BrnAMtD" colab_type="code" colab={}
# %cd MiddleBurySet/other-result-author/111/Beanbags
# !apt-get install imagemagick
# !convert -delay 1 -loop 0 *.png beanbag.gif

# + [markdown] id="_uAqazVuGqlC" colab_type="text"
# This builds 10x slomo gif at `DAIN/MiddleBurySet/other-result-author/111/Beanbags/beanbag.gif`

# + [markdown] id="quBRBorYi3pW" colab_type="text"
# The following is for building with multiple frames

# + id="jSBuwpj1nGbj" colab_type="code" colab={}
# %cd ../..

# + id="GxNJmB8XbDvt" colab_type="code" colab={}
# %%shell
# this pass clears the directory
CUDA_VISIBLE_DEVICES=0 python demo_MiddleBury_slowmotion.py --netName DAIN_slowmotion --time_step 0.025 --uid "11111" --force --firstImage "frame09.png" --secondImage "frame10.png" --clear_dir True
for i in {10..13}
do
  echo $i
  echo $(($i + 1))
  CUDA_VISIBLE_DEVICES=0 python demo_MiddleBury_slowmotion.py --netName DAIN_slowmotion --time_step 0.025 --uid "11111" --force --firstImage "frame$i.png" --secondImage "frame$(($i + 1)).png"
done

# + id="SzH_I19lPMVk" colab_type="code" colab={}
#be sure to insert your content folder name that you want to zip at the end of next line
# %cd MiddleBurySet/other-result-author/11111/<insert content folder name here>/
# !zip -r res40x2.zip *
DAIN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import sys
from time import sleep
from uuid import uuid4

import numpy as np

import GCode
import GRBL

from utils import picture
# -

# # Spindle Cutting Tests
#
# ## Objective
# - Play around with 'OEM' WeiTol NJ3.2001 Cutters.

# ## Test Setup
#
# - Oak Board 63mm x 300mm x 19mm
# - WeiTol NJ3.2001.
# - CSI3010SW dialed all the way up: 31.6V
#
# Code:

sys.path.append("..")

# +
# Connect to the 3018 CNC over its serial port.
cnc = GRBL.GRBL(port="/dev/cnc_3018")

print("Laser Mode: {}".format(cnc.laser_mode))


def init(feed=10):
    """Build the program preamble: metric units, incremental mode, base feed.

    feed --- feed rate passed to the initial G1 (units per minute).
    """
    program = GCode.GCode()
    program.G21()  # Metric Units
    # NOTE(review): original comment said "Absolute positioning", but G91 is
    # incremental/relative positioning (G90 is absolute) — confirm intent.
    program.G91()
    program.G1(F=feed)
    return program


def end():
    """Return an (empty) program trailer; placeholder for shutdown codes."""
    program = GCode.GCode()
    return program
# -

cnc.cmd("?")

cnc.reset()

cnc.cmd("!")

status1 = cnc.cmd("?")

cnc.cmd("!")

cnc.reset()

status2 = cnc.cmd("?")

# Parse the two raw "?" status reports: strip "ok" lines and the <> wrapper,
# then split the report into its |-separated fields.
for status in [status1, status2]:
    status_clean = [s for s in status if s != "ok"]
    print(status_clean)
    status_clean2 = [s.strip("<>") for s in status_clean]
    if len(status_clean2) != 1:
        raise Exception(status_clean2)
    status = status_clean2[0]
    print(status)
    stati = status.split("|")
    print(stati)
    print("")


def test_program(feed=10):
    """Build a 20-pass cut: step down 0.1 mm, cut 10 mm in X, then retract.

    feed --- X feed rate for the cutting moves. Spindle runs at S=10000 and is
    stopped (S=0) before the return moves.
    """
    prog = GCode.GCode()
    prog.M3(S=10000)
    dZ = -0.1
    dX = 10
    X = 0
    Z = 0
    for loops in range(20):
        prog.G1(Z=dZ, F=1)
        prog.G1(X=dX, F=feed)
        X += dX
        Z += dZ
    prog.M3(S=0)
    # Retrace back to the origin (coordinates are incremental, hence -Z/-X).
    prog.G0(Z=np.round(-Z, 4))
    # TODO: Add this to core library.
    prog.G0(X=np.round(-X, 4))
    prog.G0(Z=2)
    return prog


test_run = GCode.GCode()
# TODO: Get z-axis probe.
test_run += init()
for XFeed in [50]:
    test_run += test_program(feed=XFeed)

gcode_file = "SpindleTests-Copy3.gcode"

# +
# Round-trip the program through a file before running it.
test_run.save(gcode_file)

del test_run

test_run = GCode.GCode()
test_run.load(gcode_file)
# -

test_run.buffer[0:5]

picture()

# Run the job; Ctrl-C issues a feed hold ("!") and resets until the controller
# answers, then exits the loop.
while 1:
    try:
        cnc.run(test_run)
        while 1:
            print(cnc.status)
            sleep(5)
    except KeyboardInterrupt as error:
        print("Feed Hold")
        cnc.cmd("!")
        while 1:
            try:
                cnc.reset()
                break
            # NOTE(review): bare except is deliberate best-effort retry here —
            # keep retrying the reset until the serial link responds.
            except:
                pass
        print("^C")
        break
    except:
        raise

cnc.reset()

picture()

# # Test Aborted.
#
# Cuts were way too aggressive.
PreviousDevelopment/SpindleTests-Copy3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # What is torch.nn really? # # From https://pytorch.org/tutorials/beginner/nn_tutorial.html?highlight=mnist. # First, let's download some datasets. # + from pathlib import Path import os import requests DATA_PATH = Path("../data/external") PATH = DATA_PATH / "mnist" # Good lord, SSL cert expired in 2013 URL = "http://deeplearning.net/data/mnist/" FILENAME = "mnist.pkl.gz" if not(PATH / FILENAME).exists(): try: os.mkdir(PATH) except FileExistsError: pass content = requests.get(URL + FILENAME).content (PATH / FILENAME).open("wb").write(content) # - # Is it there? # !ls ../data/external/mnist # !file ../data/external/mnist/mnist.pkl.gz # Good! # + import pickle import gzip with gzip.open((PATH / FILENAME).as_posix(), "rb") as f: # I'm not sure why we're dropping the third item here. ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1") # - # Let's look at a sample. # + from matplotlib import pyplot import numpy as np pyplot.imshow(x_train[0].reshape((28, 28)), cmap='gray') print(x_train.shape) # - # We need to convert this to tensors. # + import torch x_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid)) n, c = x_train.shape x_train, y_train.shape, y_train.min(), y_train.max() # - # Now let's make a neural net from scratch, using nothing but PyTorch tensors & their operations. # # - We're using Xavier initialization ([paper](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf), [blog post explanation](https://prateekvjoshi.com/2016/03/29/understanding-xavier-initialization-in-deep-neural-networks/)). # - We're setting gradients on weights *after* initialization, so that we don't track that in the gradients. 
I guess that another way to do this would be to initialize them with gradients, then explicitly zero them afterward. # + import math weights = torch.randn(784, 10) / math.sqrt(784) weights.requires_grad_() bias = torch.zeros(10, requires_grad=True) # - # Now let's write our activation function, `log_softmax()`, and create a model. # + def log_softmax(x): return x - x.exp().sum(-1).log().unsqueeze(-1) def model(xb): return log_softmax(xb @ weights + bias) # + bs = 64 # batch size xb = x_train[0:bs] # mini-batch from X preds = model(xb) xb.shape, preds[0], preds.shape # - # # Digression: Remembering how matrix multiplication works # # - If A is a matrix of m rows and n columns; # - and B is a matrix of n rows and p columns; # - and C is their dot product (A x B = C); # - then C is m rows and p columns in size. # # Thus: # # - xb => 64 x 784 # - weights => 784, 10 # - xb x weights => 64 x 10 # Now to implement a negative log likelihood function to use as loss. # + def nll(input, target): return -input[range(target.shape[0]), target].mean() loss_func = nll # - yb = y_train[0:bs] loss_func(preds, yb) # + def accuracy(out, yb): preds = torch.argmax(out, dim=1) return (preds == yb).float().mean() print(accuracy(preds, yb)) # + lr = 0.5 epochs = 2 for epoch in range(epochs): # Holy shit....n is defined *way* up at the top: # n, c = x_train.shape for i in range((n-1) // bs + 1): start_i = i * bs end_i = start_i + bs xb = x_train[start_i:end_i] yb = y_train[start_i:end_i] pred = model(xb) loss = loss_func(pred, yb) loss.backward() with torch.no_grad(): weights -= weights.grad * lr bias -= bias.grad * lr weights.grad.zero_() bias.grad.zero_() # - print(loss_func(model(xb), yb), accuracy(model(xb), yb)) # # TODO # # There are a few things I don't understand here. # # - Why is the accuracy up to 1 when we still have a loss? # - Why is the nll function returning the negative of input? # # Next step: "Using torch.nn.functional". 
# +
import torch.nn.functional as F

# Replace the hand-rolled log_softmax + nll with the fused library loss.
loss_func = F.cross_entropy

def model(xb):
    return xb @ weights + bias
# -

print(loss_func(model(xb), yb), accuracy(model(xb), yb))

# +
from torch import nn

class Mnist_Logistic(nn.Module):
    """Logistic regression with explicitly-managed Parameters."""

    def __init__(self):
        super().__init__()
        self.weights = nn.Parameter(torch.randn(784, 10) / math.sqrt(784))
        self.bias = nn.Parameter(torch.zeros(10))

    def forward(self, xb):
        return xb @ self.weights + self.bias
# -

model = Mnist_Logistic()
print(loss_func(model(xb), yb))


# +
def fit():
    """Train `model` in place using manual SGD over the global data."""
    for epoch in range(epochs):
        for i in range((n - 1) // bs + 1):
            start_i = i * bs
            end_i = start_i + bs
            xb = x_train[start_i:end_i]
            yb = y_train[start_i:end_i]
            pred = model(xb)
            loss = loss_func(pred, yb)

            loss.backward()
            with torch.no_grad():
                for p in model.parameters():
                    p -= p.grad * lr
                model.zero_grad()


fit()
# -

print(loss_func(model(xb), yb))


# Same model again, but letting nn.Linear own the parameters.
# (Intentional redefinition: the tutorial iterates on the same class name.)
class Mnist_Logistic(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(784, 10)

    def forward(self, xb):
        return self.lin(xb)


model = Mnist_Logistic()
print(loss_func(model(xb), yb))

fit()
print(loss_func(model(xb), yb))

from torch import optim


# +
def get_model():
    """Return a fresh model plus an SGD optimizer bound to its parameters."""
    model = Mnist_Logistic()
    return model, optim.SGD(model.parameters(), lr=0.1)


model, opt = get_model()
print(loss_func(model(xb), yb))

for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        start_i = i * bs
        end_i = start_i + bs
        xb = x_train[start_i:end_i]
        yb = y_train[start_i:end_i]
        pred = model(xb)
        loss = loss_func(pred, yb)

        loss.backward()
        opt.step()
        opt.zero_grad()

print(loss_func(model(xb), yb))

# +
from torch.utils.data import TensorDataset

train_ds = TensorDataset(x_train, y_train)

# +
model, opt = get_model()

for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        xb, yb = train_ds[i * bs: i * bs + bs]
        pred = model(xb)
        loss = loss_func(pred, yb)

        loss.backward()
        opt.step()
        opt.zero_grad()

print(loss_func(model(xb), yb))

# +
from torch.utils.data import DataLoader

# BUG FIX: this assignment was `train_d = ...` (a typo); the DataLoader below
# only worked because an earlier cell already defined `train_ds`.
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs)

# +
model, opt = get_model()

for epoch in range(epochs):
    for xb, yb in train_dl:
        pred = model(xb)
        loss = loss_func(pred, yb)

        loss.backward()
        opt.step()
        opt.zero_grad()

print(loss_func(model(xb), yb))

# +
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)

valid_ds = TensorDataset(x_valid, y_valid)
# Validation can use a bigger batch: no gradients are stored.
valid_dl = DataLoader(valid_ds, batch_size=bs * 2)

# +
model, opt = get_model()

for epoch in range(epochs):
    model.train()
    for xb, yb in train_dl:
        pred = model(xb)
        loss = loss_func(pred, yb)

        loss.backward()
        opt.step()
        opt.zero_grad()

    model.eval()
    with torch.no_grad():
        valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)

    print(epoch, valid_loss / len(valid_dl))  # mean loss over all
# -


def loss_batch(model, loss_func, xb, yb, opt=None):
    """Compute the loss for one batch; take an optimizer step if given.

    Returns (loss_value, batch_size) so callers can form a weighted mean.
    """
    loss = loss_func(model(xb), yb)

    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()

    return loss.item(), len(xb)


# +
import numpy as np


def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    """Generic train/validate loop; prints the mean validation loss per epoch."""
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt)

        model.eval()
        with torch.no_grad():
            losses, nums = zip(
                *[loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]
            )
        # Weight each batch's loss by its size (the last batch may be short).
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)

        print(epoch, val_loss)
# -


def get_data(train_ds, valid_ds, bs):
    """Return (train_dl, valid_dl) DataLoaders for the given datasets."""
    return (
        DataLoader(train_ds, batch_size=bs, shuffle=True),
        DataLoader(valid_ds, batch_size=bs * 2),
    )


train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
model, opt = get_model()
fit(20, model, loss_func, opt, train_dl, valid_dl)

# Huzzah, this worked! (And changing the LR to 0.1 made performance better.)

# Now let's switch to a CNN.
# +
class Mnist_CNN(nn.Module):
    """Three stride-2 conv layers followed by average pooling."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, xb):
        # Flat 784-vector batches become 1-channel 28x28 images.
        xb = xb.view(-1, 1, 28, 28)
        xb = F.relu(self.conv1(xb))
        xb = F.relu(self.conv2(xb))
        xb = F.relu(self.conv3(xb))
        xb = F.avg_pool2d(xb, 4)
        return xb.view(-1, xb.size(1))


lr = 0.1

# +
model = Mnist_CNN()
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

fit(20, model, loss_func, opt, train_dl, valid_dl)
# -

# Wow, that's pretty good performance!


# +
class Lambda(nn.Module):
    """Wrap an arbitrary function so it can live inside nn.Sequential."""

    def __init__(self, func):
        super().__init__()
        self.func = func

    def forward(self, x):
        return self.func(x)


def preprocess(x):
    return x.view(-1, 1, 28, 28)


# +
model = nn.Sequential(
    Lambda(preprocess),
    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.AvgPool2d(4),
    Lambda(lambda x: x.view(x.size(0), -1)),
)

opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

fit(20, model, loss_func, opt, train_dl, valid_dl)


# +
def preprocess(x, y):
    # Reshape happens in the data pipeline now, not inside the model.
    return x.view(-1, 1, 28, 28), y


class WrappedDataLoader:
    """Apply `func` to every batch yielded by the wrapped DataLoader."""

    def __init__(self, dl, func):
        self.dl = dl
        self.func = func

    def __len__(self):
        return len(self.dl)

    def __iter__(self):
        for batch in self.dl:
            yield self.func(*batch)


train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
# -

# Next up: "we can replace nn.AvbPool2d with nn.AdaptiveAvgPool2d"

# +
model = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),
    Lambda(lambda x: x.view(x.size(0), -1)),
)

opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
# -

fit(20, model, loss_func, opt, train_dl, valid_dl)

model
notebooks/Chapter_4-Supplementary-What_Is_torch.nn_Really.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd

df_auc = pd.read_csv("../results/AUC_results.csv")
df_auc.head()


# +
def _series(df, method, dataset, column):
    """Return `column` values for one (method, dataset) pair as an ndarray."""
    mask = (df["method"] == method) & (df["dataset"] == dataset)
    return np.array(df[mask][column])


def _plot_metric(df, dataset, mean_col, std_col, labels, ylabel):
    """Draw one errorbar figure comparing every method on `dataset`.

    `labels` is a list of (method_id, legend_label) pairs; each method is
    drawn with its fixed colour so the four figures stay comparable.
    """
    plt.style.use('default')
    sns.set()
    sns.set_style('whitegrid')
    sns.set_palette('Set1')

    # U = length of the transition period used to generate each dataset.
    x = np.array([0, 100, 200, 300, 400])
    colors = ['red', 'blue', 'green', 'purple', 'orange', 'brown']

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    fontsize = 20
    for (method, label), color in zip(labels, colors):
        y = _series(df, method, dataset, mean_col)
        e = _series(df, method, dataset, std_col)
        ax.errorbar(x, y, yerr=e, marker='o', label=label,
                    capthick=1, capsize=8, lw=1, color=color)
    ax.set_ylim(0, 1)
    ax.set_xlabel('U (length of transition period)', fontsize=fontsize)
    ax.set_ylabel(ylabel, fontsize=fontsize)
    ax.legend()
    plt.show()


# BUG FIX: the two AUC figures previously disagreed on the legend text
# ('S-MDL(...)' vs 'SD-MDL(...)') for the same SDMDL_* methods; unified here.
_AUC_LABELS = [
    ("FW2S_MDL", "FW2S-MDL"),
    ("SDMDL_0", "SD-MDL(0th order)"),
    ("SDMDL_1", "SD-MDL(1st order)"),
    ("SDMDL_2", "SD-MDL(2nd order)"),
    ("ChangeFinder", "CF"),
    ("BOCPD", "BOCPD"),
]
# -

# +
_plot_metric(df_auc, "mean_changing", "AUC_mean", "AUC_std", _AUC_LABELS, "AUC")

# +
_plot_metric(df_auc, "variance_changing", "AUC_mean", "AUC_std", _AUC_LABELS, "AUC")
# -

df_f1 = pd.read_csv("../results/F1_score_results.csv")
df_f1.head()

# +
_F1_LABELS = [
    ("AW2S_MDL", "AW2S-MDL"),
    ("Hierarchical 0th", "Hierarchical(0th order)"),
    ("Hierarchical 1st", "Hierarchical(1st order)"),
    ("Hierarchical 2nd", "Hierarchical(2nd order)"),
    ("ChangeFinder", "CF"),
    ("BOCPD", "BOCPD"),
]

_plot_metric(df_f1, "mean_changing", "F1_score_mean", "F1_score_std",
             _F1_LABELS, "F1-score")

# +
_plot_metric(df_f1, "variance_changing", "F1_score_mean", "F1_score_std",
             _F1_LABELS, "F1-score")
# -
jupyter/AUC_F_result.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# - Reference
#   - https://github.com/eriklindernoren/Keras-GAN/blob/master/cyclegan/cyclegan.py
#   - https://github.com/eriklindernoren/Keras-GAN/blob/master/cyclegan/data_loader.py

from tensorflow_addons.layers import InstanceNormalization
from tensorflow.keras.layers import Input, Dropout, Concatenate
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime
import os
from glob import glob
import cv2
from matplotlib.pyplot import imread

DATA_DIR_PATH = './data'
OUTPUT_DIR_PATH = os.path.join(DATA_DIR_PATH, '03_out')

# +
os.makedirs(os.path.join(DATA_DIR_PATH, 'datasets'), exist_ok=True)
os.makedirs(os.path.join(OUTPUT_DIR_PATH, 'images'), exist_ok=True)
os.makedirs(os.path.join(OUTPUT_DIR_PATH, 'saved_models'), exist_ok=True)


# -

class DataLoader():
    """Load, resize and batch the paired image domains for CycleGAN."""

    def __init__(self, dataset_name, img_res=(128, 128)):
        self.dataset_name = dataset_name
        self.img_res = img_res
        self.datasets_path = os.path.join(DATA_DIR_PATH, 'datasets')

    def load_data(self, domain, batch_size=1, is_testing=False):
        """Sample `batch_size` random images from one domain ("A" or "B")."""
        data_type = "train%s" % domain if not is_testing else "test%s" % domain
        #path = glob('./datasets/%s/%s/*' % (self.dataset_name, data_type))
        path = glob(os.path.join(self.datasets_path, self.dataset_name, data_type, '*'))

        batch_images = np.random.choice(path, size=batch_size)

        imgs = []
        for img_path in batch_images:
            img = self.imread(img_path)
            if not is_testing:
                #img = scipy.misc.imresize(img, self.img_res)
                img = cv2.resize(img, self.img_res)

                # Random horizontal flip as training-time augmentation.
                if np.random.random() > 0.5:
                    img = np.fliplr(img)
            else:
                #img = scipy.misc.imresize(img, self.img_res)
                img = cv2.resize(img, self.img_res)
            imgs.append(img)

        # Rescale pixel values from [0, 255] to [-1, 1] (tanh output range).
        imgs = np.array(imgs)/127.5 - 1.

        return imgs

    def load_batch(self, batch_size=1, is_testing=False):
        """Yield aligned (imgs_A, imgs_B) batches for one epoch."""
        data_type = "train" if not is_testing else "val"
        #path_A = glob('./datasets/%s/%sA/*' % (self.dataset_name, data_type))
        #path_B = glob('./datasets/%s/%sB/*' % (self.dataset_name, data_type))
        path_A = glob(os.path.join(self.datasets_path, self.dataset_name,
                                   '{}A'.format(data_type), '*'))
        path_B = glob(os.path.join(self.datasets_path, self.dataset_name,
                                   '{}B'.format(data_type), '*'))

        self.n_batches = int(min(len(path_A), len(path_B)) / batch_size)
        total_samples = self.n_batches * batch_size

        # Sample n_batches * batch_size from each path list so that model sees all
        # samples from both domains
        path_A = np.random.choice(path_A, total_samples, replace=False)
        path_B = np.random.choice(path_B, total_samples, replace=False)

        for i in range(self.n_batches-1):
            batch_A = path_A[i*batch_size:(i+1)*batch_size]
            batch_B = path_B[i*batch_size:(i+1)*batch_size]
            imgs_A, imgs_B = [], []
            for img_A, img_B in zip(batch_A, batch_B):
                img_A = self.imread(img_A)
                img_B = self.imread(img_B)

                #img_A = scipy.misc.imresize(img_A, self.img_res)
                img_A = cv2.resize(img_A, self.img_res)
                #img_B = scipy.misc.imresize(img_B, self.img_res)
                img_B = cv2.resize(img_B, self.img_res)

                # Flip both images together so the pairing stays aligned.
                if not is_testing and np.random.random() > 0.5:
                    img_A = np.fliplr(img_A)
                    img_B = np.fliplr(img_B)

                imgs_A.append(img_A)
                imgs_B.append(img_B)

            imgs_A = np.array(imgs_A)/127.5 - 1.
            imgs_B = np.array(imgs_B)/127.5 - 1.

            yield imgs_A, imgs_B

    def load_img(self, path):
        """Load one image as a normalised batch of size 1."""
        img = self.imread(path)
        #img = scipy.misc.imresize(img, self.img_res)
        img = cv2.resize(img, self.img_res)
        img = img/127.5 - 1.
        return img[np.newaxis, :, :, :]

    def imread(self, path):
        #return scipy.misc.imread(path, mode='RGB').astype(np.float)
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; it was only ever an alias for the builtin `float`.
        return imread(path).astype(float)


# +
IMG_ROWS = 128
IMG_COLS = 128

data_loader = DataLoader('apple2orange', img_res=(IMG_ROWS, IMG_COLS))


# -

class CycleGAN:
    """CycleGAN: two generators and two discriminators trained jointly."""

    def __init__(self, data_loader):
        self.img_save_dir = os.path.join(OUTPUT_DIR_PATH, 'images')

        # Input shape
        self.img_rows = IMG_ROWS
        self.img_cols = IMG_COLS
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Configure data loader
        #self.dataset_name = 'apple2orange'
        #self.data_loader = DataLoader(dataset_name=self.dataset_name,
        #                              img_res=(self.img_rows, self.img_cols))
        if data_loader:
            self.data_loader = data_loader
            self.dataset_name = self.data_loader.dataset_name

        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 32
        self.df = 64

        # Loss weights
        self.lambda_cycle = 10.0                    # Cycle-consistency loss
        self.lambda_id = 0.1 * self.lambda_cycle    # Identity loss

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminators
        self.d_A = self.build_discriminator()
        self.d_B = self.build_discriminator()
        self.d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
        self.d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

        # Build the generators
        self.g_AB = self.build_generator()
        self.g_BA = self.build_generator()

        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Translate images to the other domain
        fake_B = self.g_AB(img_A)
        fake_A = self.g_BA(img_B)
        # Translate images back to original domain
        reconstr_A = self.g_BA(fake_B)
        reconstr_B = self.g_AB(fake_A)
        # Identity mapping of images
        img_A_id = self.g_BA(img_A)
        img_B_id = self.g_AB(img_B)

        # For the combined model we will only train the generators
        self.d_A.trainable = False
        self.d_B.trainable = False

        # Discriminators determines validity of translated images
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)

        # Combined model trains generators to fool discriminators
        self.combined = Model(inputs=[img_A, img_B],
                              outputs=[valid_A, valid_B,
                                       reconstr_A, reconstr_B,
                                       img_A_id, img_B_id])
        self.combined.compile(loss=['mse', 'mse',
                                    'mae', 'mae',
                                    'mae', 'mae'],
                              loss_weights=[1, 1,
                                            self.lambda_cycle, self.lambda_cycle,
                                            self.lambda_id, self.lambda_id],
                              optimizer=optimizer)

    def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same',
                       activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            # U-Net skip connection.
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)

        # Upsampling
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1,
                            padding='same', activation='tanh')(u4)

        return Model(d0, output_img)

    def build_discriminator(self):
        """PatchGAN discriminator."""

        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model(img, validity)

    def train(self, epochs, batch_size=1, sample_interval=50):
        print(datetime.datetime.now().isoformat(), 'Start')
        start_time = datetime.datetime.now()

        # Adversarial loss ground truths
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)

        for epoch in range(epochs):
            for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):

                # Translate images to opposite domain
                fake_B = self.g_AB.predict(imgs_A)
                fake_A = self.g_BA.predict(imgs_B)

                # Train the discriminators (original images = real / translated = Fake)
                dA_loss_real = self.d_A.train_on_batch(imgs_A, valid)
                dA_loss_fake = self.d_A.train_on_batch(fake_A, fake)
                dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)

                dB_loss_real = self.d_B.train_on_batch(imgs_B, valid)
                dB_loss_fake = self.d_B.train_on_batch(fake_B, fake)
                dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)

                # Total disciminator loss
                d_loss = 0.5 * np.add(dA_loss, dB_loss)

                # Train the generators
                g_loss = self.combined.train_on_batch([imgs_A, imgs_B],
                                                      [valid, valid,
                                                       imgs_A, imgs_B,
                                                       imgs_A, imgs_B])

                elapsed_time = datetime.datetime.now() - start_time

                # Plot the progress
                print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " % (
                    epoch, epochs, batch_i, self.data_loader.n_batches,
                    d_loss[0], 100*d_loss[1], g_loss[0], np.mean(g_loss[1:3]),
                    np.mean(g_loss[3:5]), np.mean(g_loss[5:6]), elapsed_time))

                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    self.sample_images(epoch, batch_i)
        print(datetime.datetime.now().isoformat(), 'End')

    def sample_images(self, epoch, batch_i):
        """Save a 2x3 grid of (original, translated, reconstructed) samples."""
        dir_path = os.path.join(self.img_save_dir, self.dataset_name)
        #os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
        os.makedirs(dir_path, exist_ok=True)
        r, c = 2, 3

        imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=True)
        imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=True)

        # Demo (for GIF)
        #imgs_A = self.data_loader.load_img('datasets/apple2orange/testA/n07740461_1541.jpg')
        #imgs_B = self.data_loader.load_img('datasets/apple2orange/testB/n07749192_4241.jpg')

        # Translate images to the other domain
        fake_B = self.g_AB.predict(imgs_A)
        fake_A = self.g_BA.predict(imgs_B)
        # Translate back to original domain
        reconstr_A = self.g_BA.predict(fake_B)
        reconstr_B = self.g_AB.predict(fake_A)

        gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A,
                                   imgs_B, fake_A, reconstr_B])

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        titles = ['Original', 'Translated', 'Reconstructed']
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt])
                axs[i, j].set_title(titles[j])
                axs[i, j].axis('off')
                cnt += 1
        #fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
        file_path = os.path.join(dir_path, '%d_%d.png' % (epoch, batch_i))
        fig.savefig(file_path)
        plt.close()


gan = CycleGAN(data_loader)

#gan.train(epochs=1, batch_size=1, sample_interval=2)

from tensorflow.keras.utils import plot_model

# +
gan.d_A.summary()

plot_model(gan.d_A, show_shapes=True, dpi=48, expand_nested=True)

# +
gan.g_AB.summary()

plot_model(gan.g_AB, show_shapes=True, dpi=48, expand_nested=True)

# +
gan.combined.summary()

from tensorflow.keras.layers import Layer

# Workaround: drop non-Layer entries so plot_model can walk the graph.
gan.combined._layers = [
    layer for layer in gan.combined._layers
    if isinstance(layer, Layer)
]

plot_model(gan.combined, show_shapes=True, dpi=48, expand_nested=True)
# -
scripts/cyclegan/03_keras_cyclegan_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/AI4Finance-LLC/FinRL-Library/blob/master/FinRL_single_stock_trading.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Idd1jem0TnST" # # Deep Reinforcement Learning for Stock Trading from Scratch: Single Stock Trading # # Tutorials to use OpenAI DRL to trade single stock in one Jupyter Notebook | Presented at NeurIPS 2020: Deep RL Workshop # # * This blog is based on our paper: FinRL: A Deep Reinforcement Learning Library for Automated Stock Trading in Quantitative Finance, presented at NeurIPS 2020: Deep RL Workshop. # * Check out medium blog for detailed explanations: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-single-stock-trading-37d6d7c30aac # * Please report any issues to our Github: https://github.com/AI4Finance-LLC/FinRL-Library/issues # * **Pytorch Version** # # # # + [markdown] id="5vglc_9N5-KZ" # ## Content # + [markdown] id="ex2ord116AbP" # * [1. Problem Definition](#0) # * [2. Getting Started - Load Python packages](#1) # * [2.1. Install Packages](#1.1) # * [2.2. Check Additional Packages](#1.2) # * [2.3. Import Packages](#1.3) # * [2.4. Create Folders](#1.4) # * [3. Download Data](#2) # * [4. Preprocess Data](#3) # * [4.1. Technical Indicators](#3.1) # * [4.2. Perform Feature Engineering](#3.2) # * [5.Build Environment](#4) # * [5.1. Training & Trade Data Split](#4.1) # * [5.2. User-defined Environment](#4.2) # * [5.3. Initialize Environment](#4.3) # * [6.Implement DRL Algorithms](#5) # * [7.Backtesting Performance](#6) # * [7.1. BackTestStats](#6.1) # * [7.2. BackTestPlot](#6.2) # * [7.3. 
Baseline Stats](#6.3) # * [7.3. Compare to Stock Market Index](#6.4) # + [markdown] id="M1eLhMW36cLi" # <a id='0'></a> # # Part 1. Problem Definition # + [markdown] id="3eimeRv06YoK" # This problem is to design an automated trading solution for single stock trading. We model the stock trading process as a Markov Decision Process (MDP). We then formulate our trading goal as a maximization problem. # # The components of the reinforcement learning environment are: # # # * Action: The action space describes the allowed actions that the agent interacts with the # environment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 represent # selling, holding, and buying one stock. Also, an action can be carried upon multiple shares. We use # an action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy # 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively # # * Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. The change of the portfolio value when action a is taken at state s and arriving at new state s', i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfolio # values at state s′ and s, respectively # # * State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, so # our trading agent observes many different features to better learn in an interactive environment. # # * Environment: single stock trading for AAPL # # # The data of the single stock that we will be using for this case study is obtained from Yahoo Finance API. The data contains Open-High-Low-Close price and volume. # # We use Apple Inc. stock: AAPL as an example throughout this article, because it is one of the most popular and profitable stocks. # + [markdown] id="xD3f90UnTnSU" # <a id='1'></a> # # Part 2. 
Getting Started- Load Python Packages # + [markdown] id="UBUcBKap-oII" # <a id='1.1'></a> # ## 2.1. Install all the packages through FinRL library # # + colab={"base_uri": "https://localhost:8080/"} id="40axJBAP5mer" outputId="0e61993b-e962-4b70-a178-fe86404dd1ca" ## install finrl library # !pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git # + [markdown] id="iXyHD6ir5sxk" # # <a id='1.2'></a> # ## 2.2. Check if the additional packages needed are present, if not install them. # * Yahoo Finance API # * pandas # * numpy # * matplotlib # * stockstats # * OpenAI gym # * stable-baselines # * tensorflow # * pyfolio # + [markdown] id="QG17M4JwTnSZ" # <a id='1.3'></a> # ## 2.3. Import Packages # + colab={"base_uri": "https://localhost:8080/"} id="0-0bsNMMTnSZ" outputId="e1c336ac-1280-4b16-b1d4-c2971d6667c2" import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt matplotlib.use('Agg') import datetime from finrl.config import config from finrl.marketdata.yahoodownloader import YahooDownloader from finrl.preprocessing.preprocessors import FeatureEngineer from finrl.preprocessing.data import data_split from finrl.env.env_stocktrading import StockTradingEnv from finrl.model.models import DRLAgent from finrl.trade.backtest import backtest_stats, baseline_stats, backtest_plot import sys sys.path.append("../FinRL-Library") # + id="uIPbzYs1TnSd" #Diable the warnings import warnings warnings.filterwarnings('ignore') # + [markdown] id="pos2IZAL54pp" # <a id='1.4'></a> # ## 2.4. 
Create Folders # + id="zp6lL6dZ53rX" import os if not os.path.exists("./" + config.DATA_SAVE_DIR): os.makedirs("./" + config.DATA_SAVE_DIR) if not os.path.exists("./" + config.TRAINED_MODEL_DIR): os.makedirs("./" + config.TRAINED_MODEL_DIR) if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR): os.makedirs("./" + config.TENSORBOARD_LOG_DIR) if not os.path.exists("./" + config.RESULTS_DIR): os.makedirs("./" + config.RESULTS_DIR) # + [markdown] id="SBPM0sVvTnSg" # <a id='2'></a> # # Part 3. Download Data # Yahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free. # * FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API # * Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). # # + [markdown] id="GCGVmtGzjORf" # # # ----- # class YahooDownloader: # Provides methods for retrieving daily stock data from # Yahoo Finance API # # Attributes # ---------- # start_date : str # start date of the data (modified from config.py) # end_date : str # end date of the data (modified from config.py) # ticker_list : list # a list of stock tickers (modified from config.py) # # Methods # ------- # fetch_data() # Fetches data from yahoo API # # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="FBEiH5gOgMOx" outputId="de388576-0110-46f4-9257-dfe327b3eac5" # from config.py start_date is a string config.START_DATE # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="sWLMNQ8CgMRx" outputId="f1864c8b-2b5c-4867-9122-ccf83ebc2902" # from config.py end_date is a string config.END_DATE # + [markdown] id="AmFpuBEZhkF3" # ticker_list is a list of stock tickers, in a single stock trading case, the list contains only 1 ticker # + colab={"base_uri": "https://localhost:8080/"} id="JtIFikNyTnSg" outputId="4c6a860f-6944-4728-984d-d0dc8d8f2c78" # Download and save the 
data in a pandas DataFrame: data_df = YahooDownloader(start_date = '2009-01-01', end_date = '2021-01-01', ticker_list = ['AAPL']).fetch_data() # + colab={"base_uri": "https://localhost:8080/"} id="E8ZKQmw6TnSl" outputId="954cbef4-f75b-4d4b-80f2-195eb13954ef" data_df.shape # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="Vi6D_ED6TnSs" outputId="2313cd51-596f-4a58-922b-5ec837bbcbda" data_df.head() # + [markdown] id="oWiqgpLzTnS3" # <a id='3'></a> # # Part 4. Preprocess Data # Data preprocessing is a crucial step for training a high quality machine learning model. We need to check for missing data and do feature engineering in order to convert the data into a model-ready state. # * FinRL uses a class **FeatureEngineer** to preprocess the data # * Add **technical indicators**. In practical trading, various information needs to be taken into account, for example the historical stock prices, current holding shares, technical indicators, etc. # # + [markdown] id="rJ9zmxpRks41" # class FeatureEngineer: # Provides methods for preprocessing the stock price data # # Attributes # ---------- # df: DataFrame # data downloaded from Yahoo API # feature_number : int # number of features we used # use_technical_indicator : boolean # we technical indicator or not # use_turbulence : boolean # use turbulence index or not # # Methods # ------- # preprocess_data() # main method to do the feature engineering # + [markdown] id="tHu7i-T_wRPc" # <a id='3.1'></a> # # ## 4.1 Technical Indicators # * FinRL uses stockstats to calcualte technical indicators such as **Moving Average Convergence Divergence (MACD)**, **Relative Strength Index (RSI)**, **Average Directional Index (ADX)**, **Commodity Channel Index (CCI)** and other various indicators and stats. # * **stockstats**: supplies a wrapper StockDataFrame based on the **pandas.DataFrame** with inline stock statistics/indicators support. 
# # # + colab={"base_uri": "https://localhost:8080/"} id="2RHwY1dHk09N" outputId="47be0df8-9e74-473e-f78e-9f267bcce3df" ## we store the stockstats technical indicator column names in config.py tech_indicator_list=config.TECHNICAL_INDICATORS_LIST print(tech_indicator_list) # + colab={"base_uri": "https://localhost:8080/"} id="rImfGAfCkR8j" outputId="5b55c2c7-3217-49ad-95b7-d9af0ad91d7a" ## user can add more technical indicators ## check https://github.com/jealous/stockstats for different names tech_indicator_list=tech_indicator_list+['kdjk','open_2_sma','boll','close_10.0_le_5_c','wr_10','dma','trix'] print(tech_indicator_list) # + [markdown] id="etvRo2rSwZPg" # <a id='3.2'></a> # ## 4.2 Perform Feature Engineering # + colab={"base_uri": "https://localhost:8080/"} id="SAOsx0m-9u2k" outputId="6238a0f8-e0bb-4468-ec75-ab3381ea09a9" fe = FeatureEngineer( use_technical_indicator=True, tech_indicator_list = tech_indicator_list, use_turbulence=False, user_defined_feature = False) data_df = fe.preprocess_data(data_df) # + colab={"base_uri": "https://localhost:8080/", "height": 298} id="wytX_qwWMHP5" outputId="8113269b-39a0-462b-80b2-b8664eb27a66" data_df.head() # + [markdown] id="bwLhXo1cTnTQ" # <a id='4'></a> # # Part 5. Build Environment # Considering the stochastic and interactive nature of the automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price change, taking an action and reward's calculation to have the agent adjusting its strategy accordingly. By interacting with the environment, the trading agent will derive a trading strategy with the maximized rewards as time proceeds. # # Our trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation. # # The action space describes the allowed actions that the agent interacts with the environment. 
Normally, action a includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried upon multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric. # + [markdown] id="1D1FlBdOL4b3" # <a id='4.1'></a> # ## 5.1 Training & Trade data split # * Training: 2009-01-01 to 2018-12-31 # * Trade: 2019-01-01 to 2020-09-30 # + id="xNOdqfTKL6K-" #train = data_split(data_df, start = config.START_DATE, end = config.START_TRADE_DATE) #trade = data_split(data_df, start = config.START_TRADE_DATE, end = config.END_DATE) train = data_split(data_df, start = '2009-01-01', end = '2019-01-01') trade = data_split(data_df, start = '2019-01-01', end = '2021-01-01') # + id="KUxOshVouLXt" ## data normalization, this part is optional, have little impact #feaures_list = list(train.columns) #feaures_list.remove('date') #feaures_list.remove('tic') #feaures_list.remove('close') #print(feaures_list) #from sklearn import preprocessing #data_normaliser = preprocessing.StandardScaler() #train[feaures_list] = data_normaliser.fit_transform(train[feaures_list]) #trade[feaures_list] = data_normaliser.transform(trade[feaures_list]) # + [markdown] id="KMHyaSBBDGbe" # <a id='4.2'></a> # ## 5.2 User-defined Environment: a simulation environment class # + [markdown] id="90V3S7cpDcQs" # <a id='4.3'></a> # ## 5.3 Initialize Environment # * **stock dimension**: the number of unique stock tickers we use # * **hmax**: the maximum amount of shares to buy or sell # * **initial amount**: the amount of money we use to trade in the begining # * **transaction cost percentage**: a per share rate for every share 
trade # * **tech_indicator_list**: a list of technical indicator names (modified from config.py) # + colab={"base_uri": "https://localhost:8080/"} id="OiH0xO96mGcL" outputId="490190d9-80dd-4ba0-c15f-2143cdd8341f" ## we store the stockstats technical indicator column names in config.py ## check https://github.com/jealous/stockstats for different names tech_indicator_list # + colab={"base_uri": "https://localhost:8080/"} id="GnWOKS7DyGM7" outputId="09dff84b-19e1-4c4a-c40f-d5280195eaf4" # the stock dimension is 1, because we only use the price data of AAPL. len(train.tic.unique()) # + colab={"base_uri": "https://localhost:8080/"} id="ipJcnLvQGAba" outputId="dc32ebc6-2191-4541-99d2-7e9861da5df6" stock_dimension = len(train.tic.unique()) state_space = 1 + 2*stock_dimension + len(config.TECHNICAL_INDICATORS_LIST)*stock_dimension print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}") # + id="JY5wTwYjGTrg" env_kwargs = { "hmax": 100, "initial_amount": 100000, "buy_cost_pct": 0.001, "sell_cost_pct": 0.001, "state_space": state_space, "stock_dim": stock_dimension, "tech_indicator_list": config.TECHNICAL_INDICATORS_LIST, "action_space": stock_dimension, "reward_scaling": 1e-4 } e_train_gym = StockTradingEnv(df = train, **env_kwargs) # + colab={"base_uri": "https://localhost:8080/"} id="vmyi2IPnyEkP" outputId="a968a96a-a1ee-426e-eed8-ee74d20b9bb8" env_train, _ = e_train_gym.get_sb_env() print(type(env_train)) # + [markdown] id="mdnzYtM1TnTW" # <a id='5'></a> # # Part 6: Implement DRL Algorithms # * The implementation of the DRL algorithms are based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring, and code cleanups. # * FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG, # Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users to # design their own DRL algorithms by adapting these DRL algorithms. 
# + id="BRFIZDw8TnTX" agent = DRLAgent(env = env_train) # + [markdown] id="zD1NHzGyTnTc" # ### Model Training: 5 models, A2C DDPG, PPO, TD3, SAC # # # + [markdown] id="Y9CM5DeIr9GC" # ### Model 1: A2C # + colab={"base_uri": "https://localhost:8080/"} id="rOk-Mr73-EEq" outputId="1c56a5c6-91ad-4b81-dd07-24ae69e64c23" agent = DRLAgent(env = env_train) A2C_PARAMS = {"n_steps": 5, "ent_coef": 0.005, "learning_rate": 0.0002} model_a2c = agent.get_model(model_name="a2c",model_kwargs = A2C_PARAMS) # + colab={"base_uri": "https://localhost:8080/"} id="DXHEidJh-E60" outputId="9ae1ede1-a8ee-4e56-c4f3-ef809e05d664" trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c', total_timesteps=50000) # + [markdown] id="9upN8FI2r_X1" # ### Model 2: DDPG # + colab={"base_uri": "https://localhost:8080/"} id="gUFJlHsi-Ka-" outputId="1b7db70f-f740-401c-ab55-a567b4235e18" agent = DRLAgent(env = env_train) DDPG_PARAMS = {"batch_size": 64, "buffer_size": 500000, "learning_rate": 0.0001} model_ddpg = agent.get_model("ddpg",model_kwargs = DDPG_PARAMS) # + colab={"base_uri": "https://localhost:8080/"} id="I5J_Scwp-Nis" outputId="a538962d-33c8-44d3-94af-6ef61fc0d230" trained_ddpg = agent.train_model(model=model_ddpg, tb_log_name='ddpg', total_timesteps=30000) # + [markdown] id="1sve9WGvsC__" # ### Model 3: PPO # + colab={"base_uri": "https://localhost:8080/"} id="BOjuycpS-Qvn" outputId="13ab0019-419b-4e6e-949d-c2617ae296b3" agent = DRLAgent(env = env_train) PPO_PARAMS = { "n_steps": 2048, "ent_coef": 0.005, "learning_rate": 0.0001, "batch_size": 128, } model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS) # + colab={"base_uri": "https://localhost:8080/"} id="G9hCHA8A-RSy" outputId="df865883-f3a8-42b2-951f-b129b44bd248" trained_ppo = agent.train_model(model=model_ppo, tb_log_name='ppo', total_timesteps=80000) # + [markdown] id="CiBG2ZknsG73" # ### Model 4: TD3 # + colab={"base_uri": "https://localhost:8080/"} id="UcRdoBc8-Xze" outputId="cc0a65bf-f79d-48ce-c173-6ae08747b0fa" 
agent = DRLAgent(env = env_train)
TD3_PARAMS = {"batch_size": 128,
              "buffer_size": 1000000,
              "learning_rate": 0.0003}

model_td3 = agent.get_model("td3",model_kwargs = TD3_PARAMS)

# + colab={"base_uri": "https://localhost:8080/"} id="CB5cAAio-ZSs" outputId="741a1ed9-9bb0-4763-f237-5cd461ac100e"
trained_td3 = agent.train_model(model=model_td3,
                             tb_log_name='td3',
                             total_timesteps=30000)

# + [markdown] id="oDk2qrlTLZCp"
# ### Model 5: SAC

# + colab={"base_uri": "https://localhost:8080/"} id="el8sK4fo-dl1" outputId="20c5793c-397b-4dc3-a702-857178e61813"
agent = DRLAgent(env = env_train)
SAC_PARAMS = {
    "batch_size": 128,
    "buffer_size": 100000,
    "learning_rate": 0.00003,
    "learning_starts": 100,
    "ent_coef": "auto_0.1",
}

model_sac = agent.get_model("sac",model_kwargs = SAC_PARAMS)

# + colab={"base_uri": "https://localhost:8080/"} id="gaF80PR0-d_d" outputId="b26b6435-ca69-4000-b2b3-22596677d8ff"
trained_sac = agent.train_model(model=model_sac,
                             tb_log_name='sac',
                             total_timesteps=30000)

# + [markdown] id="KE1Xm9N9TnTn"
# ### Trading
# * we use the environment class we initialized at 5.3 to create a stock trading environment
# * Assume that we have $100,000 initial capital at 2019-01-01.
# * We use the trained model of SAC to trade AAPL.

# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="kLFUwxhCoHN_" outputId="22540921-fc5e-460b-f9ef-1438131b45d7"
trade.head()

# + id="bLHl6V7eqV6_"
## make a prediction and get the account value change
trade = data_split(data_df, start = '2019-01-01', end = '2021-01-01')
e_trade_gym = StockTradingEnv(df = trade, **env_kwargs)
env_trade, obs_trade = e_trade_gym.get_sb_env()
df_account_value, df_actions = DRLAgent.DRL_prediction(model=trained_sac,
                                                       trade_data = trade,
                                                       test_env = env_trade,
                                                       test_obs = obs_trade)

# + [markdown] id="qYXxFzD5TnTw"
# <a id='6'></a>
# # Part 7: Backtesting Performance
# Backtesting plays a key role in evaluating the performance of a trading strategy.
# Automated backtesting tool is preferred because it reduces the human error.
# We usually use the Quantopian pyfolio package to backtest our trading strategies.
# It is easy to use and consists of various individual plots that provide a
# comprehensive image of the performance of a trading strategy.

# + [markdown] id="8GwqOO-v1NVz"
# <a id='6.1'></a>
# ## 7.1 BackTestStats
# pass in df_account_value, this information is stored in env class
#

# + colab={"base_uri": "https://localhost:8080/"} id="C9cpPm9YYxHC" outputId="7c7a8e34-7ed3-441d-9036-829240430843"
print("==============Get Backtest Results===========")
now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')

# NOTE: this file imports `backtest_stats` / `backtest_plot` / `baseline_stats`
# (see the `from finrl.trade.backtest import ...` line above); the legacy
# CamelCase names `BackTestStats` etc. are not defined and raised a NameError.
perf_stats_all = backtest_stats(account_value=df_account_value)
perf_stats_all = pd.DataFrame(perf_stats_all)
perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv')

# + id="e-Tenjb0hcNr"


# + [markdown] id="y4Gw3HNr1TDU"
# <a id='6.2'></a>
# ## 7.2 BackTestPlot

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="MA_8LuZE1J3X" outputId="3d2c46b2-2b6e-4cfd-9648-68ae7d37d89c"
print("==============Compare to AAPL itself buy-and-hold===========")
# %matplotlib inline
backtest_plot(account_value=df_account_value,
              baseline_ticker = 'AAPL',
              baseline_start = '2019-01-01',
              baseline_end = '2021-01-01')

# + [markdown] id="qSFMdgCJE4O-"
# <a id='6.3'></a>
# ## 7.3 Baseline Stats

# + colab={"base_uri": "https://localhost:8080/"} id="MpR7aQIwqdC4" outputId="24029a71-3596-4090-d7bc-7f44dd6000fc"
print("==============Get Baseline Stats===========")
baseline_perf_stats = baseline_stats('AAPL')

# + colab={"base_uri": "https://localhost:8080/"} id="vSFRXQfYFTQf" outputId="a5951bab-aa47-4fa8-9cd9-9fc62b5b02f5"
print("==============Get Baseline Stats===========")
baseline_perf_stats = baseline_stats('^GSPC')

# + [markdown] id="tunrA4mjE9la"
# <a id='6.4'></a>
# ## 7.4 Compare to Stock Market Index

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="5Ca2gHxi1gzX" outputId="68de7e8c-882a-4e8a-c9a9-cea8ea6ca212"
print("==============Compare to S&P 500===========")
# %matplotlib inline
# S&P 500: ^GSPC
# Dow Jones Index: ^DJI
# NASDAQ 100: ^NDX
backtest_plot(df_account_value, baseline_ticker = '^GSPC')

# + id="3_aHGEOTCluG"

# -
notebooks/old/FinRL_single_stock_trading.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Naive Linear Regression of Score with no opponents # --- # Creating a regression from a teams score through each game without consideration for who their opponent is would create a sort of blind score projection. There is a heavy feeling of uselessness here as considering NFL is a game with 2 teams, where the number of points scored is directly related to your opponents ability to defend, that this might be a less than useful regression. Ideally thhough this model would at least provide some support to the idea that the predicted score is also reliant on the opposing team. import pandas as pd #DataFrames df = pd.read_csv('../data/external/nfl_games.csv') df.head() df['date'] = pd.to_datetime(df['date']) df.info() # We are going to want to convert the datetime format to a usable numeric for linear regression. To do this we can use the datetime library import datetime as dt #datetime df['date'] = df['date'].map(dt.datetime.toordinal) df.head() # Going to go ahead and clean up unnecessary columns as we only care for the date, the team, and the score. Then I will be creating the linear regressions for each team and storing them in a DataFrame. 
# Keep only date, teams and scores.
# (pandas 2.0 removed the positional `axis` argument to drop(); use the keyword form.)
df = df.drop(columns=['season', 'neutral', 'playoff', 'elo1', 'elo2', 'elo_prob1', 'result1'])
names = df['team1'].unique()
names

# Now we will iterate over the array of names and pull their score data from the dataframe for each team

import numpy as np  # Array
from sklearn.linear_model import LinearRegression  # Linear Regression
from sklearn.model_selection import train_test_split  # Training and Testing

models = {}      # team name -> fitted LinearRegression
x_tests = {}     # team name -> held-out dates (reshaped for sklearn)
y_actuals = {}   # team name -> held-out true scores

for team in names:
    # A team's games appear either as team1 (home) or team2 (away); normalise both
    # subsets to the same column layout and stack them in date order.
    home_team = df[df.team1 == team].drop(columns=['team2', 'score2'])
    away_team = df[df.team2 == team].drop(columns=['team1', 'score1'])
    away_team.columns = home_team.columns
    # DataFrame.append() was removed in pandas 2.0; pd.concat is the supported spelling.
    combined = pd.concat([home_team, away_team], ignore_index=True).sort_values('date')

    x = combined.iloc[:, 0].values  # ordinal date
    y = combined.iloc[:, 2].values  # points scored by this team
    print(y)
    x_train, x_test, y_train, y_actual = train_test_split(x, y, test_size=0.1, random_state=0)
    x_train = x_train.reshape(-1, 1)
    y_train = y_train.reshape(-1, 1)
    x_test = x_test.reshape(-1, 1)
    models[team] = LinearRegression().fit(x_train, y_train)
    x_tests[team] = x_test
    y_actuals[team] = y_actual

# From this ValueError I have come to realize that not every team has played more than 1 NFL game.
# This method of modelling is probably a dead-end here as there wouldn't be enough datapoints for
# most teams. There are a good amount with 50+ entries but for an overall score prediction model I
# would assert that individual 2D linear regression isn't ideal.

# # Conclusion
# ---
# Don't do this. No seriously, don't try to predict individual team scores by past results with no
# consideration to opponents or other factors.
notebooks/2.0-bks-Single-Team-Model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import enum
from abc import ABCMeta, abstractmethod, abstractproperty

import numpy as np
import pandas as pd

from matplotlib import pyplot as plt
# %matplotlib inline
# -

# # Bayesian bandits
#
# Let us explore a simple task of multi-armed bandits with Bernoulli distributions and several strategies for it.
#
# Bandits have $K$ actions. An action leads to a reward $r=1$ with a probability $0 \le \theta_k \le 1$ unknown to an agent but fixed over time. The agent goal is to minimize total suboptimality over $T$ actions:
#
# $$\rho = T\theta^* - \sum_{t=1}^T r_t,$$
#
# and $\theta^* = \max_k\{\theta_k\}$.
#
# **Real-life example** - clinical trials: we have $k$ drug types and $T$ patients. After some drug is taken a patient is cured with a probability $\theta_k$. The goal is to find an optimal drug. The survey on clinical trials - https://arxiv.org/pdf/1507.08025.pdf.
#
# ### Hometask
#
# The task is complementary for the reinforcement learning task, the maximum is 10 points for both.
#
# Implement all the unimplemented agents:
#
# 1. [1 point] $\varepsilon$-greedy
# 2. [1 point] UCB
# 3. [1 point] Thompson sampling
# 4. [2 points] Custom strategy
# 5. [2 points] $\varepsilon$-greedy for riverflow
# 6. [3 points] PSRL agent for riverflow

class BernoulliBandit:
    # K-armed bandit whose arm k pays reward 1 with fixed probability _probs[k].
    def __init__(self, n_actions=5):
        self._probs = np.random.random(n_actions)

    @property
    def action_count(self):
        return len(self._probs)

    def pull(self, action):
        # Bernoulli draw with success probability self._probs[action].
        if np.random.random() > self._probs[action]:
            return 0.0
        return 1.0

    def optimal_reward(self):
        """ Used for regret calculation """
        return np.max(self._probs)

    def step(self):
        """ Used in nonstationary version """
        pass

    def reset(self):
        """ Used in nonstationary version """


# +
class AbstractAgent(metaclass=ABCMeta):
    # Base class: tracks per-arm success/failure counts; subclasses choose actions.
    def init_actions(self, n_actions):
        self._successes = np.zeros(n_actions)
        self._failures = np.zeros(n_actions)
        self._total_pulls = 0

    @abstractmethod
    def get_action(self):
        """
        Get current best action
        :rtype: int
        """
        pass

    def update(self, action, reward):
        """
        Observe reward from action and update agent's internal parameters
        :type action: int
        :type reward: int
        """
        self._total_pulls += 1
        if reward == 1:
            self._successes[action] += 1
        else:
            self._failures[action] += 1

    @property
    def name(self):
        return self.__class__.__name__


class RandomAgent(AbstractAgent):
    # Baseline: pick a uniformly random arm every step.
    def get_action(self):
        return np.random.randint(0, len(self._successes))
# -

# ### Epsilon-greedy agent
#
# > **for** $t = 1,2,...$ **do**
#
# >> **for** $k = 1,...,K$ **do**
#
# >>> $\hat\theta_k \leftarrow \alpha_k / (\alpha_k + \beta_k)$
#
# >> **end for**
#
# >> $x_t \leftarrow argmax_{k}\hat\theta$ with probability $1 - \varepsilon$ or random action with probability $\varepsilon$
#
# >> Apply $x_t$ and observe $r_t$
#
# >> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
#
# > **end for**
#
# Implement the algorithm described:

class EpsilonGreedyAgent(AbstractAgent):
    def __init__(self, epsilon = 0.01):
        self._epsilon = epsilon

    def init_actions(self, n_actions):
        # Tiny pseudo-counts so the success-rate estimates are defined from step one.
        self._a = np.ones(n_actions) * 1e-5
        self._b = np.ones(n_actions) * 1e-5
        self._n_actions = n_actions

    def get_action(self):
        # Explore uniformly with probability epsilon, otherwise exploit the
        # current empirical success rates.
        if np.random.random() < self._epsilon:
            return np.random.randint(self._n_actions)
        thetas = self._a / (self._a + self._b)
        return np.argmax(thetas)

    def update(self, action, reward):
        self._a[action] += reward
        self._b[action] += (1 - reward)

    @property
    def name(self):
        return self.__class__.__name__ + "(epsilon={})".format(self._epsilon)


# ### UCB agent
# Epsilon-greedy strategy has no preferences on the random choice stage. Probably it is better to choose the actions we are not sure enough in, but having a potential to become optimal in the future. We could create the measure for both optimality and uncertainty at the same time.
#
# One of possible solutions for that is UCB1 algorithm:
#
# > **for** $t = 1,2,...$ **do**
# >> **for** $k = 1,...,K$ **do**
# >>> $w_k \leftarrow \alpha_k / (\alpha_k + \beta_k) + \sqrt{2log\ t \ / \ (\alpha_k + \beta_k)}$
#
# >> **end for**
#
# >> $x_t \leftarrow argmax_{k}w$
#
# >> Apply $x_t$ and observe $r_t$
#
# >> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
#
# > **end for**
#
# Other solutions and optimality analysis - https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf.

class UCBAgent(AbstractAgent):
    def __init__(self):
        pass

    def init_actions(self, n_actions):
        self._a = np.ones(n_actions) * 1e-5
        self._b = np.ones(n_actions) * 1e-5
        self._t = 1  # total number of pulls so far, drives the exploration bonus

    def get_action(self):
        # UCB1 score: empirical mean + optimism bonus shrinking with pull count.
        doubleus = self._a / (self._a + self._b) + np.sqrt(2 * np.log(self._t) / (self._a + self._b))
        return np.argmax(doubleus)

    def update(self, action, reward):
        self._a[action] += reward
        self._b[action] += (1 - reward)
        self._t += 1


# ### Thompson sampling
#
# UCB1 algorithm does not consider the rewards distribution. If it is known, we can improve it with Thompson sampling.
#
# We believe that $\theta_k$ are independent and identically distributed. As a prior distribution for them we will use beta-distribution with parameters $\alpha=(\alpha_1, \dots, \alpha_k)$ and $\beta=(\beta_1, \dots, \beta_k)$. Thus for each parameter $\theta_k$ an apriori probability function looks like
# $$
# p(\theta_k) =
# \frac{\Gamma(\alpha_k + \beta_k)}{\Gamma(\alpha_k)\Gamma(\beta_k)}
# \theta_k^{\alpha_k - 1}(1 - \theta_k)^{\beta_k - 1}
# $$
# After receiving new evidence the distribution is updated following the Bayes rule.
#
# Beta distribution is comfortable to work with because of conjugation - actions aposterior distribution will be Beta too so we can easily update them:
#
# > **for** $t = 1,2,...$ **do**
# >> **for** $k = 1,...,K$ **do**
# >>> Sample $\hat\theta_k \sim beta(\alpha_k, \beta_k)$
#
# >> **end for**
#
# >> $x_t \leftarrow argmax_{k}\hat\theta$
#
# >> Apply $x_t$ and observe $r_t$
#
# >> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
#
# > **end for**
#
# Hometask: https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf.

class ThompsonSamplingAgent(AbstractAgent):
    def __init__(self):
        pass

    def init_actions(self, n_actions):
        self._a = np.ones(n_actions) * 1e-5
        self._b = np.ones(n_actions) * 1e-5

    def get_action(self):
        # Sample one plausible theta per arm from its Beta posterior and act greedily.
        thetas = np.random.beta(self._a, self._b)
        return np.argmax(thetas)

    def update(self, action, reward):
        self._a[action] += reward
        self._b[action] += (1 - reward)


def plot_regret(env, agents, n_steps=5000, n_trials=50):
    # Run every agent against `env` for n_trials independent runs and plot
    # the average cumulative regret of each agent.
    scores = {
        agent.name : [0.0 for step in range(n_steps)]
        for agent in agents
    }

    for trial in range(n_trials):
        env.reset()

        for a in agents:
            a.init_actions(env.action_count)

        for i in range(n_steps):
            optimal_reward = env.optimal_reward()

            for agent in agents:
                action = agent.get_action()
                reward = env.pull(action)
                agent.update(action, reward)
                scores[agent.name][i] += optimal_reward - reward

            env.step()  # change bandit's state if it is unstationary

    plt.figure(figsize=(17, 8))
    for agent in agents:
        plt.plot(np.cumsum(scores[agent.name]) / n_trials)

    plt.legend([agent.name for agent in agents])
    plt.ylabel("regret")
    plt.xlabel("steps")
    plt.show()


# +
# Agents to compare (all implemented above).
agents = [
    EpsilonGreedyAgent(),
    UCBAgent(),
    ThompsonSamplingAgent()
]

plot_regret(BernoulliBandit(), agents, n_steps=10000, n_trials=10)
# -

# # Non-stationary bandits
#
# But what if the probabilities change over time? For example

class DriftingBandit(BernoulliBandit):
    def __init__(self, n_actions=5, gamma=0.01):
        """ Idea from https://github.com/iosband/ts_tutorial """
        super().__init__(n_actions)

        self._gamma = gamma  # drift rate: how fast the arm probabilities move

        self._successes = None
        self._failures = None
        self._steps = 0

        self.reset()

    def reset(self):
        self._successes = np.zeros(self.action_count) + 1.0
        self._failures = np.zeros(self.action_count) + 1.0
        self._steps = 0

    def step(self):
        action = np.random.randint(self.action_count)
        reward = self.pull(action)
        self._step(action, reward)

    def _step(self, action, reward):
        # Exponentially decay old counts towards 1, add the new observation,
        # then resample the arm probabilities from the drifted Beta.
        self._successes = self._successes * (1 - self._gamma) + self._gamma
        self._failures = self._failures * (1 - self._gamma) + self._gamma
        self._steps += 1

        self._successes[action] += reward
        self._failures[action] += 1.0 - reward

        self._probs = np.random.beta(self._successes, self._failures)


# Let us look at the reward probabilities changing over time:

# +
drifting_env = DriftingBandit(n_actions=5)

drifting_probs = []
for i in range(20000):
    drifting_env.step()
    drifting_probs.append(drifting_env._probs)

plt.figure(figsize=(14, 8))
plt.plot(pd.DataFrame(drifting_probs).rolling(window=20).mean())

plt.xlabel("steps")
plt.ylabel("Success probability")
plt.title("Reward probabilities over time")
plt.legend(["Action {}".format(i) for i in range(drifting_env.action_count)])
plt.show()
# -

# **Task** - create an agent that will work better than any of the stationary algorithms above.
class YourAgent(AbstractAgent):
    # UCB with exponential forgetting: old evidence is decayed every update so
    # the agent can track the drifting reward probabilities.
    def __init__(self, coef=0.995):
        self._coef = coef  # forgetting factor applied to the running counts

    def init_actions(self, n_actions):
        self._a = np.ones(n_actions) * 1e-5  # decayed success counts
        self._b = np.ones(n_actions) * 1e-5  # decayed failure counts
        self._t = 1                          # decayed effective step count

    def get_action(self):
        # Standard UCB1 score computed over the decayed statistics.
        doubleus = (self._a / (self._a + self._b) +
                    np.sqrt(2 * np.log(self._t) / (self._a + self._b)))
        return np.argmax(doubleus)

    def update(self, action, reward):
        self._a[action] = self._coef * self._a[action] + reward
        self._b[action] = self._coef * self._b[action] + (1 - reward)
        self._t = self._coef * self._t + 1


# +
drifting_agents = [
    ThompsonSamplingAgent(),
    EpsilonGreedyAgent(),
    UCBAgent(),
    YourAgent()
]

plot_regret(DriftingBandit(), drifting_agents, n_steps=20000, n_trials=10)
# -

# ## Exploration in MDPs
#
# The next task, "river flow", illustrates the importance of exploration with Markov decision-making processes example.

# <img src="river_swim.png">
#
# Illustration from https://arxiv.org/abs/1306.0940

# Both rewards and transitions are unknown to the agent.
#
# The optimal strategy is to go right against the flow, but the easiest one is to keep going left and get small rewards each time.

class RiverSwimEnv:
    # Chain MDP: action 0 (drift left) is deterministic, action 1 (swim right,
    # against the current) is stochastic. Big reward at the rightmost state,
    # a tiny one at the leftmost.
    LEFT_REWARD = 5.0 / 1000
    RIGHT_REWARD = 1.0

    def __init__(self, intermediate_states_count=4, max_steps=16):
        self._max_steps = max_steps
        self._current_state = None
        self._steps = None
        self._interm_states = intermediate_states_count
        self.reset()

    def reset(self):
        # Episodes start one step away from the left bank.
        self._steps = 0
        self._current_state = 1
        return self._current_state, 0.0, False

    @property
    def n_actions(self):
        return 2

    @property
    def n_states(self):
        return 2 + self._interm_states

    def _get_transition_probs(self, action):
        # Returns [P(move left), P(stay), P(move right)] for the current state.
        if action == 0:
            if self._current_state == 0:
                return [0, 1.0, 0]
            else:
                return [1.0, 0, 0]
        elif action == 1:
            if self._current_state == 0:
                return [0, .4, .6]
            if self._current_state == self.n_states - 1:
                return [.4, .6, 0]
            else:
                return [.05, .6, .35]
        else:
            # Fixed typo: this used to raise the nonexistent `RuntumeError`,
            # which itself blew up with a NameError on invalid input.
            raise RuntimeError("Unknown action {}. Max action is {}".format(action, self.n_actions))

    def step(self, action):
        """
        :param action:
        :type action: int
        :return: observation, reward, is_done
        :rtype: (int, float, bool)
        """
        reward = 0.0
        if self._steps >= self._max_steps:
            return self._current_state, reward, True

        transition = np.random.choice(range(3), p=self._get_transition_probs(action))
        if transition == 0:
            self._current_state -= 1
        elif transition == 1:
            pass
        else:
            self._current_state += 1

        if self._current_state == 0:
            reward = self.LEFT_REWARD
        elif self._current_state == self.n_states - 1:
            reward = self.RIGHT_REWARD

        self._steps += 1
        return self._current_state, reward, False


# Let's implement Q-learning agent with an $\varepsilon$-greedy strategy and look at its performance.

class QLearningAgent:
    def __init__(self, n_states, n_actions, lr=0.2, gamma=0.95, epsilon=0.1):
        self._gamma = gamma      # discount factor
        self._epsilon = epsilon  # exploration rate
        self._q_matrix = np.zeros((n_states, n_actions))
        self._lr = lr            # learning rate

    def get_action(self, state):
        # Epsilon-greedy over the current Q estimates.
        if np.random.random() < self._epsilon:
            return np.random.randint(0, self._q_matrix.shape[1])
        else:
            return np.argmax(self._q_matrix[state])

    def get_q_matrix(self):
        """ Used for policy visualization """
        return self._q_matrix

    def start_episode(self):
        """ Used in PSRL agent """
        pass

    def update(self, state, action, reward, next_state):
        # Standard Q-learning TD(0) update.
        max_val = self._q_matrix[next_state].max()
        self._q_matrix[state][action] += self._lr * \
            (reward + self._gamma * max_val - self._q_matrix[state][action])


def train_mdp_agent(agent, env, n_episodes):
    # Run n_episodes full episodes and return the total reward of each.
    episode_rewards = []
    for ep in range(n_episodes):
        state, ep_reward, is_done = env.reset()
        agent.start_episode()
        while not is_done:
            action = agent.get_action(state)
            next_state, reward, is_done = env.step(action)
            agent.update(state, action, reward, next_state)
            state = next_state
            ep_reward += reward
        episode_rewards.append(ep_reward)
    return episode_rewards


# +
env = RiverSwimEnv()
agent = QLearningAgent(env.n_states, env.n_actions)
rews = train_mdp_agent(agent, env, 1000)

plt.figure(figsize=(15, 8))
plt.plot(pd.DataFrame(np.array(rews)).ewm(alpha=.1).mean())
plt.xlabel("Episode count")
plt.ylabel("Reward")
plt.show()
# -

# Implement policy decisions visualization:

def plot_policy(agent):
    fig = plt.figure(figsize=(15, 8))
    ax = fig.add_subplot(111)
    ax.matshow(agent.get_q_matrix().T)
    ax.set_yticklabels(['', 'left', 'right'])
    plt.xlabel("State")
    plt.ylabel("Action")
    plt.title("Values of state-action pairs")
    plt.show()


plot_policy(agent)

# We can see that the agent uses non-optimal strategy and keeps going left, not knowing the better option.

# ## Posterior sampling RL
# Let us implement Thompson Sampling for MDPs!
#
# The algorithm:
#
# >**for** episode $k = 1,2,...$ **do**
# >> sample $M_k \sim f(\bullet\ |\ H_k)$
#
# >> compute policy $\mu_k$ for $M_k$
#
# >> **for** time $t = 1, 2,...$ **do**
#
# >>> take action $a_t$ from $\mu_k$
#
# >>> observe $r_t$ and $s_{t+1}$
# >>> update $H_k$
#
# >> **end for**
#
# >**end for**
#
# $M_k$ is modeled as two matrices: transitions and rewards. Transition matrix is sampled from Dirichlet distribution while reward matrix is sampled from Gamma-normal distribution.
#
# The distributions are updated following the bayes rule (see https://en.wikipedia.org/wiki/Conjugate_prior for continuous distributions).
#
# Follow-up reading - https://arxiv.org/abs/1306.0940.
def bellman(val, trans, rewards):
    """One sweep of the Bellman optimality backup.

    :param val: current value estimate, shape (n_states,)
    :param trans: transition probabilities, shape (n_states, n_states, n_actions)
    :param rewards: expected rewards, shape (n_states, n_actions)
    :return: improved value estimate, shape (n_states,)
    """
    q = np.zeros(rewards.shape)
    for state in range(rewards.shape[0]):
        for action in range(rewards.shape[1]):
            # Q(s, a) = R(s, a) + sum_s' P(s' | s, a) * V(s')
            q[state][action] = rewards[state][action] + val.dot(trans[state, :, action])
    return q.max(axis=1)


# +
def sample_normal_gamma(mu, lmbd, alpha, beta):
    """
    Draw (mean, precision) samples from a Normal-gamma distribution.

    https://en.wikipedia.org/wiki/Normal-gamma_distribution

    BUG FIX: the precision is tau ~ Gamma(alpha, rate=beta), but
    np.random.gamma is parameterized by *scale* (= 1 / rate), so we pass
    1.0 / beta here; the original passed beta directly, inverting the
    dependence of the sampled precision on beta.
    """
    tau = np.random.gamma(alpha, 1.0 / beta)
    mu = np.random.normal(mu, 1.0 / np.sqrt(lmbd * tau))
    return mu, tau


class PsrlAgent:
    """Posterior-sampling RL (Thompson sampling over MDPs).

    Keeps a Dirichlet posterior over transitions and a Normal-gamma
    posterior over rewards; at the start of each episode one MDP is
    sampled from the posterior and the agent acts greedily w.r.t. its
    finite-horizon value function.
    """

    def __init__(self, n_states, n_actions, horizon=10):
        self._n_states = n_states
        self._n_actions = n_actions
        self._horizon = horizon

        # Params for transition sampling - Dirichlet distribution
        # (all-ones prior = uniform over the probability simplex).
        self._transition_counts = np.zeros((n_states, n_states, n_actions)) + 1.0

        # Params for reward sampling - Normal-gamma distribution.
        self._mu_matrix = np.zeros((n_states, n_actions)) + 1.0
        self._state_action_counts = np.zeros((n_states, n_actions)) + 1.0  # lambda
        self._alpha_matrix = np.zeros((n_states, n_actions)) + 1.0
        self._beta_matrix = np.zeros((n_states, n_actions)) + 1.0

        self._val = np.zeros(n_states)

    def _value_iteration(self, transitions, rewards):
        # Finite-horizon value iteration: one Bellman backup per step.
        for _ in range(self._horizon):
            self._val = bellman(self._val, transitions, rewards)
        return self._val

    def start_episode(self):
        # Sample a new MDP from the current posterior.
        self._sampled_transitions = np.apply_along_axis(
            np.random.dirichlet, 1, self._transition_counts)

        # Only the sampled means are used as the reward matrix; the second
        # return value is the sampled precision (not a std, as the original
        # variable name suggested).
        sampled_reward_mus, _sampled_reward_precisions = sample_normal_gamma(
            self._mu_matrix,
            self._state_action_counts,
            self._alpha_matrix,
            self._beta_matrix
        )
        self._sampled_rewards = sampled_reward_mus
        self._current_value_function = self._value_iteration(
            self._sampled_transitions, self._sampled_rewards)

    def get_action(self, state):
        # Greedy action w.r.t. the sampled MDP's Q-values.
        return np.argmax(self._sampled_rewards[state] +
                         self._current_value_function.dot(self._sampled_transitions[state]))

    def update(self, state, action, reward, next_state):
        # Conjugate Normal-gamma posterior update for the observed reward
        # (see https://en.wikipedia.org/wiki/Conjugate_prior).
        mu0 = self._mu_matrix[state][action]
        v = self._state_action_counts[state][action]
        a = self._alpha_matrix[state][action]
        b = self._beta_matrix[state][action]

        self._mu_matrix[state][action] = (mu0 * v + reward) / (v + 1)
        self._state_action_counts[state][action] = v + 1
        self._alpha_matrix[state][action] = a + 1 / 2
        self._beta_matrix[state][action] = b + (v / (v + 1)) * (((reward - mu0) ** 2) / 2)

        # Dirichlet posterior update for the observed transition.
        self._transition_counts[state][next_state][action] += 1

    def get_q_matrix(self):
        # Used for policy visualization.
        return self._sampled_rewards + self._current_value_function.dot(self._sampled_transitions)


# +
horizon = 20
env = RiverSwimEnv(max_steps=horizon)
agent = PsrlAgent(env.n_states, env.n_actions, horizon=horizon)
rews = train_mdp_agent(agent, env, 1000)

plt.figure(figsize=(15, 8))
plt.plot(pd.DataFrame(np.array(rews)).ewm(alpha=.1).mean())
plt.xlabel("Episode count")
plt.ylabel("Reward")
plt.show()
# -

plot_policy(agent)
CV/bandits/bandits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Text classification using Tensorflow
#
# Basic Tensorflow model to predict text categories based on word counts.
# Trained on the 20newsgroup dataset from scikit-learn using ReLU.
# Better approaches to be experimented with further.
#

import pandas as pd
import numpy as np
import tensorflow as tf
from collections import Counter
from sklearn.datasets import fetch_20newsgroups

# ## Manipulating data and passing it into Neural Network

# +
vocab = Counter()

def get_word_to_index(vocab):
    '''
    Returns a hash mapping each word in the vocabulary to its index.
    '''
    word_to_index = {}
    for i, word in enumerate(vocab):
        word_to_index[word] = i
    return word_to_index
# -

# ## Building the Neural Network

categories = ["comp.graphics", "sci.space", "rec.sport.baseball"]
newsgroup_train = fetch_20newsgroups(subset='train', categories=categories)
newsgroup_test = fetch_20newsgroups(subset='test', categories=categories)

# +
# Build the vocabulary from BOTH splits so every test-set word is indexable.
for text in newsgroup_train.data:
    for word in text.split(' '):
        vocab[word.lower()] += 1

# BUG FIX: the second loop originally iterated over the *training* data a
# second time; it must cover the test data, otherwise text_to_vector raises
# KeyError on any word that only appears in the test split.
for text in newsgroup_test.data:
    for word in text.split(' '):
        vocab[word.lower()] += 1
# -

word2index = get_word_to_index(vocab)

# #### Converting all words to vector arrays using frequency in vocabulary list

# +
total_words = len(vocab)

def text_to_vector(text):
    '''
    Returns the bag-of-words vector of a text: one slot per vocabulary
    word, holding that word's occurrence count in the text.
    '''
    layer = np.zeros(total_words, dtype=float)
    for word in text.split(' '):
        layer[word2index[word]] += 1
    return layer

def category_to_vector(category):
    '''One-hot encode a category index (0, 1 or 2) into a length-3 vector.'''
    y = np.zeros((3), dtype=float)
    if category == 0:
        y[0] = 1
    elif category == 1:
        y[1] = 1
    else:
        y[2] = 1
    return y
# -

def get_batch(df, i, batch_size):
    '''
    Returns np arrays of text vectors and one-hot category vectors.

    df: dataset (with .data texts and .target labels) to be batched
    i: index of the batch to extract
    batch_size: singular batch size

    A single batch contains multiple layers of text vectors. Test and
    train data contain numerous batches.
    '''
    batches = []
    results = []
    # BUG FIX: the original indexed with a comma (df.data[a, b] is a tuple
    # index, invalid for a list); a slice [a:b] is intended.
    texts = df.data[i * batch_size : i * batch_size + batch_size]
    labels = df.target[i * batch_size : i * batch_size + batch_size]
    for text in texts:
        batches.append(text_to_vector(text))
    # BUG FIX: the labels must be one-hot encoded into `results`; the
    # original appended text_to_vector(label) into `batches`, corrupting
    # the batch and leaving `results` as raw integer targets.
    for label in labels:
        results.append(category_to_vector(label))
    return np.array(batches), np.array(results)

# +
# Parameters
learning_rate = 0.01
training_epochs = 100
batch_size = 150
display_step = 1

# Network Parameters
n_hidden_1 = 100        # 1st layer number of features
n_hidden_2 = 100        # 2nd layer number of features
n_input = total_words   # Words in vocab
n_classes = 3           # Categories: graphics, sci.space and baseball

# BUG FIX: tf.placeholder takes (dtype, shape); the original wrote
# tf.placeholder(tf.float32[None, n_input]), which indexes the dtype
# object and raises a TypeError.
input_tensor = tf.placeholder(tf.float32, [None, n_input], name="Input")
output_tensor = tf.placeholder(tf.float32, [None, n_classes], name="Output")
.ipynb_checkpoints/text_classification_RELU-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="Mbwoh4mHz4gj" outputId="18a7cdc8-868c-4ab1-a876-e98a2f05126f" viewer = "😎 Physicist" ## variable assingment print(f"Hello world {viewer}") # + colab={"base_uri": "https://localhost:8080/"} id="T8k9ewhL0r42" outputId="9f877115-89db-4fd3-ad16-70718aaa5028" print(f"Why are you repeating Hello world {viewer}? 😥") # + [markdown] id="EqFyBMzi1zJf" # # # + [markdown] id="C7jBJa7-3Ecq" # # Let's create multiple sections # + id="1UTtJnZC3M9Z" # + [markdown] id="uuGbLaoa3O1J" # ## SubSection # + id="llgUOilP3OSO" # + [markdown] id="b1wwWzRM3UhY" # # Another Section # + id="uw5sOLWo3XP3" # + [markdown] id="u9FwuToP4dz-" # # Real code # + [markdown] id="i43FasD44j2O" # Importing numpy library; # [Documentation](https://numpy.org/doc/stable/) # + id="_H-Fld1D4gpl" import numpy as np # + [markdown] id="PgQNRJ3l4lKm" # Using functions from np # + id="5jqHhIdm4ivV" normal_dist = np.random.normal(30,5,60) # + colab={"base_uri": "https://localhost:8080/"} id="cMwccV1H5rKS" outputId="b7ee51eb-8680-481e-869f-26afba7d30ae" print(normal_dist) # + [markdown] id="a5hnH-RT5Zm0" # importing matplotlib.pyplot; [Documentation](https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.html#:~:text=pyplot%20is%20a%20state%2Dbased,pyplot%20as%20plt%20x%20%3D%20np.) 
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="EZsSjsqw5p8S" outputId="9d1a7e73-c8c1-47c1-9194-1156a2617ecd" import matplotlib.pyplot as plt figure = plt.hist(normal_dist) # + id="4k67E0KAE3lc" normal_dist2 = np.random.normal(34,3,60) # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="lfONmki2ru-T" outputId="512c5cd0-821e-4112-e617-aa31537ecbd2" fig1 = plt.hist(normal_dist) fig2 = plt.hist(normal_dist2) # + colab={"base_uri": "https://localhost:8080/"} id="tHyYBCP-sBZs" outputId="babc123d-1d05-4b2a-fbb0-3d8e30c0a449" from scipy.stats import ttest_ind test = ttest_ind(normal_dist,normal_dist2) print(test) # + colab={"base_uri": "https://localhost:8080/"} id="Jt9vx99-7X60" outputId="2a66fcee-61ca-4265-8207-9ce9bf69590b" from scipy.stats import ttest_rel test = ttest_rel(normal_dist,normal_dist2) print(test) # + colab={"base_uri": "https://localhost:8080/"} id="mv7Lz5wWD078" outputId="9c8c393a-74b8-41a2-c97c-e5335851a1f9" from scipy.stats import wilcoxon test = wilcoxon(normal_dist,normal_dist2) print(test) # + colab={"base_uri": "https://localhost:8080/"} id="lSQl37U4FydJ" outputId="069976af-e06c-4c1b-b623-59974a051455" from scipy.stats import mannwhitneyu test = mannwhitneyu(normal_dist,normal_dist2) print(test) # + id="vDeNGkc4JPQ_"
StatsPart1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 import os, sys import time import math import numpy as np # %matplotlib inline import matplotlib.pyplot as plt import scipy.stats as stats from sklearn.preprocessing import MinMaxScaler, PowerTransformer # - def load_results(file): rv = [] with open(file) as results_by_len_file: for line in results_by_len_file: line = line.strip() if (line == "") or (line == "smallsent"): continue tokens = line.split() rv.append([float(tok) for tok in tokens]) rv = np.array(rv) return rv wt_2_wt_2_baseline = load_results('./../data/wikitext-2/test.txt.WT2-20201204-202513.eval-by-sent.baseline') wt_2_wt_2_rnd_tuned = load_results('./../data/wikitext-2/test.txt.WT2-20201204-202513.eval-by-sent.rnd_tuned') wt_2_penn_baseline = load_results('./../data/penn/test.txt.WT2-20201204-202513.eval-by-sent.baseline') wt_2_penn_rnd_tuned = load_results('./../data/penn/test.txt.WT2-20201204-202513.eval-by-sent.rnd_tuned') x, y = wt_2_wt_2_baseline[:, 0], wt_2_wt_2_baseline[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_wt_2_rnd_tuned[:, 0], wt_2_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_penn_baseline[:, 0], wt_2_penn_baseline[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', 
label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_penn_rnd_tuned[:, 0], wt_2_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_wt_2_baseline[:, 0], wt_2_wt_2_baseline[:, 1] - wt_2_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_penn_baseline[:, 0], wt_2_penn_baseline[:, 1] - wt_2_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_wt_2_baseline[:, 0], wt_2_wt_2_baseline[:, 2] - wt_2_wt_2_rnd_tuned[:, 2] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = wt_2_penn_baseline[:, 0], wt_2_penn_baseline[:, 2] - wt_2_penn_rnd_tuned[:, 2] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() penn_wt_2_baseline = load_results('./../data/wikitext-2/test.txt.penn-small-batch-20201125-154421.eval-by-sent.baseline') penn_wt_2_rnd_tuned = 
load_results('./../data/wikitext-2/test.txt.penn-small-batch-20201125-154421.eval-by-sent.rnd_tuned') penn_penn_baseline = load_results('./../data/penn/test.txt.penn-small-batch-20201125-154421.eval-by-sent.baseline') penn_penn_rnd_tuned = load_results('./../data/penn/test.txt.penn-small-batch-20201125-154421.eval-by-sent.rnd_tuned') x, y = penn_wt_2_baseline[:, 0], penn_wt_2_baseline[:, 1] - penn_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() x, y = penn_penn_baseline[:, 0], penn_penn_baseline[:, 1] - penn_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.legend() plt.show() # + fig, axs = plt.subplots(2, 2) x, y = penn_wt_2_baseline[:, 0], penn_wt_2_baseline[:, 1] - penn_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) axs[0, 0].plot(x, y, '.', label='original data') axs[0, 0].plot(x, intercept + slope*x, 'r') x, y = penn_penn_baseline[:, 0], penn_penn_baseline[:, 1] - penn_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) axs[0, 1].plot(x, y, '.', label='original data') axs[0, 1].plot(x, intercept + slope*x, 'r') x, y = wt_2_wt_2_baseline[:, 0], wt_2_wt_2_baseline[:, 1] - wt_2_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) axs[1, 0].plot(x, y, '.', label='original data') axs[1, 0].plot(x, intercept + slope*x, 'r') x, y = wt_2_penn_baseline[:, 0], wt_2_penn_baseline[:, 1] - wt_2_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) axs[1, 1].plot(x, y, '.', label='original data') axs[1, 
1].plot(x, intercept + slope*x, 'r') fig.show() plt.savefig('./eval_by_length1.pdf') # - from scipy.interpolate import Rbf from scipy.stats import gaussian_kde x, y = penn_wt_2_baseline[:, 0], penn_wt_2_baseline[:, 1] - penn_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("r_value: %s, p_value: %s" % (r_value, p_value)) #plt.plot(x, y, '.', label='original data') x_min, x_max = min(x), max(x) y_min, y_max = min(y), max(y) plt.plot(x, y, '.', label='original data') plt.plot(x, intercept + slope*x, 'r', label='fitted line') plt.xlim(x_min, x_max) plt.xticks(np.arange(x_min, x_max, step=50)) #plt.ylim(y_min, y_max) rbf = Rbf(x, y, smooth=100.0) xi = np.linspace(x_min, x_max, 300) plt.plot(xi, rbf(xi), label="smoothed data") plt.legend() plt.show() # + fig, axs = plt.subplots(2, 2) x, y = penn_wt_2_baseline[:, 0], penn_wt_2_baseline[:, 1] - penn_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) x_min, x_max = min(x), max(x) rbf = Rbf(x, y, smooth=100.0) xi = np.linspace(x_min, x_max, 300) axs[0, 0].plot(x, y, '.', label='original data') axs[0, 0].plot(x, intercept + slope*x, 'r') axs[0, 0].plot(xi, rbf(xi), label="smoothed data") x, y = penn_penn_baseline[:, 0], penn_penn_baseline[:, 1] - penn_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) x_min, x_max = min(x), max(x) rbf = Rbf(x, y, smooth=100.0) xi = np.linspace(x_min, x_max, 300) axs[0, 1].plot(x, y, '.', label='original data') axs[0, 1].plot(x, intercept + slope*x, 'r') axs[0, 1].plot(xi, rbf(xi), label="smoothed data") x, y = wt_2_wt_2_baseline[:, 0], wt_2_wt_2_baseline[:, 1] - wt_2_wt_2_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) x_min, x_max = min(x), max(x) rbf = Rbf(x, y, smooth=100.0) xi = np.linspace(x_min, x_max, 300) axs[1, 0].plot(x, y, '.', label='original data') axs[1, 0].plot(x, intercept + slope*x, 'r') axs[1, 0].plot(xi, rbf(xi), 
label="smoothed data") x, y = wt_2_penn_baseline[:, 0], wt_2_penn_baseline[:, 1] - wt_2_penn_rnd_tuned[:, 1] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) x_min, x_max = min(x), max(x) rbf = Rbf(x, y, smooth=100.0) xi = np.linspace(x_min, x_max, 300) axs[1, 1].plot(x, y, '.', label='original data') axs[1, 1].plot(x, intercept + slope*x, 'r') axs[1, 1].plot(xi, rbf(xi), label="smoothed data") fig.show() plt.savefig('./eval_by_length1.pdf') # - # + fig, axs = plt.subplots(1, 2) x = penn_wt_2_baseline[:, 0] x_min, x_max = min(x), max(x) xi = np.linspace(x_min, x_max, 300) kde = stats.gaussian_kde(x) axs[0].hist(x, density=True, bins=50) axs[0].plot(xi, kde.pdf(xi)) x = penn_penn_baseline[:, 0] x_min, x_max = min(x), max(x) xi = np.linspace(x_min, x_max, 300) kde = stats.gaussian_kde(x) axs[1].hist(x, density=True, bins=50) axs[1].plot(xi, kde.pdf(xi)) fig.show() plt.savefig('./eval_by_length2.pdf') # -
lm/code/eval-by-length.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + dc={"key": "3"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 1. Darwin's bibliography # <p><img src="https://www.thoughtco.com/thmb/5RP6YxnilIlHtkrqPtsrnVYWWZk=/768x0/filters:no_upscale():max_bytes(150000):strip_icc()/CharlesDarwin-5c2c3d7e46e0fb0001a343e3.jpg" alt="<NAME>" width="300px"></p> # <p><NAME> is one of the few universal figures of science. His most renowned work is without a doubt his "<em>On the Origin of Species</em>" published in 1859 which introduced the concept of natural selection. But Darwin wrote many other books on a wide range of topics, including geology, plants or his personal life. In this notebook, we will automatically detect how closely related his books are to each other.</p> # <p>To this purpose, we will develop the bases of <strong>a content-based book recommendation system</strong>, which will determine which books are close to each other based on how similar the discussed topics are. The methods we will use are commonly used in text- or documents-heavy industries such as legal, tech or customer support to perform some common task such as text classification or handling search engine queries.</p> # <p>Let's take a look at the books we'll use in our recommendation system.</p> # - # + dc={"key": "3"} tags=["sample_code"] # Import library import glob # The books files are contained in this folder folder = "datasets/" # List all the .txt files and sort them alphabetically files = glob.glob(folder+"*.txt") files.sort() files # + dc={"key": "10"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 2. 
Load the contents of each book into Python # <p>As a first step, we need to load the content of these books into Python and do some basic pre-processing to facilitate the downstream analyses. We call such a collection of texts <strong>a corpus</strong>. We will also store the titles for these books for future reference and print their respective length to get a gauge for their contents.</p> # + dc={"key": "10"} tags=["sample_code"] # Import libraries import re, os # Initialize the object that will contain the texts and titles txts = [] titles = [] for n in files: # Open each file f = open(n, encoding='utf-8-sig') # Remove all non-alpha-numeric characters pattern = re.compile('[\W_]+') txt = pattern.sub(' ', f.read()) # Store the texts and titles of the books in two separate lists txts.append(txt) titles.append(os.path.basename(n).replace(".txt","")) # Print the length, in characters, of each book [len(t) for t in txts] # + dc={"key": "17"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 3. Find "On the Origin of Species" # <p>For the next parts of this analysis, we will often check the results returned by our method for a given book. For consistency, we will refer to Darwin's most famous book: "<em>On the Origin of Species</em>." Let's find to which index this book is associated.</p> # + dc={"key": "17"} tags=["sample_code"] # Browse the list containing all the titles for i in range(len(titles)): # Store the index if the title is "OriginofSpecies" if titles[i] == "OriginofSpecies": ori = i # Print the stored index print(ori) # + dc={"key": "24"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 4. Tokenize the corpus # <p>As a next step, we need to transform the corpus into a format that is easier to deal with for the downstream analyses. We will tokenize our corpus, i.e., transform each text into a list of the individual words (called tokens) it is made of. 
To check the output of our process, we will print the first 20 tokens of "<em>On the Origin of Species</em>".</p> # + dc={"key": "24"} tags=["sample_code"] # Define a list of stop words stoplist = set('for a of the and to in to be which some is at that we i who whom show via may my our might as well'.split()) # Convert the text to lower case txts_lower_case = [i.lower() for i in txts] # Transform the text into tokens txts_split = [i.split() for i in txts_lower_case] # Remove tokens which are part of the list of stop words texts = [list(set(i) - stoplist) for i in txts_split] # Print the first 20 tokens for the "On the Origin of Species" book texts[15][:20] # + dc={"key": "31"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 5. Stemming of the tokenized corpus # <p>If you have read <em>On the Origin of Species</em>, you will have noticed that <NAME> can use different words to refer to a similar concept. For example, the concept of selection can be described by words such as <em>selection</em>, <em>selective</em>, <em>select</em> or <em>selects</em>. This will dilute the weight given to this concept in the book and potentially bias the results of the analysis.</p> # <p>To solve this issue, it is a common practice to use a <strong>stemming process</strong>, which will group together the inflected forms of a word so they can be analysed as a single item: <strong>the stem</strong>. 
In our <em>On the Origin of Species</em> example, the words related to the concept of selection would be gathered under the <em>select</em> stem.</p> # <p>As we are analysing 20 full books, the stemming algorithm can take several minutes to run and, in order to make the process faster, we will directly load the final results from a pickle file and review the method used to generate it.</p> # + dc={"key": "31"} tags=["sample_code"] import pickle # Load the stemmed tokens list from the pregenerated pickle file file = open('datasets/texts_stem.p', mode='rb') texts_stem = pickle.load(file) # Print the 20 first stemmed tokens from the "On the Origin of Species" book print(texts_stem[ori][:20]) # + dc={"key": "38"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 6. Building a bag-of-words model # <p>Now that we have transformed the texts into stemmed tokens, we need to build models that will be useable by downstream algorithms.</p> # <p>First, we need to will create a universe of all words contained in our corpus of <NAME> books, which we call <em>a dictionary</em>. Then, using the stemmed tokens and the dictionary, we will create <strong>bag-of-words models</strong> (BoW) of each of our texts. The BoW models will represent our books as a list of all uniques tokens they contain associated with their respective number of occurrences. 
</p> # <p>To better understand the structure of such a model, we will print the five first elements of one of the "<em>On the Origin of Species</em>" BoW model.</p> # + dc={"key": "38"} tags=["sample_code"] # Load the functions allowing to create and use dictionaries from gensim import corpora # Create a dictionary from the stemmed tokens dictionary = corpora.Dictionary(texts_stem) # Create a bag-of-words model for each book, # using the previously generated dictionary bows = [dictionary.doc2bow(i) for i in texts_stem] # Print the first five elements of the On the Origin of species' BoW model bows[ori][:5] # + dc={"key": "45"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 7. The most common words of a given book # <p>The results returned by the bag-of-words model is certainly easy to use for a computer but hard to interpret for a human. It is not straightforward to understand which stemmed tokens are present in a given book from <NAME>, and how many occurrences we can find.</p> # <p>In order to better understand how the model has been generated and visualize its content, we will transform it into a DataFrame and display the 10 most common stems for the book "<em>On the Origin of Species</em>".</p> # + dc={"key": "45"} df_bow_origin.head() # + dc={"key": "45"} tags=["sample_code"] # Import pandas to create and manipulate DataFrames import pandas as pd # Convert the BoW model for "On the Origin of Species" into a DataFrame df_bow_origin = pd.DataFrame(bows[ori]) # Add the column names to the DataFrame df_bow_origin.columns = ['index', 'occurrences'] # Add a column containing the token corresponding to the dictionary index df_bow_origin['token'] = df_bow_origin['index'].apply(lambda x: list(dictionary.token2id.keys())[list(dictionary.token2id.values()).index(x)]) # Sort the DataFrame by descending number of occurrences and # print the first 10 values df_bow_origin = df_bow_origin.sort_values('occurrences', ascending=False) 
df_bow_origin.head(10) # + dc={"key": "52"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 8. Build a tf-idf model # <p>If it wasn't for the presence of the stem "<em>speci</em>", we would have a hard time to guess this BoW model comes from the <em>On the Origin of Species</em> book. The most recurring words are, apart from few exceptions, very common and unlikely to carry any information peculiar to the given book. We need to use an additional step in order to determine which tokens are the most specific to a book.</p> # <p>To do so, we will use a <strong>tf-idf model</strong> (term frequency–inverse document frequency). This model defines the importance of each word depending on how frequent it is in this text and how infrequent it is in all the other documents. As a result, a high tf-idf score for a word will indicate that this word is specific to this text.</p> # <p>After computing those scores, we will print the 10 words most specific to the "<em>On the Origin of Species</em>" book (i.e., the 10 words with the highest tf-idf score).</p> # + dc={"key": "52"} tags=["sample_code"] # Load the gensim functions that will allow us to generate tf-idf models from gensim.models import TfidfModel # Generate the tf-idf model model = TfidfModel(bows) # Print the model for "On the Origin of Species" print(model[bows[ori]]) # + dc={"key": "59"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 9. The results of the tf-idf model # <p>Once again, the format of those results is hard to interpret for a human. 
Therefore, we will transform it into a more readable version and display the 10 most specific words for the "<em>On the Origin of Species</em>" book.</p> # + dc={"key": "59"} tags=["sample_code"] # Convert the tf-idf model for "On the Origin of Species" into a DataFrame df_tfidf = pd.DataFrame(model[bows[ori]]) # Name the columns of the DataFrame id and score df_tfidf.columns = ['id', 'score'] # Add the tokens corresponding to the numerical indices for better readability df_tfidf['token'] = df_tfidf['id'].apply(lambda x: list(dictionary.token2id.keys())[list(dictionary.token2id.values()).index(x)]) # Sort the DataFrame by descending tf-idf score and print the first 10 rows. df_tfidf = df_tfidf.sort_values('score', ascending=False).reset_index(drop=True) df_tfidf.head(10) # + dc={"key": "66"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 10. Compute distance between texts # <p>The results of the tf-idf algorithm now return stemmed tokens which are specific to each book. We can, for example, see that topics such as selection, breeding or domestication are defining "<em>On the Origin of Species</em>" (and yes, in this book, <NAME> talks quite a lot about pigeons too). 
Now that we have a model associating tokens to how specific they are to each book, we can measure how related to books are between each other.</p> # <p>To this purpose, we will use a measure of similarity called <strong>cosine similarity</strong> and we will visualize the results as a distance matrix, i.e., a matrix showing all pairwise distances between Darwin's books.</p> # + dc={"key": "66"} tags=["sample_code"] # Load the library allowing similarity computations from gensim import similarities # Compute the similarity matrix (pairwise distance between all texts) sims = similarities.MatrixSimilarity(bows) # Transform the resulting list into a dataframe sim_df = pd.DataFrame(list(sims)) # Add the titles of the books as columns and index of the dataframe sim_df.columns = titles sim_df.index = titles # Print the resulting matrix print(sim_df) # + dc={"key": "73"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 11. The book most similar to "On the Origin of Species" # <p>We now have a matrix containing all the similarity measures between any pair of books from <NAME>! We can now use this matrix to quickly extract the information we need, i.e., the distance between one book and one or several others. 
</p> # <p>As a first step, we will display which books are the most similar to "<em>On the Origin of Species</em>," more specifically we will produce a bar chart showing all books ranked by how similar they are to Darwin's landmark work.</p> # + dc={"key": "73"} tags=["sample_code"] # This is needed to display plots in a notebook # %matplotlib inline # Import libraries import matplotlib.pyplot as plt # Select the column corresponding to "On the Origin of Species" and v = sim_df.loc[:,'OriginofSpecies'] # Sort by ascending scores v_sorted = v.sort_values(ascending=True) # Plot this data has a horizontal bar plot v_sorted.plot.barh() # # Modify the axes labels and plot title for a better readability plt.xlabel("Cosine Similarity") plt.ylabel("Book title") plt.title("How similar all books are to Darwin's landmark work") # + dc={"key": "80"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 12. Which books have similar content? # <p>This turns out to be extremely useful if we want to determine a given book's most similar work. For example, we have just seen that if you enjoyed "<em>On the Origin of Species</em>," you can read books discussing similar concepts such as "<em>The Variation of Animals and Plants under Domestication</em>" or "<em>The Descent of Man, and Selection in Relation to Sex</em>." If you are familiar with Darwin's work, these suggestions will likely seem natural to you. Indeed, <em>On the Origin of Species</em> has a whole chapter about domestication and <em>The Descent of Man, and Selection in Relation to Sex</em> applies the theory of natural selection to human evolution. Hence, the results make sense.</p> # <p>However, we now want to have a better understanding of the big picture and see how Darwin's books are generally related to each other (in terms of topics discussed). To this purpose, we will represent the whole similarity matrix as a dendrogram, which is a standard tool to display such data. 
<strong>This last approach will display all the information about book similarities at once.</strong> For example, we can find a book's closest relative but, also, we can visualize which groups of books have similar topics (e.g., the cluster about <NAME> personal life with his autobiography and letters). If you are familiar with Darwin's bibliography, the results should not surprise you too much, which indicates the method gives good results. Otherwise, next time you read one of the author's book, you will know which other books to read next in order to learn more about the topics it addressed.</p> # + dc={"key": "80"} tags=["sample_code"] # Import libraries from scipy.cluster import hierarchy # Compute the clusters from the similarity matrix, # using the Ward variance minimization algorithm Z = hierarchy.linkage(sims, method='ward') # Display this result as a horizontal dendrogram display = hierarchy.dendrogram(Z, leaf_font_size=8, labels=sim_df.index, orientation="left")
notebook/book_recommendation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to programming for Geoscientists through Python # # # Lecture 6 solutions # # ## <NAME> (<EMAIL>) http://www.imperial.ac.uk/people/g.gorman # * **Read a two-column data file**</br> # The file *data/xy.dat* contains two columns of numbers, corresponding to *x* and *y* coordinates on a curve. The start of the file looks like this: # # -1.0000 -0.0000</br> # -0.9933 -0.0087</br> # -0.9867 -0.0179</br> # -0.9800 -0.0274</br> # -0.9733 -0.0374</br> # # Make a program that reads the first column into a list *x* and the second column into a list *y*. Then convert the lists to arrays, and plot the curve. Print out the maximum and minimum y coordinates. (Hint: Read the file line by line, split each line into words, convert to float, and append to *x* and *y*.)</br> # + ## STAGE 1: input # Open data file infile = open("data/xy.dat", "r") # "r" is for read # Initialise empty lists xlist = [] ylist = [] # Loop through infile and write to x and y lists for line in infile: line = line.split() # convert to list by dropping spaces xlist.append(float(line[0])) # take 0th element and covert to float ylist.append(float(line[1])) # take 1st element and covert to float # Close the filehandle infile.close() # + ## STAGE 2: view file # Sanity check -- Let's see what our lists look like for the first 10 print("First ten elements in x and y lists:\n") # N.B. 
"\n" adds a newline for i in range(10): print("X = %.2f, Y = %.2f" % (xlist[i], ylist[i])) # Max and min y coords print("\nThe min and max y are: %.2f and %.2f" % (min(ylist), max(ylist))) # + ## STAGE 3: plotting # Convert to array from numpy import * xarray = array(xlist) yarray = array(ylist) # plot from pylab import * plot(xlist, ylist) xlabel("X") ylabel("Y") show() # - # * **Read a data file**</br> # The files data/density_water.dat and data/density_air.dat contain data about the density of water and air (respectively) for different temperatures. The data files have some comment lines starting with # and some lines are blank. The rest of the lines contain density data: the temperature in the first column and the corresponding density in the second column. The goal of this exercise is to read the data in such a file and plot the density versus the temperature as distinct (small) circles for each data point. Let the program take the name of the data file via raw_input. Apply the program to both files. # + ## STAGE 1: work with one file # precede exactly as before but with modified for loop: # line[0] -- refers to the first character in a string # != -- boolean operator for 'not equal to' # and -- boolean operator, both T/F statements must be met # commented lines will start with a #, blank lines will consist of just a newline character '\n' infile = open("data/density_air.dat", "r") temp = [] dens = [] for line in infile: try: t, d = line.split() t = float(t) d = float(t) except: continue temp.append(t) # N.B. we're now filling out temp and dens lists dens.append(d) infile.close() # Sanity check: print(temp) # ... double check the original file to make sure this is right. 
# +
## STAGE 2: interactive file choice
# Let's write it up as a function
# We'll use exactly the same code as before, but specify the infile via raw_input()

def readTempDenFile(sanity_check = True):
    """Read a two-column (temperature, density) data file and return the two
    columns as lists. Lines that cannot be parsed as two floats (comments,
    blank lines) are skipped."""
    #filename = raw_input("File path: ") # ask the user to specify the file path
    filename = "data/density_air.dat"
    infile = open(filename, "r")
    temp = []
    dens = []
    for line in infile:
        try:
            t, d = line.split()
            t = float(t)
            d = float(d)  # BUG FIX: was float(t), which duplicated the temperature into the density list
        except ValueError:  # skip unparseable (comment/blank) lines
            continue
        temp.append(t) # N.B. we're now filling out temp and dens lists
        dens.append(d)
    infile.close()
    return temp,dens

# function defined, let's run it
temp,dens = readTempDenFile() # unpack into two objects temp and dens
print(temp)

# +
## STAGE 3: plotting
# same as before but with the plotting lines of code in our function
# remember to have %pylab inline for ipython
# ... and to import pylab and numpy if not already imported
from pylab import *
from numpy import *

def readTempDenFile(sanity_check = True):
    """As above, but also plot density versus temperature before returning."""
    #filename = raw_input("File path: ") # ask the user to specify the file path
    filename = "data/density_air.dat"
    infile = open(filename, "r")
    temp = []
    dens = []
    for line in infile:
        try:
            t, d = line.split()
            t = float(t)
            d = float(d)  # BUG FIX: was float(t) here as well
        except ValueError:  # skip unparseable (comment/blank) lines
            continue
        temp.append(t) # N.B. we're now filling out temp and dens lists
        dens.append(d)
    infile.close()
    plot(array(temp), array(dens))
    xlabel("Temperature (C)")
    ylabel("Density (kg/m^3)")
    show()
    return temp,dens

# run function
readTempDenFile()
# -

# * **Read acceleration data and find velocities**</br>
# A file data/acc.dat contains measurements $a_0, a_1, \ldots, a_{n-1}$ of the acceleration of an object moving along a straight line. The measurement $a_k$ is taken at time point $t_k = k\Delta t$, where $\Delta t$ is the time spacing between the measurements. The purpose of the exercise is to load the acceleration data into a program and compute the velocity $v(t)$ of the object at some time $t$.
# # In general, the acceleration $a(t)$ is related to the velocity $v(t)$ through $v^\prime(t) = a(t)$. This means that # # $$ # v(t) = v(0) + \int_0^t{a(\tau)d\tau} # $$ # # If $a(t)$ is only known at some discrete, equally spaced points in time, $a_0, \ldots, a_{n-1}$ (which is the case in this exercise), we must compute the integral above numerically, for example by the Trapezoidal rule: # # $$ # v(t_k) \approx \Delta t \left(\frac{1}{2}a_0 + \frac{1}{2}a_k + \sum_{i=1}^{k-1}a_i \right), \ \ 1 \leq k \leq n-1. # $$ # # We assume $v(0) = 0$ so that also $v_0 = 0$. # Read the values $a_0, \ldots, a_{n-1}$ from file into an array, plot the acceleration versus time, and use the Trapezoidal rule to compute one $v(t_k)$ value, where $\Delta t$ and $k \geq 1$ are specified using raw_input. # + ## PART 1: read and plot # specify libraries # %pylab inline from pylab import * from numpy import * dt = 0.5 # read in acceleration infile = open("data/acc.dat", "r") alist = [] for line in infile: alist.append(float(line)) infile.close() aarray = array(alist) time = array([e*dt for e in range(len(alist))]) # time is specified by dt and the number of elements in acc.dat # plot plot(time, aarray) xlabel("Time") ylabel("Acceleration") show() # + ## PART 2: trapezoidal function def trap(dt, k, alist): if not (1 <= k <= (len(alist) - 1)): raise ValueError return dt*(.5*alist[0] + .5*alist[k] + sum(alist[:k])) dt = float(input("dt = ")) k = int(input("k = ")) print(trap(dt, k, alist)) # - # * **Read acceleration data and plot velocities**</br> # The task in this exercise is the same as the one above, except that we now want to compute $v(t_k)$ for all time points $t_k = k\Delta t$ and plot the velocity versus time. Repeated use of the Trapezoidal rule for all $k$ values is very inefficient. 
A more efficient formula arises if we add the area of a new trapezoid to the previous integral: # # $$ # v(t_k) = v(t_{k-1}) + \int_{t_{k-1}}^{t_k}a(\tau)\ d\tau \approx v(t_{k-1}) + \Delta t \frac{1}{2}\left(a_{k-1} + a_k\right), # $$ # # for $k = 1, 2, \ldots, n-1$, while $v_0 = 0$. Use this formula to fill an array *v* with velocity values. Now only $\Delta t$ is given on via raw_input, and the $a_0, \ldots, a_{n-1}$ values must be read from file as in the previous exercise. # ## STAGE 1: Read in acc.dat infile = open("data/acc.dat", "r") alist = [] for line in infile: alist.append(float(line)) infile.close() ## STAGE 2: Create function for calculating veloctities def calcV(dt, alist): vlist = [] # start with trapezoidal function k: 0 - 1 vlist.append(dt*(.5*alist[0] + .5*alist[1] + sum(alist[:1]))) for i in range(1, len(alist)): # add new trapezoid to previous velocity for new velocity vlist.append(vlist[-1] + (dt*.5*(alist[i-1]+alist[i]))) return vlist # + ## STAGE 3: Plot # libs # %pylab inline from pylab import * from numpy import * # calc v for a given dt dt = 0.005 vlist = calcV(dt, alist) # convert to arrays varray = array(vlist) time = array([e*dt for e in range(len(alist))]) # plot plot(time, varray) xlabel("Time") ylabel("Velocity") show() # - # * **Make a dictionary from a table**</br> # The file *data/constants.txt* contains a table of the values and the dimensions of some fundamental constants from physics. We want to load this table into a dictionary *constants*, where the keys are the names of the constants. For example, *constants['gravitational constant']* holds the value of the gravitational constant (6.67259 $\times$ 10$^{-11}$) in Newton's law of gravitation. Make a function that reads and interprets the text in the file, and thereafter returns the dictionary. 
# + def read_constants(file_path): infile = open(file_path, "r") constants = {} # An empty dictionary to store the constants that are read in from the file infile.readline(); infile.readline() # Skip the first two lines of the file, since these just contain the column names and the separator. for line in infile: words = line.split() # Split each line up into individual words dimension = words.pop() # pop is a list operation that removes the last element from a list and returns it value = float(words.pop()) # Again, use pop to obtain the constant itself. name = " ".join(words) # After the two 'pop' operations above, the words remaining in the 'words' list must be the name of the constant. Join the individual words together, with spaces inbetween, using .join. constants[name] = value # Create a new key-value pair in the dictionary return constants print(read_constants('data/constants.txt')) # - # * **Explore syntax differences: lists vs. dictionaries**</br> # Consider this code: # + t1 = {} # this specifies a dictionary t1[0] = -5 # create a new key called 0, give it the value -5 t1[1] = 10.5 # create a new key called 1, give it the value 10.5 print(t1) # shows the contents of 0 and 1 # - # Explain why the lines above work fine while the ones below do not: t2 = [] # this specifies a list t2[0] = -5 t2[1] = 10.5 # cannot work, raises and IndexError # [] notation is for indexing in lists # this is trying to specify the 0th element is -5 # IndexError is raised because the intial list has no dimensions, and therefore [0] is out of index # Dictionaries behave differently, the [] notation will create a new key and add the specified element to the dictionary # What must be done in the last code snippet to make it work properly? # + ## To make it work properly we have two options # 1. specify the list's dimension before asignment t2 = [0]*2 # a list containing two 0s t2[0] = -5 t2[1] = 10.5 print(t2) del t2 # 2. 
start with a dimensionless list, but append new elements t2 = [] t2.append(-5) t2.append(10.5) print(t2) # - # * **Compute the area of a triangle**</br> # An arbitrary triangle can be described by the coordinates of its three vertices: $(x_1, y_1), (x_2, y_2), (x_3, y_3)$, numbered in a counterclockwise direction. The area of the triangle is given by the formula: # # $A = \frac{1}{2}|x_2y_3 - x_3y_2 - x_1y_3 + x_3y_1 + x_1y_2 - x_2y_1|.$ # # Write a function *area(vertices)* that returns the area of a triangle whose vertices are specified by the argument vertices, which is a nested list of the vertex coordinates. For example, vertices can be [[0,0], [1,0], [0,2]] if the three corners of the triangle have coordinates (0, 0), (1, 0), and (0, 2). # # Then, assume that the vertices of the triangle are stored in a dictionary and not a list. The keys in the dictionary correspond to the vertex number (1, 2, or 3) while the values are 2-tuples with the x and y coordinates of the vertex. For example, in a triangle with vertices (0, 0), (1, 0), and (0, 2) the vertices argument becomes: # + ## STAGE 1: area function on list def area(vertices): # nb. vertices = [[x1,y1],[x2,y2],[x3,y3]] x2y3 = vertices[1][0] * vertices[2][1] x3y2 = vertices[2][0] * vertices[1][1] x1y3 = vertices[0][0] * vertices[2][1] x3y1 = vertices[2][0] * vertices[0][1] x1y2 = vertices[0][0] * vertices[1][1] x2y1 = vertices[1][0] * vertices[0][1] return .5*(x2y3 - x3y2 - x1y3 + x3y1 + x1y2 - x2y1) print(area([[0.0,0.0],[1.0,0.0],[0.0,2.0]])) # + ## STAGE 2: area function on dict def area(vertices): # nb. 
vertices = {v1: (x,y)} x2y3 = vertices[2][0] * vertices[3][1] x3y2 = vertices[3][0] * vertices[2][1] x1y3 = vertices[1][0] * vertices[3][1] x3y1 = vertices[3][0] * vertices[1][1] x1y2 = vertices[1][0] * vertices[2][1] x2y1 = vertices[2][0] * vertices[1][1] return .5*(x2y3 - x3y2 - x1y3 + x3y1 + x1y2 - x2y1) print(area({1: (0,0), 2: (1,0), 3: (0,2)})) # - # * **Improve a program**</br> # The file *data/densities.dat* contains a table of densities of various substances measured in g/cm$^3$. The following program reads the data in this file and produces a dictionary whose keys are the names of substances, and the values are the corresponding densities. # + def read_densities(filename): infile = open(filename, 'r') densities = {} for line in infile: words = line.split() density = float(words[-1]) if len(words[:-1]) == 2: substance = words[0] + ' ' + words[1] else: substance = words[0] densities[substance] = density infile.close() return densities densities = read_densities('data/densities.dat') # - # One problem we face when implementing the program above is that the name of the substance can contain one or two words, and maybe more words in a more comprehensive table. The purpose of this exercise is to use string operations to shorten the code and make it more general. Implement the following two methods in separate functions in the same program, and control that they give the same result. # # 1. Let *substance* consist of all the words but the last, using the join method in string objects to combine the words. # 2. Observe that all the densities start in the same column file and use substrings to divide line into two parts. (Hint: Remember to strip the first part such that, e.g., the density of ice is obtained as *densities['ice']* and not *densities['ice ']*.) 
# + ## STAGE 1: using string function join def read_densities(filename): infile = open(filename, 'r') densities = {} for line in infile: words = line.split() density = float(words.pop()) # pop is a list operation that removes the last element from a list and returns it substance = "_".join(words) # join the remaining words with _ densities[substance] = density infile.close() return densities densities = read_densities('data/densities.dat') print(densities) # + ## STAGE 2: Using string slicing and operations def read_densities(filename): infile = open(filename, 'r') densities = {} for line in infile: density = float(line[12:]) # column 13 onwards substance = line[:12] # upto coumn 12 substance = substance.strip() # remove trailing spaces substance = substance.replace(" ", "_") # replace spaces with _ densities[substance] = density infile.close() return densities densities = read_densities('data/densities.dat') # - # * **Write function data to a file**</br> # We want to dump $x$ and $f(x)$ values to a file named function_data.dat, where the $x$ values appear in the first column and the $f(x)$ values appear in the second. Choose $n$ equally spaced $x$ values in the interval [-4, 4]. Here, the function $f(x)$ is given by: # # $f(x) = \frac{1}{\sqrt{2\pi}}\exp(-0.5x^2)$</br></br> # + # libs from numpy import * from math import pi # define our function def f(x): return (1.0/sqrt(2.0*pi))*exp(-.5*x**2.0) # let's make our x xarray = linspace(-4.0, 4.0, 100) fxs = f(xarray) # let's zip them up for a simple for loop when writing out data = zip(xarray, fxs) # this combines each element into a tuple e.g. [(xarray1, fxs1), (xarray2, fxs2) ...] # write out outfile = open("ex5_out.dat", "w") # w is for writing! for x,y in data: outfile.write("X = %.2f Y = %.2f" % (x, y)) outfile.write("\n") # ensure newline outfile.close()
notebook/Lecture-6-Introduction-to-programming-for-geoscientists-Solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from os import path
import numpy as np
import matplotlib.pyplot as plt
from bentdna import shapefourier

workfolder = '/home/yizaochen/codes/dna_rna/length_effect/find_helical_axis'

# ### Part 0: Initialize
host = 'a_tract_21mer'
s_agent = shapefourier.ShapeAgent(workfolder, host)

# ### Part 1: Read $l_i$ and $\theta$
s_agent.read_l_modulus_theta()

# ### Part 2: Assign frame-id
frameid = 10

# ### Part 3: Let $\bar{L}= 14 \rm{bp} \times 0.34 \rm{nm/bp}=4.76 \rm{nm}$
# [Reference](https://bio.libretexts.org/Bookshelves/Genetics/Book%3A_Working_with_Molecular_Genetics_(Hardison)/Unit_I%3A_Genes_Nucleic_Acids_Genomes_and_Chromosomes/2%3A_Structures_of_Nucleic_Acids/2.5%3A_B-Form_A-Form_and_Z-Form_of_DNA)
L = 4.76  # contour length in nm (14 bp * 0.34 nm/bp)

# ### Part 4: wavelength $\lambda=\frac{2\bar{L}}{n}$, for $\sqrt{\frac{2}{L}}\cos{(\frac{n \pi s}{L})}$
n = 2
wavelength = 2 * L / n
# BUG FIX: L is in nm, so the wavelength is in nm (the message previously claimed Å)
print(f'Wavelength is {wavelength:.3f} nm')
notebooks/wavelength_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Omaromar2255/4433/blob/main/Colab%20RDP/Colab%20RDP242.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="iLh_9SkSut4u" # # **Colab RDP** : Remote Desktop to Colab Instance # # > **Warning : Not for Cryptocurrency Mining<br></br>** # >**Why are hardware resources such as T4 GPUs not available to me?** The best available hardware is prioritized for users who use Colaboratory interactively rather than for long-running computations. Users who use Colaboratory for long-running computations may be temporarily restricted in the type of hardware made available to them, and/or the duration that the hardware can be used for. We encourage users with high computational needs to use Colaboratory’s UI with a local runtime. Please note that using Colaboratory for cryptocurrency mining is disallowed entirely, and may result in being banned from using Colab altogether. # # Google Colab can give you Instance with 12GB of RAM and GPU for 12 hours (Max.) for Free users. Anyone can use it to perform Heavy Tasks. 
# # To use other similiar Notebooks use my Repository **[Colab Hacks](https://github.com/PradyumnaKrishna/Colab-Hacks)** # + id="Ozth7NCH0BD3" # + id="t4yNp3KmLtZ6" cellView="form" #@title **Create User** #@markdown Enter Username and Password import os username = "user" #@param {type:"string"} password = "<PASSWORD>" #@param {type:"string"} print("Creating User and Setting it up") # Creation of user os.system(f"useradd -m {username}") # Add user to sudo group os.system(f"adduser {username} sudo") # Set password of user to 'root' os.system(f"echo '{username}:{password}' | sudo chpasswd") # Change default shell from sh to bash os.system("sed -i 's/\/bin\/sh/\/bin\/bash/g' /etc/passwd") print(f"User created and configured having username `{username}` and password `{password}`") # + id="Q6bl1b0EifVG" #@title **RDP** #@markdown It takes 4-5 minutes for installation import os import subprocess #@markdown Visit http://remotedesktop.google.com/headless and copy the command after Authentication CRP = "" #@param {type:"string"} #@markdown Enter a Pin (more or equal to 6 digits) Pin = 123456 #@param {type: "integer"} #@markdown Autostart Notebook in RDP Autostart = False #@param {type: "boolean"} class CRD: def __init__(self, user): os.system("apt update") self.installCRD() self.installDesktopEnvironment() self.installGoogleChorme() self.finish(user) print("\nRDP created succesfully move to https://remotedesktop.google.com/access") @staticmethod def installCRD(): print("Installing Chrome Remote Desktop") subprocess.run(['wget', 'https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE) subprocess.run(['dpkg', '--install', 'chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE) subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE) @staticmethod def installDesktopEnvironment(): print("Installing Desktop Environment") os.system("export DEBIAN_FRONTEND=noninteractive") os.system("apt 
install --assume-yes xfce4 desktop-base xfce4-terminal") os.system("bash -c 'echo \"exec /etc/X11/Xsession /usr/bin/xfce4-session\" > /etc/chrome-remote-desktop-session'") os.system("apt remove --assume-yes gnome-terminal") os.system("apt install --assume-yes xscreensaver") os.system("systemctl disable lightdm.service") @staticmethod def installGoogleChorme(): print("Installing Google Chrome") subprocess.run(["wget", "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE) subprocess.run(["dpkg", "--install", "google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE) subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE) @staticmethod def finish(user): print("Finalizing") if Autostart: os.makedirs(f"/home/{user}/.config/autostart", exist_ok=True) link = "https://colab.research.google.com/github/PradyumnaKrishna/Colab-Hacks/blob/master/Colab%20RDP/Colab%20RDP.ipynb" colab_autostart = """[Desktop Entry] Type=Application Name=Colab Exec=sh -c "sensible-browser {}" Icon= Comment=Open a predefined notebook at session signin. 
X-GNOME-Autostart-enabled=true""".format(link) with open(f"/home/{user}/.config/autostart/colab.desktop", "w") as f: f.write(colab_autostart) os.system(f"chmod +x /home/{user}/.config/autostart/colab.desktop") os.system(f"chown {user}:{user} /home/{user}/.config") os.system(f"adduser {user} chrome-remote-desktop") command = f"{CRP} --pin={Pin}" os.system(f"su - {user} -c '{command}'") os.system("service chrome-remote-desktop start") print("Finished Succesfully") try: if CRP == "": print("Please enter authcode from the given link") elif len(str(Pin)) < 6: print("Enter a pin more or equal to 6 digits") else: CRD(username) except NameError as e: print("'username' variable not found, Create a user first") # + id="vk2qtOTGIFsQ" cellView="form" #@title **Google Drive Mount** #@markdown Google Drive can be used as Persistance HDD for files.<br> #@markdown **Choose a method (GDFuse Recommended)** mount_method = "GDFuse" #@param ["GDFuse", "Native"] #@markdown **Options for GDFuse** <br> #@markdown - Visit https://github.com/astrada/google-drive-ocamlfuse/wiki/Team-Drives label = "default" #@param {type:"string"} mount_team_drive = False #@param {type:"boolean"} force_mount = False #@param {type:"boolean"} import os import subprocess class Drive(): creds = {} mountpoint = "" deps = False debug = False def __init__(self, mountpoint="/content/drives", debug=False): os.makedirs(mountpoint, exist_ok=True) self.mountpoint = mountpoint self.debug = debug def _mount_gdfuse(self, mount_dir): os.makedirs(mount_dir, exist_ok=True) subprocess.run( ['google-drive-ocamlfuse', '-o', 'allow_other', '-label', label, mount_dir, ] ) print(f"Drive Mounted at {mount_dir}. 
If you get input/output error, then `team_drive_id` might be wrong or not accessible.") def _unmount_gdfuse(self, mount_dir): subprocess.run( ['fusermount', '-u', mount_dir, ] ) os.rmdir(mount_dir) def auth(self): from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() creds = GoogleCredentials.get_application_default() self.creds = { "id": creds.client_id, "secret": creds.client_secret } def gdfuse(self, label, mound_team_drive=False, force_mount=False): import getpass if not self.creds: self.auth() if not self.deps: print("Installing google-drive-ocamlfuse") subprocess.run(['apt', 'install', 'software-properties-common python-software-properties module-init-tools', '-y']) subprocess.run(['add-apt-repository', 'ppa:alessandro-strada/ppa', '-y']) subprocess.run(['apt', 'update']) subprocess.run(['apt', 'install', '--assume-yes', 'google-drive-ocamlfuse']) self.deps = True base_dir = '/root/.gdfuse' config_dir = f'{base_dir}/{label}' mount_dir = f"{self.mountpoint}/{label}" if force_mount and os.path.exists(mount_dir): self._unmount_gdfuse(mount_dir) elif os.path.exists(mount_dir): print("Drive already mounted") return if not os.path.exists(config_dir) or force_mount: print(f"Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id={self.creds['id']}&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force") vcode = getpass.getpass("Enter the Auth Code: ") subprocess.run( ['google-drive-ocamlfuse', '-headless', '-id', self.creds['id'], '-secret', self.creds['secret'], '-label', label, ], text=True, input=vcode ) if mount_team_drive: team_drive_id = input("Enter Team Drive ID: ") subprocess.run( ['sed', '-i', f's/team_drive_id=.*$/team_drive_id={team_drive_id}/g', f'{config_dir}/config' ] ) else: subprocess.run( ['sed', '-i', f's/team_drive_id=.*$/team_drive_id=/g', 
f'{config_dir}/config' ] ) self._mount_gdfuse(mount_dir) def native(self): from google.colab import drive mount_dir = f"{self.mountpoint}/Native" drive.mount(mount_dir) if 'drive' not in globals(): try: drive = Drive(f"/home/{username}/drives") except NameError: drive = Drive('/content/drives') if mount_method == "Native": drive.native() if mount_method == "GDFuse": drive.gdfuse(label, mount_team_drive, force_mount) # + id="6-5G4t8bqGoF" #@title install app media # ! sudo apt install vlc -y > /dev/null 2>&1 # ! sudo aptitude update > /dev/null 2>&1 # ! sudo apt-get install libavcodec-extra-53 > /dev/null 2>&1 # ! sudo apt-get install -y xarchiver > /dev/null 2>&1 # ! sudo apt-get install winff winff-doc ffmpeg libavcodec-extra > /dev/null 2>&1 # ! sudo apt-get install libtxc-dxtn-s2tc0 > /dev/null 2>&1 # ! sudo apt-get install mesa-utils > /dev/null 2>&1 # + id="sKjfSJ0Equfw" #@title ins bowr # ! sudo apt install firefox > /dev/null 2>&1 # ! sudo apt install software-properties-common apt-transport-https wget > /dev/null 2>&1 # ! wget -q https://packages.microsoft.com/keys/microsoft.asc -O- | sudo apt-key add - > /dev/null 2>&1 # ! sudo add-apt-repository "deb [arch=amd64] https://packages.microsoft.com/repos/edge stable main" > /dev/null 2>&1 # ! sudo apt install microsoft-edge-dev > /dev/null 2>&1 # ! wget -qO- https://deb.opera.com/archive.key | sudo apt-key add - > /dev/null 2>&1 # ! sudo add-apt-repository "deb [arch=i386,amd64] https://deb.opera.com/opera-stable/ stable non-free" > /dev/null 2>&1 # ! sudo apt install opera-stable # + id="s0I8hpijsivt" #@title ins win # ! apt-get update # ! sudo dpkg --add-architecture i386 # ! wget -nc https://dl.winehq.org/wine-builds/winehq.key; sudo apt-key add winehq.key # ! sudo apt-add-repository 'deb https://dl.winehq.org/wine-builds/ubuntu/ bionic main' # ! sudo add-apt-repository ppa:cybermax-dexter/sdl2-backport # ! sudo apt update && sudo apt install --install-recommends winehq-stable # ! 
sudo apt-get install winetricks # + id="s7-eD5BCs1jq" # ! winetricks dotnet45 # + id="8icuQYnyKDLk" cellView="form" #@title **SSH** # ! pip install colab_ssh --upgrade &> /dev/null #@markdown Choose a method (Agro Recommended) ssh_method = "Agro" #@param ["Agro", "Ngrok"] #@markdown Copy authtoken from https://dashboard.ngrok.com/auth (only for ngrok) ngrokRegion = "us" #@param ["us", "eu", "ap", "au", "sa", "jp", "in"] def runAgro(): from colab_ssh import launch_ssh_cloudflared launch_ssh_cloudflared(password=password) def runNgrok(): from colab_ssh import launch_ssh from IPython.display import clear_output import getpass ngrokToken = getpass.getpass("Enter the ngrokToken: ") launch_ssh(ngrokToken, password, region=ngrokRegion) clear_output() print("ssh", user, end='@') # ! curl -s http://localhost:4040/api/tunnels | python3 -c \ # "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'][6:].replace(':', ' -p '))" try: user = username password = password except NameError: print("No user found, using username and password as '<PASSWORD>'") user='root' password='<PASSWORD>' if ssh_method == "Agro": runAgro() if ssh_method == "Ngrok": runNgrok() # + id="UoeBdz6_KE6a" cellView="form" #@title **Colab Shutdown** #@markdown To Kill NGROK Tunnel NGROK = False #@param {type:'boolean'} #@markdown To Sleep Colab Sleep = True #@param {type:'boolean'} if NGROK: # ! killall ngrok if Sleep: from time import sleep sleep(43200)
Colab RDP/Colab RDP242.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def factorial_iterative(n):
    """Compute n! with a simple accumulating loop (returns 1 for n <= 0)."""
    product = 1
    for factor in range(1, n + 1):
        product *= factor
    return product
# -

factorial_iterative(5)

# +
def factorial_recursive(n):
    """Compute n! recursively via n! = n * (n - 1)!, with 0! = 1! = 1."""
    if n in (0, 1):
        return 1
    return n * factorial_recursive(n - 1)

factorial_recursive(5)
Chapter03/Exercise51/Exercise51.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# **Question 1:**
# Compute Q = sqrt((2 * C * D) / H) with fixed C = 50, H = 30 for a
# comma-separated sequence of D values.
# Example: input 100,150,180 -> output 18,22,24

# **Answer**

C = 50
H = 30
D = (100, 150, 180)


def compute_q(d_values, c=50, h=30):
    """Return the truncated-integer Q = sqrt(2*c*d/h) for every d in d_values."""
    return [int((2 * c * d / h) ** 0.5) for d in d_values]


print(','.join(str(q) for q in compute_q(D, C, H)))

# **Question 2:**
# Given two digits X, Y build a 2-D array whose element (i, j) is i*j,
# with i = 0..X-1 and j = 0..Y-1.
# Example: 3,5 -> [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]

# **Answer**


def multiplication_grid(x, y):
    """Return an x-by-y nested list where cell (i, j) holds i*j."""
    return [[i * j for j in range(y)] for i in range(x)]


X = 3
Y = 5
multiplication_grid(X, Y)

# **Question 3:**
# Sort a comma-separated sequence of words alphabetically.
# Example: without,hello,bag,world -> bag,hello,without,world

# **Answer**

txt = 'without,hello,bag,world'
print(','.join(sorted(txt.split(','))))

# **Question 4:**
# Remove duplicates from a whitespace-separated word sequence and sort it.
# Example: "hello world and practice makes perfect and hello world again"
# -> "again and hello makes perfect practice world"

# **Answer**

txt = 'hello world and practice makes perfect and hello world again'
print(' '.join(sorted(set(txt.split(' ')))))

# **Question 5:**
# Count the letters and digits in a sentence.
# Example: "hello world! 123" -> LETTERS 10 / DIGITS 3

# **Answer**


def count_letters_digits(sentence):
    """Return a (letters, digits) tuple counted over *sentence*."""
    letters = sum(1 for ch in sentence if ch.isalpha())
    digits = sum(1 for ch in sentence if ch.isnumeric())
    return letters, digits


txt = 'hello world! 123'
n_letters, n_digits = count_letters_digits(txt)
print(f"LETTERS {n_letters}")
print(f"DIGITS {n_digits}")

# **Question 6:**
# Validate passwords: at least one lowercase letter, one digit, one uppercase
# letter, one character from [$#@], and a length between 6 and 12.
# Accept a comma-separated list and print the valid ones, comma-separated.
# Example: ABd1234@1,a F1#,2w3E*,2We3345 -> ABd1234@1

# **Answer**


def is_valid_password(password):
    """Return True when *password* satisfies all five strength criteria."""
    return (
        6 <= len(password) <= 12
        and any(ch.isnumeric() for ch in password)
        and any(ch in '$#@' for ch in password)
        and any(ch.isupper() for ch in password)
        and any(ch.islower() for ch in password)
    )


def valid_passwords(csv_text):
    """Filter a comma-separated password list down to the valid entries."""
    return [pw for pw in csv_text.split(',') if is_valid_password(pw)]


txt = "ABd1234@1,a F1#,2w3E*,2We3345"
print(','.join(valid_passwords(txt)))
Python Programming Basic Assignment - Assignment13.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# <a href="https://colab.research.google.com/github/anjali0503/Anjali-Pandey/blob/master/Data_Analysis_Amazon_Forest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Exploratory analysis of the Brazilian (Amazon) forest-fire dataset.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# Plotly for the interactive pie charts.
import plotly.graph_objects as go
import plotly.express as px
import warnings

warnings.filterwarnings("ignore")

# Load the dataset (latin-1 encoded CSV with year/state/month/number columns).
df = pd.read_csv('amazon.csv', encoding='latin1')
df

# Basic structure of the dataset.
df.info()
print(df.dtypes)
print(df.columns)
print(df.shape)  # number of rows and columns
df.columns
df.state.value_counts()  # how often each state appears
print(set(df['state'].values))  # distinct state names
print(set(df['month'].values))  # distinct month names

# Total number of fires per year.
# BUG FIX: pass the aggregation by name — handing the `sum` builtin to
# GroupBy.agg is deprecated pandas usage with the same meaning.
df_forest_fire_by_year = df.groupby(['year']).number.agg('sum')
df_forest_fire_by_year = pd.DataFrame(df_forest_fire_by_year)
df_forest_fire_by_year.number = df_forest_fire_by_year.number.map(lambda p: round(p))
df_forest_fire_by_year.plot(ylabel='Total fires by year', figsize=(10, 5))
df_forest_fire_by_year.T  # show the table alongside the plot

# Donut chart of records per state.
sns.set(rc={'figure.figsize': (15, 10)})
my_circle = plt.Circle((0, 0), 0.7, color='white')  # white centre = donut hole
plt.pie(df['state'].value_counts().values, labels=df['state'].value_counts().index)
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.show()

# Donut chart of records per month.
sns.set(rc={'figure.figsize': (15, 10)})
my_circle = plt.Circle((0, 0), 0.7, color='white')
plt.pie(df['month'].value_counts().values, labels=df['month'].value_counts().index)
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.show()

# Interactive pie charts of the same distributions.
fig = px.pie(values=df['state'].value_counts().values,
             names=df['state'].value_counts().index,
             title='Cities involving forest fires')
fig.show()

fig = px.pie(values=df['month'].value_counts().values,
             names=df['month'].value_counts().index,
             title='months involving forest fires')
fig.show()

# Fires by year and month, point size proportional to the fire count.
fig = px.scatter(df, x="year", y="month", size='number')
fig.show()

# NOTE(review): this cell duplicates the previous scatter plot; kept so the
# notebook's output sequence is unchanged.
fig = px.scatter(df, x="year", y="month", size='number')
fig.show()

# BUG FIX: seaborn >= 0.12 requires keyword arguments for barplot data;
# positional x/y raised a TypeError there. Behaviour is identical.
bar = sns.barplot(x=df['state'], y=df['number'], color="red")
bar.set_xticklabels(df['state'], rotation=45)

# Mean number of fires per state (note: rebinds df).
df = df.groupby('state')['number'].mean().reset_index()
print(df)
Data_Analysis_Amazon_Forest.ipynb
# <a href="https://colab.research.google.com/github/dmlc/gluon-cv/blob/onnx/scripts/onnx/notebooks/action-recognition/i3d_inceptionv1_kinetics400.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Action recognition with the I3D InceptionV1 Kinetics-400 model via ONNX Runtime.

# !pip3 install --upgrade onnxruntime

import numpy as np
import onnxruntime as rt
import urllib.request
import os.path

# # (Optional) We use mxnet and gluoncv to read in video frames.
# # Feel free to read in video frames your own way
# !pip3 install --upgrade mxnet gluoncv


def fetch_model():
    """Download the ONNX model once (if absent) and return its local filename."""
    if not os.path.isfile("i3d_inceptionv1_kinetics400.onnx"):
        urllib.request.urlretrieve(
            "https://apache-mxnet.s3-us-west-2.amazonaws.com/onnx/models/gluoncv-i3d_inceptionv1_kinetics400-098c004a.onnx",
            filename="i3d_inceptionv1_kinetics400.onnx")
    return "i3d_inceptionv1_kinetics400.onnx"


def prepare_video(video_path, input_shape):
    """Sample every other of the first 64 frames and return a normalised clip.

    Returns an array of shape (1, 3, num_frames, H, W), where H and W come
    from input_shape[3] and input_shape[4].
    """
    from gluoncv.data.transforms import video
    from gluoncv.utils.filesystem import try_import_decord

    decord = try_import_decord()
    vr = decord.VideoReader(video_path)
    frame_id_list = list(range(0, 64, 2))
    num_frames = len(frame_id_list)
    video_data = vr.get_batch(frame_id_list).asnumpy()
    # IDIOM FIX: the original iterated `enumerate(frame_id_list)` only to
    # recover the positional index; index the decoded frames directly.
    clip_input = [video_data[vid] for vid in range(num_frames)]
    transform_fn = video.VideoGroupValTransform(size=input_shape[3],
                                                mean=[0.485, 0.456, 0.406],
                                                std=[0.229, 0.224, 0.225])
    clip_input = transform_fn(clip_input)
    clip_input = np.stack(clip_input, axis=0)
    clip_input = clip_input.reshape((-1,) + (num_frames, 3, input_shape[3], input_shape[4]))
    clip_input = np.transpose(clip_input, (0, 2, 1, 3, 4))
    return clip_input


def prepare_label():
    """Return the list of Kinetics-400 class names."""
    from gluoncv.data import Kinetics400Attr
    return Kinetics400Attr().classes


# **Make sure to replace the video you want to use**
model = fetch_model()
video_path = 'Your input'
clip_input = prepare_video(video_path, (1, 3, 32, 224, 224))
label = prepare_label()

# Create an ONNX inference session and get the input name.
onnx_session = rt.InferenceSession(model, None)
input_name = onnx_session.get_inputs()[0].name

pred = onnx_session.run([], {input_name: clip_input.astype('float32')})[0]

# # (Optional) We use mxnet to process the result.
# # Feel free to process the result your own way
# !pip3 install --upgrade mxnet

import mxnet as mx

pred = mx.nd.array(pred)
topK = 5
ind = mx.nd.topk(pred, k=topK)[0].astype('int')
print('The input is classified to be')
for i in range(topK):
    print(' [%s], with probability %.3f.' %
          (label[ind[i].asscalar()], mx.nd.softmax(pred)[0][ind[i]].asscalar()))
scripts/onnx/notebooks/action-recognition/i3d_inceptionv1_kinetics400.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="O_tXv7s_PUIT" # # Importing the libraries # + id="52x8GRbrPUIV" import numpy as np import pandas as pd # + id="CW4m54HKPUIW" import pandas as pd import numpy as np import matplotlib.pyplot as plt import nltk import seaborn as sns from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from sklearn.feature_extraction.text import TfidfVectorizer from keras.layers import SpatialDropout1D from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.model_selection import GridSearchCV import re from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # + colab={"base_uri": "https://localhost:8080/"} id="y5KoKckTMtrp" outputId="0ac6d99a-cccb-4f04-b8ac-75d118b309b9" from google.colab import drive drive.mount('/content/drive',force_remount=True) # + colab={"base_uri": "https://localhost:8080/"} id="F4iMHYYfxDj6" outputId="71dcd802-b184-480f-cf57-ef5b8fd2e839" df=pd.read_csv("/content/drive/MyDrive/Tamil_30k.zip (Unzipped Files)/Tamil_30k.csv") df.count() # + colab={"base_uri": "https://localhost:8080/"} id="4L1owN4vrCfa" outputId="ee703dea-25ea-446d-d323-77508ddb77a1" df.website.value_counts() # + id="7E5aCEO5RfnE" df=df.drop(['website', 'section' ,'sub_section','article_url' ,'title' ,'author','author_link','published_date','hashtag','article_summary','image_url' ,'crawl_date'], axis=1) # + colab={"base_uri": "https://localhost:8080/"} id="tcXtco9VQKvU" outputId="af8ac260-8040-45b7-fbb4-2dc270dd2995" 
df.Category.value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="rD91rhK0Bx8k" outputId="90a890e9-7cc5-46af-d8ba-dcab8e6925d4" df.head() # + colab={"base_uri": "https://localhost:8080/"} id="R-C7_AdWtElj" outputId="60d58a20-8ad4-475e-fc18-c00cf4bbc195" target_category = df['Category'].unique() target_category # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="xSHURJALOswW" outputId="10c53540-ecd2-44b0-95db-01cccd016d9c" df['categoryId'] = df['Category'].factorize()[0] df # + colab={"base_uri": "https://localhost:8080/", "height": 358} id="1Cp43370Oz5u" outputId="82d5dee5-b127-4299-bc98-88ac1f7db1a2" df.groupby('Category').categoryId.count().plot.bar(ylim=0) # + colab={"base_uri": "https://localhost:8080/"} id="BHtN9kHUtGqO" outputId="2c8d987a-83e6-4347-b8f8-8b3dbc35133f" from google.colab import drive drive.mount('/content/drive') # + id="hCg9aVy2xS5i" from sklearn.model_selection import train_test_split # split the data into test and train by maintaining same distribution of output varaible 'y_true' [stratify=y_true] X_train1, X_test, y_train1, y_test = train_test_split(df, df.Category, stratify=df.Category, test_size=0.2,random_state=42) # split the train data into train and cross validation by maintaining same distribution of output varaible 'y_train' [stratify=y_train] X_train, X_train_cv, y_train, y_train_cv = train_test_split(X_train1, y_train1, stratify=y_train1, test_size=0.2,random_state= 42) # + colab={"base_uri": "https://localhost:8080/"} id="cOVfRwuySXfk" outputId="39aebdfe-270d-46c1-a079-1d126fed2e9e" print(X_train.shape,y_train.shape) print(X_train_cv.shape,y_train_cv.shape) print(X_test.shape,y_test.shape) # + id="eGAXF_-SB2bi" from tqdm import * class Data_cleaner: def Data_Cleaning(self,text): #text cleaning text=re.sub(r'(\d+)',r'',text) text=text.replace(u',','') text=text.replace(u'"','') text=text.replace(u'(','') text=text.replace(u')','') text=text.replace(u'"','') text=text.replace(u':','') 
text=text.replace(u"'",'') text=text.replace(u"‘‘",'') text=text.replace(u"’’",'') text=text.replace(u"''",'') text=text.replace(u".",'') text=text.replace(u"*",'') text=text.replace(u"#",'') text=text.replace(u'"','') text=text.replace(u":",'') text=text.replace(u"|",'') text=text.replace(u":","") text=text.replace(u'"',"") text = re.sub('\s+',' ', text) #Split the sentences sentences=text.split(u"।") #print(sentences) #Tokenizing sentences_list=sentences tokens=[] for each in sentences_list: word_list=each.split(' ') tokens=tokens+word_list #Remove token with only space for tok in tokens: tok=tok.strip() #Remove hyphens in tokes for each in tokens: if '-' in each: tok=each.split('-') tokens.remove(each) tokens.append(tok[0]) tokens.append(tok[1]) tokens = [i.lower() for i in tokens] return tokens def Text_joiner(self,txt): lst = ' '.join(self.Data_Cleaning(txt)) return lst #you need to pass the text data only def Structured_Data(self,sen): lst3 = [] for i in tqdm(range(len(sen))): lst1 = self.Text_joiner(sen[i]) lst3.append(lst1) return lst3 # + id="k82oCfbaCUqy" X_train.loc[:,'text']=X_train.loc[:,'text'].apply(str) # + id="b02q-pBMB2Su" cl = Data_cleaner() # + colab={"base_uri": "https://localhost:8080/"} id="Cunedel7B2I1" outputId="d12e13bc-ac56-4c74-ec12-b76ce2af8cbe" lstt = cl.Structured_Data(X_train.text.values) # + id="EXglBcpyCwJ5" X_train['cleaned_text'] = lstt # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="7-CGkOnFC3E5" outputId="54263627-15f1-4851-e2b1-7364af9142e2" X=X_train.drop(['Category','categoryId','index'],axis=1) X # + id="32961lg1Ca4Z" # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="bE4L1knXC29-" outputId="fdf48ba2-4b85-4595-b28a-cd8cf1bdb216" X_train['cleaned_text'] = lstt X_train # + id="mWznt-cZDPK_" X_train_cv.loc[:,'text']=X_train_cv.loc[:,'text'].apply(str) # + colab={"base_uri": "https://localhost:8080/"} id="zIi6sjn7DGWD" outputId="51e105b2-688e-417b-eb71-66c9fbca9b83" d_cv = 
cl.Structured_Data(X_train_cv.text.values) # + id="5ybkQU7WDaWl" X_train_cv.loc[:,"cleaned_text"] = d_cv # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="JJ43IsGaDgKP" outputId="97bac86a-64c5-41e0-f8fe-568acdedf773" X_train_cv.head() # + id="cIt9SrUPM8qd" X_train_cv["cleaned_text"] = d_cv # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="6ANdudWRM8hB" outputId="5b856671-edc4-4296-fce8-a1de5916a2b8" X_train_cv # + id="hDfhUTW0Dfw1" X_test.loc[:,'text']=X_test.loc[:,'text'].apply(str) # + colab={"base_uri": "https://localhost:8080/"} id="q1DboY7GDuYZ" outputId="2bfc7f5b-0ba8-4722-c7f5-b96fdb68b987" d_test = cl.Structured_Data(X_test.text.values) # + id="yle0UXcUDuKE" X_test["cleaned_text"] = d_test # + id="0tQlsIsxEAYG" X_test = X_test.drop(columns=["cleaned_text"]) # + id="5EB6kCsPNOew" X_test["cleaned_text"] = d_test # + id="9c7HhraHEAKs" X_train = X_train[X_train["Category"]!='others'] # + id="59CnHD5ZxyBQ" X_train_cv= X_train_cv[X_train_cv["Category"]!='others'] # + id="tQHBhh51yJ6Y" X_test = X_test[X_test["Category"]!='others'] # + id="npp0Aba4PUIY" #vectorization from sklearn.feature_extraction.text import TfidfVectorizer vec = TfidfVectorizer() vec = vec.fit(X_train['cleaned_text']) X_train_bow = vec.transform(X_train['cleaned_text']) X_train_cv_bow = vec.transform(X_train_cv['text']) X_test_bow = vec.transform(X_test['cleaned_text']) # + colab={"base_uri": "https://localhost:8080/"} id="Rm-GqWU8PUIZ" outputId="1bc8b1f8-10c9-4625-8756-e2cc3f9173a7" print(X_train_bow.shape) print(X_train_cv_bow.shape) print(X_test_bow.shape) # + colab={"base_uri": "https://localhost:8080/"} id="vv_K8T2fPUIZ" outputId="15180eee-c428-4ee0-8190-05d1efa47daa" from sklearn.preprocessing import LabelEncoder enc_labels = LabelEncoder() X_train.loc[:, "Category"] = enc_labels.fit_transform(X_train["Category"]) # + colab={"base_uri": "https://localhost:8080/"} id="P9ZDH2GNPUIZ" outputId="39a9eda3-81c1-4d05-f5bb-ffda954e03b4" from 
sklearn.preprocessing import LabelEncoder enc_labels1 = LabelEncoder() X_train_cv.loc[:, "Category"] = enc_labels1.fit_transform(X_train_cv["Category"]) # + id="Iv1gXDqlPUIa" from sklearn.preprocessing import LabelEncoder enc_labels2 = LabelEncoder() X_test.loc[:, "Category"] = enc_labels2.fit_transform(X_test["Category"]) # + id="eHYyn_rnPUIa" y_train_label = np.array(list(X_train['Category'])) y_train_cv_label = np.array(list(X_train_cv['Category'])) y_test_label = np.array(list(X_test['Category'])) # + id="yvWpPxSTPUIa" from sklearn.model_selection import GridSearchCV # + colab={"base_uri": "https://localhost:8080/"} id="7sig9texPUIb" outputId="47a5ec89-6a70-4c83-ffe4-40cc95b10037" #Importing Multinomial naive Bayes from sklearn.naive_bayes import MultinomialNB alpha = [0.00001, 0.0001, 0.001, 0.1, 1, 10, 100,1000] clf = MultinomialNB() alpha_range = {'alpha': [0.00001, 0.0001, 0.001, 0.1, 1, 10, 100,1000]} clf_Cross = GridSearchCV(clf ,param_grid = alpha_range ,scoring='neg_log_loss',cv=5,return_train_score=True) clf_Cross.fit(X_train_bow,y_train_label) clf_Cross.best_params_ # + colab={"base_uri": "https://localhost:8080/"} id="Nlqm_hBuPUIb" outputId="4372e947-e1a9-4bb0-d350-28a35839b659" from sklearn.calibration import CalibratedClassifierCV nv_clf = MultinomialNB(alpha=1) nv_clf.fit(X_train_bow, y_train_label) sig_clf = CalibratedClassifierCV(nv_clf, method="sigmoid") sig_clf.fit(X_train_bow, y_train_label) # + colab={"base_uri": "https://localhost:8080/"} id="ShTAjWaNPUIc" outputId="5ff01b84-a792-4c94-9db1-0a8813a74a15" predict_y = sig_clf.predict_proba(X_train_bow) predict_acc = nv_clf.predict(X_train_bow) train_acc= accuracy_score(y_train_label, predict_acc) print('For alpha = ', 1, "The train accuracy is:",train_acc) print("**************************************************************************************") predict_y = sig_clf.predict_proba(X_train_cv_bow) predict_acc = nv_clf.predict(X_train_cv_bow) print('For alpha = ', 1, "The cv accuracy 
is:",accuracy_score(y_train_cv_label, predict_acc)) print("**************************************************************************************") predict_y = sig_clf.predict_proba(X_test_bow) predict_acc = nv_clf.predict(X_test_bow) print('For alpha = ', 1, "The test accuracy is:",accuracy_score(y_test_label, predict_acc)) # + colab={"base_uri": "https://localhost:8080/"} id="XW2Tn4zjstEf" outputId="20cb7512-83a5-4aa4-b125-789c9b61d3c2" print(classification_report(test_predict, Y_test, target_names=target_category)) # + id="oaexB1qDmtn0" cm = confusion_matrix(Y_test, y_pred) # + colab={"base_uri": "https://localhost:8080/"} id="dLGaT8aBr6rx" outputId="858cb8cf-5ce1-4c12-fccf-3287938b3c19" cm # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="572_T_ZQsEIr" outputId="cfb5be6a-96e7-437b-e425-f65eefa0e4a7" #plotting heatmap of confusion matrix sns.heatmap(cm,square=True,annot=True,fmt='d',cbar=False,center=0) plt.xlabel("predicted labels") plt.ylabel("true labels") # + colab={"base_uri": "https://localhost:8080/", "height": 333} id="b7MJ0VC1nvFm" outputId="57e2966d-1fae-40cf-c7f5-de41bd4aee69" from sklearn.metrics import plot_confusion_matrix disp=plot_confusion_matrix(nb,y_pred, Y_test) # + id="i48ohZSzPUIc" #testing txt=["""பிளஸ் 2 தேர்வு ரத்துக்குள் ஒளிந்திருக்கும் ஆபத்துகள். சிபிஎஸ்இ மாணவர்களுக்கான 12ஆம் வகுப்புத் தேர்வை ரத்துசெய்யும் முடிவை ஒன்றிய அரசு எடுத்திருக்கிறது. விரைவில் இது குறித்து முடிவெடுங்கள் என்று உச்ச நீதிமன்றம் கூறுயதையடுத்துப் பிரதமர் நரேந்திர மோடியின் தலைமையில் நடந்த கூட்டத்தில் இந்த முடிவு எடுக்கப்பட்டிருக்கிறது. சிபிஎஸ்இ தேர்வு ரத்து அறிவிப்பைப் பின்பற்றி தமிழக அரசும் அதே முடிவை எடுக்கும் என்ற கருத்து பரவலாக இருந்துவருகிறது. """] # + id="JoGjaZucNhEJ" txt1=["""ராஜமௌலியின் ‘ஆர்.ஆர்.ஆர்’ டிரெய்லர் எப்போது ?.. கசிந்தது புதிய தகவல். ராஜமௌலியின் பிரம்மாண்ட இயக்கத்தில் உருவாகி வருகிறது ‘ஆர்.ஆர்.ஆர்’. சுதந்திர போராட்ட வீரர்களான சீதராமராஜு, கொமராம்பீம் ஆகியோர் வாழ்க்கையை மையமாக வைத்து இப்படம் உருவாகி வருகிறது. 
மிகுந்த எதிர்பார்ப்பை ஏற்படுத்தியுள்ள இப்படத்தில் ராம் சரண் மற்றும் ஜூனியர் என்.டி.ஆர் ஆகியோர் இணைந்து நடித்துள்ளனர். """] # + id="_rVUG3v8PUId" colab={"base_uri": "https://localhost:8080/"} outputId="7c8daea4-fa9a-4841-bb39-2b399ee0bd95" #Prediction for first article cln=cl.Structured_Data(txt) vect=vec.transform(cln) sig_clf.predict(vect) enc_labels.inverse_transform([8]) # + id="1A_io7afD9IH" colab={"base_uri": "https://localhost:8080/"} outputId="c470a682-62b1-4726-88e6-646c9e6b4887" #prediction for second article cln=cl.Structured_Data(txt1) vect1=vec.transform(cln) sig_clf.predict(vect1) enc_labels.inverse_transform([3]) # + id="6BAALpX3yxB6" # + id="hiOFTsS6yxHS" # + id="dTb62ZtHyxKN" # + id="X8QdNNnAyxNt" # + id="hEYbsGXdyxS3" # + id="IaXgcaNyKNYd" # + id="rxVoGjdPKNda" # + id="rtzDCdyJKNgk" # + id="00fLgwOkKNim" # + id="0ANDc4D-KNlG" # + id="n5DC06pRKNpZ" # + id="0VQojn0-yxWM" # + id="IyFMXUKYyxbR" # + id="JuNQ5L44yxej" # + id="BHl9-HkizhgE" # + id="nqS845Cuzh6e" # + id="iUDw-nlhzh_J" # + id="39wwd1IeziC_" # + id="sX4ad1IEziIO" # + id="g5r4ayCTziM6" # + colab={"base_uri": "https://localhost:8080/"} id="FgVHhb3GyxjO" outputId="2b3e83a6-e7cb-4cfc-c397-4092dfa317a8" from keras.layers import SpatialDropout1D from sklearn.metrics import classification_report from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve from sklearn.metrics import precision_recall_curve from sklearn.model_selection import GridSearchCV from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers.embeddings import Embedding from keras.preprocessing import sequence from sklearn.feature_selection import RFE import re from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" from sklearn.naive_bayes import MultinomialNB from sklearn.naive_bayes import GaussianNB from sklearn.metrics import roc_auc_score from sklearn.pipeline import Pipeline from sklearn.metrics 
import classification_report from sklearn.metrics import accuracy_score df.loc[:,'text']=df.loc[:,'text'].apply(str) cl = Data_cleaner() lstt = cl.Structured_Data(df.text.values) df['cleaned_text'] = lstt X_train, X_test, Y_train, Y_test = train_test_split(df.cleaned_text,df.Category, test_size = 0.3, random_state = 60,shuffle=True, stratify=df.Category) nb = Pipeline([('tfidf', TfidfVectorizer()), ('clf', MultinomialNB()), ]) nb.fit(X_train,Y_train) test_predict = nb.predict(X_test) train_accuracy = round(nb.score(X_train,Y_train)*100) test_accuracy =round(accuracy_score(test_predict, Y_test)*100) from sklearn.metrics import confusion_matrix classifier=nb.fit(X_train,Y_train) y_pred = classifier.predict(X_test)
Classification Of Tamil News Articles By Naive Bayes Model Using Natural Language Processing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # **Grammy scraper: scraping Wikipedia tables with pandas**
# Based on "Scraper Oscar", available at:
# https://github.com/juditecypreste/Scraper-Oscar

from google.colab import drive
import pandas as pd

drive.mount('drive')

# **Category:** Album of the Year
# Read every HTML table from the Portuguese Wikipedia page.
album_do_ano = pd.read_html("https://pt.wikipedia.org/wiki/Grammy_Award_para_%C3%A1lbum_do_ano")

# The page splits the award by decade, one table each; table index 2 covers
# both the 50s and the 60s, and the award tables occupy indices 2..8.
decada_50e60 = pd.DataFrame(album_do_ano[2])
decada70 = pd.DataFrame(album_do_ano[3])
decada80 = pd.DataFrame(album_do_ano[4])
decada90 = pd.DataFrame(album_do_ano[5])
decada00 = pd.DataFrame(album_do_ano[6])
decada10 = pd.DataFrame(album_do_ano[7])
decada20 = pd.DataFrame(album_do_ano[8])

# Concatenate the decades and reset the row index.
frames = [decada_50e60, decada70, decada80, decada90, decada00, decada10, decada20]
premio_album_do_ano = pd.concat(frames).reset_index(drop=True)
premio_album_do_ano

# Drop the nominees column.
premio_album_do_ano.drop(columns='Indicados', axis=1, inplace=True)

# Check for null fields.
pd.isnull(premio_album_do_ano)

# NOTE(review): the result of this split is discarded; kept only as a
# notebook display cell, as in the original.
premio_album_do_ano['Vencedores'].str.split(expand=True)
premio_album_do_ano

# Some 'Vencedores' rows carry extra information after a ' . ' separator;
# keep only the performer name, which comes before it.
premio_album_do_ano['Interprete'] = premio_album_do_ano['Vencedores'].str.split(' . ').str[0]
premio_album_do_ano

# Drop the raw winners column, reorder, and save to Google Drive.
premio_album_do_ano.drop(columns='Vencedores', axis=1, inplace=True)
melhor_album = premio_album_do_ano[['Ano', 'Álbum', 'Interprete']]
melhor_album.to_csv('melhor_album.csv')
# BUG FIX: the original copied a non-existent 'data.csv'; copy the file that
# was actually written above.
# !cp melhor_album.csv "drive/My Drive/"
melhor_album

# **Category:** Song of the Year
# https://en.wikipedia.org/wiki/Grammy_Award_for_Song_of_the_Year
# The Portuguese page is outdated, so the English Wikipedia page is used.
melhor_cancao = pd.read_html("https://en.wikipedia.org/wiki/Grammy_Award_for_Song_of_the_Year")

# Here all the awardees live in a single table (index 1).
premio_cancao_do_ano = pd.DataFrame(melhor_cancao[1])
premio_cancao_do_ano

# Drop the 'Ref.', 'Nominees', 'Nationality' and 'Winner(s)' columns.
premio_cancao_do_ano.drop(['Ref.', 'Nominees', 'Nationality', 'Winner(s)'], axis=1, inplace=True)
premio_cancao_do_ano

# The winning song titles are quoted inside the 'Work' column; extract the
# quoted part into a new 'Cancao' column.
premio_cancao_do_ano['Cancao'] = premio_cancao_do_ano['Work'].str.extract(pat='"(.*?)"')
premio_cancao_do_ano

# Drop the raw 'Work' column.
premio_cancao_do_ano.drop(['Work'], axis=1, inplace=True)
premio_cancao_do_ano

# Rename 'Year[I]' -> 'Ano' and 'Performing artist(s)[II]' -> 'Interprete'.
premio_cancao_do_ano.rename(columns={'Year[I]': 'Ano',
                                     'Performing artist(s)[II]': 'Interprete'},
                            inplace=True)
premio_cancao_do_ano

# Reorder the columns and save as CSV.
melhor_musica = premio_cancao_do_ano[['Ano', 'Cancao', 'Interprete']]
melhor_musica.to_csv('melhor_musica.csv')
# !cp melhor_musica.csv "drive/My Drive/"
melhor_musica
Grammy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.1 64-bit
#     name: python391jvsc74a57bd0e90483a7cb1e7e22226eb8a680b135918114a425dc58b4d338fc085a691d3c36
# ---

"""
SIR epidemic on a periodic 2-D grid network, compared with the mean-field
(ODE) solution integrated with scipy.

dn    = dimension of society (dn x dn grid)
t_rec = time of recovery
TS    = time passed by each step
d_p   = days passed
"""
import networkx as nx
import matplotlib.pyplot as plt
import random
import scipy.integrate as spi
import numpy as np

dn = 64
t_rec = 5
P = 1 / 10          # per-contact infection probability per day
P_ch = 2000         # temporary random long-range contacts added each day
# Effective mean-field infection rate: 4 grid neighbours plus the expected
# extra random contacts per node.
beta = P * 4 * (0.91 + P_ch / (2 * (dn ** 2)))
gamma = 1 / t_rec   # mean-field recovery rate
TS = 1.0
S0 = 1 - 1 / dn ** 2
I0 = 1 / dn ** 2
INPUT = (S0, I0, 0.0)
ND = 70             # number of simulated days
d_p = 0
snode = []
inode = []
rnode = []
sdata = []
idata = []
rdata = []
tdata = []

G = nx.grid_2d_graph(dn, dn, periodic=True)
pos = nx.spring_layout(G)

# Everyone starts susceptible ('s') with an infection timer of 0;
# node (0, 0) seeds the epidemic.
for n_1 in G.nodes:
    G.nodes[n_1]['x'] = ['s']
    G.nodes[n_1]['t'] = [0]
G.nodes[0, 0]['x'] = ['i']


def pps(n):
    """Print an in-place progress bar for day *n* out of ND.

    (The original computed the percentage from the global d_p and shadowed
    the global infection probability P with a local; both tidied here —
    callers pass d_p, so output is unchanged.)
    """
    pct = "{:.2f}".format(n * 100 / ND)
    print("\r", end="")
    print("processes", pct, "% completed: ", end="")
    filled = int(50 * n / ND)
    print((filled + 1) * "🟩" + (50 - filled - 1) * "⬜️", end="")


while d_p < ND:
    pps(d_p)

    # Add up to P_ch random long-range contacts for this day only.
    t_con = []
    for n_7 in range(P_ch):
        rand_1 = tuple(random.choices(list(G.nodes), k=2))
        if list(G.edges).count(rand_1) == 0:
            t_con.append(rand_1)
    G.add_edges_from(t_con)

    # Snapshot of currently infected nodes.
    inf = [node for node in G.nodes if G.nodes[node]['x'] == ['i']]

    # Advance the infection timer of every infected node.
    for n_5 in G.nodes:
        if G.nodes[n_5]['x'] == ['i']:
            G.nodes[n_5]['t'][0] += TS

    # NOTE(review): the flattened original makes the exact loop nesting
    # ambiguous; this reconstruction lets every infected node try to infect
    # its neighbours each day and recover once its timer reaches t_rec —
    # confirm against the original notebook.
    for node in inf:
        if G.nodes[node]['t'] == [t_rec]:
            G.nodes[node]['x'] = ['r']
        for neighbour in list(G.adj[node]):
            if G.nodes[neighbour]['x'] != ['r'] and random.random() < P:
                G.nodes[neighbour]['x'] = ['i']

    d_p += 1

    # Count the three compartments for today's data point.
    snode = []
    inode = []
    rnode = []
    for n_6 in G.nodes:
        if G.nodes[n_6]['x'] == ['s']:
            snode.append(G.nodes[n_6])
        if G.nodes[n_6]['x'] == ['i']:
            inode.append(G.nodes[n_6])
        if G.nodes[n_6]['x'] == ['r']:
            rnode.append(G.nodes[n_6])
    sdata.append(len(snode))
    idata.append(len(inode))
    rdata.append(len(rnode))
    tdata.append(d_p)

    # Remove the day's temporary contacts again.
    G.remove_edges_from(t_con)

print("\r", "computing successfully completed! ", 50 * "🟦")


def diff_eqs(INP, t):
    """Mean-field SIR right-hand side: returns (dS/dt, dI/dt, dR/dt)."""
    Y = np.zeros((3))
    V = INP
    Y[0] = -beta * V[0] * V[1]
    Y[1] = beta * V[0] * V[1] - gamma * V[1]
    Y[2] = gamma * V[1]
    return Y


t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
# Scale the compartment fractions up to absolute population counts.
for i in range(len(RES)):
    RES[i, 0] *= dn ** 2
    RES[i, 1] *= dn ** 2
    RES[i, 2] *= dn ** 2

# Plot network simulation (solid) against the mean-field solution (dashed).
plt.figure(figsize=(15, 7))
plt.rcParams.update({'font.size': 10})
plt.grid(color='navy', linestyle='--', linewidth=0.5)
plt.title('SIR Network and Mean-field simulation')
plt.plot(tdata, sdata, 'darkblue', label='Susceptibles network')
plt.plot(tdata, idata, 'brown', label='Infectious network')
plt.plot(tdata, rdata, 'darkgreen', label='Recovereds network')
plt.legend(loc=0)
plt.xlabel('Time(day)')
plt.ylabel('people number')
plt.plot(RES[:, 0], '--b', label='Susceptibles MFT')
plt.plot(RES[:, 1], '--r', label='Infectious MFT')
plt.plot(RES[:, 2], '--g', label='Recovereds MFT')
plt.legend(loc=0)
plt.xlabel('Time(day)')
plt.ylabel('people number')
plt.savefig('SIR simulation')
plt.show()
Simple-SIR-model-in-network-and-MFT/SIR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Le réseau en Python côté client # # > application socket client basique # # - toc: true # - badges: true # - comments: false # - categories: [ISN, socket] # Avant d'ouvrir ce classeur, vous devrez avoir traité le classeur **Python 7 - Découverte du réseau et serveur** jusqu'à la partie IV. # Voici un programme client basique écrit en Python. On utilise toujours la librairie ***socket*** qui gère la communication réseau en Python. # # Exécutez la cellule suivante cellule après avoir lancé le serveur sur le classeur **Python 7 - Découverte du réseau et serveur**, puis rendez-vous dans l'onglet serveur pour initier la discussion. # # Remarquez que quand le client tourne, le kernel python est monopolisé et ne permet plus l'exécution d'une autre cellule, d'où la nécessité d'avoir deux classeurs distincts. # # Néanmoins, vous pourrez compléter la section suivante sur la compréhension du programme et répondre aux questions posées pendant que le client tourne. # ## Le programme client # + from socket import * SERVEUR , PORT = '127.0.0.1' , 50000 liaison = socket(AF_INET, SOCK_STREAM) try: liaison.connect((SERVEUR, PORT)) message="" except error: print("La connexion a échoué.") message="FIN" # serveur et service while message.upper() != "FIN" : message = liaison.recv(1024).decode("utf8") print("serveur >", message) if message.upper() != "FIN" : message = input("moi > ") liaison.send(message.encode("utf8")) print("Connexion terminée." ) liaison.close() # - # ## Compréhesion du programme client # 1) Que se passe t-il si vous lancez le client ***avant*** d'avoir lancé le serveur ? # # Votre réponse ... # 2) Quelle commande est responsable sur le client de l’envoi du message au serveur ? # # Votre réponse ... 
# 3) Écrire en langage naturel l’algorithme correspondant au programme client # # Votre réponse ... # # A vous de jouer : Jeu du juste prix en réseau # Ecrire ci-dessous le code client du jeu du juste prix # + # Votre programme ici... # -
_notebooks/2020-03-07-Python7 - Reseau client.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # When we talk about quantum computing, we actually talk about several different paradigms. The most common one is gate-model quantum computing, in the vein we discussed in the previous notebook. In this case, gates are applied on qubit registers to perform arbitrary transformations of quantum states made up of qubits. # # The second most common paradigm is quantum annealing. This paradigm is often also referred to as adiabatic quantum computing, although there are subtle differences. Quantum annealing solves a more specific problem -- universality is not a requirement -- which makes it an easier, albeit still difficult engineering challenge to scale it up. The technology is up to 2000 superconducting qubits in 2018, compared to the less than 100 qubits on gate-model quantum computers. D-Wave Systems has been building superconducting quantum annealers for over a decade and this company holds the record for the number of qubits -- 2048. More recently, an IARPA project was launched to build novel superconducting quantum annealers. A quantum optics implementation was also made available by QNNcloud that implements a coherent Ising model. Its restrictions are different from superconducting architectures. # # Gate-model quantum computing is conceptually easier to understand: it is the generalization of digital computing. Instead of deterministic logical operations of bit strings, we have deterministic transformations of (quantum) probability distributions over bit strings. Quantum annealing requires some understanding of physics, which is why we introduced classical and quantum many-body physics in a previous notebook. 
Over the last few years, quantum annealing inspired gate-model algorithms that work on current and near-term quantum computers (see the notebook on variational circuits). So in this sense, it is worth developing an understanding of the underlying physics model and how quantum annealing works, even if you are only interested in gate-model quantum computing. # # While there is a plethora of quantum computing languages, frameworks, and libraries for the gate-model, quantum annealing is less well-established. D-Wave Systems offers an open source suite called Ocean. A vendor-independent solution is XACC, an extensible compilation framework for hybrid quantum-classical computing architectures, but the only quantum annealer it maps to is that of D-Wave Systems. Since XACC is a much larger initiative that extends beyond annealing, we choose a few much simpler packages from Ocean to illustrate the core concepts of this paradigm. However, before diving into the details of quantum annealing, it is worth taking a slight detour to connect the unitary evolution we discussed in a closed system and in the gate-model paradigm and the Hamiltonian describing a quantum many-body system. We also briefly discuss the adiabatic theorem, which provides the foundation why quantum annealing would work at all. # # Unitary evolution and the Hamiltonian # # We introduced the Hamiltonian as an object describing the energy of a classical or quantum system. Something more is true: it gives a description of a system evolving with time. This formalism is expressed by the Schrödinger equation: # # $$ # i\hbar {\frac {d}{dt}}|\psi(t)\rangle = H|\psi(t)\rangle, # $$ # # where $\hbar$ is the reduced Planck constant. Previously we said that it is a unitary operator that evolves state. That is exactly what we get if we solve the Schrödinger equation for some time $t$: $U = \exp(-i Ht/\hbar)$. Note that we used that the Hamiltonian does not depend on time. 
In other words, every unitary we talked about so far has some underlying Hamiltonian. # # The Schrödinger equation in the above form is the time-dependent variant: the state depends on time. The time-independent Schröndinger equation reflects what we said about the Hamiltonian describing the energy of the system: # # $$ # H|\psi \rangle =E|\psi \rangle, # $$ # # where $E$ is the total energy of the system. # # The adiabatic theorem and adiabatic quantum computing # # An adiabatic process means that conditions change slowly enough for the system to adapt to the new configuration. For instance, in a quantum mechanical system, we can start from some Hamiltonian $H_0$ and slowly change it to some other Hamiltonian $H_1$. The simplest change could be a linear schedule: # # $$ # H(t) = (1-t) H_0 + t H_1, # $$ # # for $t\in[0,1]$ on some time scale. This Hamiltonian depends on time, so solving the Schrödinger equation is considerably more complicated. The adiabatic theorem says that if the change in the time-dependent Hamiltonian occurs slowly, the resulting dynamics remain simple: starting close to an eigenstate, the system remains close to # an eigenstate. This implies that if the system started in the ground state, if certain conditions are met, the system stays in the ground state. # # We call the energy difference between the ground state and the first excited state the gap. If $H(t)$ has a nonnegative gap for each $t$ during the transition and the change happens slowly, then the system stays in the ground state. If we denote the time-dependent gap by $\Delta(t)$, a course approximation of the speed limit scales as $1/\min(\Delta(t))^2$. # # This theorem allows something highly unusual. We can reach the ground state of an easy-to-solve quantum many body system, and change the Hamiltonian to a system we are interested in. For instance, we could start with the Hamiltonian $\sum_i \sigma^X_i$ -- its ground state is just the equal superposition. 
Let's see this on two sites: # + import numpy as np np.set_printoptions(precision=3, suppress=True) X = np.array([[0, 1], [1, 0]]) IX = np.kron(np.eye(2), X) XI = np.kron(X, np.eye(2)) H_0 = - (IX + XI) λ, v = np.linalg.eigh(H_0) print("Eigenvalues:", λ) print("Eigenstate for lowest eigenvalue", v[:, 0]) # - # Then we could turn this Hamiltonian slowly into a classical Ising model and read out the global solution. # # <img src="figures/annealing_process.svg" alt="Annealing process" style="width: 400px;"/> # # # Adiabatic quantum computation exploits this phenomenon and it is able to perform universal calculations with the final Hamiltonian being $H=-\sum_{<i,j>} J_{ij} \sigma^Z_i \sigma^Z_{j} - \sum_i h_i \sigma^Z_i - \sum_{<i,j>} g_{ij} \sigma^X_i\sigma^X_j$. Note that is not the transverse-field Ising model: the last term is an X-X interaction. If a quantum computer respects the speed limit, guarantees the finite gap, and implements this Hamiltonian, then it is equivalent to the gate model with some overhead. # # The quadratic scaling on the gap does not appear too bad. So can we solve NP-hard problems faster with this paradigm? It is unlikely. The gap is highly dependent on the problem, and actually difficult problems tend to have an exponentially small gap. So our speed limit would be quadratic over the exponentially small gap, so the overall time required would be exponentially large. # # Quantum annealing # # A theoretical obstacle to adiabatic quantum computing is that calculating the speed limit is clearly not trivial; in fact, it is harder than solving the original problem of finding the ground state of some Hamiltonian of interest. Engineering constraints also apply: the qubits decohere, the environment has finite temperature, and so on. *Quantum annealing* drops the strict requirements and instead of respecting speed limits, it repeats the transition (the annealing) over and over again. 
Having collected a number of samples, we pick the spin configuration with the lowest energy as our solution. There is no guarantee that this is the ground state. # # Quantum annealing has a slightly different software stack than gate-model quantum computers. Instead of a quantum circuit, the level of abstraction is the classical Ising model -- the problem we are interested in solving must be in this form. Then, just like superconducting gate-model quantum computers, superconducting quantum annealers also suffer from limited connectivity. In this case, it means that if our problem's connectivity does not match that of the hardware, we have to find a graph minor embedding. This will combine several physical qubits into a logical qubit. The workflow is summarized in the following diagram [[1](#1)]: # # <img src="figures/quantum_annealing_workflow.png" alt="Software stack on a quantum annealer" style="width: 400px;"/> # # A possible classical solver for the Ising model is the simulated annealer that we have seen before: # + import dimod J = {(0, 1): 1.0, (1, 2): -1.0} h = {0:0, 1:0, 2:0} model = dimod.BinaryQuadraticModel(h, J, 0.0, dimod.SPIN) sampler = dimod.SimulatedAnnealingSampler() response = sampler.sample(model, num_reads=10) print("Energy of samples:") print([solution.energy for solution in response.data()]) # - # Let's take a look at the minor embedding problem. This part is NP-hard in itself, so we normally use probabilistic heuristics to find an embedding. For instance, for many generations of the quantum annealer that D-Wave Systems produces has unit cells containing a $K_{4,4}$ bipartite fully-connected graph, with two remote connections from each qubit going to qubits in neighbouring unit cells. A unit cell with its local and remote connections indicated is depicted following figure: # # <img src="figures/unit_cell.png" alt="Unit cell in Chimera graph" style="width: 80px;"/> # # This is called the Chimera graph. 
The current largest hardware has 2048 qubits, consisting of $16\times 16$ unit cells of 8 qubits each. The Chimera graph is available as a `networkx` graph in the package `dwave_networkx`. We draw a smaller version, consisting of $2\times 2$ unit cells. # + import matplotlib.pyplot as plt import dwave_networkx as dnx # %matplotlib inline connectivity_structure = dnx.chimera_graph(2, 2) dnx.draw_chimera(connectivity_structure) plt.show() # - # Let's create a graph that certainly does not fit this connectivity structure. For instance, the complete graph $K_9$ on nine nodes: import networkx as nx G = nx.complete_graph(9) plt.axis('off') nx.draw_networkx(G, with_labels=False) import minorminer embedded_graph = minorminer.find_embedding(G.edges(), connectivity_structure.edges()) # Let's plot this embedding: dnx.draw_chimera_embedding(connectivity_structure, embedded_graph) plt.show() # Qubits that have the same colour corresponding to a logical node in the original problem defined by the $K_9$ graph. Qubits combined in such way form a chain. Even though our problem only has 9 variables (nodes), we used almost all 32 available on the toy Chimera graph. Let's find the maximum chain length: max_chain_length = 0 for _, chain in embedded_graph.items(): if len(chain) > max_chain_length: max_chain_length = len(chain) print(max_chain_length) # The chain on the hardware is implemented by having strong couplings between the elements in a chain -- in fact, twice as strong as what the user can set. Nevertheless, long chains can break, which means we receive inconsistent results. In general, we prefer shorter chains, so we do not waste physical qubits and we obtain more reliable results. # # References # # [1] <NAME>, <NAME>, <NAME>. (2018). [Open source software in quantum computing](https://doi.org/10.1371/journal.pone.0208561). *PLOS ONE* 13(12):e0208561. <a id='1'></a>
06_Adiabatic_Quantum_Computing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #hide # default_exp conda # - # # Create conda packages # # > Pure python packages created from nbdev settings.ini # + #export from fastcore.script import * from fastcore.all import * from fastrelease.core import find_config import yaml,subprocess,glob,platform from copy import deepcopy try: from packaging.version import parse except ImportError: from pip._vendor.packaging.version import parse _PYPI_URL = 'https://pypi.org/pypi/' # - #export def pypi_json(s): "Dictionary decoded JSON for PYPI path `s`" return urljson(f'{_PYPI_URL}{s}/json') #export def latest_pypi(name): "Latest version of `name` on pypi" return max(parse(r) for r,o in pypi_json(name)['releases'].items() if not parse(r).is_prerelease and not o[0]['yanked']) #export def pypi_details(name): "Version, URL, and SHA256 for `name` from pypi" ver = str(latest_pypi(name)) pypi = pypi_json(f'{name}/{ver}') info = pypi['info'] rel = [o for o in pypi['urls'] if o['packagetype']=='sdist'][0] return ver,rel['url'],rel['digests']['sha256'] #export def update_meta(name,src_path,dest_path): "Update VERSION and SHA256 in meta.yaml from pypi" src_path,dest_path = Path(src_path),Path(dest_path) txt = src_path.read_text() ver,url,sha = pypi_details(name) dest_path.write_text(txt.replace('VERSION',ver).replace('SHA256',sha)) return ver #export def as_posix(p): "`Path(p).as_posix()`" return Path(p).as_posix() #export def conda_output_path(name,ver): "Output path for conda build" pre = run('conda info --root').strip() s = f"{as_posix(pre)}/conda-bld/*/{name}-{ver}-py" res = first(glob.glob(f"{s}_0.tar.bz2")) if not res: pyver = strcat(sys.version_info[:2]) res = first(glob.glob(f"{s}{pyver}_0.tar.bz2")) if res: return as_posix(res) #export def _pip_conda_meta(name, path): ver = str(latest_pypi(name)) 
pypi = pypi_json(f'{name}/{ver}') info = pypi['info'] rel = [o for o in pypi['urls'] if o['packagetype']=='sdist'][0] reqs = ['pip', 'python', 'packaging'] # Work around conda build bug - 'package' and 'source' must be first d1 = { 'package': {'name': name, 'version': ver}, 'source': {'url':rel['url'], 'sha256':rel['digests']['sha256']} } d2 = { 'build': {'number': '0', 'noarch': 'python', 'script': '{{ PYTHON }} -m pip install . -vv'}, 'test': {'imports': [name]}, 'requirements': {'host':reqs, 'run':reqs}, 'about': {'license': info['license'], 'home': info['project_url'], 'summary': info['summary']} } return d1,d2 #export def _write_yaml(path, name, d1, d2): path = Path(path) p = path/name p.mkdir(exist_ok=True, parents=True) yaml.SafeDumper.ignore_aliases = lambda *args : True with (p/'meta.yaml').open('w') as f: yaml.safe_dump(d1, f) yaml.safe_dump(d2, f) #export def write_pip_conda_meta(name, path='conda'): "Writes a `meta.yaml` file for `name` to the `conda` directory of the current directory" _write_yaml(path, name, *_pip_conda_meta(name, path)) #export def _get_conda_meta(): cfg = find_config() name,ver = cfg.lib_name,cfg.version url = cfg.doc_host or cfg.git_url reqs = ['pip', 'python', 'packaging'] if cfg.get('requirements'): reqs += cfg.requirements.split() if cfg.get('conda_requirements'): reqs += cfg.conda_requirements.split() pypi = pypi_json(f'{name}/{ver}') rel = [o for o in pypi['urls'] if o['packagetype']=='sdist'][0] # Work around conda build bug - 'package' and 'source' must be first d1 = { 'package': {'name': name, 'version': ver}, 'source': {'url':rel['url'], 'sha256':rel['digests']['sha256']} } d2 = { 'build': {'number': '0', 'noarch': 'python', 'script': '{{ PYTHON }} -m pip install . 
-vv'}, 'requirements': {'host':reqs, 'run':reqs}, 'test': {'imports': [cfg.get('lib_path')]}, 'about': { 'license': 'Apache Software', 'license_family': 'APACHE', 'home': url, 'doc_url': url, 'dev_url': url, 'summary': cfg.get('description') }, 'extra': {'recipe-maintainers': [cfg.get('user')]} } return name,d1,d2 #export def write_conda_meta(path='conda'): "Writes a `meta.yaml` file to the `conda` directory of the current directory" _write_yaml(path, *_get_conda_meta()) # This function is used in the `fastrelease_conda_package` CLI command. # # **NB**: you need to first of all upload your package to PyPi, before creating the conda package. #export def anaconda_upload(name, version, user=None, token=None, env_token=None): "Update `name` `version` to anaconda" user = f'-u {user} ' if user else '' if env_token: token = os.getenv(env_token) token = f'-t {token} ' if token else '' loc = conda_output_path(name,version) if not loc: raise Exception("Failed to find output") return run(f'anaconda {token} upload {user} {loc} --skip-existing', stderr=True) #export @call_parse def fastrelease_conda_package(path:Param("Path where package will be created", str)='conda', do_build:Param("Run `conda build` step", bool_arg)=True, build_args:Param("Additional args (as str) to send to `conda build`", str)='', skip_upload:Param("Skip `anaconda upload` step", store_true)=False, mambabuild:Param("Use `mambabuild` (requires `boa`)", store_true)=False, upload_user:Param("Optional user to upload package to")=None): "Create a `meta.yaml` file ready to be built into a package, and optionally build and upload it" write_conda_meta(path) cfg = find_config() out = f"Done. 
Next steps:\n```\`cd {path}\n""" name,lib_path = cfg.lib_name,cfg.lib_path # out_upl = f"anaconda upload {loc}" build = 'mambabuild' if mambabuild else 'build' if not do_build: return #print(f"{out}conda {build} .\n{out_upl}\n```") os.chdir(path) res = run(f"conda {build} --no-anaconda-upload {build_args} {name}") if 'anaconda upload' not in res: return print(f"{res}\n\Failed. Check auto-upload not set in .condarc. Try `--do_build False`.") loc = conda_output_path(lib_path, cfg.version) return anaconda_upload(lib_path, cfg.version) # To build and upload a conda package, cd to the root of your repo, and then: # # fastrelease_conda_package # # Or to do things more manually: # # ``` # fastrelease_conda_package --do_build false # # cd conda # conda build --no-anaconda-upload --output-folder build {name} # anaconda upload build/noarch/{name}-{ver}-*.tar.bz2 # ``` # # Add `--debug` to the `conda build command` to debug any problems that occur. Note that the build step takes a few minutes. Add `-u {org_name}` to the `anaconda upload` command if you wish to upload to an organization, or pass `upload_user` to `fastrelease_conda_package`. # # **NB**: you need to first of all upload your package to PyPi, before creating the conda package. #export @call_parse def chk_conda_rel(nm:Param('Package name on pypi', str), apkg:Param('Anaconda Package (defaults to {nm})', str)=None, channel:Param('Anaconda Channel', str)='fastai', force:Param('Always return github tag', store_true)=False): "Prints GitHub tag only if a newer release exists on Pypi compared to an Anaconda Repo." if not apkg: apkg=nm condavs = L(loads(run(f'mamba repoquery search {apkg} -c {channel} --json'))['result']['pkgs']) condatag = condavs.attrgot('version').map(parse) pypitag = latest_pypi(nm) if force or not condatag or pypitag > max(condatag): return f'{pypitag}' # ## Export- #hide from nbdev.export import notebook2script notebook2script()
01_conda.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""Simple linear regression on the Salary_Data set.

Fits years-of-experience vs. salary both with a closed-form least-squares
slope/intercept (``best_fit_slope``) and with scikit-learn's
``LinearRegression``, plotting the data and the fitted line.
"""
from statistics import mean

import numpy as np

# Years of experience (feature) and salary (target), hard-coded from
# Salary_Data.csv exactly as in the original notebook.
X = np.array([1.1, 1.3, 1.5, 2., 2.2, 2.9, 3., 3.2, 3.2, 3.7,
              3.9, 4., 4., 4.1, 4.5, 4.9, 5.1, 5.3, 5.9, 6.,
              6.8, 7.1, 7.9, 8.2, 8.7, 9., 9.5, 9.6, 10.3, 10.5],
             dtype=np.float64)
Y = np.array([39343., 46205., 37731., 43525., 39891., 56642., 60150.,
              54445., 64445., 57189., 63218., 55794., 56957., 57081.,
              61111., 67938., 66029., 83088., 81363., 93940., 91738.,
              98273., 101302., 113812., 109431., 105582., 116969.,
              112635., 122391., 121872.],
             dtype=np.float64)


def best_fit_slope(X, Y):
    """Return ``(m, c)`` of the least-squares line ``Y = m*X + c``.

    Uses the classic moment formula m = (E[X]E[Y] - E[XY]) / (E[X]^2 - E[X^2]).
    """
    m = (mean(X) * mean(Y) - mean(X * Y)) / (mean(X) * mean(X) - mean(X * X))
    c = mean(Y) - m * mean(X)
    return m, c


def main():
    """Load the CSV, plot the data, and fit both regression models."""
    # Third-party plotting/ML imports are local so the module can be imported
    # (e.g. to reuse best_fit_slope) without them installed.
    import pandas as pd
    import matplotlib.pyplot as plt
    from matplotlib.pyplot import style
    from sklearn import model_selection
    from sklearn.linear_model import LinearRegression

    df = pd.read_csv('Salary_Data.csv')
    print(df)

    # Raw data plot (same values as the df columns).
    style.use('ggplot')
    plt.scatter(X, Y, color='r', label='data')
    plt.plot(X, Y, color='blue')
    plt.xlabel('Years Experience')
    plt.ylabel('Salary')
    plt.title('Years Exprience Vs Salary')
    plt.legend()
    plt.show()

    # Closed-form least-squares fit and its regression line.
    m, c = best_fit_slope(X, Y)
    regression_line = [(m * x) + c for x in X]
    style.use('ggplot')
    plt.scatter(X, Y, color='red', label='Original Data')
    plt.plot(X, regression_line, color='green', label='regression_line')
    plt.xlabel('Years of Experience')
    plt.ylabel('Salary')
    plt.title('Years Experience Vs Salary')
    plt.legend()
    plt.show()

    # scikit-learn fit.  BUG FIX: sklearn estimators require a 2-D feature
    # matrix; the original passed the 1-D X and crashed in clf.fit() with
    # "Expected 2D array, got 1D array instead".
    X2 = X.reshape(-1, 1)
    X_train, X_test, Y_train, Y_test = model_selection.train_test_split(
        X2, Y, test_size=0.2)
    clf = LinearRegression()
    clf.fit(X_train, Y_train)
    accuracy = clf.score(X_test, Y_test)
    print(accuracy)

    p = np.array([[1.1], [10.3]])
    prediction = clf.predict(p)
    print(prediction)

    # Manual prediction for 11 years of experience with the closed-form fit.
    print((m * 11) + c)


if __name__ == '__main__':
    main()
Salarydata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ejemplo: Limpieza y procesamiento básico de textos # **Autor:** Unidad de Científicos de Datos (UCD) # # --- # Este ejemplo muestra las principales funcionalidades del módulo `limpieza`, de la librería **ConTexto**. También se muestran ejemplos de uso de las funciones de limpieza contenidas en el módulo auxiliar `limpieza_aux`, que hace parte de `utils`. # # Para mayor información sobre estos módulos y sus funciones, se puede consultar su documentación: # # * <a href="https://ucd-dnp.github.io/ConTexto/funciones/limpieza.html" target="_blank">Limpieza</a> # * <a href="https://ucd-dnp.github.io/ConTexto/funciones/utils.html#limpieza-aux" target="_blank">Funciones auxiliares de limpieza</a> # ____ # ## 1. Funciones de limpieza de textos # # En esta sección se muestra cómo se pueden hacer distintos procesamientos de un texto de entrada para remover elementos como signos de puntuación, *stopwords*, números y acentos, que pueden llegar a entorpecer el análisis de un conjunto de documentos. # ### 1.1 Importar módulo de ConTexto y definir texto de prueba # + from contexto.limpieza import * texto_prueba = '''hola, esto es una prueba para verificar que la limpieza sea hecha con precisión, empeño y calidad! Esperamos que esté todo de 10. 
Desde Amazonas hasta la Guajira y san andrés, desde John y María hasta Ernesto, esperamos que todo funcione de manera correcta.''' # - # ### 1.2 Aplicar funciones de limpieza de textos # + # Limpieza básica, se pasa todo a minúsculas y se eliminan signos de puntuación limpio_basico = limpieza_basica(texto_prueba) # Si se desea mantener los caracteres numéricos limpio_basico_nums = limpieza_basica(texto_prueba, quitar_numeros=False) # Para quitar acentos (diéresis, tildes y virgulillas) sin_acentos = remover_acentos(limpio_basico) # Quitar palabras con menos de 4 caracteres quitar_0a3_caracteres = remover_palabras_cortas(sin_acentos, 4) # - # Utilizando la función `limpieza_texto` se puede, a la vez: # # * Pasar todo el texto a minúsculas # * Quitar signos de puntuación # * Quitar *stopwords* (palabras y/o expresiones). Para esto, se pueden pasar directamente las listas de palabras y expresiones a quitar, o se puede pasar un archivo que contenga esta información. # * Quitar palabras de una longitud menor a *n* caracteres (configurable) # * Quitar números (configurable) # * Quitar acentos (configurable) # limpio_completo = limpieza_texto(texto_prueba, ubicacion_archivo='entrada/stopwords_prueba.txt', n_min=3) print(limpio_completo) # ### 1.3 Quitar elementos repetidos de un texto # # La función `quitar_repetidos` permite quitar elementos repetidos de un texto, de acuerdo a un separador definido por el usuario. 
# + texto_repetido = 'hola, hola, como estas,hola, hola tu' # Aplicar función directamente print('texto quitando partes repetidas, utilizando el separador por defecto ("|"):') print(quitar_repetidos(texto_repetido)) # Especificar el separador entre documentos/frases print(f'\ntexto quitando partes repetidas, separadas por coma:\n{quitar_repetidos(texto_repetido, ",")}') # Deshabilitar opción de quitar espacios al inicio y al final print('\ntexto quitando partes repetidas, separadas por coma y sin quitar espacios al inicio y final:') print(quitar_repetidos(texto_repetido, ",", remover_espacios=False)) # - # ### 1.4 Cargar listas de *stopwords*, predefinidas y definidas por el usuario # # **ConTexto** trae algunas listas predefinidas de *stopwords* que pueden ser cargadas y utilizadas directamente. Las listas incluidas son: # # * Palabras comunes del lenguaje castellano (solo palabras) # * Nombres comunes de hombres y mujeres (solo palabras) # * Nombres de municipios y departamentos de Colombia (palabras y expresiones: nombres compuestos como "San Andrés") # # Además de estas listas, la función `lista_stopwords` permite cargar listas predefinidas de las *stopwords* más comunes para varios lenguajes, utilizando la librería NLTK. # # Finalmente, la función `cargar_stopwords` permite al usuario cargar *stopwords* (tanto palabras como expresiones) desde un archivo plano. Las palabras/expresiones deben ir separadas por comas o ir en renglones separados para ser tenidas en cuenta por aparte. 
# # + # Cargar listas de stopwords predefinidas nombres_hombres = lista_nombres('hombre') nombres_mujeres = lista_nombres('mujer') nombres_todos = lista_nombres() apellidos = lista_apellidos() municipios = lista_geo_colombia('municipios') departamentos = lista_geo_colombia('departamentos') todos_geo = lista_geo_colombia() # Stopwords comunes de varios lenguajes (por defecto se devuelven las de español) stopwords = lista_stopwords() stopwords_ingles = lista_stopwords('ingles') # Cargar archivo con lista de términos y expresiones que se desean remover custom_sw = cargar_stopwords('entrada/stopwords_prueba.txt') print(custom_sw) # - # ____ # ## 2. Funciones auxiliares para limpieza de textos # # Adicionalmente, el módulo auxiliar `limpieza_aux` contiene algunas funciones complementarias que permiten identificar y remover elementos adicionales que puedan entorpecer el análisis de un conjunto de textos. # ### 2.1 Importar funciones auxiliares y definir textos de prueba # + from contexto.utils.limpieza_aux import substrings_en_comun, detectar_coincidencias from contexto.utils.limpieza_aux import caracteres_repetidos, caracteres_consecutivos, consonantes_consecutivas from contexto.utils.limpieza_aux import quitar_coincidenias, quitar_palabras_atipicas # Corpus de prueba textos_prueba = [ 'Este es el primer texto de prueba para la detección de coincidencias.', 'Una segunda oración permite evaluar si hay cadanea de caracteres elementos en común.', 'Tercera frase que consiste en un texto complementario con palabras comúnmente utilizadas.', 'En esta oración y la siguiente se introducen elementos para completar un grupo de por lo menos 5.', 'Finalmente, esta frase cierra un grupo de 5 oraciones para probar la detección de coincidencias.', 'Una última frase para ampliar un poco el grupo.'] # - # ### 2.2 Detectar y quitar coincidencias entre un conjunto de textos # # En ocasiones un documento puede tener un encabezado o pie de nota común en casi todas sus páginas. 
Esto puede entorpecer ciertos análisis, al darle un peso demasiado grande a estas coincidencias. # # Para evitar este problema, la función `quitar_coincidenias` (que a su vez utiliza las funciones `substrings_en_comun` y `detectar_coincidencias`) permite, para un conjunto de textos, encontrar y remover coincidencias (cadenas de caracteres) que cumplan una o varias de estas condiciones: # # * Que aparezcan en mínimo una proporción determinada de todos los textos # * Que su longitud (cantidad de caracteres) sea mayor o igual a un valor determinado # * Que la cadena tenga un número de palabras mayor o igual a un valor determinado # + # Detectar coincidencias de por lo menos 4 y 10 caracteres print(substrings_en_comun(textos_prueba[4], textos_prueba[5], longitud_min=4)) print(substrings_en_comun(textos_prueba[4], textos_prueba[5], longitud_min=10)) # Detectar cadenas de caracteres de mínimo 2 palabras que estén en mínimo la mitad de los textos print(detectar_coincidencias(textos_prueba, prop=0.5, n_min=2, longitud_min=5)) # Quitar las coincidencias encontradas print(quitar_coincidenias(textos_prueba, prop=0.5, n_min=2, longitud_min=5)) # - # ### 2.3 Detectar y quitar palabras o valores atípicos # # Si se está trabajando con un texto de mala calidad (por ejemplo, porque se aplicó OCR a un documento antiguo y mal escaneado), es posible que haya "ruido" en el texto, como palabras sin sentido, que puede afectar el análisis de este documento. Otro caso posible es trabajar con textos que tengan palabras o valores numéricos sospechosos (como "abcde" o "0000000"). En este caso, puede ser de utilidad poder detectar y/o remover estas palabras sospechosas o de ruido. 
# # Para evitar este problema, la función `quitar_palabras_atipicas` (que a su vez utiliza las funciones `caracteres_repetidos`, `caracteres_consecutivos` y `consonantes_consecutivas`) permite, para un conjunto de textos, encontrar y remover palabras que cumplan una o varias de estas condiciones: # # * Que tengan un número o letra repetidos de forma seguida más veces de lo permitido # * Que tengan números o letras consecutivas de forma seguida en un número mayor de lo permitido # * Que tengan más consonantes seguidas de lo permitido # # + # Detectar si una palabra tiene una cantidad determinada de caracteres repetidos seguidos caracteres_repetidos('123444321', 4) # La función por defecto quita acentos y pasa todo a minúsculas, para que esto no afecte la búsqueda de repetidos caracteres_repetidos('GóOol', 3) # Detectar si una palabra tiene una cantidad determinada de caracteres consecutivos seguidos caracteres_consecutivos('123444321', 4) caracteres_consecutivos('aBCdE', 4) # Detectar si una palabra tiene una cantidad determinada de consonantes seguidas consonantes_consecutivas('AbStracto', 3) consonantes_consecutivas('<NAME>', 4) # El resultado cambia si se deja de incluir la letra "Y" como vocal consonantes_consecutivas('<NAME>', 4, incluir_y=False) # La función quita acentos por defecto, por lo que puede trabajar con consonantes que tengan algún tipo de acento o tilde consonantes_consecutivas('mñçs', 4) # Prueba de quitar palabras con problemas en un texto texto_prueba = 'HolaAá! esta es una pruebba para ver si, En 12345, se pueden abstraer las reglas del abcdario.' texto_sin_atipicas = quitar_palabras_atipicas(texto_prueba, n_repetidas=3, n_consecutivas=3, n_consonantes=4) print(f"---------------\nTexto original:\n{texto_prueba}") print(f"---------------\nTexto sin palabras detectadas como atípicas:\n{texto_sin_atipicas}")
ejemplos/02_Limpieza de textos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# 121
# my first attempt (note: the method is str.islower(), not isLower())
#
# char = 'a'
# if char.islower():
#     print('a')
# else:
#     print('A')
#

# Echo the input with its letter case swapped.
user = input("")
if user.islower():
    print(user.upper())
else:
    print(user.lower())

# 122
# Map a 0-100 score onto letter grades A-E.
score = input("")
score = int(score)
if 81 <= score <= 100:
    print("A")
elif 61 <= score <= 80:
    print('B')
elif 41 <= score <= 60:
    print('C')
elif 21 <= score <= 40:
    print('D')
elif 0 <= score <= 20:
    print('E')
else:
    print('range out of bound')

# 123
# Convert an amount of foreign currency to Korean won.
환율 = {"달러": 1167, "엔": 1.096, "유로": 1268, "위안": 171}  # won per unit
user = input("입력 : ")
num, currency = user.split()
print(float(num) * 환율[currency], "원")

# 124
# +
# Print the largest of three integers.
num1 = input("number 1 : ")
num2 = input("number 2 : ")
num3 = input("number 3 : ")
num1 = int(num1)
num2 = int(num2)
num3 = int(num3)

if num1 > num2 and num1 > num3:
    print(num1)
elif num2 > num1 and num2 > num3:
    print(num2)
else:
    print(num3)
# -

# 125
# +
# Identify the mobile carrier from the phone-number prefix.
cp = input("Contact No. : ")
num = cp.split("-")[0]

if num == "011":
    com = "SKT"
elif num == "016":
    com = "KT"
elif num == "019":
    com = "LGU"
else:
    com = "not registered"

print(f"You are {com} User.")
# -

# 126
# Map the first three digits of a postal code to a district name.
zipCode = input('우편번호 : ')
zipCode = zipCode[:3]

if zipCode in ["010", '011', '012']:
    print("강동구")
elif zipCode in ["013", "014", "015"]:
    print("도봉구")
else:
    print("노원구")

# 127
# Determine gender from the first digit after the dash of a resident
# registration number ('1'/'3' = male, otherwise female).
rrn = input('주민번호 : ')  # renamed from `id`, which shadows the builtin
rrn = rrn.split("-")[1]

if rrn[0] == '1' or rrn[0] == '3':
    print("Male")
else:
    print("Female")

# 128
# Same digit check as 127, with an additional range test on digits 2-3.
# Two bugs fixed in the condition:
#   * `뒷자리` was never defined (NameError) -- the sliced variable is the
#     second half of the registration number read just above
#   * the test was written `int(...[1:3] <= 8)`, which converts a boolean
#     to int instead of comparing the parsed integer; the closing paren
#     belongs right after the slice
rrn = input("주민번호 : ")
rrn = rrn.split("-")[1]

if (rrn[0] == '1' or rrn[0] == '3') and (0 <= int(rrn[1:3]) <= 8):
    print("Male")
python_algorithm/pyPractice/p121to130.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: drlnd # language: python # name: drlnd # --- # # Continuous Control # # --- # # In this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. # # ### 1. Start the Environment # # We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). from unityagents import UnityEnvironment import numpy as np # Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded. # # - **Mac**: `"path/to/Reacher.app"` # - **Windows** (x86): `"path/to/Reacher_Windows_x86/Reacher.exe"` # - **Windows** (x86_64): `"path/to/Reacher_Windows_x86_64/Reacher.exe"` # - **Linux** (x86): `"path/to/Reacher_Linux/Reacher.x86"` # - **Linux** (x86_64): `"path/to/Reacher_Linux/Reacher.x86_64"` # - **Linux** (x86, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86"` # - **Linux** (x86_64, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86_64"` # # For instance, if you are using a Mac, then you downloaded `Reacher.app`. If this file is in the same folder as the notebook, then the line below should appear as follows: # ``` # env = UnityEnvironment(file_name="Reacher.app") # ``` env = UnityEnvironment(file_name='20_envs/Reacher.app') # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. 
Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # ### 2. Examine the State and Action Spaces # # In this environment, a double-jointed arm can move to target locations. A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible. # # The observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector must be a number between `-1` and `1`. # # Run the code cell below to print some information about the environment. # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) print('Number of agents:', num_agents) # size of each action action_size = brain.vector_action_space_size print('Size of each action:', action_size) # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size)) print('The state for the first agent looks like:', states[0]) # - # ### 3. Take Random Actions in the Environment # # In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment. # # Once this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment. 
# # Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment! # + # env_info = env.reset(train_mode=False)[brain_name] # reset the environment # states = env_info.vector_observations # get the current state (for each agent) # scores = np.zeros(num_agents) # initialize the score (for each agent) # while True: # actions = np.random.randn(num_agents, action_size) # select an action (for each agent) # actions = np.clip(actions, -1, 1) # all actions between -1 and 1 # env_info = env.step(actions)[brain_name] # send all actions to tne environment # next_states = env_info.vector_observations # get next state (for each agent) # rewards = env_info.rewards # get reward (for each agent) # dones = env_info.local_done # see if episode finished # scores += env_info.rewards # update the score (for each agent) # states = next_states # roll over states to next time step # if np.any(dones): # exit loop if episode finished # break # print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores))) # - # When finished, you can close the environment. # + # env.close() # - # ### 4. It's Your Turn! # # Now it's your turn to train your own agent to solve the environment! When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following: # ```python # env_info = env.reset(train_mode=True)[brain_name] # ``` # #### Model architecture # Actor-Critic model on below network. 
# (input) 33
# (hidden1) 33 -> 512
# (relu)
# (hidden2) 512 -> 256
# (relu)
# ├(Fully connected)256 -> 4
# │(tanh)
# └(Fully connected)256 -> 1

import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pickle
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt


class Network(nn.Module):
    """Shared-body actor-critic network with a Gaussian policy head.

    The two hidden layers are shared.  The actor head emits the mean of a
    Normal distribution over actions (squashed with tanh into [-1, 1], as
    required by the environment); ``sigma`` is a learnable,
    state-independent parameter passed through softplus so the scale stays
    positive.  The critic head emits a scalar state-value estimate.
    """

    def __init__(self, state_dim=1, action_dim=1, hidden_dims=(64, 64)):
        """
        Args
        ----
        state_dim : int
            Size of the observation vector.
        action_dim : int
            Size of the action vector.
        hidden_dims : sequence of two ints
            Widths of the two hidden layers.  (Default changed from a
            mutable list to an equivalent tuple -- mutable defaults are
            shared across calls.)
        """
        super(Network, self).__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.hidden1 = nn.Linear(state_dim, hidden_dims[0])
        self.hidden2 = nn.Linear(hidden_dims[0], hidden_dims[1])
        self.actor = nn.Linear(hidden_dims[-1], action_dim)
        self.critic = nn.Linear(hidden_dims[-1], 1)
        # Pre-softplus scale of the action distribution (one per action dim).
        self.sigma = nn.Parameter(torch.zeros(action_dim))

    def forward(self, states, actions=None):
        """Return ``(actions, log_prob, entropy, value)`` for a state batch.

        If ``actions`` is None, actions are sampled from the current policy;
        otherwise the supplied actions are evaluated under it (used when
        re-scoring old trajectories during PPO updates).
        """
        tmp = F.relu(self.hidden1(states))
        phi = F.relu(self.hidden2(tmp))
        # torch.tanh replaces the deprecated F.tanh
        # (torch.nn.functional.tanh is deprecated/removed in modern PyTorch).
        mu = torch.tanh(self.actor(phi))
        value = self.critic(phi).squeeze(-1)
        dist = torch.distributions.Normal(mu, F.softplus(self.sigma))
        if actions is None:
            actions = dist.sample()
        log_prob = dist.log_prob(actions)
        # Sum per-dimension log-probs/entropies into one value per sample.
        log_prob = torch.sum(log_prob, dim=-1)
        entropy = torch.sum(dist.entropy(), dim=-1)
        return actions, log_prob, entropy, value

    def state_values(self, states):
        """Return the critic's value estimate V(s) for a batch of states."""
        tmp = F.relu(self.hidden1(states))
        phi = F.relu(self.hidden2(tmp))
        return self.critic(phi).squeeze(-1)

# #### Define agent
# In below code, basic agent which collects trajectories is defined.
# Update algorithm is defined separately.
class Agent: buffer_attrs = [ "states", "actions", "next_states", "rewards", "log_probs", "values", "dones", ] def __init__(self, env, model, tmax=128, n_epoch=10, batch_size=128, gamma=0.99, gae_lambda=0.96, eps=0.10, device="cpu"): self.env = env self.model = model self.opt_model = optim.Adam(model.parameters(), lr=1e-4) self.state_dim = model.state_dim self.action_dim = model.action_dim self.tmax = tmax self.n_epoch = n_epoch self.batch_size = batch_size self.gamma = gamma self.gae_lambda = gae_lambda self.eps = eps self.device = device self.rewards = None self.scores_by_episode = [] self.reset() def to_tensor(self, x, dtype=np.float32): return torch.from_numpy(np.array(x).astype(dtype)).to(self.device) def reset(self): self.brain_name = self.env.brain_names[0] env_info = self.env.reset(train_mode=True)[self.brain_name] self.last_states = self.to_tensor(env_info.vector_observations) def collect_trajectories(self): """ collect trajectories based on pi_theta """ buffer = dict([(k, []) for k in self.buffer_attrs]) for t in range(self.tmax): memory = {} # draw action from model memory["states"] = self.last_states pred = self.model(memory["states"]) pred = [v.detach() for v in pred] memory["actions"], memory["log_probs"], _, memory["values"] = pred # one step forward actions_np = memory["actions"].cpu().numpy() env_info = self.env.step(actions_np)[self.brain_name] memory["next_states"] = self.to_tensor(env_info.vector_observations) memory["rewards"] = self.to_tensor(env_info.rewards) memory["dones"] = self.to_tensor(env_info.local_done, dtype=np.uint8) # stack one step memory to buffer for k, v in memory.items(): buffer[k].append(v.unsqueeze(0)) self.last_states = memory["next_states"] r = np.array(env_info.rewards)[None,:] if self.rewards is None: self.rewards = r else: self.rewards = np.r_[self.rewards, r] if memory["dones"].any(): rewards_mean = self.rewards.sum(axis=0).mean() self.scores_by_episode.append(rewards_mean) self.rewards = None for k, v in 
buffer.items(): buffer[k] = torch.cat(v, dim=0) return buffer def calc_returns(self, rewards, values, dones, last_values): n_step, n_agent = rewards.shape # Create empty buffer GAE = torch.zeros_like(rewards).float().to(self.device) returns = torch.zeros_like(rewards).float().to(self.device) # Set start values GAE_current = torch.zeros(n_agent).float().to(self.device) returns_current = last_values values_next = last_values for irow in reversed(range(n_step)): values_current = values[irow] rewards_current = rewards[irow] gamma = self.gamma * (1. - dones[irow].float()) # Calculate TD Error td_error = rewards_current + gamma * values_next - values_current # Update GAE, returns GAE_current = td_error + gamma * self.gae_lambda * GAE_current returns_current = rewards_current + gamma * returns_current # Set GAE, returns to buffer GAE[irow] = GAE_current returns[irow] = returns_current values_next = values_current return GAE, returns def step(): pass # #### Define PPO update # Each comments in the below code(e.g. `##### x. *****`) is equivalant to the process of PPO. # <img src="./images/PPO_summary.png" width="500mm"> class PPOAgent(Agent): def __init__(self, env, model, tmax=128, n_epoch=10, batch_size=128, gamma=0.99, gae_lambda=0.96, eps=0.10, device="cpu"): super(PPOAgent, self).__init__(env, model, tmax=128, n_epoch=10, batch_size=128, gamma=0.99, gae_lambda=0.96, eps=0.10, device="cpu") def step(self): self.model.eval() ##### 1. 
Collect Trajetories based on pi_theta trajectories = self.collect_trajectories() # Calculate Score (averaged over agents) score = trajectories["rewards"].sum(dim=0).mean() # Append Values collesponding to last states last_values = self.model.state_values(self.last_states).detach() advantages, returns = self.calc_returns(trajectories["rewards"], trajectories["values"], trajectories["dones"], last_values) advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6) # concat all agent for k, v in trajectories.items(): if len(v.shape) == 3: trajectories[k] = v.reshape([-1, v.shape[-1]]) else: trajectories[k] = v.reshape([-1]) advantages = advantages.reshape([-1]) returns = returns.reshape([-1]) # Mini-batch update self.model.train() n_sample = advantages.shape[0] n_batch = (n_sample - 1) // self.batch_size + 1 idx = np.arange(n_sample) np.random.shuffle(idx) for k, v in trajectories.items(): trajectories[k] = v[idx] advantages, returns = advantages[idx], returns[idx] ##### 4. Repeat 2 to 3 a couple times for i_epoch in range(self.n_epoch): for i_batch in range(n_batch): ##### 2. Compute the gradient idx_start = self.batch_size * i_batch idx_end = self.batch_size * (i_batch + 1) (states, actions, next_states, rewards, old_log_probs, old_values, dones) = [trajectories[k][idx_start:idx_end] for k in self.buffer_attrs] advantages_batch = advantages[idx_start:idx_end] returns_batch = returns[idx_start:idx_end] _, log_probs, entropy, values = self.model(states, actions) ratio = torch.exp(log_probs - old_log_probs) ratio_clamped = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) adv_PPO = torch.min(ratio * advantages_batch, ratio_clamped * advantages_batch) loss_actor = -torch.mean(adv_PPO) - 0.01 * entropy.mean() loss_critic = 0.5 * (returns_batch - values).pow(2).mean() loss = loss_actor + loss_critic ##### 3. Update theta' self.opt_model.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(self.model.parameters(), 10.) 
self.opt_model.step() del(loss) self.model.eval() return score # #### Train # + def get_env_info(env): # reset the environment brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=True)[brain_name] n_agent = len(env_info.agents) action_dim = brain.vector_action_space_size states = env_info.vector_observations state_dim = states.shape[1] return n_agent, state_dim, action_dim def train(): device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # path = '20_envs/Reacher.app' # env = UnityEnvironment(file_name=path) n_agent, state_dim, action_dim = get_env_info(env) model = Network(state_dim, action_dim, hidden_dims=[512, 256]) model = model.to(device) agent = PPOAgent(env, model, tmax=128, n_epoch=10, batch_size=128, eps=0.1, device=device) history = pd.DataFrame() best_score = 0 n_step = 1500 n_episodes = 0 is_solved = False ##### 5. repeat from 1 for step in tqdm(range(n_step)): agent.step() scores = agent.scores_by_episode if n_episodes < len(scores): n_episodes = len(scores) history.loc[n_episodes, 'score'] = scores[-1] if 100 <= n_episodes: # calc mean score mean_reward = np.mean(scores[-100:]) history.loc[n_episodes, 'mean_score'] = mean_reward # save best model if best_score < mean_reward: best_score = mean_reward best_episode = n_episodes torch.save(model.state_dict(), f"models/bestmodel_{best_score:.2f}.pth") if is_solved==False and mean_reward>30: is_solved = True print(f"episode {best_episode} : mean score = {best_score:.2f},", end="") print("The environment is solved.") print(f"episode {best_episode} : mean score = {best_score:.2f},", end="") print("model saved.") return history if __name__ == "__main__": history = train() # - # #### Show result # + score = history['score'] mean_score = history['mean_score'] index = range(len(score)) plt.scatter(index, score) plt.scatter(index, mean_score) plt.legend(["score", "mean_score"]) plt.xlim([0,150]) plt.xlabel("train step") plt.ylabel("score") plt.grid() # 
-

# #### Future work
# - DDPG would be worth comparing against the current implementation
# - Q-Prop is an algorithm that could further improve on PPO
p2_continuous-control/Report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # day22: Principal Components Analysis # # # # Objectives # # * Understand PCA on both toy and "real" image datasets # * Get familiar with how to perform transformations to low-dim. embeddings. # * Get familiar with how to "reconstruct" images # # # Outline # # * [Part 1: PCA for toy data with F=2 features](#part1) # * [Part 2: PCA with K=10 for image data with many features](#part2) # * [Part 3: PCA where we select K for image data with many features](#part3) # # # Takeaways # # * Understanding how we add/remove the *mean* vector is important # * Understanding that PCA is a nested model # * * The component vectors for K=4 are the first four components of K=5, K=6, or K=10 or ...) # * Selecting the number of components can be done in several ways: # * * Using the fraction of explained variance criteria, as discussed here # * * * Can be done very fast by reusing eigenvalues used at training time anyway # * * Use an application specific performance metric import numpy as np import pandas as pd import sklearn.datasets import sklearn.decomposition # + # import plotting libraries import matplotlib import matplotlib.pyplot as plt # %matplotlib inline plt.style.use('seaborn') # pretty matplotlib plots import seaborn as sns sns.set('notebook', font_scale=1.25, style='whitegrid') # - import matplotlib.colors as mcolors # # Setup: Create Small Toy Dataset with F=2 # # We will have N=21 examples, with 2 features $x_1$ and $x_2$ # + random_state = np.random.RandomState(0) # Create N training x values between -5 and 5 N = 21 F = 2 x1_N = np.linspace(-5, 5, N) + random_state.uniform(low=-0.2, high=0.2, size=N) B = 1.5 x2_N = 1.0 * x1_N + random_state.uniform(low=-B, high=B, size=N) x_NF = np.zeros((N,F)) x_NF[:,0] = x1_N x_NF[:,1] = x2_N # Plot the training 
data plt.plot(x1_N, x2_N, 'ks') plt.title('Training Data') plt.xlabel('x1'); plt.ylabel('x2'); plt.gca().set_aspect('equal', 'box') # - # # Review: Math of Principal Components Analysis (PCA) # # We'll assume feature vectors $x \in \mathbf{R}^F$. # # We want to find a principal components analysis model with $K$ components, which has parameters: # * $m \in \mathbb{R}^F$ : mean vector # * $W \in \mathbb{R}^{F \times K}$ : weight vectorss for each component # # This model makes projections to the latent "codes" $z_n \in \mathbb{R}^K$ like this: # $$ # z_n = \mathbf{W}^T (\mathbf{x}_n - \mathbf{m}) # $$ # # And then makes reconstructions like this: # $$ # \hat{x}_n(\mathbf{m}, \mathbf{W}) = \mathbf{W} z_n + \mathbf{m} # $$ # # The training objective is: # $$ # \min_{\mathbf{m} \in \mathbb{R}^F, \mathbf{W} \in \mathbb{R}^{F\times K}} # \quad \sum_{n=1}^N \sum_{f=1}^F (\mathbf{x}_{nf} - \hat{\mathbf{x}}_{nf}(\mathbf{m}, \mathbf{W}) )^2 # $$ # # # Part 1: PCA for F=2 data # ## Fit a pca model with K=1 component to our data pca = sklearn.decomposition.PCA(n_components=1) pca.fit(x_NF) # This fitting process estimates two parameters, $m$ and $W$, and stores them as attributes of `pca` # ## Inspecting the learned Mean vector $m$ # # This vector is accessible as the `mean_` attribute m_F = pca.mean_ print(m_F.shape) print("") print(m_F) # ## Exercise 1a: Can you convince yourself that this is the same as the training set's empirical mean vector? # # Compute the mean of each feature in the training set `x_NF` # + # TODO # - # ## Inspecting the learned weights $W$ # # This vector is accessible as the `components_` attribute of our learned `pca` model w_KF = pca.components_ print(w_KF.shape) print("") print(w_KF) # ## Exercise 1b: Can you convince yourself that this weight vector is a unit vector? # # That is, is the sum-of-squares magnitude of each component vector (each row) equal to one? 
# + # TODO compute sum of squares of each row of w_KF # - # ## Calling `transform`: Obtaining the low-dimensional embeddings z_NK = pca.transform(x_NF) print(z_NK.shape) print("") print(z_NK) # ### Compute the reconstructions # # Each training example (index $n$) can be reconstructed as: # # $$ # \hat{x}_n(\mathbf{m}, \mathbf{W}) = \mathbf{W} z_n + \mathbf{m} # $$ # # If we convert this to matrix-vector arithmetic for the full dataset, we can do: # # $$ # \hat{X} = \mathbf{Z} \mathbf{W} + \mathbf{m} # $$ # # where $\hat{X}$ is an $N \times F$ matrix, where each row is equal to $\hat{x}_n(\mathbf{m}, \mathbf{W})$ xhat_NF = np.dot(z_NK, w_KF) + m_F[np.newaxis,:] print(xhat_NF.shape) print() print("First 5 rows of this array") print(xhat_NF[:5]) # ### Visualize the reconstructions # + # Plot the training data plt.plot(x1_N, x2_N, 'ks', label='Training Data') # Plot the reconstructed data plt.plot(xhat_NF[:,0], xhat_NF[:,1], 'r.', label='Reconstructed Data', mew=5) # Draw line between training example and reconstruction for n in range(N): xy_22 = np.zeros((2,2)) xy_22[0] = x_NF[n,:] xy_22[1] = xhat_NF[n,:] plt.plot(xy_22[:,0], xy_22[:,1], 'r-') plt.legend(bbox_to_anchor=(1.75, 0.5)) plt.xlabel('x1'); plt.ylabel('x2'); plt.gca().set_aspect('equal', 'box') # - # ## Show that we know how to perform the PCA transform ourselves def my_transform(x_NF, w_KF, m_F): ''' Perform transformation to get low-dim embeddings Args ---- x_NF : 2D array, shape (N,F) w_KF : 2D array, shape (K,F) m_F : 1D array, shape (F,) Returns ------- z_NK : 2D array, shape (n_examples, n_components) = (N,K) Each row is a low-dim. 
"embedding" of the corresponding row in x_NF ''' # Use the equation defining z in terms of x and m and W z_NF = np.dot(x_NF - m_F[np.newaxis,:], w_KF.T) return z_NF z_NK = my_transform(x_NF, w_KF, m_F) print("Shape of z_NK") print(z_NK.shape) print() print("First 5 rows of z_NK") print(z_NK[:5]) # ## Discussion 1c: Are these z_NK values from `my_transform` the same as produced by `pca.transform`? # # + # TODO discuss # - # ## Now try with K=2 PCA for our F=2 dataset pca = sklearn.decomposition.PCA(n_components=2) pca.fit(x_NF) # Print out the transformed embedding features: pca.transform(x_NF) # ## Discussion 1d: What do you notice about the first column of z here when K=2? # # How does it compare with the old z? # + # TODO discuss # - # ## Exercise 1e: Show the learned weight vectors # # Print out the learned weight vectors for K=2 and show your self they are "unit" vectors. # + # TODO access and print weights from the `pca` object # - # ## Discussion 1f: How do the learned weight vectors for K=2 compare to the old weights for K=1? # # What is different? What is the same? Do any vectors point in the same direction? # + # TODO discuss # - # # Part 2: PCA for 8x8 digit images # # Let us load a dataset of 8x8 pixel images of numerical digits (1, 2, ... 
8, 9, 0) that comes from sklearn # + x_all_LF, y_all_L = sklearn.datasets.load_digits(n_class=10, return_X_y=True) x_all_LF = np.asarray(x_all_LF, dtype=np.float64) / 16.0 # Make each pixel intensity between 0 and 1 F = 64 # number of features (flattened 8x8 pixel grayscale image) L = 1797 # all examples N = 1697 # train set size T = 100 # test set size shuffled_rows_L = np.random.RandomState(0).permutation(L) train_rows_N = shuffled_rows_L[:N] test_rows_T = shuffled_rows_L[N:] x_tr_NF = x_all_LF[train_rows_N].copy() y_tr_N = y_all_L[train_rows_N].copy() x_te_TF = x_all_LF[test_rows_T].copy() y_te_T = y_all_L[test_rows_T].copy() # - x_tr_NF.shape x_te_TF.shape # ## Show the first image in training set plt.imshow(x_tr_NF[0].reshape((8,8)), interpolation='nearest', vmin=0, vmax=1, cmap='gray') plt.colorbar(); plt.gcf().set_size_inches((3,3)); # ## Show fifth row image in training set plt.imshow(x_tr_NF[4].reshape((8,8)), interpolation='nearest', vmin=0, vmax=1, cmap='gray') plt.colorbar(); plt.gcf().set_size_inches((3,3)); # # Fit PCA with 10 components to this image dataset # # We'll fit this to the entire training set. 
K = 10 digit_pca = sklearn.decomposition.PCA(n_components=K) digit_pca.fit(x_tr_NF) # Unpack the mean parameter m_F = digit_pca.mean_ # Unpack the weights (K x F matrix) w_KF = digit_pca.components_ w_KF.shape # ## Show the mean image plt.imshow(m_F.reshape((8,8)), interpolation='nearest', vmin=0, vmax=1, cmap='gray') plt.colorbar(); plt.gcf().set_size_inches((3,3)); # ## Show the weight vectors for the first 10 components (each one reshaped as an image) W = 2 H = 2 fig, axgrid = plt.subplots(nrows=2, ncols=5, figsize=(5*W, 2*H)) for k in range(10): cur_ax = axgrid.flatten()[k] im_handle = cur_ax.imshow(w_KF[k].reshape((8,8)), interpolation='nearest', vmin=-.5, vmax=.5, cmap='RdBu_r') cur_ax.set_xticks([]); cur_ax.set_yticks([]); cur_ax.set_title("Comp k=%d" % (k+1)) if k == K - 1: plt.colorbar(im_handle, fraction=0.04, shrink=1.0); # ## Discussion 2a: Are these interpretable? Do you see evidence of the "digits" here? # # Look for evidence of strong vertical lines, or certain holes, etc. What are these components capturing? # + # TODO discuss # - # ## Create reconstructed images x_tr_NF.shape z_tr_NK = my_transform(x_tr_NF, w_KF, m_F) z_tr_NK.shape xhat_tr_NF = np.dot(z_tr_NK, w_KF) + m_F[np.newaxis,:] xhat_tr_NF.shape # ## Show the original and reconstructed images from training set # # We'll look at *first five* training examples. # + row_ids = [0, 1, 2, 3, 4] W = 2 H = 2 fig, axgrid = plt.subplots(nrows=2, ncols=len(row_ids), figsize=(5*W, 2*H)) # Top row: original images (rows of x_tr_NF) for col_id, row_id in enumerate(row_ids): cur_ax = axgrid.flatten()[col_id] im_handle = cur_ax.imshow(x_tr_NF[row_id].reshape((8,8)), interpolation='nearest', vmin=0.0, vmax=1.0, cmap='gray') if col_id == 0: cur_ax.set_ylabel("Original Image") cur_ax.set_title("Im. 
%d" % (row_id)) cur_ax.set_xticks([]); cur_ax.set_yticks([]); # Bottom row: reconstructed images (rows of xhat_tr_NF) for col_id, row_id in enumerate(row_ids): cur_ax = axgrid[1].flatten()[col_id] im_handle = cur_ax.imshow(xhat_tr_NF[row_id].reshape((8,8)), interpolation='nearest', vmin=0.0, vmax=1.0, cmap='gray') if col_id == 0: cur_ax.set_ylabel("Reconstruction\nwith K=10") cur_ax.set_xticks([]); cur_ax.set_yticks([]); plt.colorbar(im_handle, fraction=0.05); plt.tight_layout() # - # ## Show more original and reconstructed images from training set: All 9s # # We'll look at *five other* training examples, selected because they are all 9s # + row_ids = np.hstack(np.flatnonzero(y_tr_N== 9)[-5:]) W = 2 H = 2 fig, axgrid = plt.subplots(nrows=2, ncols=len(row_ids), figsize=(5*W, 2*H)) # Top row: original images (rows of x_tr_NF) for col_id, row_id in enumerate(row_ids): cur_ax = axgrid.flatten()[col_id] if col_id == 0: cur_ax.set_ylabel("Original Image") cur_ax.set_title("Image %d" % (row_id)) im_handle = cur_ax.imshow(x_tr_NF[row_id].reshape((8,8)), interpolation='nearest', vmin=0.0, vmax=1.0, cmap='gray') cur_ax.set_xticks([]); cur_ax.set_yticks([]); # Bottom row: reconstructed images (rows of xhat_tr_NF) for col_id, row_id in enumerate(row_ids): cur_ax = axgrid[1].flatten()[col_id] if col_id == 0: cur_ax.set_ylabel("Reconstruction\nwith K=10") im_handle = cur_ax.imshow(xhat_tr_NF[row_id].reshape((8,8)), interpolation='nearest', vmin=0.0, vmax=1.0, cmap='gray') cur_ax.set_xticks([]); cur_ax.set_yticks([]); plt.colorbar(im_handle, fraction=0.05); plt.tight_layout() # - # ## Discussion 2b: How good are these reconstructions? # # Across the two plots above, what do you notice? # # Remember that we are storing only 10 values for each image, not 64, which is a 6x savings! # # Given that, do you think the reconstructions are pretty good? What visual aspects aren't covered very well? 
# ## Exercise 2c: Show reconstructions for the first five *test* examples # # Step 1: Compute the low-dimensional embedding for the test set x_te_TF z_te_TK = None # TODO transform x_te_TF to obtain z_te_TK by calling pca.transform xhat_te_TF = np.zeros((T,F)) # TODO reconstruct the data by multiplying weights times z, plus adding the mean # + row_ids = [0, 1, 2, 3, 4] # row ids for our test set W = 2 H = 2 fig, axgrid = plt.subplots(nrows=2, ncols=len(row_ids), figsize=(5*W, 2*H)) # Top row: original images (rows of x_tr_NF) for col_id, row_id in enumerate(row_ids): cur_ax = axgrid.flatten()[col_id] if col_id == 0: cur_ax.set_ylabel("Original Image") cur_ax.set_title("Im. %d" % (row_id)) im_handle = cur_ax.imshow(x_te_TF[row_id].reshape((8,8)), interpolation='nearest', vmin=0.0, vmax=1.0, cmap='gray') cur_ax.set_xticks([]); cur_ax.set_yticks([]); # Bottom row: reconstructed images (rows of xhat_tr_NF) for col_id, row_id in enumerate(row_ids): cur_ax = axgrid[1].flatten()[col_id] if col_id == 0: cur_ax.set_ylabel("Reconstruction\nwith K=10") im_handle = cur_ax.imshow(xhat_te_TF[row_id].reshape((8,8)), interpolation='nearest', vmin=0.0, vmax=1.0, cmap='gray') cur_ax.set_xticks([]); cur_ax.set_yticks([]); plt.colorbar(im_handle, fraction=0.05); plt.tight_layout() # - # ## Discussion 2d: Is the PCA model good at reconstructing the test examples it has never seen before? # # Why or why not? Do you notice any differences compared to the training data? # # Part 3: PCA with ALL components and selecting K # ## First, fit PCA that learns all possible components # # Remember that in general, PCA is a *nested* model. # # The weights for K=5 are just the first 5 rows of the weights for K=6. # # The weights for K=9 are just the first 9 rows of the weights for K=10 or K=15, etc. 
all_pca = sklearn.decomposition.PCA().fit(x_tr_NF) # # Determine how each additional component explains observed variance better plt.plot(np.cumsum(all_pca.explained_variance_ratio_), 'bs-'); plt.xlabel('number of components K'); plt.ylabel('fraction of variance explained'); # ## Exercise 3a: What K value will give you at least 80% of the variance? 95%? # # Specify a concrete K value, by carefully interpreting the plot above. # # Often, we use these heuristics to choose a K that strikes a balance between "good" reconstruction and small size. # ## Exercise 3b: Visualize reconstructions for the first 5 training set images with K=30. # # + # TODO compute z # + # TODO compute xhat given z # + # TODO make plot of reconstructions xhat # - # ## Exercise 3c: Visualize reconstructions for the first 5 *test* set images with K=30. # # + # TODO compute z # + # TODO compute xhat given z # + # TODO make plot of reconstructions xhat # - # # Part 4: Understanding what we mean by "explained variance" # A goal of PCA is to find a low-dimensional reconstruction of our data that contains all the possible "information" of our original data. # # One way to define this precisely is to first look at squared error "reconstruction cost" of using just the empirical mean of our training data (this is like PCA with $K=0$) # # \begin{align} # \text{cost}_{K=0}(X, m) &= \frac{1}{N} \sum_{n=1}^N \sum_{f=1}^F (x_{nf} - m_f)^2 \\ # &= \frac{1}{N} \sum_{n=1}^N \sum_{f=1}^F \tilde{x}_{nf}^2, \qquad \tilde{x}_{nf} = x_{nf} - m_f # \end{align} # # Note that this cost is precisely equal to the *empirical variance* of the training set, since variance is the average squared error from the mean. # # This is just a property of the data itself. We can't change it. But it gives us a place to start when quantifying the quality of a reconstruction. # # Now, consider a reconstruction that uses the mean $m$ as well as a good $K$-dimensional weight component matrix $W$. 
# # We should be able to do better than the baseline above, right? # # Let's write the cost when we have $K>0$ principal components: # # \begin{align} # \text{cost}_K(X, m, W) &= \frac{1}{N} \sum_{n=1}^N \sum_{f=1}^F (x_{nf} - m_f - \sum_{k=1}^K W_{kf} \tilde{x}_{nf} )^2 \\ # \\ # &= \frac{1}{N} \sum_{n=1}^N \sum_{f=1}^F (\tilde{x}_{nf} - \sum_{k=1}^K W_{kf} \tilde{x}_{nf} )^2 \\ # \end{align} # # Now, this cost will have units that depend on the scale of the original data. However, we can look at the *ratio* of this cost to the original mean-only cost above. # # \begin{align} # \text{frac-unexplained-variance} = \frac{\text{cost}_K(X, m, W)}{\text{cost}_{K=0}(X,m)} # &= \frac # { \frac{1}{N} \sum_{n=1}^N \sum_{f=1}^F (\tilde{x}_{nf} - \sum_{k=1}^K W_{kf} \tilde{x}_{nf} )^2 } # { \frac{1}{N} \sum_{n=1}^N \sum_{f=1}^F \tilde{x}_{nf}^2 } # \end{align} # # Notice that as $K \rightarrow 0$, this ratio approaches 1, while as $K \rightarrow F$ this ratio approaches 0 on the training set. It turns out that this ratio will *monotonically decrease* as we increase $K$ and iteratively find the optimal weights $W$ for that K value. # # We can thus interpret this fraction as a measure of the *unexplained* variance of our original dataset. If the fraction is close to 1., most of our data is not well approximated by our PCA reconstructions. If the fraction is close to 0, we are doing a good job (most of the variance is accounted for). # # # ### Surprising result: We can compute this fraction simply using sums of eigenvalues we already have! # Now we can show a surprising result. The denominator can be computed as the sum of all eigenvalues of the covariance of $X$. And the numerator can be computed as a partial version of this sum. 
# # $$ # \text{frac-unexplained-variance} = \frac{\text{cost}_K(X, m, W)}{\text{cost}_{K=0}(X,m)} # = \frac # { \sum_{\ell={K+1}}^F \lambda_{\ell} } # { \sum_{f={1}}^F \lambda_{f} } # $$ # # where $\lambda_k$ is the $k$-th largest eigenvalue of the $F \times F$ covariance matrix of the training data # ## Computing the denominator (PCA with K=0 reconstruction cost) in 3 ways # ### Method 1: Directly compute reconstruction cost with K=0 model m_1F = np.mean(x_tr_NF, axis=0)[np.newaxis,:] costK0_v1 = np.sum(np.mean(np.square(x_tr_NF - m_1F), axis=0)) costK0_v1 # ### Method 2: Compute cost by measuring empirical variance of each feature and adding up cov_FF = np.cov(x_tr_NF.T, bias=1) var_F = np.diag(cov_FF) costK0_v2 = np.sum(var_F) costK0_v2 # ### Method 3: Compute cost as sum of eigenvalues of covariance matrix lam_F, W_FF = np.linalg.eig(cov_FF) # Get eigenvalues in sorted order from big to small sorted_ids = np.argsort(-1 * lam_F) lam_F = lam_F[sorted_ids] W_FF = W_FF[:, sorted_ids] lam_F[:10] costK0_v3 = np.sum(lam_F) costK0_v3 # Verify all three methods are the same! 
costK0_v1 costK0_v2 costK0_v3 # ## Computing the numerator (PCA with K-components reconstruction cost) in 2 ways K = 3 xtilde_NF = x_tr_NF - m_1F # ### Method 1: Direct reconstruction cost calculation W_KF = W_FF[:, :K].T.copy() # Compute xhat given x in 2 steps: # # * project each x to z # * then project each z back to xhat # Step 1: x to z z_tr_NK = np.dot(xtilde_NF, W_KF.T) # Step 2: z to xhat xhat_tr_NF = np.dot(z_tr_NK, W_KF) + m_1F # Now compute the total reconstruction cost costK_v1 = np.sum(np.mean(np.square(x_tr_NF - xhat_tr_NF), axis=0)) costK_v1 # ### Method 2: Calculation just summing all eigenvalues except the top-K costK_v2 = np.sum(lam_F[K:]) costK_v2 np.allclose(costK_v1, costK_v2) # ## Bringing it all together: Fraction of variance explained # Thus, a measure of our reconstruction quality on the training set is given by the fraction of variance that is explained by our $K$-component PCA # # Note, we switch here to using fraction-of-explained-variance (rather than unexplained). # # $$ # \text{frac-explained-variance} = 1 - \frac{\text{cost}_K(X, m, W)}{\text{cost}_{K=0}(X,m)} # = \frac # { \sum_{\ell={1}}^K \lambda_{\ell} } # { \sum_{f={1}}^F \lambda_{f} } # $$ # K # ### Version 1: Ratio of direct reconstruction costs (PCA with K to PCA with 0 components) frac_explained_var = 1 - (costK_v1 / costK0_v1) frac_explained_var # ### Version 2: Ratio of sums of eigenvectors frac_explained_var_v2 = np.sum(lam_F[:K]) / np.sum(lam_F) frac_explained_var_v2 # ### Version 3: Check against sklearn implementation np.sum(all_pca.explained_variance_ratio_[:K])
labs/day22_PrincipalComponentsAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape the Wikipedia table of Toronto ("M"-prefixed) postal codes,
# clean it into a Postcode/Borough/Neighborhood DataFrame, and save it
# as a JSON file.

# # Request HTML

# !pip install requests

# +
import requests

# Wikipedia page listing the Canadian "M" (Toronto) postal codes.
url = r'https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'
r = requests.get(url)
print (r.content[:100])
# -

# # Process as BeautifulSoup

# !pip install beautifulsoup4

# +
from bs4 import BeautifulSoup

soup = BeautifulSoup(r.content, 'html.parser')
# -

# # Parse elements into Pandas DataFrame

rows = soup.select('.wikitable tbody tr')

# +
# The first table row holds the column headers.
row = rows[0]
name = row.text.strip().split('\n')
print (name)
# -

# Drop the header row; everything else is data.
rows = rows[1:]

# +
# Each row's text splits into [postcode, borough, neighborhood].
data = []
for row in rows:
    pc = row.text.strip().split('\n')
    print (pc)
    d = dict()
    d['Postcode'] = pc[0]
    d['Borough'] = pc[1]
    d['Neighborhood'] = pc[2]
    data.append(d)
# -

# !pip install pandas
# !pip install numpy

import pandas as pd
import numpy as np

df = pd.DataFrame(data)
df.head(10)

# # Process Postcodes

# ## Filter out Unassigned Boroughs

df = df[df['Borough'] != 'Not assigned']

# ## If no Neighborhood name, Borough is the Neighborhood name

df[df['Neighborhood'] == 'Not assigned']

# Use np.nan here: the capitalized alias np.NaN was removed in NumPy 2.0.
df['Neighborhood'] = df['Neighborhood'].replace('Not assigned', np.nan)
df['Neighborhood'].fillna(df['Borough'], inplace=True)
df.head(10)

# ## Combine multiple Neighborhoods on same line, separated by commas

df = df.groupby(['Postcode', 'Borough'])['Neighborhood'].apply(lambda x: ', '.join(x.astype(str))).reset_index()
df.head(10)

# # Save DataFrame as JSON file

j = df.to_dict()

# +
import json

with open('YYZ_Neighborhoods.json', 'w') as f:
    json.dump(j, f)
# -

# # Shape of DataFrame

df.shape
Scrape_YYZ_Postal_Codes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Topic modelling of my own tweet history: pull tweets, clean them, lemmatize
# with spaCy, fit a Gensim LDA model, and keep the words that recur across
# topics.  Depends on project helpers loaded by the %run lines below
# (extract_users_tweets, extract_text, extract_hashtags, filtration,
# dataframe_to_dict, pickle_object), plus Twitter API access and local disk.

# +
# %run helper_functions.py
# %run tweepy_wrapper.py
# %run s3.py
# %run mongo.py
# %run df_functions.py

import pandas as pd
import string
from nltk.corpus import stopwords

# English stopwords plus Twitter noise ("rt", "via") and arrow/guillemet
# artifacts that survive scraping.  NOTE(review): `string` is imported but
# never used in this notebook.
nltk_stopwords = stopwords.words("english")+["rt", "via","-»","--»","--","---","-->","<--","->","<-","«--","«","«-","»","«»"]
# -

# ### Step 1: Obtain my tweets!
#
# I will obtain my entire tweet history! Note: For 2nd degree potential followers, I only extract 200 of their most recent tweets!

gabr_tweets = extract_users_tweets("gabr_ibrahim", 2000)

# ### Step 2: Create a dictionary from my tweets
#
# This dictionary will have the same structure as our already collected 2nd degree followers

# +
gabr_dict = dict()

gabr_dict['gabr_ibrahim'] = {"content" : [], "hashtags" : [], "retweet_count": [], "favorite_count": []}

# Flatten each tweet object into the per-user dict of parallel lists.
for tweet in gabr_tweets:
    text = extract_text(tweet)
    hashtags = extract_hashtags(tweet)
    rts = tweet.retweet_count
    fav = tweet.favorite_count

    gabr_dict['gabr_ibrahim']['content'].append(text)
    gabr_dict['gabr_ibrahim']['hashtags'].extend(hashtags)
    gabr_dict['gabr_ibrahim']["retweet_count"].append(rts)
    gabr_dict['gabr_ibrahim']["favorite_count"].append(fav)
# -

# ### Step 3: Create a dataframe from my tweets
#
# We will now turn this dictionary into a dataframe - I do this as it allows me to utilise pandas in cleaning the content of my tweets!
#
# After the cleaning on the 'content' column, I will convert the dataframe back into a dictionary.

gabr_tweets_df = pd.DataFrame.from_dict(gabr_dict, orient='index')

gabr_tweets_df.head()

clean_gabr_tweets = filtration(gabr_tweets_df, "content")

clean_gabr_tweets = dataframe_to_dict(clean_gabr_tweets)

clean_gabr_tweets #this is a list of 1 dictionary

# ### Step 4: LDA Analysis
#
# Let's now move onto the LDA pre-processing stage and analysis!

import spacy
import nltk
from gensim.models import Phrases
from gensim.models.word2vec import LineSentence
from gensim.corpora import Dictionary, MmCorpus
from gensim.models.ldamulticore import LdaMulticore
import pyLDAvis
import pyLDAvis.gensim
from collections import Counter
# NOTE(review): Dictionary is already imported two lines above; this
# re-import is redundant but harmless.
from gensim.corpora.dictionary import Dictionary

# Load spaCy's English model (older spaCy shorthand name).
nlp = spacy.load('en')

gabr_tweets = clean_gabr_tweets[0]['gabr_ibrahim']['content']

gabr_tweets[:5]

# Let's now proceed to tokenize these tweets in addition to lemmatizing them! This will help improve the performance of our LDA model!
#
# I will utilise spacy for this process as it is a production grade NLP library that is exceptionally fast!

# Drop whitespace/punctuation/stopword/digit tokens and tokens of length 1-2,
# then rebuild each tweet as a space-joined string of lemmas.
tokenized_tweets = []
for tweet in gabr_tweets:
    tokenized_tweet = nlp(tweet)
    tweet = "" # we want to keep each tweet separate
    for token in tokenized_tweet:
        if token.is_space:
            continue
        elif token.is_punct:
            continue
        elif token.is_stop:
            continue
        elif token.is_digit:
            continue
        elif len(token) == 1:
            continue
        elif len(token) == 2:
            continue
        else:
            tweet += str(token.lemma_) + " " #creating lemmatized version of tweet
    tokenized_tweets.append(tweet)

tokenized_tweets = list(map(str.strip, tokenized_tweets)) # strip whitespace

tokenized_tweets = [x for x in tokenized_tweets if x != ""] # remove empty entries

tokenized_tweets[:5] # you can see how this is different to the raw tweets!

# Lets now add these tokenized tweets to our dictionary!

clean_gabr_tweets[0]['gabr_ibrahim']['tokenized_tweets'] = tokenized_tweets

# I will now turn the dictionary back into a dataframe, run it through the filtration function before re-casting the dataframe into a dictionary.
#
# This time, we are running the filtration process on the tokenized tweets column and not the content column.
#
# NLP models are very sensitive - ensuring consistent cleaning is important!

clean_gabr_tweets_df = pd.DataFrame.from_dict(clean_gabr_tweets[0], orient='index')

clean_gabr_tweets_df.head()

clean_gabr_tweets_df = filtration(clean_gabr_tweets_df, "tokenized_tweets")

clean_gabr_tweets = dataframe_to_dict(clean_gabr_tweets_df)

clean_gabr_tweets[0]['gabr_ibrahim']['tokenized_tweets'][:5]

# ### Gensim LDA Process
#
# Fantastic - at this point, we have everything we need to proceed with LDA from the Gensim Library.
#
# LDA via the Gensim library requires that our data be in a very specific format: a Dictionary object that is later used to create a matrix called a corpus.  The Gensim Dictionary requires a list of lists, where every sublist is one tweet split into tokens, e.g.
#
# Before:  ['great turnout today hope able join ...', tweet 2, ...]
#
# Correct Gensim format:  [['great', 'turnout', 'today', ...], [tweet 2 split], ...]

list_of_tweets_gabr = clean_gabr_tweets[0]['gabr_ibrahim']['tokenized_tweets']

# Split each cleaned tweet string into the token list Gensim expects.
gensim_format_tweets = []
for tweet in list_of_tweets_gabr:
    list_form = tweet.split()
    gensim_format_tweets.append(list_form)

gensim_format_tweets[:5]

gensim_dictionary = Dictionary(gensim_format_tweets)

# Now, I will filter out extreme words - that is, words that appear far too often and words that are rare:
# keep tokens present in at least 10 tweets and in at most 40% of all tweets.

gensim_dictionary.filter_extremes(no_below=10, no_above=0.4)

gensim_dictionary.compactify() # remove gaps after words that were removed

# We now need to vectorize all the tweets so they can be fed to the LDA algorithm, by pushing each tweet through a bag-of-words model.  The result is a 'corpus' representing all tweets for a particular user (here: me), which we save to disk via Gensim's MmCorpus.

# !pwd

# NOTE(review): no trailing slash here, so the serialized files land in
# /home/igabr with a "new-project-4" filename *prefix* rather than inside the
# project directory.  Save and load use the same expression, so it still
# round-trips -- but confirm this is intended.
file_path_corpus = "/home/igabr/new-project-4"

def bag_of_words_generator(lst, dictionary):
    """Yield the Gensim bag-of-words vector for each tokenized document in lst."""
    assert type(dictionary) == Dictionary, "Please enter a Gensim Dictionary"
    for i in lst:
        yield dictionary.doc2bow(i)

MmCorpus.serialize(file_path_corpus+"{}.mm".format("gabr_ibrahim"), bag_of_words_generator(gensim_format_tweets, gensim_dictionary))

corpus = MmCorpus(file_path_corpus+"{}.mm".format("gabr_ibrahim"))

corpus.num_terms # the number of terms in our corpus!

corpus.num_docs # the number of documents. These are the number of tweets!

# # Now for the LDA part!
#
# I will be using the LdaMulticore class from gensim, with passes=100 and chunksize=2000: the chunksize covers all documents at once, and passes makes the model sweep the corpus 100 times before converging.
#
# As I am using my ENTIRE tweet history, I will create 30 topics!  I will adjust this to 10 when running LDA on 2nd degree connections, as I will only have 200 of their tweets!

# NOTE(review): workers=100 requests 100 worker processes; in practice Gensim
# caps useful workers at cpu_count-1, so this effectively means "use all cores".
lda = LdaMulticore(corpus, num_topics=30, id2word=gensim_dictionary, chunksize=2000, workers=100, passes=100)

# I can then save this lda model!

lda.save(file_path_corpus+"lda_model_{}".format("gabr_ibrahim"))

lda = LdaMulticore.load(file_path_corpus+"lda_model_{}".format("gabr_ibrahim"))

# I now wish to extract all of the words that appear in each of the 30 topics the LDA model created.  For each word in a topic I check that it has a non-zero frequency, collect the words into one list, and wrap a Counter around it.
#
# The Counter simply records in how many of the 30 topics (at most) a word appears with non-zero weight; words a user keeps coming back to across topics are indicative of what they talk about.

from collections import Counter

# +
# Gather the top-100 words of every topic (skipping zero-probability terms);
# `temp` then counts in how many of the 30 topics each word appears.
word_list = []
for i in range(30):
    for term, frequency in lda.show_topic(i, topn=100): #returns top 100 words for a topic
        if frequency != 0:
            word_list.append(term)

temp = Counter(word_list)
# -

len(temp)

# This can be done later to help filter the important words.

# Keep words appearing in at least 10 topics that are not stopwords.
# NOTE(review): the inner token loop appends `k` once per surviving spaCy
# token, so a multi-token key can be appended more than once -- confirm
# duplicates are acceptable downstream.
important_words = []
for k, v in temp.items():
    if v >= 10:
        if k not in nltk_stopwords:
            doc = nlp(k)
            for token in doc:
                if not token.is_stop:
                    if len(token) != 2:
                        important_words.append(k)

important_words

len(important_words)

# I will then place this LDA Counter Object back into our dictionary!
#
# We will then pickle this object - we will use it again for our TF-IDF analysis!
#
# Be sure to look at the file called lda.py to see how I structured the code to run through the 2nd degree connections!

clean_gabr_tweets[0]['gabr_ibrahim'].keys()

clean_gabr_tweets[0]['gabr_ibrahim']['LDA'] = temp

pickle_object(clean_gabr_tweets, "gabr_ibrahim_tweets_LDA_Complete")
04-Project-Fletcher/Phases/Phase_3/Phase_3_Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="a8c70685"
# # Modules for the toy-project crawler
#
# Crawls Kinolights weekly charts for Netflix/Tving titles, cleans the
# title strings, then looks up Instagram hashtag post counts for each title.
#
# NOTE(review): written against Selenium 3 -- the driver.find_element_by_*
# helpers used throughout were removed in Selenium 4 (use
# driver.find_element(By.XPATH, ...) there).  Also, `urllib` is imported but
# never used.

# + id="8407cbb1"
import time
import urllib
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import numpy as np
import pandas as pd

# + [markdown] id="d3787d35"
# # Kinolights crawling

# + id="02dec9ba"
# Open the mobile Kinolights site and navigate to the weekly theme list.
driver = webdriver.Chrome('./chromedriver_win32/chromedriver.exe')
url = 'https://m.kinolights.com/'
driver.get(url)
driver.find_element_by_xpath('/html/body/div/div/div/header/nav/a[2]').click()
time.sleep(3)
driver.find_element_by_css_selector('section.theme-list-section > ul > li:nth-child(6) > a').send_keys('\n')
time.sleep(5)

# Accumulators: per-service title lists and a combined list of all titles.
netflix_name = []
tving_name = []
tot_list = []

def click_contents(c):
    # Open the given service's tab on the current chart page and append the
    # titles it shows to the service list and the combined list.
    # Appends cumulatively across calls; the tail loop at the end of the
    # crawl de-overlaps the per-week slices.
    if c == 'Netflix':
        driver.find_element_by_xpath('//*[@id="triggerMenu"]/div/div[1]/div/div/button[1]/i').click()
        time.sleep(2)
        driver.find_element_by_xpath('//*[@id="listArea"]/div[1]/button/i').click()
        time.sleep(2)
        driver.find_element_by_xpath('//*[@id="modalContentBody"]/div/div[4]/button').click()
        html = BeautifulSoup(driver.page_source, 'html.parser')
        for name1 in html.select('div > div.title'):
            netflix_name.append(name1.text)
            tot_list.append(name1.text)
        driver.find_element_by_xpath('//*[@id="triggerMenu"]/div/div[1]/div/div/button[1]/i').click()
    elif c == 'Tving':
        driver.find_element_by_xpath('//*[@id="triggerMenu"]/div/div[1]/div/div/button[4]/i').click()
        time.sleep(2)
        html = BeautifulSoup(driver.page_source, 'html.parser')
        for name2 in html.select('div > div.title'):
            tving_name.append(name2.text)
            tot_list.append(name2.text)
        driver.find_element_by_xpath('//*[@id="triggerMenu"]/div/div[1]/div/div/button[4]/i').click()

dist = ['Netflix', 'Tving']
dt = []               # chart labels (dates), scraped from each page
netflix_list = {}     # label -> cumulative Netflix titles up to that week
tving_list = {}       # label -> cumulative Tving titles up to that week

# Visit each of the 37 chart entries, scrape its label and both services.
for i in range(1, 38):
    driver.maximize_window()
    driver.find_element_by_xpath(f'//*[@id="contents"]/div[2]/div/div[{i}]/a/div[2]/div/div/div').click()
    time.sleep(5)
    html = BeautifulSoup(driver.page_source, 'html.parser')
    for d in html.select('#editorComment > p'):
        dt.append(d.text)
    # Trim the label at the '차' character (presumably the "N차/week" suffix
    # in the editor comment -- TODO confirm against the live page text).
    dt[i-1] = dt[i-1][0:dt[i-1].index('차')]
    if i == 1:
        click_contents(dist[0])
        click_contents(dist[1])
        netflix_list[dt[i-1]] = np.array(netflix_name)
        tving_list[dt[i-1]] = np.array(tving_name)
        time.sleep(3)
    elif i >= 2:
        click_contents(dist[0])
        click_contents(dist[1])
        netflix_list[dt[i-1]] = np.array(netflix_name)
        tving_list[dt[i-1]] = np.array(tving_name)
        time.sleep(3)
    driver.find_element_by_xpath('//*[@id="header"]/div/button[1]/i').click()
    time.sleep(3)
    print(f'크롤링 {i} 완료. 다음으로 진행 시작.')
    if i % 11 == 0:
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(2)
    # NOTE(review): unreachable -- range(1, 38) never yields i > 37.
    if i > 37:
        break
print('크롤링 끝')

# The per-week arrays were accumulated cumulatively; walking backwards and
# dropping each previous week's prefix leaves only that week's new titles.
for t in range(36, 0, -1):
    netflix_list[dt[t]] = netflix_list[dt[t]][len(netflix_list[dt[t-1]]):]
    tving_list[dt[t]] = tving_list[dt[t]][len(tving_list[dt[t-1]]):]

# + id="d84a44c8"
# Inspect crawl results
print(netflix_list)
print(tving_list)
print(tot_list)
# De-duplicate
print(set(tot_list))
print(len(set(tot_list)))

# + [markdown] id="18841752"
# # Cleaning the crawled data

# + [markdown] id="4b9d9ab1"
# ## Build a DataFrame from the crawl results

# + id="d3741dc7"
df = pd.DataFrame(columns=['Date', 'Netflix', 'Tving'])
df['Date'] = dt
df['Netflix'] = netflix_list.values()
df['Tving'] = tving_list.values()
df

# + [markdown] id="9a59c010"
# ## Clean the de-duplicated content titles

# + id="e7b93123"
# Round-trip the de-duplicated titles through CSV, then hand-fix entries:
# drop unusable ones, and prefix movies/dramas so Instagram hashtags resolve.
pd.DataFrame(set(tot_list)).to_csv('result_contents_for_crawling.csv', encoding = 'utf8', index = False)
df3 = pd.read_csv('result_contents_for_crawling.csv', encoding = 'utf8')
df3 = df3['0'].values
df3 = list(df3)
df3.remove('오버로드')
df3.remove('슈퍼 미')
df3.remove('진격의 거인 The Final Season')
df3.remove('일상의 관계')
# Titles that need a "영화" (movie) prefix / "드라마" (drama) prefix.
m_nm = ['아이', '새콤달콤', '극한직업', '노바디', '소울', '1987', '크롤', '미나리', '헌트', '발신제한', '알라딘', '원더 우먼', '인질', '담보', '런', '백두산', '작은 아씨들', '싱크홀', '차인표', '런치박스', '소리도 없이']
d_nm = ['루카', '괴물', 'D.P.', '마우스', '런 온', '로스쿨']
nm_list = []
for m in m_nm:
    df3 = list(df3)
    df3.remove(m)
    m = f'영화 {m}'
    nm_list.append(m)
for d in d_nm:
    df3 = list(df3)
    df3.remove(d)
    d = f'드라마 {d}'
    nm_list.append(d)
# Normalize remaining titles (special cases, then strip '!'/':').
for i in range(len(df3)):
    if '곡성' in df3[i]:
        df3[i] = '영화 곡성'
    elif '파트' in df3[i]:
        df3[i] = '뤼팽'
    elif '난노' in df3[i]:
        df3[i] = '그녀의 이름은 난노'
    elif '!' in df3[i]:
        df3[i] = df3[i].replace('!', '')
    elif ':' in df3[i]:
        df3[i] = df3[i].replace(':', '')
print(df3)
print(nm_list)
print(len(df3))
for l in nm_list:
    df3 = list(df3)
    df3.append(l)
for i in range(len(df3)):
    if 'D.P.' in df3[i]:
        df3[i] = '드라마 DP'
pd.DataFrame(df3).to_csv('processed_nm of contents.csv', encoding = 'utf8', index = False)

# + [markdown] id="aaae655c"
# # SNS (Instagram) crawling

# + id="998a6247"
# Log in to Instagram interactively, then look up the hashtag post count for
# every cleaned title in df3.
driver = webdriver.Chrome('./chromedriver_win32/chromedriver.exe')
url = 'https://www.instagram.com/'
driver.get(url)
time.sleep(4)
ins_id = input('아이디를 입력하세요 : ')
pw = input('비밀번호를 입력하세요 : ')
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(ins_id)
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(pw)
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button/div').click()
time.sleep(3)
driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/section/div/button').click()
time.sleep(3)
# Dismiss the notifications popup; its container index varies between loads.
try:
    driver.find_element_by_xpath('/html/body/div[6]/div/div/div/div[3]/button[2]').click()
    time.sleep(4)
except:
    driver.find_element_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]').send_keys('\n')
    time.sleep(4)
key_word = df3
total_cnt = {}  # title -> hashtag post count
c = 0
for i, j in zip(key_word, range(len(key_word))):
    c += 1
    print(f'크롤링 중 {c}')
    # Search the hashtag, open the first suggestion, and read the post count.
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys('#'+i)
    time.sleep(4)
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/div[3]/div/div[2]/div/div[1]/a/div').click()
    html = BeautifulSoup(driver.page_source, 'html.parser')
    # Two selectors because Instagram serves two different header layouts.
    try:
        for cnt in html.select('main > header > div.WSpok > div > div:nth-of-type(2) > span > span'):
            if len(cnt.text.replace(',', "")) >= 4:
                total_cnt[i] = int(cnt.text.replace(',', ""))
                time.sleep(6)
            elif len(cnt.text.replace(',', "")) < 4:
                total_cnt[i] = int(cnt.text)
                time.sleep(6)
    except:
        for cnt in html.select('main > header > div.WSpok > div > div.qF0y9.Igw0E.IwRSH.eGOV_._4EzTm.a39_R > span > span'):
            if len(cnt.text.replace(',', "")) >= 4:
                total_cnt[i] = int(cnt.text.replace(',', ""))
                time.sleep(6)
            elif len(cnt.text.replace(',', "")) < 4:
                total_cnt[i] = int(cnt.text)
                time.sleep(6)
    # Clear the search box for the next keyword.
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys(Keys.CONTROL, 'a')
    time.sleep(5)
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys(Keys.DELETE)
    time.sleep(5)
print(total_cnt)
driver.close()

# + [markdown] id="95cb827d"
# ## Build a DataFrame from the SNS crawl results

# + id="cf85c324"
import pandas as pd
k_nm = []
v_nm = []
for k, v in total_cnt.items():
    k_nm.append(k)
    v_nm.append(v)
print(k_nm)
print(v_nm)
df = pd.DataFrame(columns = ['Name', 'Count'])
df['Name'] = k_nm
df['Count'] = v_nm
df

# + [markdown] id="39427a58"
# ## Re-crawl the entries that failed and clean them

# + id="83f36d35"
# Fresh login, then redo the lookups from row 94 onwards (the first run
# stalled there).
driver = webdriver.Chrome('./chromedriver_win32/chromedriver.exe')
url = 'https://www.instagram.com/'
driver.get(url)
time.sleep(4)
ins_id = input('아이디를 입력하세요 : ')
pw = input('비밀번호를 입력하세요 : ')
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(ins_id)
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(pw)
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button/div').click()
time.sleep(3)
driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/section/div/button').click()
time.sleep(3)
try:
    driver.find_element_by_xpath('/html/body/div[6]/div/div/div/div[3]/button[2]').click()
    time.sleep(4)
except:
    driver.find_element_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]').send_keys('\n')
    time.sleep(4)
df.to_csv('insta_count.csv', encoding = 'utf8', index = False)
# NOTE(review): newer pandas requires the full option name
# 'display.max_rows'; the short alias may raise an OptionError -- verify.
pd.set_option('max_rows', None)
d = pd.read_csv('insta_count.csv', encoding = 'utf8')
a = d[['Name', 'Count']][:95]   # rows that crawled fine
b = d['Name'][94:].values       # names to retry
key_word = b
total_cnt1 = {}
c = 0
for i, j in zip(key_word, range(1, len(key_word))):
    c += 1
    print(f'크롤링 중 {c}')
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys('#'+i)
    time.sleep(4)
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/div[3]/div/div[2]/div/div[1]/a/div').click()
    html = BeautifulSoup(driver.page_source, 'html.parser')
    try:
        for cnt in html.select('main > header > div.WSpok > div > div:nth-child(2) > span > span'):
            if len(cnt.text.replace(',', "")) >= 4:
                total_cnt1[i] = int(cnt.text.replace(',', ""))
                time.sleep(6)
                print(total_cnt1[i])
            elif len(cnt.text.replace(',', "")) < 4:
                # NOTE(review): `i` is a title string here, so total_cnt1[i-1]
                # would raise a TypeError if this branch is ever reached --
                # likely meant total_cnt1[i]; confirm.
                total_cnt1[i-1] = int(cnt.text)
                time.sleep(6)
                print(total_cnt1[i])
    except:
        for cnt in html.select('main > header > div.WSpok > div > div.qF0y9.Igw0E.IwRSH.eGOV_._4EzTm.a39_R > span > span'):
            if len(cnt.text.replace(',', "")) >= 4:
                total_cnt1[i] = int(cnt.text.replace(',', ""))
                time.sleep(6)
                print(total_cnt1[i])
            elif len(cnt.text.replace(',', "")) < 4:
                total_cnt1[i] = int(cnt.text)
                time.sleep(6)
                print(total_cnt1[i])
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys(Keys.CONTROL, 'a')
    time.sleep(5)
    driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys(Keys.DELETE)
    time.sleep(5)
print(total_cnt1)
driver.close()

# + id="5d88e3c6"
# Drop the duplicate entry, then build a DataFrame from the retry results.
del total_cnt1['영화 담보']
import pandas as pd
k1_nm = []
v1_nm = []
for k1, v1 in total_cnt1.items():
    k1_nm.append(k1)
    v1_nm.append(v1)
print(k1_nm)
print(v1_nm)
df1 = pd.DataFrame(columns = ['Name', 'Count'])
df1['Name'] = k1_nm
df1['Count'] = v1_nm
df1

# + id="1887723d"
# One-off manual lookup for a single hashtag that both runs missed.
total = {}
driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input').send_keys('#드라마 로스쿨')
time.sleep(4)
driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/div[3]/div/div[2]/div/div[1]/a/div').click()
html = BeautifulSoup(driver.page_source, 'html.parser')
for cnt in html.select('div.WSpok > div > div.qF0y9.Igw0E.IwRSH.eGOV_._4EzTm.a39_R > span > span'):
    if len(cnt.text.replace(',', "")) >= 4:
        total['드라마 로스쿨'] = int(cnt.text.replace(',', ""))
        time.sleep(6)
        print(total['드라마 로스쿨'])
print(total)

# + [markdown] id="6999b17f"
# ## Merge the completed crawl results into one DataFrame, normalize the titles, and save to CSV

# + id="0bf8e923"
import pandas as pd
knm = []
vnm = []
for n, m in total.items():
    knm.append(n)
    vnm.append(m)
nm1 = pd.DataFrame(columns = ['Name', 'Count'])
nm1['Name'] = knm
nm1['Count'] = vnm
print(nm1)
# Stack the first run, the retry run, and the one-off lookup.
new_df = pd.concat([a, df1, nm1], axis = 0)
df_nm = new_df['Name'].values
df_nm = list(df_nm)
print(df_nm)
# First pass: special-case titles, then strip punctuation/spaces.
for i in range(len(df_nm)):
    if '곡성' in df_nm[i]:
        df_nm[i] = '곡성'
    elif '!' in df_nm[i]:
        df_nm[i] = df_nm[i].replace('!', '')
    elif ':' in df_nm[i]:
        df_nm[i] = df_nm[i].replace(':', '')
    elif 'DP' in df_nm[i]:
        df_nm[i] = 'DP'
    elif ' ' in df_nm[i]:
        df_nm[i]=df_nm[i].replace(" ", "")
    elif ',' in df_nm[i]:
        df_nm[i] = df_nm[i].replace(',', '')
# Second pass: remove the "영화"/"드라마" prefixes added earlier.
# NOTE(review): str.strip('영화 ') strips any of the characters
# '영', '화', ' ' from both ends, not the literal prefix -- titles starting
# or ending with those characters could be over-trimmed; removeprefix would
# be the exact operation.
for j in range(len(df_nm)):
    if '영화' in df_nm[j]:
        df_nm[j] = df_nm[j].strip('영화 ')
    elif '드라' in df_nm[j]:
        df_nm[j] = df_nm[j].strip('드라마 ')
    elif '마우스' in df_nm[j]:
        df_nm[j] = '마우스'
    elif ' ' in df_nm[j]:
        df_nm[j]=df_nm[j].replace(" ", "")
    elif ',' in df_nm[j]:
        df_nm[j] = df_nm[j].replace(',', '')
new_df['Name'] = df_nm
new_df.set_index(['Name'], inplace=True)
print(new_df)
new_df.to_csv('instagram crawling.csv', encoding='utf8')
pd_df = pd.read_csv('instagram crawling.csv', encoding='utf8')
# Final hand-fixes for titles the character stripping mangled.
for j in range(len(pd_df)):
    if '언제나그리고영원히' in pd_df['Name'][j]:
        pd_df['Name'][j] = "내가사랑했던모든남자들에게"
    elif '시지프스' in pd_df['Name'][j]:
        pd_df['Name'][j] = "시지프스"
    elif '우스' in pd_df['Name'][j]:
        pd_df['Name'][j] = '마우스'
pd_df.drop(axis=1, columns = 'Unnamed: 0', inplace=True)
pd_df

# + id="166841d9"
pd_df.to_csv('instagram crawling.csv', encoding='utf8')
Toyproject_ent.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Smoke-test the Policy ORM model and bulk-load scraped policy documents
# from a CSV into the database (models/engine configured in db_setup).

# %load_ext autoreload
# %autoreload 2

# + tags=[]
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from pathlib import Path

from db_setup import Base, Policy, engine, DBSession
# -

# Open a database session.
session = DBSession()

# + tags=[]
# List the mapped attributes of the Policy model.
for attribute, value in Policy.__dict__.items():
    print (attribute)
# -

# Insert a single hand-written document as a sanity check.
my_doc = Policy(geo_code = 'SLV', url = 'https://github.com/OmdenaAI/2', reference = 'CODIGO 1234', title ='MyTitle', source = "scraping", country='Salvador')

# + tags=[]
my_doc.save_to_db()
# -

# In case your code throws an error you have to run session.rollback() before trying to do other things

session.rollback()

# # Load csv to database

import pandas as pd

csv_path = Path.cwd().parent.joinpath('data').joinpath('external').joinpath('Scraped_Documents_Peru&Mexico.csv')
csv_path

df = pd.read_csv(csv_path)

df.columns

# + tags=["outputPrepend"]
for index, row in df.iterrows():
    my_doc = Policy()
    my_doc.country = row['country']
    my_doc.title = row['title']
    my_doc.reference = row['reference']
    # Normalize the scraped date before storing it.  Passing the raw string
    # straight through produced '0000-00-00' rows in the database;
    # pd.to_datetime parses the CSV's date strings and errors='coerce' turns
    # anything unparseable into NaT, which we store as NULL instead of a
    # bogus date.
    parsed_date = pd.to_datetime(row['date'], errors='coerce')
    my_doc.publication_date = None if pd.isna(parsed_date) else parsed_date.date()
    my_doc.doc_class = row['category']
    my_doc.doc_url = row['url']
    my_doc.save_to_db()
    #print(row['country'], row['title'])
# -

session.close()
tasks/mySQLdb/Connect to DB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + pycharm={"name": "#%%\n"}
class Node:
    """A single node of a singly linked list: a value plus a link to the next node."""

    def __init__(self, data):
        self.data = data
        # Link to the next node; None until the node is wired into a list.
        self.address_of_next_node = None


def print_linked_list(head):
    """Print the list from head as 'd1 -> d2 -> ' (prints nothing for an empty list)."""
    while head is not None:  # the last node's link is None, ending the walk
        print(str(head.data) + " -> ", end="")
        head = head.address_of_next_node
    # print("None")
    return


def takeInput():
    """Build a linked list from one line of space-separated integers on stdin.

    Reading stops early at the sentinel value -1 (the sentinel itself is not
    stored).  Returns the head Node, or None if the input is empty or starts
    with -1.
    """
    inputList = [int(element) for element in input().split()]
    head = None
    # Track the last node so each append is O(1); the original re-walked the
    # whole list for every element, making construction O(n^2) overall.
    tail = None
    for currentData in inputList:
        if currentData == -1:
            break
        newNode = Node(currentData)
        if head is None:
            head = newNode
        else:
            tail.address_of_next_node = newNode
        tail = newNode
    return head


# Guarded so importing this module does not block on stdin; behavior in a
# notebook or `python script.py` run is unchanged (__name__ == '__main__').
if __name__ == '__main__':
    head_of_Node = takeInput()  # input with space like : 1 2 3 4 5 6
    print_linked_list(head_of_Node)
11. Linked List-1/3.Linked_List_print.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Blueprint / starter template for a Machine Learning workflow:
# load data, split features/target, train-test split, optional scaling.
# -

# +
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Import data: features are every column except the last, the target is the
# last column.  Using the positional slice iloc[:, -1] keeps the template
# dataset-agnostic (the original hard-coded column index 3, which only worked
# for a 4-column file); .values on both keeps X and Y consistently as NumPy
# arrays.
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values

# Splitting data to test and train (80/20, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state=0)

# Feature scaling (standardising) -- uncomment when the model needs scaled inputs
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train=sc_X.fit_transform(X_train)
X_test=sc_X.transform(X_test)"""
# -
Machine Learning Template..ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .sage
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: SageMath 9.3
#     language: sage
#     name: sagemath
# ---

# +
# nbdev directive: code exported from this notebook goes into the
# definition.limit module.
# default_exp definition.limit
# -

# # definition.limit
# > Something about limits

# Cell hidden from the rendered docs; pulls in the project's markdown-embedding
# helpers and configuration.
#hide
from mathbook.utility.markdown import *
from mathbook.configs import *

# uncomment for editing.
# DESTINATION = 'notebook'
# ORIGIN = 'notebook'

# ## Limit of a function at a finite value

#export
if __name__ == '__main__':
    # Render the shared markdown definition file into this notebook.
    embed_markdown_file('definition.limit_of_a_function_at_a_finite_value.md', destination='notebook', origin='notebook')

# ## Limit of a function

# NOTE(review): the markdown below is truncated mid-sentence in this copy of
# the notebook -- confirm the full definition against the source .md file.
# Let $f: D \to \mathbb{R}$ be a [real valued function](definition.real_valued_function), and assume that $f$
nbs/definition.limit_of_a_function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Practical 1.1: Basics of Machine Learning : Diabetes with linear/polynomial regression # In this practical, we will be exploring regression in machine learning. # # Specifically, we will be trying to predict the risk of diabietes for a certain person # given attributes (also known as features) of the person such as: # - age # - sex # - BMI # # ## Imports # + from sklearn import datasets from sklearn.linear_model import LinearRegression, Ridge from sklearn.svm import SVR from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, r2_score from sklearn.preprocessing import PolynomialFeatures from sklearn.base import clone import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # - # ## Loading the data # The dataset is builtin within scikit-learn, so we load it with: diabetes = examples, diabetes_risks = print(f"{len(examples)} examples in the dataset") print(f"examples shape:{examples.shape}") print(f"no. of examples:{examples.shape[0]}") print(f"feature per example:{examples.shape[1]}") # The shape tells us that `examples` is a 2-dimensional numpy array. # This means that you can imagine that `examples` is a table 442 rows (_1st example dimension_) and 10 columns (_2nd feature dimension_). # # <div style="text-align:center"> # <b>Think of examples is a 442 by 10 table:</b> # </div> # # | example | Age | Sex | BMI | ... | # | --- | --- | --- | --- | --- | # | 0 | 16 | M | 34 | ... | # | 1 | 14 | F | 24 | ... | # | 2 | 42 | M | 124 | ... | # | ... | ... | ... | ... | ... | # # > Each row represents an example in the dataset, while each column represents a feature. 
# # # Basicaly it means that there are 442 individual examples, and that for each of the # 442 examples there are 10 features, which include: # - The features include: # - Age # - Sex # - Body mass index # - Average blood pressure # - .... # Correspondingly, the dataset also contains `diabeties_risks`. # This is the value that we are trying to predict. # Lets take a look at the first ten risks diabetes_risks[:10] # In this task, we will try to use the features (such as BMI) and use them # to predict the diabetes risk using ML model. # The prediction target, `diabetes_risks` is a continuous numeric value. # This means that this is a **regression** problem. # ## Training the model # ### Feeding the data into the model # Our intution about health suggets that the higher the persons BMI, # the more unhealthly he/she is. This might mean that they also have # a higher risk of diabetes. # # First, we need to extract the BMI from the examples. # # | example | Age | Sex | BMI | ... | # | --- | --- | --- | --- | --- | # | 0 | 16 | M | 34 | ... | # | 1 | 14 | F | 24 | ... | # | 2 | 42 | M | 124 | ... | # | ... | ... | ... | ... | ... | # # Normally, doing this using code that does not take advantage of `numpy` looks like this: # ```python # # BMI is the second feature # # List indexing starts from 0 # # Hence BMI feature indexing is 1 # bmi_idx = 1 # bmis = [] # for example in examples: # # example is a list of features # # which contains the BMI we want # bmi = example[bmi_idx] # bmis.append(bmi) # ``` # # With `numpy` all of that can be shortend into: bmis = examples[:, 2] # ### Fitting the model # Once we have extracted a feature, we can train a **model** to try and fit the data. # For the model, we will use the simplest one: `LinearRegression` is a single, straght line model. # Training the model incrediblely easy with `scikit-learn` models, # just call `.fit()` on the model. 
model = LinearRegression() bmis = bmis.reshape((442, 1)) model.fit(bmis, diabetes_risks) # > `.reshape(442, 1)` reshapes the 442 elements to 442 rows & 1 column # > This is because Scikit-learn expects data in the 'shape' [example, feature], # > a 2D array instead of a 1D array. # ### Predicting using the model # Once we have trained the model, we can obtain predictions from the model using `.predict()`: try_bmis = bmis[:3] print(f"bmi input:{try_bmis}") try_risks = model.predict(try_bmis) print(f"diabeties output:{try_risks}") # ## Evaluating the Model # We can train our model, but we do not how it performs on unseen data.\ # # # ### Cross Validation dataset split # Actually we forgot to do something before training the model: # we have to split the dataset into **train** and **validate** sets: # - we train on the training set # - we _cross validate_ or check the models performance on unseen data on the validation set. # # Using the `Scikit-Learn`'s built in `train_test_split()` function, # we can do this easily. We prefix `train_` or `valid_` to help us determine # whether they belong to the _training_ of _validation_ sets respectively: train_bmis, valid_bmis, train_risks, valid_risks = # > `train_test_split` returns 4 values, so we have 4 variables to store each of these values. # > `test_size=0.3` - sets 30% of the data as test set and the remainding 70% as train set. # > `shuffle=True` - tells it to shuffle the data before splitting # We can now proceed to train the model as usual: model = model.fit(train_bmis, train_risks) # ### Evaluting with Metrics # **Metrics** help you evaluate how we your model is doing by providing a numeric # judgement of your models performance. 
The loss function is an example of a metric:
# # We plot the learning as follows: # + def plot_learning_curve(orig_model, train_ins, train_outs, valid_ins, valid_outs): # save the scores for plotting the learning curve laterff train_scores = [] valid_scores = [] for i in np.arange(0.1, 1.1, 0.1): train_size = int(442 * i) X_train = train_ins[:train_size] X_valid = valid_ins y_train = train_outs[:train_size] y_valid = valid_outs model = clone(orig_model) model.fit(X_train, y_train) y_train_pred = model.predict(X_train) y_valid_pred = model.predict(X_valid) train_r2 = r2_score(y_train, y_train_pred) valid_r2 = r2_score(y_valid, y_valid_pred) train_scores.append(train_r2) valid_scores.append(valid_r2) plt.plot(train_scores) plt.plot(valid_scores) plt.title("Learning curve") plt.ylabel('r2 score') plt.xlabel('% of Training Size') plt.xticks(range(10), (np.arange(0.1, 1.1, 0.1) * 100).astype(int)) plt.ylim(0.0, 1.0) # limit r2 score axis to 0.0 to 1 plt.legend(['Train', 'Validation']) plot_learning_curve(LinearRegression(), train_bmis, train_risks, valid_bmis, valid_risks) # - # From the learning curve, we can infer that the model is **underfitting** as both the # training and validation $r^2$ scores are low. We learn from the lecture that ways # to combat underfitting include: # - using a more powerful model # - using more or create better features to feed into your modle # ## Iterate. Iterate. Iterate. # So far, we have only trained the model on one feature: BMI. # Since, the model is underfitting, we can try to fit the model with all the features in the dataset. # We will also use another, more powerful model: the SVM, to combat underfitting. # + # split train validation sets train_examples, valid_examples, train_risks, valid_risks = # train the model us usual model = SVR(C=9e+2, kernel='rbf', gamma=1e-1) model.fit(train_examples, train_risks) # - # ### Evaluate with Metrics # We evaluate the model with $r^2$ score metric. 
# + # training loss pred_risks = train_r2 = print(f'training r2 score: {train_r2}') # validation loss pred_risks = valid_r2 = print(f'validation r2 score : {valid_r2}') # - # ### Evaluate with Learning Curve # We evaluate our model with a learning curve plot_learning_curve(SVR(C=9e+2, kernel='rbf', gamma=1e-1), train_examples, train_risks, valid_examples, valid_risks)
practicals/day_1/practical_1_1/diabetes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import re

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# %matplotlib inline

pd.set_option('display.max_rows', None)
# -

# # Load the NetLogo run results

df = pd.read_csv("resources00e2LHS 論文の再現-table.csv", index_col=0, skiprows=6)
print(df.shape)
df.head()

# # Reshape the output

# +
def str_list_to_float_list(lst: str) -> list:
    """Parse a NetLogo-style list string (e.g. "[1 2.5 3]") into a list of floats.

    Each whitespace-separated token has surrounding brackets stripped before
    conversion.
    """
    # raw string avoids the invalid '\[' escape-sequence warning in modern Python
    return [float(re.sub(r'\[|\]', '', x)) for x in lst.split()]


def make_df(prob_resource: float, min_energy: float) -> tuple:
    """Return (title, DataFrame) for one (prob-resource, min-energy) cell of agg_df."""
    _title = 'p = {} e = {}'.format(prob_resource, min_energy)
    _df = pd.DataFrame({
        'given_energy': agg_df.loc[prob_resource, min_energy]['[given-energy] of people'],
        'correlation': agg_df.loc[prob_resource, min_energy]['[correlation] of people']
    })
    return _title, _df


# concatenate the per-run list strings, then parse them into float lists
agg_df = df.groupby(['prob-resource', 'min-energy']).agg({
    '[given-energy] of people': ' '.join,
    '[correlation] of people': ' '.join
})
agg_df['[given-energy] of people'] = agg_df['[given-energy] of people'].apply(str_list_to_float_list)
agg_df['[correlation] of people'] = agg_df['[correlation] of people'].apply(str_list_to_float_list)

df_dict = dict([make_df(x, y) for x, y in agg_df.index])
print('dfの一覧: {}'.format(df_dict.keys()))
df_dict['p = 0.5 e = 0.5'].describe()
# -

# # Visualisation

# +
fig, axes = plt.subplots(7, 7, figsize=(40, 40))
for p, e in agg_df.index:
    _label = 'p = {} e = {}'.format(p, e)
    _df = df_dict[_label]
    # map the (0.2, 0.3, ...) parameter grid onto 0-based subplot indices
    _x = round((e - 0.2) * 10)
    _y = round((p - 0.2) * 10)
    sns.kdeplot(x=_df['given_energy'], y=_df['correlation'], shade=True, ax=axes[_x, _y]).set_title(_label)
# -
Emergence and Evolution of Cooperation Under Resource Pressure/jupyter/.ipynb_checkpoints/Emergence and Evolution of Cooperation Under Resource Pressure-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Task based paralellism using Dask, a NEXRAD example. import nexradaws import tempfile import os import shutil import pyart from matplotlib import pyplot as plt from netCDF4 import num2date import numpy as np from dask_kubernetes import KubeCluster from dask.distributed import Client from dask.distributed import wait, progress import numpy as np from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() #import six # ## Start with getting the code to work on a single core.. However we want to start the cluster first so it is ready for use # Select 10 'workers' under 'manual scaling' menu below and click 'Scale' # Click on the 'Dashboard link' to monitor calculation progress cluster = KubeCluster(n_workers=100) cluster # + #Attach a client to this cluster we have spun up # - client = Client(cluster) client # ## Now we work on the single core code # + # create a temporary directory templocation = tempfile.mkdtemp() #We are using the neat little nexradaws tool to connect to Amazon S3 conn = nexradaws.NexradAwsInterface() #Get available scans for the 20th of May in 2011 scans = conn.get_avail_scans('2011', '05', '20','KTLX') # - #list scan 10 to 20 scans[10:20] #Pick a scan to test on tscan = scans[10] #Download a NEXRAD volume to the temporary directory lcn = templocation localfiles = conn.download(tscan,lcn) #Now read in this file using Py-ART from the temporary file radar = pyart.io.read(localfiles.success[0].filepath) # # Lets plot the radar file myf = plt.figure(figsize=[10,10]) myd = pyart.graph.RadarMapDisplay(radar) myd.plot_ppi_map('reflectivity', 0, vmin=-8, vmax=64) # ### Now this is the "Granule" of code which is applied to a single volume # + def simple_sum(input_dict): #We use a dictionary as i #Fetch 
data from Amazon S3 templocation = tempfile.mkdtemp() conn = nexradaws.NexradAwsInterface() scans = conn.get_avail_scans(input_dict['year'], input_dict['month'], input_dict['day'], input_dict['radar']) #What scan in the day do you want me to download? tscan = scans[input_dict['scan_number']] #dump to a local file localfiles = conn.download(tscan,templocation) #read into Py-ART this_dataset = pyart.io.read(localfiles.success[0].filepath) #Get the time so we know what time we are dealing with time_start = num2date(this_dataset.time['data'][0], this_dataset.time['units']) #Total number of radar gates tgates = float(this_dataset.ngates*this_dataset.nrays) #Get the reflectivity data zdat = this_dataset.fields['reflectivity']['data'] #Fraction of gates above three levels z0 = float(len(np.where(zdat > 0.)[0]))/tgates z10 = float(len(np.where(zdat > 10.)[0]))/tgates z40 = float(len(np.where(zdat > 40.)[0]))/tgates #Grid the dataset grids = pyart.map.grid_from_radars(this_dataset,(31,301,301), ((0.,15000.),(-150000.,150000.),(-150000.,150000.)), fields=['reflectivity'], refl_field='reflectivity', weighting_function='BARNES2') z_prof_mean = grids.fields['reflectivity']['data'].mean(axis=(1,2)) npts_over_10 = np.array([float(len(np.where(\ grids.fields['reflectivity']['data'][i, :, :] > 10.)[0]))\ for i in range(grids.fields['reflectivity']['data'].shape[0])]) #Housekeeping del this_dataset, grids #return rv = (time_start, z0, z10, z40, z_prof_mean, npts_over_10) return rv def simple_sum_error_catching(input_dict): #Wrapping our function in error catching try: rv = simple_sum(input_dict) except (TypeError, KeyError) as e: rv = 'error' return rv # - # ### Test to make sure it works on a granule base_dict = {'year':'2011', 'month':'05', 'day':'20', 'radar':'KVNX'} simple_sum(dict(base_dict, **{'scan_number' : 1})) # ### Build a list for calling the function # + n_scans_today = len(conn.get_avail_scans(base_dict['year'], base_dict['month'], base_dict['day'], 
base_dict['radar'])) call_list =[dict(base_dict, **{'scan_number' : i}) for i in range(n_scans_today)] # - # ## Map the call_list list to the function and send to the cluster to execute future = client.map(simple_sum_error_catching, call_list) # ## Monitor progress progress(future) # ## Reduce to get result my_data = client.gather(future) # ## Filter out tasks that returned an error filtered_data = [value for value in my_data if type(value) != str] times = np.array([tpl[0] for tpl in filtered_data]) z0 = np.array([tpl[1] for tpl in filtered_data]) z10 = np.array([tpl[2] for tpl in filtered_data]) z40 = np.array([tpl[3] for tpl in filtered_data]) zprof = np.array([tpl[4] for tpl in filtered_data]) npts_10 = np.array([tpl[5] for tpl in filtered_data]) my_fig = plt.figure(figsize=[10,5]) plt.plot(times, z0, '-k', label='0dBz') plt.plot(times, z10, '-b', label='10dBz') plt.plot(times, z40, '-r', label='40dBz') plt.ylabel('ngates') plt.xlabel('Time') plt.legend() fig = plt.figure(figsize=[13,8]) plt.pcolormesh(times, np.linspace(0,15,31), zprof.transpose(), cmap=pyart.graph.cm_colorblind.HomeyerRainbow) plt.colorbar(label='Mean Reflectivity Factor (dBZ)') fig = plt.figure(figsize=[13,8]) plt.pcolormesh(times, np.linspace(0,15,31), npts_10.transpose(), cmap=pyart.graph.cm_colorblind.HomeyerRainbow) plt.colorbar(label='Area (km$^2$) above 10dBZ') days = conn.get_avail_days(base_dict['year'], base_dict['month']) new_call_list = [] for day in days: n_scans_today = len(conn.get_avail_scans(base_dict['year'], base_dict['month'], day, base_dict['radar'])) call_list_today =[dict(base_dict, **{'scan_number' : i, 'day' : day}) for i in range(n_scans_today)] new_call_list.extend(call_list_today[:]) len(new_call_list) future = client.map(simple_sum_error_catching, new_call_list) progress(future) my_data = client.gather(future) filtered_data = [value for value in my_data if type(value) != str] times = np.array([tpl[0] for tpl in filtered_data]) z0 = np.array([tpl[1] for tpl in 
filtered_data]) z10 = np.array([tpl[2] for tpl in filtered_data]) z40 = np.array([tpl[3] for tpl in filtered_data]) # + my_fig = plt.figure(figsize=[10,5]) plt.plot(times, z0, '-k', label='0dBz') plt.plot(times, z10, '-b', label='10dBz') plt.plot(times, z40, '-r', label='40dBz') plt.ylabel('ngates') plt.xlabel('Time') plt.legend() # + # KubeCluster? # -
notebooks/4_Pangeo_only_Dask_NEXRAD.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Matrix method

import numpy as np

# System of linear equations to solve:
# * 18x+7y=8
# * -17x-8y=13

# Coefficient matrix (left-hand side of the system).
K = np.array([
    [18.0, 7.0],
    [-17.0, -8.0],
])
K

# Constants vector (right-hand side of the system).
L = np.array([8.0, 13.0])
L

# Solve K @ [x, y] = L without forming the inverse explicitly.
solution = np.linalg.solve(K, L)
solution

# Answer: 6.2, -14.8
mat_metod.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.2 # language: julia # name: julia-1.6 # --- # # Paralelismo en Julia # ## 1. Introducción # Julia proporciona distintos mecanismos para paralelizar código. Algunas de las estrategias y desafíos para escribir algoritmos paralelos son los siguientes: # # * Estrategias de paralelismo # * SIMD # * Multi-hilo # * Tareas # * Multiproceso # * Memoria compartida # * Memoria distribuida # * Programación de GPU # # * Desafíos de la computación paralela # * Orden de ejecución # * ejecución de fuera de orden de posibilidad # * acceso y mutación simultáneos # * Acceso y movimiento de datos # * Código de acceso y movimiento # * Adaptación adecuada de la estrategia de paralelismo a las capacidades de su máquina # * Hacer coincidir adecuadamente la estrategia de paralelismo con el problema en cuestión # ## ¿Qué es lo que está sucediendo con nuestras computadoras? # # ![](https://raw.githubusercontent.com/JuliaComputing/JuliaAcademyData.jl/master/courses/Parallel_Computing/images/40-years-processor-trend.png) # # ## Lo difícil de la computación paralela # * No pensamos en paralelo # * Aprendemos a escribir y razonar sobre programas en serie. # * El deseo de paralelismo a menudo surge _después_ de haber escrito su algoritmo (¡y lo encontró demasiado lento!) # # ## Resumen: # * Las arquitecturas computacionales actuales nos empujan hacia la programación paralela para un rendimiento máximo, ¡incluso si no estamos en un clúster! # * Pero es difícil diseñar buenos algoritmos paralelos # * Es difícil de expresar y razonar sobre esos algoritmos. # ## 2. 
SIMD: El paralelismo que puede (a veces) suceder automáticamente # # SIMD: Instrucción única, datos múltiples (Single Instruction Multiple Data) # # **Nota:** También llamado confusamente vectorización # ### Arquitectura # # En lugar de calcular cuatro sumas secuencialmente: # # \begin{align} # x_1 + y_1 &\rightarrow z_1 \\ # x_2 + y_2 &\rightarrow z_2 \\ # x_3 + y_3 &\rightarrow z_3 \\ # x_4 + y_4 &\rightarrow z_4 # \end{align} # # Procesadores modernos tienen unidades de procesamiento vectorial que pueden hacer lo anterior a la vez: # # $$ # \left(\begin{array}{cc} # x_1 \\ # x_2 \\ # x_3 \\ # x_4 # \end{array}\right) # + # \left(\begin{array}{cc} # y_1 \\ # y_2 \\ # y_3 \\ # y_4 # \end{array}\right) # \rightarrow # \left(\begin{array}{cc} # z_1 \\ # z_2 \\ # z_3 \\ # z_4 # \end{array}\right) # $$ # ### ¿Cómo se logra? using BenchmarkTools # + A = rand(100_000) function simplesum(A) result = zero(eltype(A)) for i in eachindex(A) @inbounds result += A[i] end return result end simplesum(A) # - # Como muchos lenguajes de programación modernos, Julia utiliza la verificación de límites ([_boundchecking_](https://docs.julialang.org/en/v1/devdocs/boundscheck/)) para garantizar la seguridad del programa al acceder a arreglos. # En bucles internos u otras situaciones críticas de rendimiento, es posible que se desee omitir estas comprobaciones de límites para mejorar el rendimiento en tiempo de ejecución. # # En consecuencia, Julia incluye la macro `@inbounds(...)` para decirle al compilador que omita dichas comprobaciones de límites dentro del bloque dado. @btime simplesum($A) # ¿ese tiempo es bueno? @btime sum($A) # Diseñamos una función más lenta que la suma prediseñada `sum()`, ¡y también estamos obteniendo una respuesta diferente! Veamos qué sucede con un flotante de 32 bits en lugar de uno de 64 bits. Cada elemento tiene la mitad del número de bits, por lo que también permite duplicar la longitud (para que el número total de bits procesados permanezca constante). 
A32 = rand(Float32, length(A)*2) @btime simplesum($A32) @btime sum($A32) # ¡Eso es aun peor! ¿Que está pasando aqui? # Estamos viendo múltiples diferencias en el desempeño: ¿quizás la suma incorporada de Julia está usando algún paralelismo? # # Intentemos usar SIMD nosotros mismos: function simdsum(A) result = zero(eltype(A)) @simd for i in eachindex(A) @inbounds result += A[i] end return result end @btime simdsum($A) @btime simdsum($A32) # ¿Qué hizo y por qué no siempre usamos (usa Julia pues) `@simd` para cada bucle **for** automáticamente? # # Veamos los resultados: simplesum(A), simdsum(A), sum(A) simplesum(A32), simdsum(A32), sum(A32) # ¿Por qué no son iguales? # # Sin `@simd`, Julia está haciendo _exactamente_ lo que le dijimos que hiciera: está tomando cada elemento de nuestro arreglo y lo agrega a una gran pila secuencialmente. Nuestra respuesta es más pequeña de lo que la "suma" incorporada de Julia cree que es: eso es porque, como la pila se hace más grande, comenzamos a perder las partes inferiores de cada elemento que estamos sumando, ¡y esas pequeñas pérdidas comienzan a acumularse! # # La macro `@simd` le dice a Julia que puede reorganizar las adiciones de punto flotante - # incluso si cambiara la respuesta. Dependiendo de su CPU, esto puede llevar a 2x o 4x # o incluso un paralelismo 8x. Básicamente, Julia está calculando sumas independientes para # los índices pares y los índices impares simultáneamente: # # \begin{align} # odds &\leftarrow 0 \\ # evens &\leftarrow 0 \\ # \text{loop}&\ \text{odd}\ i: \\ # &\left(\begin{array}{cc} # odds \\ # evens # \end{array}\right) # \leftarrow # \left(\begin{array}{cc} # odds \\ # evens # \end{array}\right) # + # \left(\begin{array}{cc} # x_{i} \\ # x_{i+1} # \end{array}\right) \\ # total &\leftarrow evens + odds # \end{align} # # # En muchos casos, Julia puede y sabe que un bucle for puede ser vectorizado (SIMD-ed) y aprovechará esto por defecto. 
B = rand(1:10, 100_000) @btime simplesum($B) @btime sum($B) B32 = rand(Int32(1):Int32(10), length(B)*2) @btime simplesum($B32) @btime simdsum($B32) # ¿Cómo inspeccionamos que se está vectorizando? @code_llvm simdsum(A32) # Entonces, ¿cuáles son los desafíos?: # # - El mayor obstáculo es que tienes que convencer a Julia y LLVM de que puede usar instrucciones SIMD para tu algoritmo dado. Eso no siempre es posible. # - Hay muchas limitaciones de lo que se puede y no se puede vectorizar # - Es necesario pensar en las consecuencias de reordenar su algoritmo # ## Resumen # # SIMD: # - Explota el paralelismo integrado en un procesador # - Ideal para bucles internos pequeños (y estrechos) # - A menudo ocurre automáticamente si tienes cuidado # - Sigue las [mejores prácticas de rendimiento](https://docs.julialang.org/en/v1/manual/performance-tips/) # - Usa `@inbounds` para cualquier acceso a un arreglo # - Evita ramas o llamadas a funciones # - Dependiendo del procesador y los tipos involucrados, puede producir ganancias de 2-16x con una sobrecarga extraordinariamente pequeña #
Curso Julia/Github - Material/src/Introduccion/S8-OperacionesVectorizadas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JordiVillaFreixa/ORcode/blob/main/oneDimGoldenSearch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="r_0cNcDuj74r" # # Golden Search method for optimization # # Adapted from [Geodose](https://www.geodose.com/2021/06/golden-section-search-python-application-example.html) # # + id="tBxAz4UQjlaG" import numpy as np import matplotlib.pyplot as plt from IPython.display import clear_output import time def function(x): #fx=x**4-16*(x**3)+45*(x**2)-20*x+203 fx=x**5-2*x**4-23*x**3-12*x**2+36*x return fx def check_pos(x1,x2): if x2<x1: label='right' else: label='' return label def update_interior(xl,xu): d=((np.sqrt(5)-1)/2)*(xu-xl) x1=xl+d x2=xu-d return x1,x2 #FINDING MAXIMUM FUNCTION def find_max(xl,xu,x1,x2,label): fx1=function(x1) fx2=function(x2) if fx2>fx1 and label=='right': xl=xl xu=x1 new_x=update_interior(xl,xu) x1=new_x[0] x2=new_x[1] xopt=x2 else: xl=x2 xu=xu new_x=update_interior(xl,xu) x1=new_x[0] x2=new_x[1] xopt=x1 return xl,xu,xopt,function(xopt) #FINDING MINIMUM FUNCTION def find_min(xl,xu,x1,x2,label): fx1=function(x1) fx2=function(x2) if fx2>fx1 and label=='right': xl=x2 xu=xu new_x=update_interior(xl,xu) x1=new_x[0] x2=new_x[1] xopt=x1 else: xl=xl xu=x1 new_x=update_interior(xl,xu) x1=new_x[0] x2=new_x[1] xopt=x2 return xl,xu,xopt,function(xopt) #PLOTTING FUNCTION def plot_graph(xl,xu,x1,x2): clear_output(wait=True) #plot function graph plt.plot(x,y) #plt.plot([xl,xu],[0,0],'k') #plot x1 point plt.plot(x1,function(x1),'ro',label='x1') plt.plot([x1,x1],[0,function(x1)],'k') #plot x2 point plt.plot(x2,function(x2),'bo',label='x2') 
plt.plot([x2,x2],[0,function(x2)],'k') #plot xl line plt.plot([xl,xl],[0,function(xl)]) plt.annotate('xl',xy=(xl-0.01,-0.2)) #plot xu line plt.plot([xu,xu],[0,function(xu)]) plt.annotate('xu',xy=(xu-0.01,-0.2)) #plot x1 line plt.plot([x1,x1],[0,function(x1)],'k') plt.annotate('x1',xy=(x1-0.01,-0.2)) #plot x2 line plt.plot([x2,x2],[0,function(x2)],'k') plt.annotate('x2',xy=(x2-0.01,-0.2)) #y-axis limit #plt.ylim([-1.2,1.2]) plt.show() def golden_search(xl,xu,mode,et): it=0 e=1 while e>=et: new_x=update_interior(xl,xu) x1=new_x[0] x2=new_x[1] fx1=function(x1) fx2=function(x2) label=check_pos(x1,x2) clear_output(wait=True) plot_graph(xl,xu,x1,x2) #PLOTTING plt.show() #SELECTING AND UPDATING BOUNDARY-INTERIOR POINTS if mode=='max': new_boundary=find_max(xl,xu,x1,x2,label) elif mode=='min': new_boundary=find_min(xl,xu,x1,x2,label) else: print('Please define min/max mode') break #exit if mode not min or max xl=new_boundary[0] xu=new_boundary[1] xopt=new_boundary[2] it+=1 print ('Iteration: ',it) r=(np.sqrt(5)-1)/2 #GOLDEN RATIO e=((1-r)*(abs((xu-xl)/xopt)))*100 #Error print('Error:',e,' xopt:',xopt,' yopt:',function(xopt)) time.sleep(1) #GENERATE POINT FOR SINE GRAPH #xl=2.4 #xu=14 xl=-3 xu=-1 x=np.linspace(xl,xu,100) y=function(x) #EXECUTING GOLDEN SEARCH FUNCTION golden_search(xl,xu,'max',0.05)
oneDimGoldenSearch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:aiProgram] * # language: python # name: conda-env-aiProgram-py # --- import numpy as np # + # We create a 1D ndarray that contains only integers x = np.array([1, 2, 3, 4, 5]) # We print x print() print('x = ', x) print() # We print information about x print('x has dimensions:', x.shape) print('x is an object of type:', type(x)) print('The elements in x are of type:', x.dtype) # + # We create a rank 2 ndarray that only contains integers Y = np.array([[1,2,3],[4,5,6],[7,8,9], [10,11,12]]) print('Y = \n', Y) # We print information about Y print('Y has dimensions:', Y.shape) print('Y has a total of', Y.size, 'elements') print('Y is an object of type:', type(Y)) print('The elements in Y are of type:', Y.dtype) # + # We create a rank 1 ndarray of floats but set the dtype to int64 x = np.array([1.5, 2.2, 3.7, 4.0, 5.9], dtype = np.int64) print('x = ', x) # We print the dtype x print('The elements in x are of type:', x.dtype) # + # We create a rank 1 ndarray x = np.array([1, 2, 3, 4, 5]) # We save x into the current directory as np.save('my_array', x) # + # We load the saved array from our current directory into variable y y = np.load('my_array.npy') print('y = ', y) # We print information about the ndarray we loaded print('y is an object of type:', type(y)) print('The elements in y are of type:', y.dtype) # - # ## Using Built-in Functions to Create ndarrays # + # We create a 3 x 4 ndarray full of zeros. X = np.zeros((3,4)) # X = np.ones((3,2)) # np.full(shape, constant value) print('X = \n', X) # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype) # + # We create a 5 x 5 Identity matrix. 
X = np.eye(5) print('X = \n', X) # Create a 4 x 4 diagonal matrix that contains the numbers 10,20,30, and 50 # on its main diagonal X = np.diag([10,20,30,50]) print('X = \n', X) # + # We create a rank 1 ndarray that has sequential integers from 0 to 9 x = np.arange(10) print('x = ', x) # We create a rank 1 ndarray that has evenly spaced integers from 1 to 13 in steps of 3. x = np.arange(1,14,3) print('x = ', x) # + # We create a rank 1 ndarray that has 10 integers evenly spaced between 0 and 25, # with 25 excluded. x = np.linspace(0,25,10, endpoint = False) print('x = ', x) # We create a rank 1 ndarray with 10 integers evenly spaced between 0 and 50, # with 50 excluded. We then reshape it to a 5 x 2 ndarray X = np.linspace(0,50,10, endpoint=False).reshape(5,2) print('X = \n', X) # + # We create a 3 x 2 ndarray with random integers in the half-open interval [4, 15). X = np.random.randint(4,15,size=(3,2)) print('X = \n', X) # + # We create a 1000 x 1000 ndarray of random floats drawn from normal (Gaussian) distribution # with a mean of zero and a standard deviation of 0.1. 
X = np.random.normal(0, 0.1, size=(1000,1000)) # We print X print() print('X = \n', X) print() # We print information about X print('X has dimensions:', X.shape) print('X is an object of type:', type(X)) print('The elements in X are of type:', X.dtype) print('The elements in X have a mean of:', X.mean()) print('The maximum value in X is:', X.max()) print('The minimum value in X is:', X.min()) print('X has', (X < 0).sum(), 'negative numbers') print('X has', (X > 0).sum(), 'positive numbers') # - # ## Accessing, Deleting, and Inserting Elements Into ndarrays # + # We create a rank 2 ndarray Y = np.array([[1,2,3],[4,5,6],[7,8,9]]) print('Original Y = \n', Y) # We delete the first row of y w = np.delete(Y, 0, axis=0) print('w = \n', w) # We delete the first and last column of y v = np.delete(Y, [0,2], axis=1) print('v = \n', v) # We append a new row containing 7,8,9 to y v = np.append(Y, [[7,8,9]], axis=0) print('v = \n', v) # We append a new column containing 9 and 10 to y q = np.append(Y,[[9],[10],[11]], axis=1) print('q = \n', q) # + # We create a rank 1 ndarray x = np.array([1,2]) print('x = ', x) # We create a rank 2 ndarray Y = np.array([[3,4],[5,6]]) print('Y = \n', Y) # We stack x on top of Y z = np.vstack((x,Y)) print('z = \n', z) # We stack x on the right of Y. We need to reshape x in order to stack it on the right of Y. w = np.hstack((Y,x.reshape(2,1))) print('w = \n', w) # - # ## Slicing ndarrays # # 1. ndarray[start:end] # 2. ndarray[start:] # 3. 
ndarray[:end] # + # We create a 4 x 5 ndarray that contains integers from 0 to 19 X = np.arange(20).reshape(4, 5) print('X = \n', X) # We select all the elements that are in the 2nd through 4th rows and in the 3rd to 5th columns Z = X[1:,2:5] print('Z = \n', Z) # + # create a copy of the slice using the np.copy() function Z = np.copy(X[1:4,2:5]) # create a copy of the slice using the copy as a method W = X[1:4,2:5].copy() # We change the last element in Z to 555 Z[2,2] = 555 # We change the last element in W to 444 W[2,2] = 444 print('X = \n', X) print('Z = \n', Z) print('W = \n', W) # + print('X = \n', X) # We print the elements above the main diagonal of X print('y =', np.diag(X, k=1)) print() # Create 3 x 3 ndarray with repeated values X = np.array([[1,2,3],[5,2,8],[1,2,3]]) print('X = \n', X) # We print the unique elements of X print('The unique elements in X are:',np.unique(X)) # - # ## Boolean Indexing, Set Operations, and Sorting # + # We create a 5 x 5 ndarray that contains integers from 0 to 24 X = np.arange(25).reshape(5, 5) print('Original X = \n', X) # We use Boolean indexing to select elements in X: print('The elements in X that are greater than 10:', X[X > 10]) # We use Boolean indexing to assign the elements that are between 10 and 17 the value of -1 X[(X > 10) & (X < 17)] = -1 print('X = \n', X) # + # We create a rank 1 ndarray x = np.array([1,2,3,4,5]) # We create a rank 1 ndarray y = np.array([6,7,2,8,4]) # We use set operations to compare x and y: print() print('The elements that are both in x and y:', np.intersect1d(x,y)) print('The elements that are in x that are not in y:', np.setdiff1d(x,y)) print('All the elements of x and y:',np.union1d(x,y)) # + # We create an unsorted rank 1 ndarray x = np.random.randint(1,11,size=(10,)) print('Original x = ', x) print('Sorted x (out of place):', np.sort(x)) # When we sort out of place the original array remains intact. 
To see this we print x again
print('x after function sorting:', x)

# We sort x and print the sorted array using sort as a method.
x.sort()

print('x after method sorting:', x)

# +
# We create an unsorted rank 2 ndarray
X = np.random.randint(1,11,size=(5,5))
print('Original X = \n', X)

# We sort the columns of X and print the sorted array
print('X with sorted columns :\n', np.sort(X, axis = 0))
# -

# ## Arithmetic operations and Broadcasting

# +
# We create two rank 1 ndarrays
x = np.array([1,2,3,4])
y = np.array([5.5,6.5,7.5,8.5])

print('x = ', x)
print('y = ', y)

print('multiply(x,y) = ', np.multiply(x,y))
print('POW(x,2) =',np.power(x,2)) # We raise all elements to the power of 2

# +
# We create a 2 x 3 ndarray
X = np.array([[1,2,5],
              [3,4,6]])

print('X = \n', X)

print('Standard Deviation of all elements in the columns of X:', X.std(axis=0))
print('Minimum value of all elements in the rows of X:', X.min(axis=1))

# We create a rank 1 ndarray
x = np.array([1,2,3])

# We create a 3 x 3 ndarray
Y = np.array([[1,2,3],[4,5,6],[7,8,9]])

# x is broadcast across each row of Y: the trailing dimensions match (3 == 3)
print('x + Y = \n', x + Y)
# -

# When operating on two arrays, NumPy compares their shapes element-wise. It starts with the trailing dimensions and works its way forward. Two dimensions are compatible when
#
# * they are equal, or
# * one of them is 1
#
AI_programming_with_python_udacity/numpy/Numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Accessing Data # # For this data competition, we will be using the Friday Night Lights dataset from: # # [<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Endogenous variation in ventromedial prefrontal cortex state dynamics during naturalistic viewing reflects affective experience. *Science Advances, 7(17), eabf7129*.](https://www.science.org/doi/10.1126/sciadv.abf7129) # # In this dataset, participants (n=35) watched the pilot episode of the NBC television show Friday Night Lights while undergoing fMRI. We are sharing the raw data in the BIDS format. We are also sharing data that has been preprocessed using fMRIPrep version 20.2.1. See ```derivatives/code/fmriprep.sh``` for the script used to run preprocessing. We have also performed denoising on the preprocessed data by running a univariate GLM, which included the following regressors: # # - Intercept, Linear & Quadratic Trends (3) # - Expanded Realignment Parameters (24). Derivatives, quadratics, quadratic derivatives # - Spikes based on global outliers greater than 3 SD and also based on average frame differencing # - CSF regressor (1) # # See ```derivatives/code/Denoise_Preprocessed_Data.ipynb``` file for full details of the denoising model. We have included denoised data that is unsmoothed and also has been smoothed with a 6mm FWHM Gaussian kernel. We have included nifti and hdf5 versions of the data. Nifti files can be loaded using any software. HDF5 files are much faster to load if you are using our [nltools](https://nltools.org/) toolbox for your data analyses. Note that subject ```sub-sid000496``` had bad normalization using this preprocessing pipeline, so we have not included this participant in the denoised data. 
# # We are also sharing additional data from this paper, which may be used in projects: # # - Study 1 (n=13) fMRI Data viewing episodes 1 & 2 - Available on [OpenNeuro](https://openneuro.org/datasets/ds003524). # # - Study 3 (n=20) Face Expression Data - Available on [OSF](https://osf.io/f9gyd/). We are only sharing extracted Action Unit Values. We do not have permission to share raw video data. # # - Study 4 (n=192) Emotion Ratings - Available on [OSF](https://osf.io/f9gyd/). Rating data was collected on Amazon Mechanical Turk using a custom Flask [web application](https://github.com/cosanlab/moth_app). # # - Code from the original paper is available on [Github](https://github.com/cosanlab/vmPFC_dynamics) # # # Downloading Data # # All of the data is being hosted by [OpenNeuro](https://openneuro.org/datasets/ds003521). The entire dataset is about 202gb and can be downloaded directly from the website and also using the AWS command line interface. See the OpenNeuro [instructions](https://openneuro.org/datasets/ds003521/download) for downloading. However, as most people have limited storage space, we recommend using Datalad to only download the files you want to work with. We have included a brief overview of working with Datalad from the command line and within Python below. # # ## DataLad # # We encourage participants to download the data using [DataLad](https://www.datalad.org/), which is an open source version control system for data built on top of [git-annex](https://git-annex.branchable.com/). Think of it like git for data. It provides a handy command line interface for downloading data, tracking changes, and sharing it with others. # # While DataLad offers a number of useful features for working with datasets, there are three in particular that we think make it worth the effort to install for this competition. 
# # 1) Cloning a DataLad Repository can be completed with a single line of code `datalad install <repository>` and provides the full directory structure in the form of symbolic links. This allows you to explore all of the files in the dataset, without having to download the entire dataset at once. # # 2) Specific files can be easily downloaded using `datalad get <filename>`, and files can be removed from your computer at any time using `datalad drop <filename>`. As the dataset is relatively large, this will allow you to only work with the data that you need and you can drop the rest when you are done with it. # # 3) All of the DataLad commands can be run within Python using the datalad [python api](http://docs.datalad.org/en/latest/modref.html). # # We will only be covering a few basic DataLad functions to get and drop data. We encourage the interested reader to read the very comprehensive DataLad [User Handbook](http://handbook.datalad.org/en/latest/) for more details and troubleshooting. # # ### Installing Datalad # # DataLad can be easily installed using [pip](https://pip.pypa.io/en/stable/). # # `pip install datalad` # # Unfortunately, it currently requires manually installing the [git-annex](https://git-annex.branchable.com/) dependency, which is not automatically installed using pip. # # If you are using OSX, we recommend installing git-annex using [homebrew](https://brew.sh/) package manager. # # `brew install git-annex` # # If you are on Debian/Ubuntu we recommend enabling the [NeuroDebian](http://neuro.debian.net/) repository and installing with apt-get. # # `sudo apt-get install datalad` # # For more installation options, we recommend reading the DataLad [installation instructions](https://git-annex.branchable.com/). # # !pip install datalad # ### Download Data with DataLad # # The FNL dataset can be accessed at the following location https://openneuro.org/datasets/ds003521. 
To download the dataset run `datalad install https://github.com/OpenNeuroDatasets/ds003521.git` in a terminal in the location where you would like to install the dataset. Don't forget to change the directory to a folder on your local computer. The full dataset is approximately 202gb. # # You can run this from the notebook using the `!` cell magic. # + import os download_path = '/Users/lukechang/Downloads/FridayNightLights' if not os.path.exists(download_path): os.makedirs(download_path) # %cd {download_path} # !datalad install https://github.com/OpenNeuroDatasets/ds003521.git # - # ## Datalad Basics # # You might be surprised to find that after cloning the dataset that it barely takes up any space using `du -sh`. This is because cloning only downloads the metadata of the dataset to see what files are included. # # You can check to see how big the entire dataset would be if you downloaded everything using `datalad status`. # + # %cd ~/Downloads/FridayNightLights/ds003521 # !datalad status --annex # - # ### Getting Data # One of the really nice features of datalad is that you can see all of the data without actually storing it on your computer. When you want a specific file you use `datalad get <filename>` to download that specific file. Importantly, you do not need to download all of the data at once, only when you need it. # # Now that we have cloned the repository we can grab individual files. For example, suppose we wanted to grab the first subject's confound regressors generated by fmriprep. # !datalad get participants.tsv # Now we can check and see how much of the total dataset we have downloaded using `datalad status` # !datalad status --annex all # If you would like to download all of the files you can use `datalad get .`. Depending on the size of the dataset and the speed of your internet connection, this might take awhile. 
One really nice thing about datalad is that if your connection is interrupted you can simply run `datalad get .` again, and it will resume where it left off. # # You can also install the dataset and download all of the files with a single command `datalad install -g https://github.com/OpenNeuroDatasets/ds003521.git`. You may want to do this if you have a lot of storage available and a fast internet connection. For most people, we recommend only downloading the files you need for your project. # ### Dropping Data # Most people do not have unlimited space on their hard drives and are constantly looking for ways to free up space when they are no longer actively working with files. Any file in a dataset can be removed using `datalad drop`. Importantly, this does not delete the file, but rather removes it from your computer. You will still be able to see file metadata after it has been dropped in case you want to download it again in the future. # # As an example, let's drop the `participants.tsv` file. # !datalad drop participants.tsv # ## Datalad has a Python API # One particularly nice aspect of datalad is that it has a Python API, which means that anything you would like to do with datalad in the commandline, can also be run in Python. See the details of the datalad [Python API](http://docs.datalad.org/en/latest/modref.html). # # For example, suppose you would like to clone a data repository, such as the Localizer dataset. You can run `dl.clone(source=url, path=location)`. Make sure you set `download_path` to the location where you would like the Localizer repository installed. # # Also, note that datalad uses asyncio, which can lead to `RuntimeError: Cannot run the event loop while another loop is running` errors. You may run into this issue if you are running the code within a jupyter notebook. 
See the [datalad](https://handbook.datalad.org/en/latest/basics/101-135-help.html#asyncio-errors-at-datalad-import) documentation for more details on workarounds using `nest_asyncio`. # # + import os import glob import datalad.api as dl import pandas as pd download_path = '/Users/lukechang/Downloads/FridayNightLights' dl.clone(source='https://github.com/OpenNeuroDatasets/ds003521.git', path=download_path) # - # We can now create a dataset instance using `dl.Dataset(path_to_data)`. ds = dl.Dataset(download_path) # How much of the dataset have we downloaded? We can check the status of the annex using `ds.status(annex='all')`. results = ds.status(annex='all') # Looks like it's empty, which makes sense since we only cloned the dataset. # # Now we need to get some data. Let's start with something small to play with first. # # Let's use `glob` to find all of the tab-delimited confound data generated by fmriprep. file_list = glob.glob(os.path.join(download_path, 'derivatives', 'fmriprep', '*', 'func', '*tsv')) file_list.sort() file_list[:10] # glob can search the filetree and see all of the relevant data even though none of it has been downloaded yet. # # Let's now download the first subjects confound regressor file and load it using pandas. # + result = ds.get(file_list[0]) confounds = pd.read_csv(file_list[0], sep='\t') confounds.head() # - # What if we wanted to drop that file? Just like the CLI, we can use `ds.drop(file_name)`. result = ds.drop(file_list[0]) # To confirm that it is actually removed, let's try to load it again with pandas. confounds = pd.read_csv(file_list[0], sep='\t') # Looks like it was successfully removed. # # We can also load the entire dataset in one command if want using `ds.get(dataset='.', recursive=True)`. We are not going to do it right now as this will take awhile and require lots of free hard disk space. # # Let's actually download one of the files we will be using in the tutorial. 
First, let's use glob to get a list of all of the functional data that has been preprocessed by fmriprep, denoised, and smoothed. file_list = glob.glob(os.path.join(download_path, 'derivatives', 'denoised', 'smoothed', '*_task-movie_run-1_space-MNI152NLin2009cAsym_desc-preproc_trim_smooth6_denoised_bold.hdf5')) file_list.sort() file_list file_list = glob.glob(os.path.join(download_path, 'derivatives', 'denoised', 'smoothed', '*_task-movie_run-1_space-MNI152NLin2009cAsym_desc-preproc_trim_smooth6_denoised_bold.nii.gz')) file_list.sort() file_list # Now let's download the first subject's file using `ds.get()`. This file is 825mb, so this might take a few minutes depending on your internet speed. result = ds.get(file_list[0]) # How much of the dataset have we downloaded? We can check the status of the annex using `ds.status(annex='all')`. result = ds.status(annex='all')
content/Download_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="J8y9ZkLXmAZc" # ##### Copyright 2019 The TensorFlow Authors. # + id="l7u9xMl3UeOX" colab_type="code" cellView="form" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="8hTYmggsUAFZ" colab_type="text" # # TensorFlow 数据集 # # TensorFlow Datasets 提供了一系列可以和 TensorFlow 配合使用的数据集。它负责下载和准备数据,以及构建 [`tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset)。 # + [markdown] colab_type="text" id="OGw9EgE0tC0C" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.google.cn/datasets/overview"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />在 tensorflow.google.cn 上查看</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/datasets/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />在 Google Colab 运行</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/datasets/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />在 Github 上查看源代码</a> # </td> # <td> # <a 
href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/datasets/overview.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载 notebook</a> # </td> # </table> # + [markdown] id="y8lL6RLLUtCO" colab_type="text" # Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的 # [官方英文文档](https://www.tensorflow.org/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到 # [tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入 # [<EMAIL> Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。 # + [markdown] id="tJc3DDLEUAFf" colab_type="text" # ## 安装 # # `pip install tensorflow-datasets` # # 注意使用 `tensorflow-datasets` 的前提是已经安装好 TensorFlow,目前支持的版本是 `tensorflow` (或者 `tensorflow-gpu`) >= `1.15.0` # + cellView="both" colab_type="code" id="boeZp0sYbO41" colab={} # !pip install -q tensorflow tensorflow-datasets matplotlib # + id="vcwlX1pgVtbV" colab_type="code" colab={} try: # # %tensorflow_version only exists in Colab. 
# %tensorflow_version 2.x except Exception: pass import tensorflow as tf # + colab_type="code" id="TTBSvHcSLBzc" colab={} from __future__ import absolute_import, division, print_function, unicode_literals import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import tensorflow_datasets as tfds # + [markdown] id="PJRFn4v-UAFl" colab_type="text" # ## Eager execution # # TensorFlow 数据集同时兼容 TensorFlow [Eager 模式](https://tensorflow.google.cn/guide/eager) 和图模式。在这个 colab 环境里面,我们的代码将通过 Eager 模式执行。 # + colab_type="code" id="o9H2EiXzfNgO" colab={} tf.compat.v1.enable_eager_execution() # + [markdown] id="mXICnLE4UAFq" colab_type="text" # ## 列出可用的数据集 # # 每一个数据集(dataset)都实现了抽象基类 [`tfds.core.DatasetBuilder`](https://www.tensorflow.google.cn/datasets/api_docs/python/tfds/core/DatasetBuilder) 来构建。你可以通过 `tfds.list_builders()` 列出所有可用的数据集。 # # 你可以通过 [数据集文档页](https://www.tensorflow.google.cn/datasets/catalog/overview) 查看所有支持的数据集及其补充文档。 # + colab_type="code" id="FAvbSVzjLCIb" colab={} tfds.list_builders() # + [markdown] id="JWFm6VtdUAFt" colab_type="text" # ## `tfds.load`: 一行代码获取数据集 # # [`tfds.load`](https://www.tensorflow.google.cn/datasets/api_docs/python/tfds/load) 是构建并加载 `tf.data.Dataset` 最简单的方式。 # # `tf.data.Dataset` 是构建输入流水线的标准 TensorFlow 接口。如果你对这个接口不熟悉,我们强烈建议你阅读 [TensorFlow 官方指南](https://www.tensorflow.google.cn/guide/datasets)。 # # 下面,我们先加载 MNIST 训练数据。这个步骤会下载并准备好该数据,除非你显式指定 `download=False`。值得注意的是,一旦该数据准备好了,后续的 `load` 命令便不会重新下载,可以重复使用准备好的数据。 # 你可以通过指定 `data_dir=` (默认是 `~/tensorflow_datasets/`) 来自定义数据保存/加载的路径。 # + colab_type="code" id="dCou80mnLLPV" colab={} mnist_train = tfds.load(name="mnist", split="train") assert isinstance(mnist_train, tf.data.Dataset) print(mnist_train) # + [markdown] id="xdICgkWrUAFw" colab_type="text" # 加载数据集时,将使用规范的默认版本。 但是,建议你指定要使用的数据集的主版本,并在结果中表明使用了哪个版本的数据集。更多详情请参考[数据集版本控制文档](https://github.com/tensorflow/datasets/blob/master/docs/)。 # + colab_type="code" id="OCXCz-vhj0kE" colab={} mnist = tfds.load("mnist:1.*.*") # + [markdown] 
id="qyhTG75PUAFy" colab_type="text" # ## 特征字典 # # 所有 `tfds` 数据集都包含将特征名称映射到 Tensor 值的特征字典。 典型的数据集(如 MNIST)将具有2个键:`"image"` 和 `"label"`。 下面我们看一个例子。 # # 注意:在图模式(graph mode)下,请参阅 [tf.data 指南](https://www.tensorflow.google.cn/guide/datasets#creating_an_iterator) 以了解如何在 `tf.data.Dataset` 上进行迭代。 # + colab_type="code" id="YHE21nkHLrER" colab={} for mnist_example in mnist_train.take(1): # 只取一个样本 image, label = mnist_example["image"], mnist_example["label"] plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap("gray")) print("Label: %d" % label.numpy()) # + [markdown] id="Ee5rTx4IUAF1" colab_type="text" # ## `DatasetBuilder` # # `tfds.load` 实际上是一个基于 `DatasetBuilder` 的简单方便的包装器。我们可以直接使用 MNIST `DatasetBuilder` 实现与上述相同的操作。 # + colab_type="code" id="9FDDJXmBhpQ4" colab={} mnist_builder = tfds.builder("mnist") mnist_builder.download_and_prepare() mnist_train = mnist_builder.as_dataset(split="train") mnist_train # + [markdown] id="TaKrseRkUAF4" colab_type="text" # ## 输入流水线 # # 一旦有了 `tf.data.Dataset` 对象,就可以使用 [`tf.data` 接口](https://www.tensorflow.google.cn/guide/datasets) 定义适合模型训练的输入流水线的其余部分。 # # 在这里,我们将重复使用数据集,以便有无限的样本流,然后随机打乱并创建大小为32的批。 # + colab_type="code" id="9OQZqGZMlSE8" colab={} mnist_train = mnist_train.repeat().shuffle(1024).batch(32) # prefetch 将使输入流水线可以在模型训练时异步获取批处理。 mnist_train = mnist_train.prefetch(tf.data.experimental.AUTOTUNE) # 现在你可以遍历数据集的批次并在 mnist_train 中训练批次: # ... 
# + [markdown] id="4OT8i4qhUAF7" colab_type="text" # ## 数据集信息(DatasetInfo) # # 生成后,构建器将包含有关数据集的有用信息: # + colab_type="code" id="mSamfFznA9Ph" colab={} info = mnist_builder.info print(info) # + [markdown] id="MVMLnarBUAF-" colab_type="text" # `DatasetInfo` 还包含特征相关的有用信息: # + colab_type="code" id="u1wL14QH2TW1" colab={} print(info.features) print(info.features["label"].num_classes) print(info.features["label"].names) # + [markdown] id="YTiE3Ve9UAGB" colab_type="text" # 你也可以通过 `tfds.load` 使用 `with_info = True` 加载 `DatasetInfo`。 # + colab_type="code" id="tvZYujQwBL7B" colab={} mnist_test, info = tfds.load("mnist", split="test", with_info=True) print(info) # + [markdown] id="skNcP7LnUAGE" colab_type="text" # ## 可视化 # # 对于图像分类数据集,你可以使用 `tfds.show_examples` 来可视化一些样本。 # + colab_type="code" id="DpE2FD56cSQR" colab={} fig = tfds.show_examples(info, mnist_test)
site/zh-cn/datasets/overview.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.5.0
#     language: julia
#     name: julia-0.5
# ---

# # MNIST Demo
# ___

# ### Load package
# Also remember to start your Julia session with multiple threads to speed up learning. Here I'm using 4.

using Alice

Base.Threads.nthreads()

# ### Load the data
#
# The MNIST database is a collection of handwritten digits and labels from a larger collection originally created by NIST (National Institute of Standards and Technology). It contains characters written by American Census Bureau employees and high school students.
#
# The common set that everyone uses now for ML testing was created by <NAME>Cun (http://yann.lecun.com/exdb/mnist/). And there is a Julia package that makes it easy to access the data.

using MNIST

# #### Visualising the data

# Tile a random 16 x 32 grid of training digits into a single grayscale image.
# `flipink = true` renders white digits on a black background instead.
# NOTE(review): uses Julia 0.5 syntax (`Array(Array{Float64, 2}, 0)`), matching
# the kernel declared in the header above — do not "modernise" without retesting.
function display_mnist(; flipink = false)
    rows, cols = 16, 32
    num_images = rows * cols
    selection = rand(1:60000, num_images)   # Random selection of image indices
    images = Array(Array{Float64, 2}, 0)    # Initialise blank outer array to contain image arrays
    img = ones(30, 30)                      # Create blank canvas with some padding
    for i in selection
        img[2:29, 2:29] = trainfeatures(i) ./ 255   # Reshape each image as a matrix and place in canvas
        push!(images, copy(img))                    # Push single image into images vector
    end
    images = hvcat(cols, images...)         # Concatenate individual image matrices into a large matrix
    # Display image with either white or black ink with opposite background
    flipink ? Gray.(1.0 - images) : Gray.(images)
end;

display_mnist()

display_mnist(flipink = true)

# #### Training, validation and test data sets
# The original data has two separate sets - 60,000 training and 10,000 test observations. Here we'll train on the first 50,000 in the original training set and use the other 10,000 for cross validation. The test set can stay as it is.
# # The only pre-processing will be to scale the pixel inputs to the range [0, 1], reshape each image into a 28x28 array (in the provided set each image is a single column), and change the output label 0 to 10. # + # Download training images and labels from MNIST package train_images, train_labels = traindata() test_images, test_labels = testdata() # Rescale and reshape train_images = train_images ./ 255 train_images = reshape(train_images, 28, 28, 60000) test_images = test_images ./ 255 test_images = reshape(test_images, 28, 28, 10000) # Convert target to integer (from float) type and swap label 0 for 10 train_labels = Int.(train_labels) train_labels[train_labels .== 0] = 10 test_labels = Int.(test_labels) test_labels[test_labels .== 0] = 10 # Split training set into 50,000 training and 10,000 validation images # Images val_images = train_images[:, :, 50001:60000] train_images = train_images[:, :, 1:50000] # Labels val_labels = train_labels[50001:60000] train_labels = train_labels[1:50000]; # - # ### Build and train models # #### Net 1 - Shallow and narrow feed forward neural network # Let's get a benchmark result with a feedforward neural network (only fully connected hidden layers) with 2 hidden layers each with 50 neurons. 
# + # Set seed to be able to replicate srand(123) # Data Box and Input Layer databox = Data(train_images, train_labels, val_images, val_labels) batch_size = 128 input = InputLayer(databox, batch_size) # Fully connected hidden layers dim = 30 fc1 = FullyConnectedLayer(size(input), dim, activation = :tanh) fc2 = FullyConnectedLayer(size(fc1), dim, activation = :tanh) # Softmax Output Layer num_classes = 10 output = SoftmaxOutputLayer(databox, size(fc2), num_classes) # Model λ = 1e-3 # Regularisation net = NeuralNet(databox, [input, fc1, fc2, output], λ, regularisation=:L2) # + # Training parameters num_epochs = 40 # number of epochs α = 1e-2 # learning rate μ = 0.9 # momentum param / viscosity # Train train(net, num_epochs, α, μ, nesterov=true, shuffle=true, last_train_every=2, full_train_every=10, val_every=10) # - # PLot loss curves Gadfly.set_default_plot_size(24cm, 12cm) plot_loss_history(net, 2, 10, 10) # #### Net 2 - Convolutional neural network # Next let's train a convolutional neural network with configuration: # input -> convolution layer -> max pool layer -> fully connected layer -> softmax output layer # + # Set seed to be able to replicate srand(123) # Data Box and Input Layer databox = Data(train_images, train_labels, val_images, val_labels, test_images, test_labels) batch_size = 128 input = InputLayer(databox, batch_size) # Convolution Layer patch_dim = 5 num_patches = 20 patch_dims = (patch_dim, patch_dim, num_patches) conv1 = ConvolutionLayer(size(input), patch_dims, activation = :relu) # Pooling Layer stride = 2 pool1 = MaxPoolLayer(size(conv1), stride) # Fully Connected Layer dim = 100 fc1 = FullyConnectedLayer(size(pool1), dim, activation = :relu) # Softmax Output Layer num_classes = 10 output = SoftmaxOutputLayer(databox, size(fc1), num_classes) # Model λ = 1e-3 # Regularisation net = NeuralNet(databox, [input, conv1, pool1, fc1, output], λ, regularisation=:L2) # + # Training parameters num_epochs = 40 # number of epochs α = 1e-2 # learning rate 
μ = 0.9 # momentum param / viscosity # Training train(net, num_epochs, α, μ, nesterov=true, shuffle=true, last_train_every=2, full_train_every=10, val_every=10) # - plot_loss_history(net, 2, 10, 10) # #### Net 3 - Deeper convolutional neural network # Next let's train a deeper convolutional neural network with configuration: # input -> convolution layer -> max pool layer -> convolution layer -> max pool layer -> fully connected layer -> softmax output layer # + # Set seed to be able to replicate srand(123) # Data Box and Input Layer databox = Data(train_images, train_labels, val_images, val_labels, test_images, test_labels) batch_size = 128 input = InputLayer(databox, batch_size) # Convolution Layer 1 patch_dim = 5 num_patches1 = 20 patch_dims = (patch_dim, patch_dim, num_patches1) conv1 = ConvolutionLayer(size(input), patch_dims, activation = :relu) # Pooling Layer 1 stride = 2 pool1 = MaxPoolLayer(size(conv1), stride) # Convolution Layer 2 patch_dim = 5 num_patches2 = 40 patch_dims = (patch_dim, patch_dim, num_patches1, num_patches2) conv2 = ConvolutionLayer(size(pool1), patch_dims, activation = :relu) # Pooling Layer 2 stride = 2 pool2 = MaxPoolLayer(size(conv2), stride) # Fully Connected Layer dim = 100 fc1 = FullyConnectedLayer(size(pool2), dim, activation = :relu) # Softmax Output Layer num_classes = 10 output = SoftmaxOutputLayer(databox, size(fc1), num_classes) # Model λ = 1e-3 # Regularisation net = NeuralNet(databox, [input, conv1, pool1, conv2, pool2, fc1, output], λ, regularisation=:L2) # + # Training parameters num_epochs = 20 # number of epochs α = 1e-2 # learning rate μ = 0.9 # momentum param / viscosity # Training train(net, num_epochs, α, μ, nesterov=true, shuffle=true, last_train_every=2, full_train_every=10, val_every=10) # - Gadfly.set_default_plot_size(24cm, 12cm) plot_loss_history(net, 2, 10, 10) # ### Best test set accuracy # It should be possible to get to around 99.2% with a neural network with 2 convolution and pooling layers. 
This net isn't quite there so it'll be worth fine-tuning to get over 99%.

# Evaluate the trained network on the held-out 10,000-image test set.
accuracy(net, test_images, test_labels)
demo/mnist/Demo_MNIST_28x28.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Video Player and Recorder with SSD Object Detection # # Configuration # - configure `video_config.json` # - to set ssd weight file, classes and confidence # - like below # # ```json # { # "ssd_weights_path":"weights_SSD300.hdf5", # "classes":["Aeroplane", "Bicycle", "Bird", "Boat", "Bottle", # "Bus", "Car", "Cat", "Chair", "Cow", "Diningtable", # "Dog", "Horse","Motorbike", "Person", "Pottedplant", # "Sheep", "Sofa", "Train", "Tvmonitor"], # "confidence":0.6 # } # ``` # # How to use Video Player # - Video # # ``` # player = VideoPlayer(source_path, verbose) # player.play(start, step, last) # ``` # # - WebCam # # ``` # player = VideoPlayer(0) # player.play() # ``` from video_utils import VideoPlayer # webcam mode file_path = 0 player = VideoPlayer(file_path, verbose=0) player.play() # # How to use Video Recorder # - Video # # ``` # recorder = VideoRecorder(source_path, verbose) # recorder.record(target_path, start, step, last) # ``` # # - WebCam # # ``` # recorder = VideoRecorder(0, verbose) # recorder.record(target_path) # ``` from video_utils import VideoRecorder # webcam mode file_path = 0 recorder = VideoRecorder(file_path, verbose=0) recorder.record('out.avi')
SSD_video_player.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" # # Review Classification using Active Learning # # **Author:** [<NAME>](https://twitter.com/getdarshan)<br> # **Date created:** 2021/10/29<br> # **Last modified:** 2021/10/29<br> # **Description:** Demonstrating the advantages of active learning through review classification. # + [markdown] colab_type="text" # ## Introduction # # With the growth of data-centric Machine Learning, Active Learning has grown in popularity # amongst businesses and researchers. Active Learning seeks to progressively # train ML models so that the resultant model requires lesser amount of training data to # achieve competitive scores. # # The structure of an Active Learning pipeline involves a classifier and an oracle. The # oracle is an annotator that cleans, selects, labels the data, and feeds it to the model # when required. The oracle is a trained individual or a group of individuals that # ensure consistency in labeling of new data. # # The process starts with annotating a small subset of the full dataset and training an # initial model. The best model checkpoint is saved and then tested on a balanced test # set. The test set must be carefully sampled because the full training process will be # dependent on it. Once we have the initial evaluation scores, the oracle is tasked with # labeling more samples; the number of data points to be sampled is usually determined by # the business requirements. After that, the newly sampled data is added to the training # set, and the training procedure repeats. This cycle continues until either an # acceptable score is reached or some other business metric is met. 
# # This tutorial provides a basic demonstration of how Active Learning works by # demonstrating a ratio-based (least confidence) sampling strategy that results in lower # overall false positive and negative rates when compared to a model trained on the entire # dataset. This sampling falls under the domain of *uncertanity sampling*, in which new # datasets are sampled based on the uncertanity that the model outputs for the # corresponding label. In our example, we compare our model's false positive and false # negative rates and annotate the new data based on their ratio. # # Some other sampling techniques include: # # 1. [Committee sampling](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers): # Using multiple models to vote for the best data points to be sampled # 2. [Entropy reduction](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers): # Sampling according to an entropy threshold, selecting more of the samples that produce the highest entropy score. # 3. [Minimum margin based sampling](https://arxiv.org/abs/1906.00025v1): # Selects data points closest to the decision boundary # + [markdown] colab_type="text" # ## Importing required libraries # + colab_type="code" import tensorflow_datasets as tfds import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import matplotlib.pyplot as plt import re import string tfds.disable_progress_bar() # + [markdown] colab_type="text" # ## Loading and preprocessing the data # # We will be using the IMDB reviews dataset for our experiments. This dataset has 50,000 # reviews in total, including training and testing splits. We will merge these splits and # sample our own, balanced training, validation and testing sets. 
# + colab_type="code" dataset = tfds.load( "imdb_reviews", split="train + test", as_supervised=True, batch_size=-1, shuffle_files=False, ) reviews, labels = tfds.as_numpy(dataset) print("Total examples:", reviews.shape[0]) # + [markdown] colab_type="text" # Active learning starts with labeling a subset of data. # For the ratio sampling technique that we will be using, we will need well-balanced training, # validation and testing splits. # + colab_type="code" val_split = 2500 test_split = 2500 train_split = 7500 # Separating the negative and positive samples for manual stratification x_positives, y_positives = reviews[labels == 1], labels[labels == 1] x_negatives, y_negatives = reviews[labels == 0], labels[labels == 0] # Creating training, validation and testing splits x_val, y_val = ( tf.concat((x_positives[:val_split], x_negatives[:val_split]), 0), tf.concat((y_positives[:val_split], y_negatives[:val_split]), 0), ) x_test, y_test = ( tf.concat( ( x_positives[val_split : val_split + test_split], x_negatives[val_split : val_split + test_split], ), 0, ), tf.concat( ( y_positives[val_split : val_split + test_split], y_negatives[val_split : val_split + test_split], ), 0, ), ) x_train, y_train = ( tf.concat( ( x_positives[val_split + test_split : val_split + test_split + train_split], x_negatives[val_split + test_split : val_split + test_split + train_split], ), 0, ), tf.concat( ( y_positives[val_split + test_split : val_split + test_split + train_split], y_negatives[val_split + test_split : val_split + test_split + train_split], ), 0, ), ) # Remaining pool of samples are stored separately. 
These are only labeled as and when required x_pool_positives, y_pool_positives = ( x_positives[val_split + test_split + train_split :], y_positives[val_split + test_split + train_split :], ) x_pool_negatives, y_pool_negatives = ( x_negatives[val_split + test_split + train_split :], y_negatives[val_split + test_split + train_split :], ) # Creating TF Datasets for faster prefetching and parallelization train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)) test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) pool_negatives = tf.data.Dataset.from_tensor_slices( (x_pool_negatives, y_pool_negatives) ) pool_positives = tf.data.Dataset.from_tensor_slices( (x_pool_positives, y_pool_positives) ) print(f"Initial training set size: {len(train_dataset)}") print(f"Validation set size: {len(val_dataset)}") print(f"Testing set size: {len(test_dataset)}") print(f"Unlabeled negative pool: {len(pool_negatives)}") print(f"Unlabeled positive pool: {len(pool_positives)}") # + [markdown] colab_type="text" # ### Fitting the `TextVectorization` layer # # Since we are working with text data, we will need to encode the text strings as vectors which # would then be passed through an `Embedding` layer. To make this tokenization process # faster, we use the `map()` function with its parallelization functionality. 
# + colab_type="code" def custom_standardization(input_data): lowercase = tf.strings.lower(input_data) stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ") return tf.strings.regex_replace( stripped_html, f"[{re.escape(string.punctuation)}]", "" ) vectorizer = layers.TextVectorization( 3000, standardize=custom_standardization, output_sequence_length=150 ) # Adapting the dataset vectorizer.adapt( train_dataset.map(lambda x, y: x, num_parallel_calls=tf.data.AUTOTUNE).batch(256) ) def vectorize_text(text, label): text = vectorizer(text) return text, label train_dataset = train_dataset.map( vectorize_text, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) pool_negatives = pool_negatives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE) pool_positives = pool_positives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE) val_dataset = val_dataset.batch(256).map( vectorize_text, num_parallel_calls=tf.data.AUTOTUNE ) test_dataset = test_dataset.batch(256).map( vectorize_text, num_parallel_calls=tf.data.AUTOTUNE ) # + [markdown] colab_type="text" # ## Creating Helper Functions # + colab_type="code" # Helper function for merging new history objects with older ones def append_history(losses, val_losses, accuracy, val_accuracy, history): losses = losses + history.history["loss"] val_losses = val_losses + history.history["val_loss"] accuracy = accuracy + history.history["binary_accuracy"] val_accuracy = val_accuracy + history.history["val_binary_accuracy"] return losses, val_losses, accuracy, val_accuracy # Plotter function def plot_history(losses, val_losses, accuracies, val_accuracies): plt.plot(losses) plt.plot(val_losses) plt.legend(["train_loss", "val_loss"]) plt.xlabel("Epochs") plt.ylabel("Loss") plt.show() plt.plot(accuracies) plt.plot(val_accuracies) plt.legend(["train_accuracy", "val_accuracy"]) plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.show() # + [markdown] colab_type="text" # ## Creating the Model # # We create a small 
bidirectional LSTM model. When using Active Learning, you should make sure # that the model architecture is capable of overfitting to the initial data. # Overfitting gives a strong hint that the model will have enough capacity for # future, unseen data. # + colab_type="code" def create_model(): model = keras.models.Sequential( [ layers.Input(shape=(150,)), layers.Embedding(input_dim=3000, output_dim=128), layers.Bidirectional(layers.LSTM(32, return_sequences=True)), layers.GlobalMaxPool1D(), layers.Dense(20, activation="relu"), layers.Dropout(0.5), layers.Dense(1, activation="sigmoid"), ] ) model.summary() return model # + [markdown] colab_type="text" # ## Training on the entire dataset # # To show the effectiveness of Active Learning, we will first train the model on the entire # dataset containing 40,000 labeled samples. This model will be used for comparison later. # + colab_type="code" def train_full_model(full_train_dataset, val_dataset, test_dataset): model = create_model() model.compile( loss="binary_crossentropy", optimizer="rmsprop", metrics=[ keras.metrics.BinaryAccuracy(), keras.metrics.FalseNegatives(), keras.metrics.FalsePositives(), ], ) # We will save the best model at every epoch and load the best one for evaluation on the test set history = model.fit( full_train_dataset.batch(256), epochs=20, validation_data=val_dataset, callbacks=[ keras.callbacks.EarlyStopping(patience=4, verbose=1), keras.callbacks.ModelCheckpoint( "FullModelCheckpoint.h5", verbose=1, save_best_only=True ), ], ) # Plot history plot_history( history.history["loss"], history.history["val_loss"], history.history["binary_accuracy"], history.history["val_binary_accuracy"], ) # Loading the best checkpoint model = keras.models.load_model("FullModelCheckpoint.h5") print("-" * 100) print( "Test set evaluation: ", model.evaluate(test_dataset, verbose=0, return_dict=True), ) print("-" * 100) return model # Sampling the full train dataset to train on full_train_dataset = ( 
train_dataset.concatenate(pool_positives) .concatenate(pool_negatives) .cache() .shuffle(20000) ) # Training the full model full_dataset_model = train_full_model(full_train_dataset, val_dataset, test_dataset) # + [markdown] colab_type="text" # ## Training via Active Learning # # The general process we follow when performing Active Learning is demonstrated below: # # ![Active Learning](https://i.imgur.com/dmNKusp.png) # # The pipeline can be summarized in five parts: # # 1. Sample and annotate a small, balanced training dataset # 2. Train the model on this small subset # 3. Evaluate the model on a balanced testing set # 4. If the model satisfies the business criteria, deploy it in a real time setting # 5. If it doesn't pass the criteria, sample a few more samples according to the ratio of # false positives and negatives, add them to the training set and repeat from step 2 till # the model passes the tests or till all available data is exhausted. # # For the code below, we will perform sampling using the following formula:<br/> # # ![Ratio Sampling](https://i.imgur.com/LyZEiZL.png) # # Active Learning techniques use callbacks extensively for progress tracking. We will be # using model checkpointing and early stopping for this example. The `patience` parameter # for Early Stopping can help minimize overfitting and the time required. We have set it # `patience=4` for now but since the model is robust, we can increase the patience level if # desired. # # Note: We are not loading the checkpoint after the first training iteration. In my # experience working on Active Learning techniques, this helps the model probe the # newly formed loss landscape. Even if the model fails to improve in the second iteration, # we will still gain insight about the possible future false positive and negative rates. # This will help us sample a better set in the next iteration where the model will have a # greater chance to improve. 
# + colab_type="code" def train_active_learning_models( train_dataset, pool_negatives, pool_positives, val_dataset, test_dataset, num_iterations=3, sampling_size=5000, ): # Creating lists for storing metrics losses, val_losses, accuracies, val_accuracies = [], [], [], [] model = create_model() # We will monitor the false positives and false negatives predicted by our model # These will decide the subsequent sampling ratio for every Active Learning loop model.compile( loss="binary_crossentropy", optimizer="rmsprop", metrics=[ keras.metrics.BinaryAccuracy(), keras.metrics.FalseNegatives(), keras.metrics.FalsePositives(), ], ) # Defining checkpoints. # The checkpoint callback is reused throughout the training since it only saves the best overall model. checkpoint = keras.callbacks.ModelCheckpoint( "AL_Model.h5", save_best_only=True, verbose=1 ) # Here, patience is set to 4. This can be set higher if desired. early_stopping = keras.callbacks.EarlyStopping(patience=4, verbose=1) print(f"Starting to train with {len(train_dataset)} samples") # Initial fit with a small subset of the training set history = model.fit( train_dataset.cache().shuffle(20000).batch(256), epochs=20, validation_data=val_dataset, callbacks=[checkpoint, early_stopping], ) # Appending history losses, val_losses, accuracies, val_accuracies = append_history( losses, val_losses, accuracies, val_accuracies, history ) for iteration in range(num_iterations): # Getting predictions from previously trained model predictions = model.predict(test_dataset) # Generating labels from the output probabilities rounded = tf.where(tf.greater(predictions, 0.5), 1, 0) # Evaluating the number of zeros and ones incorrrectly classified _, _, false_negatives, false_positives = model.evaluate(test_dataset, verbose=0) print("-" * 100) print( f"Number of zeros incorrectly classified: {false_negatives}, Number of ones incorrectly classified: {false_positives}" ) # This technique of Active Learning demonstrates ratio based 
sampling where # Number of ones/zeros to sample = Number of ones/zeros incorrectly classified / Total incorrectly classified if false_negatives != 0 and false_positives != 0: total = false_negatives + false_positives sample_ratio_ones, sample_ratio_zeros = ( false_positives / total, false_negatives / total, ) # In the case where all samples are correctly predicted, we can sample both classes equally else: sample_ratio_ones, sample_ratio_zeros = 0.5, 0.5 print( f"Sample ratio for positives: {sample_ratio_ones}, Sample ratio for negatives:{sample_ratio_zeros}" ) # Sample the required number of ones and zeros sampled_dataset = pool_negatives.take( int(sample_ratio_zeros * sampling_size) ).concatenate(pool_positives.take(int(sample_ratio_ones * sampling_size))) # Skip the sampled data points to avoid repetition of sample pool_negatives = pool_negatives.skip(int(sample_ratio_zeros * sampling_size)) pool_positives = pool_positives.skip(int(sample_ratio_ones * sampling_size)) # Concatenating the train_dataset with the sampled_dataset train_dataset = train_dataset.concatenate(sampled_dataset).prefetch( tf.data.AUTOTUNE ) print(f"Starting training with {len(train_dataset)} samples") print("-" * 100) # We recompile the model to reset the optimizer states and retrain the model model.compile( loss="binary_crossentropy", optimizer="rmsprop", metrics=[ keras.metrics.BinaryAccuracy(), keras.metrics.FalseNegatives(), keras.metrics.FalsePositives(), ], ) history = model.fit( train_dataset.cache().shuffle(20000).batch(256), validation_data=val_dataset, epochs=20, callbacks=[ checkpoint, keras.callbacks.EarlyStopping(patience=4, verbose=1), ], ) # Appending the history losses, val_losses, accuracies, val_accuracies = append_history( losses, val_losses, accuracies, val_accuracies, history ) # Loading the best model from this training loop model = keras.models.load_model("AL_Model.h5") # Plotting the overall history and evaluating the final model plot_history(losses, val_losses, 
accuracies, val_accuracies) print("-" * 100) print( "Test set evaluation: ", model.evaluate(test_dataset, verbose=0, return_dict=True), ) print("-" * 100) return model active_learning_model = train_active_learning_models( train_dataset, pool_negatives, pool_positives, val_dataset, test_dataset ) # + [markdown] colab_type="text" # ## Conclusion # # Active Learning is a growing area of research. This example demonstrates the cost-efficiency # benefits of using Active Learning, as it eliminates the need to annotate large amounts of # data, saving resources. # # The following are some noteworthy observations from this example: # # 1. We only require 30,000 samples to reach the same (if not better) scores as the model # trained on the full dataset. This means that in a real life setting, we save the effort # required for annotating 10,000 samples! # 2. The numbers of false negatives and false positives are well balanced at the end of the # training as compared to the skewed ratio obtained from the full training. This makes the # model slightly more useful in real life scenarios where both the labels hold equal # importance. # # For further reading about the types of sampling ratios, training techniques or available # open source libraries/implementations, you can refer to the resources below: # # 1. [Active Learning Literature Survey](http://burrsettles.com/pub/settles.activelearning.pdf) (Burr Settles, 2010). # 2. [modAL](https://github.com/modAL-python/modAL): A Modular Active Learning framework. # 3. Google's unofficial [Active Learning playground](https://github.com/google/active-learning).
examples/nlp/ipynb/active_learning_review_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
class Node:
    """A singly linked list node holding a single value."""

    def __init__(self, val):
        self.val = val
        self.next = None


def merge_lists(node1, node2):
    """Merge two sorted linked lists into one sorted linked list.

    A dummy head node removes the need to special-case the first
    element. The merge is stable: on equal values, nodes from
    ``node1`` are linked in before nodes from ``node2``.
    """
    dummy = Node(0)
    tail = dummy
    while node1 and node2:
        if node1.val <= node2.val:
            tail.next, node1 = node1, node1.next
        else:
            tail.next, node2 = node2, node2.next
        tail = tail.next
    # At most one of the two lists still has nodes; append the rest.
    tail.next = node1 or node2
    return dummy.next


def find_mid(head):
    """Return the middle node of the list (left-middle for even lengths).

    Classic slow/fast pointer walk: ``fast`` advances two links per
    iteration while ``slow`` advances one, so ``slow`` ends up at the
    midpoint when ``fast`` reaches the tail.
    """
    if head is None:
        return None
    slow = fast = head
    while fast.next is not None and fast.next.next is not None:
        slow = slow.next
        fast = fast.next.next
    return slow


def merge_sort_linked_list(head):
    """Sort a linked list in O(n log n) time using merge sort.

    Splits the list at its midpoint, recursively sorts each half,
    then merges the two sorted halves.
    """
    if head is None or head.next is None:
        return head  # zero or one node is already sorted
    mid = find_mid(head)
    right_head = mid.next
    mid.next = None  # detach the left half from the right half
    left_sorted = merge_sort_linked_list(head)
    right_sorted = merge_sort_linked_list(right_head)
    return merge_lists(left_sorted, right_sorted)


if __name__=='__main__':
    # Build the demo list 4 -> 2 -> 7 -> 3 -> 6 -> 5 -> 1 -> 8.
    head = None
    tail = None
    for value in (4, 2, 7, 3, 6, 5, 1, 8):
        node = Node(value)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node

    head = merge_sort_linked_list(head)

    # Print the sorted values in the original "a -> b -> ... -> X" format.
    while head:
        print(head.val, end = ' -> ')
        head = head.next
    print('X')
# -
flarow/Linked List/Sort Linked List using Merge Sort.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # BigGAN Exploration # ## Copyright notice # # This version (c) 2019 <NAME>, [MIT License](LICENSE). # ## Architecture # # While we are using a massively refined version of a GAN called BigGAN (original paper here: https://arxiv.org/abs/1809.11096), the basic architecture is the same: # # ![](img/gan.jpg) # ## Imports # # We use Tensorflow here simply because the BigGAN model is implemented in Tensorflow. We also use imageio to make animated GIFs; everything else should already be installed in your environment from previous notebooks. Please use the installation commands in the comments to install the missing packages. # + import tensorflow as tf # pip install tensorflow import tensorflow_hub as hub # pip install tensorflow-hub import imageio # conda install imageio import numpy as np from scipy.stats import truncnorm from io import BytesIO import PIL.Image import IPython.display import os print(tf.__version__) # - # ## Load and analyze model # # Tensorflow allows us to operate on models in the cloud. Here, we are using the 256x256 version of BigGAN. Replace `256` with `512` below for higher resolution output at the price of longer compute time, or use `128` for the opposite.
module_path = 'https://tfhub.dev/deepmind/biggan-256/2' tf.reset_default_graph() module = hub.Module(module_path) # + # Get a list of all the inputs of the model inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k) for k, v in module.get_input_info_dict().items()} # Define graph entry and exit points output = module(inputs) input_z = inputs['z'] input_y = inputs['y'] input_trunc = inputs['truncation'] # Print dimensions of latent space: note that the latent space has two multi-dimensional parts in BigGAN, # one for the actual latent space vector and one for the ImageNet class DIM_Z = input_z.shape.as_list()[1] DIM_Y = input_y.shape.as_list()[1] print(DIM_Z, DIM_Y) # - # ## Helper Functions # + def deprocess(img): img = np.concatenate(img, axis=0) # "Squeeze" img = np.clip(((img + 1) / 2.0) * 255, 0, 255) img = PIL.Image.fromarray(img.astype(np.uint8)) return img def show_img_PIL(img, fmt='jpeg'): f = BytesIO() img.save(f, fmt) IPython.display.display(IPython.display.Image(data=f.getvalue())) def truncated_z_sample(truncation=1.0): Z_MIN = -2 Z_MAX = 2 values = truncnorm.rvs(Z_MIN, Z_MAX, size=(1,DIM_Z), random_state=None) return truncation * values def makegif(folder): images = [] files = [] for file in os.listdir(folder): files.append(folder + '/' + file) files = sorted(files) print(files) for file in files: images.append(imageio.imread(file)) imageio.mimsave('movie.gif', images) # - FOLDER = 'images' # + if not os.path.exists(FOLDER): os.makedirs(FOLDER) initializer = tf.global_variables_initializer() sess = tf.Session() sess.run(initializer) for i in range(10): z = truncated_z_sample() # Get z sample # y = np.random.rand(1,DIM_Y) # Set all classes to value between 0 and 1 # y /= y.sum() # Scale vector so it adds up to 1 y = np.zeros((1, DIM_Y)) # Set all classes to 0 y[0,10] = 1 # Set one class to 1 feed_dict = {input_z: z, input_y: y, input_trunc: 1.0} # Set parameters of model img = sess.run(output, feed_dict=feed_dict) # Generate image img = 
deprocess(img) show_img_PIL(img) img.save(f'{FOLDER}/{i:03d}.jpg') makegif(FOLDER)
N08_BigGAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data input and output

# Basic printing and string formatting.
print("Hola, me llamo <NAME>")

x = 5

print(x)

# f-string formatting of the variable's value into the message.
print(f"El valor de la variable x es: {x}")

# Reading text from the user; `input` always returns a string.
texto = input("Introduce aquí tu nombre: ")

texto

# Converting the user's answer to an integer before using it.
numero = int(input("Introduce aquí tu edad"))

numero

2*numero + 5

# Python variables are dynamically typed: the same name can be
# rebound to values of different types.
x = 5
print(x)

x = 'j'
print(x)

x = 3.1415
print(x)

x = True
print(x)

x = 'h'
print(f"La variable x ahora se trata de un carácter: {x}")

x = 5
print(f"La variable x es entera: {x}")

x = "La variable x es ahora una cadena de caracteres (string)"
print(x)

# # Decision operators

# if boolean_condition:
#     code executed when the condition is true
#     ...
# following code, executed always

x = int(input("Escribe un número:"))
if x == 5:
    print("Has escrito el número 5")

x = int(input("Escribe un número:"))
if x < 5:
    print("Has escrito un número menor que 5")
elif x < 10:
    print("Has escrito un número menor que 10 y mayor o igual a 5")
else:
    print("EL número que has escrito es mayor o igual que 10")

# Chained comparison: equivalent to `x > 0 and x < 10`.
x = int(input("Escribe un número:"))
if 0 < x < 10:
    print("Has escrito un número en el rango de [0,10]")

# De Morgan form: equivalent to `x < 0 or x > 10`.
x = int(input("Escribe un número:"))
if not 0 <= x <= 10:
    print("Tu número escrito no se encuentra dentro del rango [0,10]")
scripts/tema1/07-io.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # <br /> # <div style="text-align: center;"> # <span style="font-weight: bold; color:#6dc; font-family: 'Arial Narrow'; font-size: 3.5em;">Current Methane Concentration, CH4 (NASA)</span> # </div> # <br /> # # <span style="color:#444; font-family: 'Arial'; font-size: 1.3em;">NOT REAL TIME DATA.<br /><br /> # Data taken from: ftp://aftp.cmdl.noaa.gov/products/carbontracker/ch4/fluxes/ </span> # <br /> import numpy as np import xarray as xr import matplotlib.pyplot as plt import urllib2 from contextlib import closing import rasterio import os import shutil import netCDF4 # %matplotlib inline np.set_printoptions(threshold='nan') # + remote_path = 'ftp://aftp.cmdl.noaa.gov/products/carbontracker/ch4/fluxes/' last_file = '201012.nc' local_path = os.getcwd() print (remote_path) print (last_file) print (local_path) # - with closing(urllib2.urlopen(remote_path+last_file)) as r: with open(last_file, 'wb') as f: shutil.copyfileobj(r, f) ncfile = xr.open_dataset(local_path+'/'+last_file, decode_times=False) print('* Variables disponibles en el fichero:') for v in ncfile.variables: print(v) ncfile.info() ncfile.variables # <br /> # <span style="font-weight: bold; color:#6dc; font-family: 'Arial Narrow'; font-size: 2.5em;">Visualizing Data</span> # <br /> # + # open a local NetCDF file or remote OPeNDAP URL url = url=local_path+'/'+last_file nc = netCDF4.Dataset(url) # examine the variables print nc.variables.keys() print nc.variables['fossil'] # sample every 10th point of the 'z' variable fossil = nc.variables['fossil'][0,:,:] print 'Shape: ',fossil.shape agwaste = nc.variables['agwaste'][0,:,:] print 'Shape: ',agwaste.shape natural = nc.variables['natural'][0,:,:] print 'Shape: ',natural.shape bioburn = nc.variables['bioburn'][0,:,:] print 'Shape: 
',bioburn.shape ocean = nc.variables['ocean'][0,:,:] print 'Shape: ',ocean.shape topo = [a + b + c + d + e for a, b, c, d, e in zip(fossil, agwaste, natural, bioburn, ocean)] #print topo # - # Ploting plt.figure(figsize=(10,10)) plt.imshow(topo,clim=(0.0, 1)) # + for i in reversed(topo): data = list(reversed(topo)) plt.figure(figsize=(10,10)) plt.imshow(data,clim=(0.0, 200)) # - # <br /> # <span style="font-weight: bold; color:#6dc; font-family: 'Arial Narrow'; font-size: 2.5em;">GitHub Script</span> # <br /> import numpy as np from contextlib import closing import urllib2 import shutil import os from netCDF4 import Dataset import rasterio import tinys3 import netCDF4 def dataDownload(): remote_path = 'ftp://aftp.cmdl.noaa.gov/products/carbontracker/ch4/fluxes/' last_file = '201012.nc' local_path = os.getcwd() print (remote_path) print (last_file) print (local_path) with closing(urllib2.urlopen(remote_path+last_file)) as r: with open(last_file, 'wb') as f: shutil.copyfileobj(r, f) return last_file def netcdf2tif(dst,outFile): local_path = os.getcwd() url = local_path+'/'+dst nc = netCDF4.Dataset(url) # examine the variables print nc.variables.keys() # sample every 10th point of the 'z' variable fossil = nc.variables['fossil'][0,:,:] print 'Shape: ',fossil.shape agwaste = nc.variables['agwaste'][0,:,:] print 'Shape: ',agwaste.shape natural = nc.variables['natural'][0,:,:] print 'Shape: ',natural.shape bioburn = nc.variables['bioburn'][0,:,:] print 'Shape: ',bioburn.shape ocean = nc.variables['ocean'][0,:,:] print 'Shape: ',ocean.shape topo = [a + b + c + d + e for a, b, c, d, e in zip(fossil, agwaste, natural, bioburn, ocean)] for i in reversed(topo): data = list(reversed(topo)) data = np.asarray(data) data[data < 0] = -1 # Return lat info south_lat = -90 north_lat = 90 # Return lon info west_lon = -180 east_lon = 180 # Transformation function transform = rasterio.transform.from_bounds(west_lon, south_lat, east_lon, north_lat, data.shape[1], data.shape[0]) # Profile 
profile = { 'driver':'GTiff', 'height':data.shape[0], 'width':data.shape[1], 'count':1, 'dtype':np.float64, 'crs':'EPSG:4326', 'transform':transform, 'compress':'lzw', 'nodata': -1 } with rasterio.open(outFile, 'w', **profile) as dst: dst.write(data.astype(profile['dtype']), 1) def s3Upload(outFile): # Push to Amazon S3 instance conn = tinys3.Connection(os.getenv('S3_ACCESS_KEY'),os.getenv('S3_SECRET_KEY'),tls=True) f = open(outFile,'rb') conn.upload(outFile,f,os.getenv('BUCKET')) # Execution outFile ='methane.tif' print 'starting' file = dataDownload() print 'downloaded' netcdf2tif(file,outFile) print 'converted' #s3Upload(outFile) print 'finish'
ResourceWatch/data_management/9) Current Methane Concentration, CH4 (NASA).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simulating an ideal gas in OpenMM # ## Preliminaries # # First, we import OpenMM. At this stage we also import teh testsystems in openmmtools to create the simulation systemd more easily. # # It's recommended that you always import this way: from simtk import openmm, unit from simtk.openmm import app import numpy as np from openmmtools.testsystems import * # ### This is the source code we will use to create a system of ideal gas particles. # class IdealGas(TestSystem): # # """Create an 'ideal gas' of noninteracting particles in a periodic box. # # Parameters # ---------- # nparticles : int, optional, default=216 # number of particles # mass : int, optional, default=39.9 * unit.amu # temperature : int, optional, default=298.0 * unit.kelvin # pressure : int, optional, default=1.0 * unit.atmosphere # volume : None # if None, defaults to (nparticles * temperature * unit.BOLTZMANN_CONSTANT_kB / pressure).in_units_of(unit.nanometers**3) # # Examples # -------- # # Create an ideal gas system. # # >>> gas = IdealGas() # >>> system, positions = gas.system, gas.positions # # Create a smaller ideal gas system containing 64 particles. # # >>> gas = IdealGas(nparticles=64) # >>> system, positions = gas.system, gas.positions # # """ # # def __init__(self, nparticles=216, mass=39.9 * unit.amu, temperature=298.0 * unit.kelvin, pressure=1.0 * unit.atmosphere, volume=None, **kwargs): # # TestSystem.__init__(self, **kwargs) # # if volume is None: # volume = (nparticles * temperature * unit.BOLTZMANN_CONSTANT_kB / pressure).in_units_of(unit.nanometers**3) # # # Create an empty system object. # system = openmm.System() # # # Compute box size. 
# length = volume**(1.0 / 3.0) # a = unit.Quantity((length, 0 * unit.nanometer, 0 * unit.nanometer)) # b = unit.Quantity((0 * unit.nanometer, length, 0 * unit.nanometer)) # c = unit.Quantity((0 * unit.nanometer, 0 * unit.nanometer, length)) # system.setDefaultPeriodicBoxVectors(a, b, c) # # # Add a null periodic nonbonded force to allow setting a barostat. # nonbonded_force = openmm.NonbondedForce() # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.CutoffPeriodic) # for i in range(nparticles): # nonbonded_force.addParticle(0.0, 1.0, 0.0) # system.addForce(nonbonded_force) # # # Add particles. # for index in range(nparticles): # system.addParticle(mass) # # # Create initial coordinates using subrandom positions. # positions = subrandom_particle_positions(nparticles, system.getDefaultPeriodicBoxVectors()) # # # Create topology. # topology = app.Topology() # element = app.Element.getBySymbol('Ar') # chain = topology.addChain() # for particle in range(system.getNumParticles()): # residue = topology.addResidue('Ar', chain) # topology.addAtom('Ar', element, residue) # self.topology = topology # # self.system, self.positions = system, positions # self.ndof = 3 * nparticles # # def get_potential_expectation(self, state): # """Return the expectation of the potential energy, computed analytically or numerically. # # Parameters # ---------- # state : ThermodynamicState with temperature defined # The thermodynamic state at which the property is to be computed. # # Returns # ------- # potential_mean : simtk.unit.Quantity compatible with simtk.unit.kilojoules_per_mole # The expectation of the potential energy. # # """ # # return 0.0 * unit.kilojoules_per_mole # # def get_potential_standard_deviation(self, state): # """Return the standard deviation of the potential energy, computed analytically or numerically. # # Parameters # ---------- # state : ThermodynamicState with temperature defined # The thermodynamic state at which the property is to be computed. 
# # Returns # ------- # potential_stddev : simtk.unit.Quantity compatible with simtk.unit.kilojoules_per_mole # potential energy standard deviation if implemented, or else None # # """ # # return 0.0 * unit.kilojoules_per_mole # # def get_kinetic_expectation(self, state): # """Return the expectation of the kinetic energy, computed analytically or numerically. # # Parameters # ---------- # state : ThermodynamicState with temperature defined # The thermodynamic state at which the property is to be computed. # # Returns # ------- # potential_mean : simtk.unit.Quantity compatible with simtk.unit.kilojoules_per_mole # The expectation of the potential energy. # # """ # # return (3. / 2.) * kB * state.temperature # # def get_kinetic_standard_deviation(self, state): # """Return the standard deviation of the kinetic energy, computed analytically or numerically. # # Parameters # ---------- # state : ThermodynamicState with temperature defined # The thermodynamic state at which the property is to be computed. # # Returns # ------- # potential_stddev : simtk.unit.Quantity compatible with simtk.unit.kilojoules_per_mole # potential energy standard deviation if implemented, or else None # # """ # # return (3. / 2.) * kB * state.temperature # # def get_volume_expectation(self, state): # """Return the expectation of the volume, computed analytically. # # Parameters # ---------- # state : ThermodynamicState with temperature and pressure defined # The thermodynamic state at which the property is to be computed. # # Returns # ------- # volume_mean : simtk.unit.Quantity compatible with simtk.unit.nanometers**3 # The expectation of the volume at equilibrium. # # Notes # ----- # The true mean volume is used, rather than the large-N limit. 
# # """ # # if not state.pressure: # box_vectors = self.system.getDefaultPeriodicBoxVectors() # volume = box_vectors[0][0] * box_vectors[1][1] * box_vectors[2][2] # return volume # # N = self._system.getNumParticles() # return ((N + 1) * unit.BOLTZMANN_CONSTANT_kB * state.temperature / state.pressure).in_units_of(unit.nanometers**3) # # def get_volume_standard_deviation(self, state): # """Return the standard deviation of the volume, computed analytically. # # Parameters # ---------- # state : ThermodynamicState with temperature and pressure defined # The thermodynamic state at which the property is to be computed. # # Returns # ------- # volume_stddev : simtk.unit.Quantity compatible with simtk.unit.nanometers**3 # The standard deviation of the volume at equilibrium. # # Notes # ----- # The true mean volume is used, rather than the large-N limit. # # """ # # if not state.pressure: # return 0.0 * unit.nanometers**3 # # N = self._system.getNumParticles() # return (numpy.sqrt(N + 1) * unit.BOLTZMANN_CONSTANT_kB * state.temperature / state.pressure).in_units_of(unit.nanometers**3) # # ### Questions: # # 1.) Which gas is used as default? # # 2.) Why is it an ideal gas? # # 3.) What is the kinetic energy of the system? # Create a ideal gas gas = IdealGas(nparticles=1000,temperature=120*unit.kelvin) # Now we define the thermodynamic variables and a copy of the system and the positions pressure = 1*unit.atmospheres temperature = 120*unit.kelvin [system, positions] = [gas.system, gas.positions] # This is the collision rate of the Langevin integrator and the timestep which will be used. collision_rate = 5/unit.picoseconds timestep = 2.5*unit.femtoseconds # Setting up the simulation and minimizing the potential energy ... 
# + # Create a context integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep) context = openmm.Context(system, integrator) context.setPositions(positions) # Minimize energy print('Minimizing energy...') openmm.LocalEnergyMinimizer.minimize(context) # - # ### Question: # # Is it necessary to minimize the potential energy? state = context.getState(getEnergy=True) state.getPotentialEnergy() # We will simulate this system in total for 5000 steps and write the kinetic energy every 10 step niterations = 50000 nsteps = 10 # Create an array to store the kinetic energy every 10 steps and simulate ... u = np.zeros(niterations, np.float64) for iteration in range(niterations): # print('iteration %5d' % (iteration)) # Run some dynamics integrator.step(nsteps) # Compute energies at all alchemical states u[iteration] = context.getState(getEnergy=True).getKineticEnergy() / unit.kilojoule_per_mole # We will now plot the kinetic energy import matplotlib.pyplot as plt # %matplotlib inline plt.plot(u) # Questions: # # 1.) Redo the simulation for the same number of steps without redefining the context class. Why is the kinetic energy more constant now then before? # # 2.) What is the energy in Joule you expect to get? # # 3.) How can you achieve a more constant kinetic energy?
simulating_ideal_gas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="MWW1TyjaecRh" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="mOtR1FzCef-u" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="Zr7KpBhMcYvE" # # Build a linear model with Estimators # + [markdown] colab_type="text" id="uJl4gaPFzxQz" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/tutorials/estimators/linear.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/estimators/linear.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="uJl4gaPFzxQz" # > Note: This is an archived TF1 notebook. These are configured # to run in TF2's # [compatbility mode](https://www.tensorflow.org/guide/migrate) # but will run in TF1 as well. To use TF1 in Colab, use the # [%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb) # magic. 
# + [markdown] colab_type="text" id="77aETSYDcdoK" # This tutorial uses the `tf.estimator` API in TensorFlow to solve a benchmark binary classification problem. Estimators are TensorFlow's most scalable and production-oriented model type. For more information see the [Estimator guide](https://www.tensorflow.org/r1/guide/estimators). # # ## Overview # # Using census data which contains data about a person's age, education, marital status, and occupation (the *features*), we will try to predict whether or not the person earns more than 50,000 dollars a year (the target *label*). We will train a *logistic regression* model that, given an individual's information, outputs a number between 0 and 1—this can be interpreted as the probability that the individual has an annual income of over 50,000 dollars. # # Key Point: As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is each feature relevant to the problem you want to solve or will it introduce bias? For more information, read about [ML fairness](https://developers.google.com/machine-learning/fairness-overview/). # # ## Setup # # Import TensorFlow, feature column support, and supporting modules: # + colab={} colab_type="code" id="NQgONe5ecYvE" import tensorflow.compat.v1 as tf import tensorflow.feature_column as fc import os import sys import matplotlib.pyplot as plt from IPython.display import clear_output # + [markdown] colab_type="text" id="Rpb1JSMj1nqk" # And let's enable [eager execution](https://www.tensorflow.org/r1/guide/eager) to inspect this program as we run it: # + [markdown] colab_type="text" id="-MPr95UccYvL" # ## Download the official implementation # # We'll use the [wide and deep model](https://github.com/tensorflow/models/tree/master/official/r1/wide_deep/) available in TensorFlow's [model repository](https://github.com/tensorflow/models/). 
Download the code, add the root directory to your Python path, and jump to the `wide_deep` directory: # + colab={} colab_type="code" id="tTwQzWcn8aBu" # ! pip install requests # ! git clone --depth 1 https://github.com/tensorflow/models # + [markdown] colab_type="text" id="sRpuysc73Eb-" # Add the root directory of the repository to your Python path: # + colab={} colab_type="code" id="yVvFyhnkcYvL" models_path = os.path.join(os.getcwd(), 'models') sys.path.append(models_path) # + [markdown] colab_type="text" id="15Ethw-wcYvP" # Download the dataset: # + colab={} colab_type="code" id="6QilS4-0cYvQ" from official.r1.wide_deep import census_dataset from official.r1.wide_deep import census_main census_dataset.download("/tmp/census_data/") # + [markdown] colab_type="text" id="cD5e3ibAcYvS" # ### Command line usage # # The repo includes a complete program for experimenting with this type of model. # # To execute the tutorial code from the command line first add the path to tensorflow/models to your `PYTHONPATH`. # + colab={} colab_type="code" id="DYOkY8boUptJ" #export PYTHONPATH=${PYTHONPATH}:"$(pwd)/models" #running from python you need to set the `os.environ` or the subprocess will not see the directory. if "PYTHONPATH" in os.environ: os.environ['PYTHONPATH'] += os.pathsep + models_path else: os.environ['PYTHONPATH'] = models_path # + [markdown] colab_type="text" id="5r0V9YUMUyoh" # Use `--help` to see what command line options are available: # + colab={} colab_type="code" id="1_3tBaLW4YM4" # !python -m official.r1.wide_deep.census_main --help # + [markdown] colab_type="text" id="RrMLazEN6DMj" # Now run the model: # # + colab={} colab_type="code" id="py7MarZl5Yh6" # !python -m official.r1.wide_deep.census_main --model_type=wide --train_epochs=2 # + [markdown] colab_type="text" id="AmZ4CpaOcYvV" # ## Read the U.S. Census data # # This example uses the [U.S Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census+Income) from 1994 and 1995. 
We have provided the [census_dataset.py](https://github.com/tensorflow/models/tree/master/official/r1/wide_deep/census_dataset.py) script to download the data and perform a little cleanup. # # Since the task is a *binary classification problem*, we'll construct a label column named "label" whose value is 1 if the income is over 50K, and 0 otherwise. For reference, see the `input_fn` in [census_main.py](https://github.com/tensorflow/models/tree/master/official/r1/wide_deep/census_main.py). # # Let's look at the data to see which columns we can use to predict the target label: # + colab={} colab_type="code" id="N6Tgye8bcYvX" # !ls /tmp/census_data/ # + colab={} colab_type="code" id="6y3mj9zKcYva" train_file = "/tmp/census_data/adult.data" test_file = "/tmp/census_data/adult.test" # + [markdown] colab_type="text" id="EO_McKgE5il2" # [pandas](https://pandas.pydata.org/) provides some convenient utilities for data analysis. Here's a list of columns available in the Census Income dataset: # + colab={} colab_type="code" id="vkn1FNmpcYvb" import pandas train_df = pandas.read_csv(train_file, header = None, names = census_dataset._CSV_COLUMNS) test_df = pandas.read_csv(test_file, header = None, names = census_dataset._CSV_COLUMNS) train_df.head() # + [markdown] colab_type="text" id="QZZtXes4cYvf" # The columns are grouped into two types: *categorical* and *continuous* columns: # # * A column is called *categorical* if its value can only be one of the categories in a finite set. For example, the relationship status of a person (wife, husband, unmarried, etc.) or the education level (high school, college, etc.) are categorical columns. # * A column is called *continuous* if its value can be any numerical value in a continuous range. For example, the capital gain of a person (e.g. $14,084) is a continuous column. # # ## Converting Data into Tensors # # When building a `tf.estimator` model, the input data is specified by using an *input function* (or `input_fn`). 
This builder function returns a `tf.data.Dataset` of batches of `(features-dict, label)` pairs. It is not called until it is passed to `tf.estimator.Estimator` methods such as `train` and `evaluate`. # # The input builder function returns the following pair: # # 1. `features`: A dict from feature names to `Tensors` or `SparseTensors` containing batches of features. # 2. `labels`: A `Tensor` containing batches of labels. # # The keys of the `features` are used to configure the model's input layer. # # Note: The input function is called while constructing the TensorFlow graph, *not* while running the graph. It is returning a representation of the input data as a sequence of TensorFlow graph operations. # # For small problems like this, it's easy to make a `tf.data.Dataset` by slicing the `pandas.DataFrame`: # + colab={} colab_type="code" id="N7zNJflKcYvg" def easy_input_function(df, label_key, num_epochs, shuffle, batch_size): label = df[label_key] ds = tf.data.Dataset.from_tensor_slices((dict(df),label)) if shuffle: ds = ds.shuffle(10000) ds = ds.batch(batch_size).repeat(num_epochs) return ds # + [markdown] colab_type="text" id="WeEgNR9AcYvh" # Since we have eager execution enabled, it's easy to inspect the resulting dataset: # + colab={} colab_type="code" id="ygaKuikecYvi" ds = easy_input_function(train_df, label_key='income_bracket', num_epochs=5, shuffle=True, batch_size=10) for feature_batch, label_batch in ds.take(1): print('Some feature keys:', list(feature_batch.keys())[:5]) print() print('A batch of Ages :', feature_batch['age']) print() print('A batch of Labels:', label_batch ) # + [markdown] colab_type="text" id="O_KZxQUucYvm" # But this approach has severly-limited scalability. Larger datasets should be streamed from disk. 
The `census_dataset.input_fn` provides an example of how to do this using `tf.decode_csv` and `tf.data.TextLineDataset`: # # <!-- TODO(markdaoust): This `input_fn` should use `tf.contrib.data.make_csv_dataset` --> # + colab={} colab_type="code" id="vUTeXaEUcYvn" import inspect print(inspect.getsource(census_dataset.input_fn)) # + [markdown] colab_type="text" id="yyGcv_e-cYvq" # This `input_fn` returns equivalent output: # + colab={} colab_type="code" id="Mv3as_CEcYvu" ds = census_dataset.input_fn(train_file, num_epochs=5, shuffle=True, batch_size=10) for feature_batch, label_batch in ds.take(1): print('Feature keys:', list(feature_batch.keys())[:5]) print() print('Age batch :', feature_batch['age']) print() print('Label batch :', label_batch ) # + [markdown] colab_type="text" id="810fnfY5cYvz" # Because `Estimators` expect an `input_fn` that takes no arguments, we typically wrap configurable input function into an object with the expected signature. For this notebook configure the `train_inpf` to iterate over the data twice: # + colab={} colab_type="code" id="wnQdpEcVcYv0" import functools train_inpf = functools.partial(census_dataset.input_fn, train_file, num_epochs=2, shuffle=True, batch_size=64) test_inpf = functools.partial(census_dataset.input_fn, test_file, num_epochs=1, shuffle=False, batch_size=64) # + [markdown] colab_type="text" id="pboNpNWhcYv4" # ## Selecting and Engineering Features for the Model # # Estimators use a system called [feature columns](https://www.tensorflow.org/r1/guide/feature_columns) to describe how the model should interpret each of the raw input features. An Estimator expects a vector of numeric inputs, and feature columns describe how the model should convert each feature. # # Selecting and crafting the right set of feature columns is key to learning an effective model. 
A *feature column* can be either one of the raw inputs in the original features `dict` (a *base feature column*), or any new columns created using transformations defined over one or multiple base columns (a *derived feature columns*). # # A feature column is an abstract concept of any raw or derived variable that can be used to predict the target label. # + [markdown] colab_type="text" id="_hh-cWdU__Lq" # ### Base Feature Columns # + [markdown] colab_type="text" id="BKz6LA8_ACI7" # #### Numeric columns # # The simplest `feature_column` is `numeric_column`. This indicates that a feature is a numeric value that should be input to the model directly. For example: # + colab={} colab_type="code" id="ZX0r2T5OcYv6" age = fc.numeric_column('age') # + [markdown] colab_type="text" id="tnLUiaHxcYv-" # The model will use the `feature_column` definitions to build the model input. You can inspect the resulting output using the `input_layer` function: # + colab={} colab_type="code" id="kREtIPfwcYv_" fc.input_layer(feature_batch, [age]).numpy() # + [markdown] colab_type="text" id="OPuLduCucYwD" # The following will train and evaluate a model using only the `age` feature: # + colab={} colab_type="code" id="9R5eSJ1pcYwE" classifier = tf.estimator.LinearClassifier(feature_columns=[age]) classifier.train(train_inpf) result = classifier.evaluate(test_inpf) clear_output() # used for display in notebook print(result) # + [markdown] colab_type="text" id="YDZGcdTdcYwI" # Similarly, we can define a `NumericColumn` for each continuous feature column # that we want to use in the model: # + colab={} colab_type="code" id="uqPbUqlxcYwJ" education_num = tf.feature_column.numeric_column('education_num') capital_gain = tf.feature_column.numeric_column('capital_gain') capital_loss = tf.feature_column.numeric_column('capital_loss') hours_per_week = tf.feature_column.numeric_column('hours_per_week') my_numeric_columns = [age,education_num, capital_gain, capital_loss, hours_per_week] 
fc.input_layer(feature_batch, my_numeric_columns).numpy() # + [markdown] colab_type="text" id="cBGDN97IcYwQ" # You could retrain a model on these features by changing the `feature_columns` argument to the constructor: # + colab={} colab_type="code" id="XN8k5S95cYwR" classifier = tf.estimator.LinearClassifier(feature_columns=my_numeric_columns) classifier.train(train_inpf) result = classifier.evaluate(test_inpf) clear_output() for key,value in sorted(result.items()): print('%s: %s' % (key, value)) # + [markdown] colab_type="text" id="jBRq9_AzcYwU" # #### Categorical columns # # To define a feature column for a categorical feature, create a `CategoricalColumn` using one of the `tf.feature_column.categorical_column*` functions. # # If you know the set of all possible feature values of a column—and there are only a few of them—use `categorical_column_with_vocabulary_list`. Each key in the list is assigned an auto-incremented ID starting from 0. For example, for the `relationship` column we can assign the feature string `Husband` to an integer ID of 0 and "Not-in-family" to 1, etc. # + colab={} colab_type="code" id="0IjqSi9tcYwV" relationship = fc.categorical_column_with_vocabulary_list( 'relationship', ['Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried', 'Other-relative']) # + [markdown] colab_type="text" id="-RjoWv-7cYwW" # This creates a sparse one-hot vector from the raw input feature. # # The `input_layer` function we're using is designed for DNN models and expects dense inputs. To demonstrate the categorical column we must wrap it in a `tf.feature_column.indicator_column` to create the dense one-hot output (Linear `Estimators` can often skip this dense-step). # # Note: the other sparse-to-dense option is `tf.feature_column.embedding_column`. 
# # Run the input layer, configured with both the `age` and `relationship` columns: # + colab={} colab_type="code" id="kI43CYlncYwY" fc.input_layer(feature_batch, [age, fc.indicator_column(relationship)]) # + [markdown] colab_type="text" id="tTudP7WHcYwb" # If we don't know the set of possible values in advance, use the `categorical_column_with_hash_bucket` instead: # + colab={} colab_type="code" id="8pSBaliCcYwb" occupation = tf.feature_column.categorical_column_with_hash_bucket( 'occupation', hash_bucket_size=1000) # + [markdown] colab_type="text" id="fSAPrqQkcYwd" # Here, each possible value in the feature column `occupation` is hashed to an integer ID as we encounter them in training. The example batch has a few different occupations: # + colab={} colab_type="code" id="dCvQNv36cYwe" for item in feature_batch['occupation'].numpy(): print(item.decode()) # + [markdown] colab_type="text" id="KP5hN2rAcYwh" # If we run `input_layer` with the hashed column, we see that the output shape is `(batch_size, hash_bucket_size)`: # + colab={} colab_type="code" id="0Y16peWacYwh" occupation_result = fc.input_layer(feature_batch, [fc.indicator_column(occupation)]) occupation_result.numpy().shape # + [markdown] colab_type="text" id="HMW2MzWAcYwk" # It's easier to see the actual results if we take the `tf.argmax` over the `hash_bucket_size` dimension. Notice how any duplicate occupations are mapped to the same pseudo-random index: # + colab={} colab_type="code" id="q_ryRglmcYwk" tf.argmax(occupation_result, axis=1).numpy() # + [markdown] colab_type="text" id="j1e5NfyKcYwn" # Note: Hash collisions are unavoidable, but often have minimal impact on model quality. The effect may be noticable if the hash buckets are being used to compress the input space. # # No matter how we choose to define a `SparseColumn`, each feature string is mapped into an integer ID by looking up a fixed mapping or by hashing. 
Under the hood, the `LinearModel` class is responsible for managing the mapping and creating `tf.Variable` to store the model parameters (model *weights*) for each feature ID. The model parameters are learned through the model training process described later. # # Let's do the similar trick to define the other categorical features: # + colab={} colab_type="code" id="0Z5eUrd_cYwo" education = tf.feature_column.categorical_column_with_vocabulary_list( 'education', [ 'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college', 'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school', '5th-6th', '10th', '1st-4th', 'Preschool', '12th']) marital_status = tf.feature_column.categorical_column_with_vocabulary_list( 'marital_status', [ 'Married-civ-spouse', 'Divorced', 'Married-spouse-absent', 'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed']) workclass = tf.feature_column.categorical_column_with_vocabulary_list( 'workclass', [ 'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov', 'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked']) my_categorical_columns = [relationship, occupation, education, marital_status, workclass] # + [markdown] colab_type="text" id="ASQJM1pEcYwr" # It's easy to use both sets of columns to configure a model that uses all these features: # + colab={} colab_type="code" id="_i_MLoo9cYws" classifier = tf.estimator.LinearClassifier(feature_columns=my_numeric_columns+my_categorical_columns) classifier.train(train_inpf) result = classifier.evaluate(test_inpf) clear_output() for key,value in sorted(result.items()): print('%s: %s' % (key, value)) # + [markdown] colab_type="text" id="zdKEqF6xcYwv" # ### Derived feature columns # + [markdown] colab_type="text" id="RgYaf_48FSU2" # #### Make Continuous Features Categorical through Bucketization # # Sometimes the relationship between a continuous feature and the label is not linear. 
For example, *age* and *income*—a person's income may grow in the early stage of their career, then the growth may slow at some point, and finally, the income decreases after retirement. In this scenario, using the raw `age` as a real-valued feature column might not be a good choice because the model can only learn one of the three cases: # # 1. Income always increases at some rate as age grows (positive correlation), # 2. Income always decreases at some rate as age grows (negative correlation), or # 3. Income stays the same no matter at what age (no correlation). # # If we want to learn the fine-grained correlation between income and each age group separately, we can leverage *bucketization*. Bucketization is a process of dividing the entire range of a continuous feature into a set of consecutive buckets, and then converting the original numerical feature into a bucket ID (as a categorical feature) depending on which bucket that value falls into. So, we can define a `bucketized_column` over `age` as: # + colab={} colab_type="code" id="KT4pjD9AcYww" age_buckets = tf.feature_column.bucketized_column( age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) # + [markdown] colab_type="text" id="S-XOscrEcYwx" # `boundaries` is a list of bucket boundaries. In this case, there are 10 boundaries, resulting in 11 age group buckets (from age 17 and below, 18-24, 25-29, ..., to 65 and over). # # With bucketing, the model sees each bucket as a one-hot feature: # + colab={} colab_type="code" id="Lr40vm3qcYwy" fc.input_layer(feature_batch, [age, age_buckets]).numpy() # + [markdown] colab_type="text" id="Z_tQI9j8cYw1" # #### Learn complex relationships with crossed column # # Using each base feature column separately may not be enough to explain the data. For example, the correlation between education and the label (earning > 50,000 dollars) may be different for different occupations. 
Therefore, if we only learn a single model weight for `education="Bachelors"` and `education="Masters"`, we won't capture every education-occupation combination (e.g. distinguishing between `education="Bachelors"` AND `occupation="Exec-managerial"` AND `education="Bachelors" AND occupation="Craft-repair"`). # # To learn the differences between different feature combinations, we can add *crossed feature columns* to the model: # + colab={} colab_type="code" id="IAPhPzXscYw1" education_x_occupation = tf.feature_column.crossed_column( ['education', 'occupation'], hash_bucket_size=1000) # + [markdown] colab_type="text" id="UeTxMunbcYw5" # We can also create a `crossed_column` over more than two columns. Each constituent column can be either a base feature column that is categorical (`SparseColumn`), a bucketized real-valued feature column, or even another `CrossColumn`. For example: # + colab={} colab_type="code" id="y8UaBld9cYw7" age_buckets_x_education_x_occupation = tf.feature_column.crossed_column( [age_buckets, 'education', 'occupation'], hash_bucket_size=1000) # + [markdown] colab_type="text" id="HvKmW6U5cYw8" # These crossed columns always use hash buckets to avoid the exponential explosion in the number of categories, and put the control over number of model weights in the hands of the user. # # + [markdown] colab_type="text" id="HtjpheB6cYw9" # ## Define the logistic regression model # # After processing the input data and defining all the feature columns, we can put them together and build a *logistic regression* model. 
The previous section showed several types of base and derived feature columns, including: # # * `CategoricalColumn` # * `NumericColumn` # * `BucketizedColumn` # * `CrossedColumn` # # All of these are subclasses of the abstract `FeatureColumn` class and can be added to the `feature_columns` field of a model: # + colab={} colab_type="code" id="Klmf3OxpcYw-" import tempfile base_columns = [ education, marital_status, relationship, workclass, occupation, age_buckets, ] crossed_columns = [ tf.feature_column.crossed_column( ['education', 'occupation'], hash_bucket_size=1000), tf.feature_column.crossed_column( [age_buckets, 'education', 'occupation'], hash_bucket_size=1000), ] model = tf.estimator.LinearClassifier( model_dir=tempfile.mkdtemp(), feature_columns=base_columns + crossed_columns, optimizer=tf.train.FtrlOptimizer(learning_rate=0.1)) # + [markdown] colab_type="text" id="jRhnPxUucYxC" # The model automatically learns a bias term, which controls the prediction made without observing any features. The learned model files are stored in `model_dir`. # # ## Train and evaluate the model # # After adding all the features to the model, let's train the model. Training a model is just a single command using the `tf.estimator` API: # + colab={} colab_type="code" id="ZlrIBuoecYxD" train_inpf = functools.partial(census_dataset.input_fn, train_file, num_epochs=40, shuffle=True, batch_size=64) model.train(train_inpf) clear_output() # used for notebook display # + [markdown] colab_type="text" id="IvY3a9pzcYxH" # After the model is trained, evaluate the accuracy of the model by predicting the labels of the holdout data: # + colab={} colab_type="code" id="L9nVJEO8cYxI" results = model.evaluate(test_inpf) clear_output() for key,value in sorted(results.items()): print('%s: %0.2f' % (key, value)) # + [markdown] colab_type="text" id="E0fAibNDcYxL" # The first line of the output should display something like: `accuracy: 0.84`, which means the accuracy is 84%. 
You can try using more features and transformations to see if you can do better! # # After the model is evaluated, we can use it to predict whether an individual has an annual income of over 50,000 dollars given an individual's information input. # # Let's look in more detail how the model performed: # + colab={} colab_type="code" id="8R5bz5CxcYxL" import numpy as np predict_df = test_df[:20].copy() pred_iter = model.predict( lambda:easy_input_function(predict_df, label_key='income_bracket', num_epochs=1, shuffle=False, batch_size=10)) classes = np.array(['<=50K', '>50K']) pred_class_id = [] for pred_dict in pred_iter: pred_class_id.append(pred_dict['class_ids']) predict_df['predicted_class'] = classes[np.array(pred_class_id)] predict_df['correct'] = predict_df['predicted_class'] == predict_df['income_bracket'] clear_output() predict_df[['income_bracket','predicted_class', 'correct']] # + [markdown] colab_type="text" id="N_uCpFTicYxN" # For a working end-to-end example, download our [example code](https://github.com/tensorflow/models/tree/master/official/r1/wide_deep/census_main.py) and set the `model_type` flag to `wide`. # + [markdown] colab_type="text" id="oyKy1lM_3gkL" # ## Adding Regularization to Prevent Overfitting # # Regularization is a technique used to avoid overfitting. Overfitting happens when a model performs well on the data it is trained on, but worse on test data that the model has not seen before. Overfitting can occur when a model is excessively complex, such as having too many parameters relative to the number of observed training data. Regularization allows you to control the model's complexity and make the model more generalizable to unseen data. 
#
# You can add L1 and L2 regularizations to the model with the following code:

# + colab={} colab_type="code" id="lzMUSBQ03hHx"
# L1-regularized variant: same features as the base model, but the FTRL
# optimizer applies an L1 penalty (strength 10.0, no L2 term).
model_l1 = tf.estimator.LinearClassifier(
    feature_columns=base_columns + crossed_columns,
    optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=10.0,
        l2_regularization_strength=0.0))

model_l1.train(train_inpf)

results = model_l1.evaluate(test_inpf)
clear_output()
for key in sorted(results):
  print('%s: %0.2f' % (key, results[key]))

# + colab={} colab_type="code" id="ofmPL212JIy2"
# L2-regularized variant: identical configuration with the L2 penalty
# switched on instead (strength 10.0, no L1 term).
model_l2 = tf.estimator.LinearClassifier(
    feature_columns=base_columns + crossed_columns,
    optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.0,
        l2_regularization_strength=10.0))

model_l2.train(train_inpf)

results = model_l2.evaluate(test_inpf)
clear_output()
for key in sorted(results):
  print('%s: %0.2f' % (key, results[key]))

# + [markdown] colab_type="text" id="Lp1Rfy_k4e7w"
# These regularized models don't perform much better than the base model. Let's look at the model's weight distributions to better see the effect of the regularization:

# + colab={} colab_type="code" id="Wb6093N04XlS"
def get_flat_weights(model):
  """Return all linear-model weights of `model` as one flat 1-D numpy array.

  Variables whose name contains "Ftrl" are the optimizer's accumulator
  slots rather than model weights, so they are filtered out.
  """
  weight_names = [
      name for name in model.get_variable_names()
      if "linear_model" in name and "Ftrl" not in name]

  weight_values = [model.get_variable_value(name) for name in weight_names]

  weights_flat = np.concatenate([item.flatten() for item in weight_values], axis=0)

  return weights_flat

weights_flat = get_flat_weights(model)
weights_flat_l1 = get_flat_weights(model_l1)
weights_flat_l2 = get_flat_weights(model_l2)

# + [markdown] colab_type="text" id="GskJmtfmL0p-"
# The models have many zero-valued weights caused by unused hash bins (there are many more hash bins than categories in some columns). We can mask these weights when viewing the weight distributions:

# + colab={} colab_type="code" id="rM3agZe3MT3D"
# Mask of positions that are non-zero in the *base* model; the same mask is
# reused for the L1/L2 models so all three histograms cover the same
# weight positions.
weight_mask = weights_flat != 0

weights_base = weights_flat[weight_mask]
weights_l1 = weights_flat_l1[weight_mask]
weights_l2 = weights_flat_l2[weight_mask]

# + [markdown] colab_type="text" id="NqBpxLLQNEBE"
# Now plot the distributions:

# + colab={} colab_type="code" id="IdFK7wWa5_0K"
# Histogram each weight set over a common x-range and y-limit so the three
# plots are directly comparable.
plt.figure()
_ = plt.hist(weights_base, bins=np.linspace(-3,3,30))
plt.title('Base Model')
plt.ylim([0,500])

plt.figure()
_ = plt.hist(weights_l1, bins=np.linspace(-3,3,30))
plt.title('L1 - Regularization')
plt.ylim([0,500])

plt.figure()
_ = plt.hist(weights_l2, bins=np.linspace(-3,3,30))
plt.title('L2 - Regularization')
_=plt.ylim([0,500])

# + [markdown] colab_type="text" id="Mv6knhFa5-iJ"
# Both types of regularization squeeze the distribution of weights towards zero. L2 regularization has a greater effect in the tails of the distribution eliminating extreme weights. L1 regularization produces more exactly-zero values, in this case it sets ~200 to zero.
site/en/r1/tutorials/estimators/linear.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%", "is_executing": false} class MoneyBox: def __init__(self, capacity): self.capacity = capacity def can_add(self, b): if b <= self.capacity: return True else: return False def add(self, b): if self.can_add(b): self.capacity = self.capacity - b # + pycharm={"name": "#%%\n", "is_executing": false} # first variable class Buffer: def __init__(self): self.t_buf = [] def get_current_part(self): print(self.t_buf) def add(self, *args): _args = list(args) self.t_buf = self.t_buf + _args while True: if len(self.t_buf) < 5: break elif len(self.t_buf) == 5: print(sum(self.t_buf)) self.t_buf =[] elif len(self.t_buf) > 5: print(sum(self.t_buf[:5])) self.t_buf = self.t_buf[5:] x = Buffer() x.add(2, 3, 4, 5, 1) x.get_current_part() x.add(2, 3,) x.get_current_part() x.add(1, 4, 5) x.get_current_part() x.add(1, 2, 3, 4, 5, 0, 7, 8) x.get_current_part() x.add(0, 0) # + pycharm={"name": "#%%\n", "is_executing": false} # next variable class Buffer: def __init__(self): self.buf=[] def add(self, *a): self.buf.extend(a) while len(self.buf)>=5: print(sum(self.buf[:5])) del(self.buf[:5]) def get_current_part(self): return print(self.buf) x = Buffer() x.add(2, 3, 4, 5, 1) x.get_current_part() x.add(2, 3,) x.get_current_part() x.add(1, 4, 5) x.get_current_part() x.add(1, 2, 3, 4, 5, 0, 7, 8) x.get_current_part() x.add(0, 0) # + pycharm={"name": "#%%\n", "is_executing": false} class Mylist(list): def eve(self): return len(self)%2==0 x=Mylist() print(x) x.extend([1, 2, 3]) print(issubclass(list, Mylist)) isinstance(x, Mylist) print(Mylist.mro()) # + pycharm={"name": "#%%\n", "is_executing": false} class A: pass class B(A): pass class C: pass class D(C): pass class E(B, D): pass x= E() import pprint pprint.pprint(E.mro()) # + pycharm={"name": "#%%\n", 
"is_executing": false} d = {'a': ['a', 'a'], 'b': ['a'], 'c': ['a'], 'y': ['a', 'x'], 'd': ['b', 'c', 'y'], 'e': ['d'], 'f': ['d'], 'g': ['f'], 'x': ['x'], 'z': ['x'], 'v': ['y', 'z'], 'w': ['v']} #('el': [[], []]) el = dict() for k, v in d.items(): el[k] = [v, []] for e, l in d.items(): if k in l: el[k][1].append(e) print(el) # + pycharm={"name": "#%%\n"} """Какие варианты есть для записи структуры 1) реб: род, дети: род 2) род: все пути с детьми 3) список с уровнями узлов род:реб """ # + pycharm={"name": "#%%\n", "is_executing": false} nlst = ['a', 'b : a', 'c : a', 'y : a', 'd : b c y', 'e : d', 'f : d', 'g : f', 'x', 'y : x', 'z : x', 'v : y z', 'w : v'] # 1) заполню список словарями n_dict1= [] for i in nlst: i_0, *i_1 = i.split(" : ") n_dict1.append({i_0 : i_1}) from pprint import pprint pprint(n_dict1) print() # + pycharm={"name": "#%%\n", "is_executing": false} n_dict2 = dict() # положу все в словарь for i in nlst: i_0, *i_1 = i.split(" : ") n_dict2.update({i_0 : i_1}) from pprint import pprint pprint(n_dict2) print() # + pycharm={"name": "#%%\n", "is_executing": false} graph1 ={'A': ['B', 'C'], 'B': ['C', 'D'], 'C': ['D'], 'D': ['C'], 'E': ['F'], 'F': ['C']} graph2 ={'b': ['a'], 'c': ['a'], 'y': ['a', 'x'], 'd': ['b', 'c', 'y'], 'e': ['d'], 'f': ['d'], 'g': ['f'], 'z': ['x'], 'v': ['y', 'z'], 'w': ['v']} def find_path(graph, chi, par, path=[]): path = path + [chi] if chi == par: return path if chi not in graph: return None for node in graph[chi]: if node not in path: newpath = find_path(graph, node, par, path) if newpath: return newpath find_path(graph1, 'A', 'D') l=find_path(graph2, 'w', 'm') print(l, l) # + pycharm={"name": "#%%\n", "is_executing": false} class ExtendedStack(list): def sum(self): # операция сложения s1 = self.pop() s2 = self.pop() self.append(s1+s2) def sub(self): # операция вычитания s1 = self.pop() s2 = self.pop() self.append(s1-s2) def mul(self): # операция умножения s1 = self.pop() s2 = self.pop() self.append(s1*s2) def div(self): # 
операция целочисленного деления s1 = self.pop() s2 = self.pop() self.append(s1//s2) def test(): ex_stack = ExtendedStack([1, 2, 3, 4, -3, 3, 5, 10]) ex_stack.div() assert ex_stack.pop() == 2 ex_stack.sub() assert ex_stack.pop() == 6 ex_stack.sum() assert ex_stack.pop() == 7 ex_stack.mul() assert ex_stack.pop() == 2 assert len(ex_stack) == 0 test() # + pycharm={"name": "#%%\n", "is_executing": false} class ExtendedStack(list): def sum(self): # операция сложения exec('self += [self.pop()+self.pop()]') def sub(self): # операция вычитания exec('self += [self.pop()-self.pop()]') def mul(self): # операция умножения exec('self += [self.pop()*self.pop()]') def div(self): exec('self += [self.pop()//self.pop()]') e = ExtendedStack() # + pycharm={"name": "#%%\n"} """ Одно из применений множественного наследование – расширение функциональности класса каким-то заранее определенным способом. Например, если нам понадобится логировать какую-то информацию при обращении к методам класса. Рассмотрим класс Loggable: import time class Loggable: def log(self, msg): print(str(time.ctime()) + ": " + str(msg)) У него есть ровно один метод log, который позволяет выводить в лог (в данном случае в stdout) какое-то сообщение, добавляя при этом текущее время. Реализуйте класс LoggableList, отнаследовав его от классов list и Loggable таким образом, чтобы при добавлении элемента в список посредством метода append в лог отправлялось сообщение, состоящее из только что добавленного элемента. Примечание Ваша программа не должна содержать класс Loggable. При проверке вашей программе будет доступен этот класс, и он будет содержать метод log, описанный выше.""" class LoggableList(list, Loggable): def append(self, a): super(LoggableList, self).append(a) self.log(str(a)) class LoggableList(list, Loggable): def append(self, x): list.append(self, x) self.log(x)
Stepic/python_curs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # MIT License # # Copyright (c) 2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # - # # Redes Neurais Completamente Conectadas com TensorFlow # # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/alcunha/nn-with-tf/blob/master/Fully-Connected-Neural-Networks-with-TF.ipynb"><img src="./images/colab_logo_32px.png" />Executar no Google Colab</a> # </td> # <td> # <a href="https://github.com/alcunha/nn-with-tf/blob/master/Fully-Connected-Neural-Networks-with-TF.ipynb"><img src="./images/GitHub-Mark-32px.png" />Ver código no GitHub</a> # </td> # </table> # *“NÃO ENTRE EM PÂNICO”* # # (O <NAME>) # ! 
pip install tensorflow pandas matplotlib # + import tensorflow as tf import pandas as pd import matplotlib.pyplot as plt from tensorflow import keras # - # ## Tensores # # Tensores são arrays multi-dimensionais com um mesmo tipo (**dtype**). # #### Rank 0 ("Escalar") a = tf.constant(4) b = tf.constant(3.2) c = tf.constant('casa') print(a) print(b) print(c) # #### Rank 1 ("Vetor") list1 = tf.constant([4, 3]) list2 = tf.constant([3.2, 2]) list3 = tf.constant(['casa', 'rua']) print(list1) print(list2) print(list3) # Lista com tipos diferentes vai gerar erro: # + #list4 = tf.constant(['casa', 4]) # - # #### Rank 2 ("Matriz") mat1 = tf.constant([[2, 3], [4, 5]]) mat2 = tf.constant([[1, 1], [1, 1.]]) print(mat1) print(mat2) # Convertendo os valores de um tensor para o NumPy: mat1.numpy() zeros = tf.zeros([2, 3], dtype=tf.int32) print(zeros) ones = tf.ones([3, 3, 2], dtype=tf.float32) print(ones) tf.constant([1, 2, 3, 4, 5, 6], shape=(2,3), dtype=tf.float32) # ### Operações a = tf.cast(a, tf.float32) a + b tf.add(a, b) print(list1) print(list1.shape) print(list1.dtype) print(list1 + tf.ones(list1.shape, dtype=list1.dtype)) # Multiplicação de matrizes # + mat1 = tf.constant([[1, 2, 3], [4, 5, 6]]) mat2 = tf.constant([ [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], ]) # - print(mat1.shape) print(mat2.shape) tf.matmul(mat1, mat2) mat1 * mat1 # #### Bônus: Números complexos tf.complex(a, b) # ## Redes Neurais Artificiais # # Redes neurais artificiais são modelos computacionais inspirados na estrutura do sistema # nervoso de animais que adquire conhecimento por meio da experiência. # # Um neurônio articial (do tipo Perceptron) é composto por pesos W que multiplicam a entrada X e uma função de ativação f para determinar a saída Y. 
# # <img src="./images/Perceptron-bias.png" width="600"> # <center>Figura 1: Esquema de um neurônio articial (Perceptron)</center> # <center>Fonte: [1]</center> # # No exemplo da figura acima: # # y = f(x1\*w1 + x2\*w2) # # y2 = f(x1\*w1 + x2\*w2 + b) # # #### Rede neural completamente conectada # # Os neurônios podem ser dispostos em camadas e várias dessas camadas podem ser encadeadas até a saída da rede. Neurônios em uma camada completamente conectada tem conexões a todas as "saídas" da camada anterior. Matematicamente, os pesos da rede podem ser representados como matrizes. # # <img src="./images/MultiLayer_Neural_Network.png" width="600"> # <center>Figura 2: Esquema de uma rede completamente conectada</center> # <center>Fonte: [2]</center> # # Durante o treinamento, os pesos de uma rede neural são otimizados de acordo com o erro que é retropropagado ao longo das camadas. # ## Exemplo Prático: NN para o dataset Titanic # # 1. Pré-processamento # 2. Construção do pipeline de dados # 3. Construção do modelo # 4. Treinamento # 5. 
Avaliação do modelo e visualização dos resultados # # #### Pré-processamento do dataset TITANIC_TRAIN_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv" TITANIC_TEST_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv" titanic_train_path = keras.utils.get_file('titanic_train.csv', TITANIC_TRAIN_URL) titanic_test_path = keras.utils.get_file('titanic_test.csv', TITANIC_TEST_URL) train_df = pd.read_csv(titanic_train_path) train_df.head() selected_columns = ['class', 'sex', 'age', 'n_siblings_spouses', 'fare', 'survived'] train_df = train_df[selected_columns] train_df.head() print(train_df['class'].unique()) print(train_df['sex'].unique()) # Feature encoding: pd.get_dummies(train_df['class']).head() train_df = pd.concat([train_df, pd.get_dummies(train_df['class']), pd.get_dummies(train_df['sex'])], axis=1) train_df.head() train_df = train_df.drop(columns=['class', 'sex']) train_df.head() train_df_labels = train_df[['survived']].copy() train_df_labels.head() train_df_features = train_df.drop(columns=['survived']) train_df_features.head() # Replicando para o conjunto de teste: test_df = pd.read_csv(titanic_test_path) test_df = test_df[selected_columns] test_df = pd.concat([test_df, pd.get_dummies(test_df['class']), pd.get_dummies(test_df['sex'])], axis=1) test_df = test_df.drop(columns=['class', 'sex']) test_df_labels = test_df[['survived']].copy() test_df_features = test_df.drop(columns=['survived']) test_df_features.head() test_df_labels.head() # #### Construindo o pipeline de dados (tf.data.Dataset) # # O `tf.data` é uma API do TensorFlow que permite construir pipelines de dados. Essa API é altamente flexível, permitindo a construção de pipelines complexos a partir de operações simples. 
train_dataset = tf.data.Dataset.from_tensor_slices((train_df_features.values, train_df_labels.values)) test_dataset = tf.data.Dataset.from_tensor_slices((test_df_features.values, test_df_labels.values)) for features, label in train_dataset.take(2): print(features) print(label) SHUFFLE_BUFFER_SIZE = len(train_df_labels) TEST_LENGHT = len(test_df_labels) BATCH_SIZE = 16 EPOCHS = 300 # + train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE) train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=True) ## Se usar a opção epochs para a quantidade de épocas da função fit do modelo, não se deve aplicar o repeat #train_dataset = train_dataset.repeat(EPOCHS) # - for features, label in train_dataset.take(1): print(features) print(label) # No teste não precisamos aleatorizar as instâncias: #test_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=True).repeat(EPOCHS) test_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=True) # Pergunta: Por que não deveríamos utilizar `drop_remainder` na avaliação de um experimento real? 
# ### Construindo o modelo model = keras.Sequential([ keras.Input(shape=(8,)), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(32), keras.layers.Activation('relu'), keras.layers.Dense(1) ]) model.summary() model.compile( loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=['accuracy']) # ### Treinamento ## Se não usar o parâmetro epochs, o modelo vai treinar até o pipeline de dados parar de produzir batchs ## Nesse caso, para simular o número de épocas, pode usar o repeat durante a preparação do pipepline ## Para este caso, utilizaremos o epochs e, portanto, não precisa utilizar o repeat no pipeline history = model.fit(train_dataset, epochs=EPOCHS, validation_data=test_dataset, validation_steps=TEST_LENGHT // BATCH_SIZE) # ### Resultados plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.ylim([0.6, 0.9]) plt.title('Acurácia do Modelo') plt.ylabel('acurácia') plt.xlabel('Época') plt.legend(['train', 'test'], loc='lower left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.ylim([0.2, 0.7]) plt.title('Loss do Modelo') plt.ylabel('loss') plt.xlabel('Época') plt.legend(['train', 'test'], loc='lower left') plt.show() # ### Predição features, labels = next(iter(test_dataset)) features.numpy()[0] labels.numpy() predictions = model.predict(features.numpy()) predictions for pred, label in zip(tf.sigmoid(predictions).numpy(), labels.numpy()): print("Probabilidade: {:.2%}, Sobreviveu: {}".format(pred[0], ("Sim" if bool(label[0]) else "Não"))) # ### Referências # [1] File:Perceptron-bias.svg. Disponível em: https://commons.wikimedia.org/wiki/File:Perceptron-bias.svg. Acesso em: 03 de julho de 2020. # # [2] File:Multi-Layer Neural Network-Vector.svg. Disponível em: https://commons.wikimedia.org/wiki/File:Multi-Layer_Neural_Network-Vector.svg. Acesso em: 03 de julho de 2020.
Fully-Connected-Neural-Networks-with-TF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Cruise control # # <NAME> and <NAME> # 17 Jun 2019 # # The cruise control system of a car is a common feedback system encountered in everyday life. The system attempts to maintain a constant velocity in the presence of disturbances primarily caused by changes in the slope of a road. The controller compensates for these unknowns by measuring the speed of the car and adjusting the throttle appropriately. # # This notebook explores the dynamics and control of the cruise control system, following the material presented in Feedback Systems by Astrom and Murray. A nonlinear model of the vehicle dynamics is used, with both state space and frequency domain control laws. The process model is presented in Section 1, and a controller based on state feedback is discussed in Section 2, where we also add integral action to the controller. In Section 3 we explore the behavior with PI control including the effect of actuator saturation and how it is avoided by windup protection. Different methods of constructing control systems are shown, all using the InputOutputSystem class (and subclasses). import numpy as np import matplotlib.pyplot as plt from math import pi import control as ct # ## Process Model # # ### Vehicle Dynamics # # To develop a mathematical model we start with a force balance for the car body. Let $v$ be the speed of the car, $m$ the total mass (including passengers), $F$ the force generated by the contact of the wheels with the road, and $F_d$ the disturbance force due to gravity, friction, and aerodynamic drag. def vehicle_update(t, x, u, params={}): """Vehicle dynamics for cruise control system. 
Parameters ---------- x : array System state: car velocity in m/s u : array System input: [throttle, gear, road_slope], where throttle is a float between 0 and 1, gear is an integer between 1 and 5, and road_slope is in rad. Returns ------- float Vehicle acceleration """ from math import copysign, sin sign = lambda x: copysign(1, x) # define the sign() function # Set up the system parameters m = params.get('m', 1600.) # vehicle mass, kg g = params.get('g', 9.8) # gravitational constant, m/s^2 Cr = params.get('Cr', 0.01) # coefficient of rolling friction Cd = params.get('Cd', 0.32) # drag coefficient rho = params.get('rho', 1.3) # density of air, kg/m^3 A = params.get('A', 2.4) # car area, m^2 alpha = params.get( 'alpha', [40, 25, 16, 12, 10]) # gear ratio / wheel radius # Define variables for vehicle state and inputs v = x[0] # vehicle velocity throttle = np.clip(u[0], 0, 1) # vehicle throttle gear = u[1] # vehicle gear theta = u[2] # road slope # Force generated by the engine omega = alpha[int(gear)-1] * v # engine angular speed F = alpha[int(gear)-1] * motor_torque(omega, params) * throttle # Disturbance forces # # The disturbance force Fd has three major components: Fg, the forces due # to gravity; Fr, the forces due to rolling friction; and Fa, the # aerodynamic drag. # Letting the slope of the road be \theta (theta), gravity gives the # force Fg = m g sin \theta. Fg = m * g * sin(theta) # A simple model of rolling friction is Fr = m g Cr sgn(v), where Cr is # the coefficient of rolling friction and sgn(v) is the sign of v (±1) or # zero if v = 0. Fr = m * g * Cr * sign(v) # The aerodynamic drag is proportional to the square of the speed: Fa = # 1/2 \rho Cd A |v| v, where \rho is the density of air, Cd is the # shape-dependent aerodynamic drag coefficient, and A is the frontal area # of the car. 
Fa = 1/2 * rho * Cd * A * abs(v) * v # Final acceleration on the car Fd = Fg + Fr + Fa dv = (F - Fd) / m return dv # ### Engine model # # The force F is generated by the engine, whose torque is proportional to the rate of fuel injection, which is itself proportional to a control signal 0 <= u <= 1 that controls the throttle position. The torque also depends on engine speed omega. def motor_torque(omega, params={}): # Set up the system parameters Tm = params.get('Tm', 190.) # engine torque constant omega_m = params.get('omega_m', 420.) # peak engine angular speed beta = params.get('beta', 0.4) # peak engine rolloff return np.clip(Tm * (1 - beta * (omega/omega_m - 1)**2), 0, None) # Torque curves for a typical car engine. The graph on the left shows the torque generated by the engine as a function of the angular velocity of the engine, while the curve on the right shows torque as a function of car speed for different gears. # + # Figure 4.2a - single torque curve as function of omega omega_range = np.linspace(0, 700, 701) plt.subplot(2, 2, 1) plt.plot(omega_range, [motor_torque(w) for w in omega_range]) plt.xlabel('Angular velocity $\omega$ [rad/s]') plt.ylabel('Torque $T$ [Nm]') plt.grid(True, linestyle='dotted') # Figure 4.2b - torque curves in different gears, as function of velocity plt.subplot(2, 2, 2) v_range = np.linspace(0, 70, 71) alpha = [40, 25, 16, 12, 10] for gear in range(5): omega_range = alpha[gear] * v_range plt.plot(v_range, [motor_torque(w) for w in omega_range], color='blue', linestyle='solid') # Set up the axes and style plt.axis([0, 70, 100, 200]) plt.grid(True, linestyle='dotted') # Add labels plt.text(11.5, 120, '$n$=1') plt.text(24, 120, '$n$=2') plt.text(42.5, 120, '$n$=3') plt.text(58.5, 120, '$n$=4') plt.text(58.5, 185, '$n$=5') plt.xlabel('Velocity $v$ [m/s]') plt.ylabel('Torque $T$ [Nm]') plt.tight_layout() plt.suptitle('Torque curves for typical car engine'); # - # ### Input/ouput model for the vehicle system # # We now create an 
input/output model for the vehicle system that takes the throttle input $u$, the gear and the angle of the road $\theta$ as input. The output of this model is the current vehicle velocity $v$. # + vehicle = ct.NonlinearIOSystem( vehicle_update, None, name='vehicle', inputs = ('u', 'gear', 'theta'), outputs = ('v'), states=('v')) # Define a generator for creating a "standard" cruise control plot def cruise_plot(sys, t, y, t_hill=5, vref=20, antiwindup=False, linetype='b-', subplots=[None, None]): # Figure out the plot bounds and indices v_min = vref-1.2; v_max = vref+0.5; v_ind = sys.find_output('v') u_min = 0; u_max = 2 if antiwindup else 1; u_ind = sys.find_output('u') # Make sure the upper and lower bounds on v are OK while max(y[v_ind]) > v_max: v_max += 1 while min(y[v_ind]) < v_min: v_min -= 1 # Create arrays for return values subplot_axes = subplots.copy() # Velocity profile if subplot_axes[0] is None: subplot_axes[0] = plt.subplot(2, 1, 1) else: plt.sca(subplots[0]) plt.plot(t, y[v_ind], linetype) plt.plot(t, vref*np.ones(t.shape), 'k-') plt.plot([t_hill, t_hill], [v_min, v_max], 'k--') plt.axis([0, t[-1], v_min, v_max]) plt.xlabel('Time $t$ [s]') plt.ylabel('Velocity $v$ [m/s]') # Commanded input profile if subplot_axes[1] is None: subplot_axes[1] = plt.subplot(2, 1, 2) else: plt.sca(subplots[1]) plt.plot(t, y[u_ind], 'r--' if antiwindup else linetype) plt.plot([t_hill, t_hill], [u_min, u_max], 'k--') plt.axis([0, t[-1], u_min, u_max]) plt.xlabel('Time $t$ [s]') plt.ylabel('Throttle $u$') # Applied input profile if antiwindup: plt.plot(t, np.clip(y[u_ind], 0, 1), linetype) plt.legend(['Commanded', 'Applied'], frameon=False) return subplot_axes # - # ## State space controller # # Construct a state space controller with integral action, linearized around an equilibrium point. The controller is constructed around the equilibrium point $(x_d, u_d)$ and includes both feedforward and feedback compensation. 
# # * Controller inputs - $(x, y, r)$: system states, system output, reference # * Controller state - $z$: integrated error $(y - r)$ # * Controller output - $u$: state feedback control # # Note: to make the structure of the controller more clear, we implement this as a "nonlinear" input/output module, even though the actual input/output system is linear. This also allows the use of parameters to set the operating point and gains for the controller. # # + def sf_update(t, z, u, params={}): y, r = u[1], u[2] return y - r def sf_output(t, z, u, params={}): # Get the controller parameters that we need K = params.get('K', 0) ki = params.get('ki', 0) kf = params.get('kf', 0) xd = params.get('xd', 0) yd = params.get('yd', 0) ud = params.get('ud', 0) # Get the system state and reference input x, y, r = u[0], u[1], u[2] return ud - K * (x - xd) - ki * z + kf * (r - yd) # Create the input/output system for the controller control_sf = ct.NonlinearIOSystem( sf_update, sf_output, name='control', inputs=('x', 'y', 'r'), outputs=('u'), states=('z')) # Create the closed loop system for the state space controller cruise_sf = ct.InterconnectedSystem( (vehicle, control_sf), name='cruise', connections=( ('vehicle.u', 'control.u'), ('control.x', 'vehicle.v'), ('control.y', 'vehicle.v')), inplist=('control.r', 'vehicle.gear', 'vehicle.theta'), outlist=('control.u', 'vehicle.v'), outputs=['u', 'v']) # Define the time and input vectors T = np.linspace(0, 25, 501) vref = 20 * np.ones(T.shape) gear = 4 * np.ones(T.shape) theta0 = np.zeros(T.shape) # Find the equilibrium point for the system Xeq, Ueq = ct.find_eqpt( vehicle, [vref[0]], [0, gear[0], theta0[0]], y0=[vref[0]], iu=[1, 2]) print("Xeq = ", Xeq) print("Ueq = ", Ueq) # Compute the linearized system at the eq pt cruise_linearized = ct.linearize(vehicle, Xeq, [Ueq[0], gear[0], 0]) # + # Construct the gain matrices for the system A, B, C = cruise_linearized.A, cruise_linearized.B[0, 0], cruise_linearized.C K = 0.5 kf = -1 / (C * 
np.linalg.inv(A - B * K) * B) # Compute the steady state velocity and throttle setting xd = Xeq[0] ud = Ueq[0] yd = vref[-1] # Response of the system with no integral feedback term plt.figure() theta_hill = [ 0 if t <= 5 else 4./180. * pi * (t-5) if t <= 6 else 4./180. * pi for t in T] t, y_sfb = ct.input_output_response( cruise_sf, T, [vref, gear, theta_hill], [Xeq[0], 0], params={'K':K, 'ki':0.0, 'kf':kf, 'xd':xd, 'ud':ud, 'yd':yd}) subplots = cruise_plot(cruise_sf, t, y_sfb, t_hill=5, linetype='b--') # Response of the system with state feedback + integral action t, y_sfb_int = ct.input_output_response( cruise_sf, T, [vref, gear, theta_hill], [Xeq[0], 0], params={'K':K, 'ki':0.1, 'kf':kf, 'xd':xd, 'ud':ud, 'yd':yd}) cruise_plot(cruise_sf, t, y_sfb_int, t_hill=5, linetype='b-', subplots=subplots) # Add title and legend plt.suptitle('Cruise control with state feedback, integral action') import matplotlib.lines as mlines p_line = mlines.Line2D([], [], color='blue', linestyle='--', label='State feedback') pi_line = mlines.Line2D([], [], color='blue', linestyle='-', label='w/ integral action') plt.legend(handles=[p_line, pi_line], frameon=False, loc='lower right'); # - # ## Pole/zero cancellation # # The transfer function for the linearized dynamics of the cruise control system is given by $P(s) = b/(s+a)$. A simple (but not necessarily good) way to design a PI controller is to choose the parameters of the PI controller as $k_\text{i}=ak_\text{p}$. The controller transfer function is then $C(s)=k_\text{p}+k_\text{i}/s=k_\text{i}(s+a)/s$. It has a zero at $s = -k_\text{i}/k_\text{p}=-a$ that cancels the process pole at $s = -a$. We have $P(s)C(s)=k_\text{i}/s$ giving the transfer function from reference to vehicle velocity as $G_{yr}(s)=b k_\text{p}/(s + b k_\text{p})$, and control design is then simply a matter of choosing the gain $k_\text{p}$. The closed loop system dynamics are of first order with the time constant $1/(b k_\text{p})$. 
# + # Get the transfer function from throttle input + hill to vehicle speed P = ct.ss2tf(cruise_linearized[0, 0]) # Construction a controller that cancels the pole kp = 0.5 a = -P.pole()[0] b = np.real(P(0)) * a ki = a * kp C = ct.tf2ss(ct.TransferFunction([kp, ki], [1, 0])) control_pz = ct.LinearIOSystem(C, name='control', inputs='u', outputs='y') print("system: a = ", a, ", b = ", b) print("pzcancel: kp =", kp, ", ki =", ki, ", 1/(kp b) = ", 1/(kp * b)) print("sfb_int: K = ", K, ", ki = 0.1") # Construct the closed loop system and plot the response # Create the closed loop system for the state space controller cruise_pz = ct.InterconnectedSystem( (vehicle, control_pz), name='cruise_pz', connections = ( ('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')), inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'), inputs = ('vref', 'gear', 'theta'), outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u')) # Find the equilibrium point X0, U0 = ct.find_eqpt( cruise_pz, [vref[0], 0], [vref[0], gear[0], theta0[0]], iu=[1, 2], y0=[vref[0], 0], iy=[0]) # Response of the system with PI controller canceling process pole t, y_pzcancel = ct.input_output_response( cruise_pz, T, [vref, gear, theta_hill], X0) subplots = cruise_plot(cruise_pz, t, y_pzcancel, t_hill=5, linetype='b-') cruise_plot(cruise_sf, t, y_sfb_int, t_hill=5, linetype='b--', subplots=subplots); # - # ## PI Controller # # In this example, the speed of the vehicle is measured and compared to the desired speed. The controller is a PI controller represented as a transfer function. In the textbook, the simulations are done for LTI systems, but here we simulate the full nonlinear system. 
# ### Parameter design through pole placement # # To illustrate the design of a PI controller, we choose the gains $k_\text{p}$ and $k_\text{i}$ so that the characteristic polynomial has the form # # $$ # s^2 + 2 \zeta \omega_0 s + \omega_0^2 # $$ # + # Values of the first order transfer function P(s) = b/(s + a) are set above # Define the input that we want to track T = np.linspace(0, 40, 101) vref = 20 * np.ones(T.shape) gear = 4 * np.ones(T.shape) theta_hill = np.array([ 0 if t <= 5 else 4./180. * pi * (t-5) if t <= 6 else 4./180. * pi for t in T]) # Fix \omega_0 and vary \zeta w0 = 0.5 subplots = [None, None] for zeta in [0.5, 1, 2]: # Create the controller transfer function (as an I/O system) kp = (2*zeta*w0 - a)/b ki = w0**2 / b control_tf = ct.tf2io( ct.TransferFunction([kp, ki], [1, 0.01*ki/kp]), name='control', inputs='u', outputs='y') # Construct the closed loop system by interconnecting process and controller cruise_tf = ct.InterconnectedSystem( (vehicle, control_tf), name='cruise', connections = [('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')], inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'), inputs = ('vref', 'gear', 'theta'), outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u')) # Plot the velocity response X0, U0 = ct.find_eqpt( cruise_tf, [vref[0], 0], [vref[0], gear[0], theta_hill[0]], iu=[1, 2], y0=[vref[0], 0], iy=[0]) t, y = ct.input_output_response(cruise_tf, T, [vref, gear, theta_hill], X0) subplots = cruise_plot(cruise_tf, t, y, t_hill=5, subplots=subplots) # - # Fix \zeta and vary \omega_0 zeta = 1 subplots = [None, None] for w0 in [0.2, 0.5, 1]: # Create the controller transfer function (as an I/O system) kp = (2*zeta*w0 - a)/b ki = w0**2 / b control_tf = ct.tf2io( ct.TransferFunction([kp, ki], [1, 0.01*ki/kp]), name='control', inputs='u', outputs='y') # Construct the closed loop system by interconnecting process and controller cruise_tf = ct.InterconnectedSystem( (vehicle, control_tf), name='cruise', connections = 
[('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')], inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'), inputs = ('vref', 'gear', 'theta'), outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u')) # Plot the velocity response X0, U0 = ct.find_eqpt( cruise_tf, [vref[0], 0], [vref[0], gear[0], theta_hill[0]], iu=[1, 2], y0=[vref[0], 0], iy=[0]) t, y = ct.input_output_response(cruise_tf, T, [vref, gear, theta_hill], X0) subplots = cruise_plot(cruise_tf, t, y, t_hill=5, subplots=subplots) # ### Robustness to change in mass # + # Nominal controller design for remaining analyses # Construct a PI controller with rolloff, as a transfer function Kp = 0.5 # proportional gain Ki = 0.1 # integral gain control_tf = ct.tf2io( ct.TransferFunction([Kp, Ki], [1, 0.01*Ki/Kp]), name='control', inputs='u', outputs='y') cruise_tf = ct.InterconnectedSystem( (vehicle, control_tf), name='cruise', connections = [('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')], inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'), inputs = ('vref', 'gear', 'theta'), outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u')) # + # Define the time and input vectors T = np.linspace(0, 25, 101) vref = 20 * np.ones(T.shape) gear = 4 * np.ones(T.shape) theta0 = np.zeros(T.shape) # Now simulate the effect of a hill at t = 5 seconds plt.figure() plt.suptitle('Response to change in road slope') theta_hill = np.array([ 0 if t <= 5 else 4./180. * pi * (t-5) if t <= 6 else 4./180. 
* pi for t in T]) subplots = [None, None] linecolor = ['red', 'blue', 'green'] handles = [] for i, m in enumerate([1200, 1600, 2000]): # Compute the equilibrium state for the system X0, U0 = ct.find_eqpt( cruise_tf, [vref[0], 0], [vref[0], gear[0], theta0[0]], iu=[1, 2], y0=[vref[0], 0], iy=[0], params={'m':m}) t, y = ct.input_output_response( cruise_tf, T, [vref, gear, theta_hill], X0, params={'m':m}) subplots = cruise_plot(cruise_tf, t, y, t_hill=5, subplots=subplots, linetype=linecolor[i][0] + '-') handles.append(mlines.Line2D([], [], color=linecolor[i], linestyle='-', label="m = %d" % m)) # Add labels to the plots plt.sca(subplots[0]) plt.ylabel('Speed [m/s]') plt.legend(handles=handles, frameon=False, loc='lower right'); plt.sca(subplots[1]) plt.ylabel('Throttle') plt.xlabel('Time [s]'); # - # ## PI controller with antiwindup protection # # We now create a more complicated feedback controller that includes anti-windup protection. # + def pi_update(t, x, u, params={}): # Get the controller parameters that we need ki = params.get('ki', 0.1) kaw = params.get('kaw', 2) # anti-windup gain # Assign variables for inputs and states (for readability) v = u[0] # current velocity vref = u[1] # reference velocity z = x[0] # integrated error # Compute the nominal controller output (needed for anti-windup) u_a = pi_output(t, x, u, params) # Compute anti-windup compensation (scale by ki to account for structure) u_aw = kaw/ki * (np.clip(u_a, 0, 1) - u_a) if ki != 0 else 0 # State is the integrated error, minus anti-windup compensation return (vref - v) + u_aw def pi_output(t, x, u, params={}): # Get the controller parameters that we need kp = params.get('kp', 0.5) ki = params.get('ki', 0.1) # Assign variables for inputs and states (for readability) v = u[0] # current velocity vref = u[1] # reference velocity z = x[0] # integrated error # PI controller return kp * (vref - v) + ki * z control_pi = ct.NonlinearIOSystem( pi_update, pi_output, name='control', inputs = ['v', 
'vref'], outputs = ['u'], states = ['z'], params = {'kp':0.5, 'ki':0.1}) # Create the closed loop system cruise_pi = ct.InterconnectedSystem( (vehicle, control_pi), name='cruise', connections=( ('vehicle.u', 'control.u'), ('control.v', 'vehicle.v')), inplist=('control.vref', 'vehicle.gear', 'vehicle.theta'), outlist=('control.u', 'vehicle.v'), outputs=['u', 'v']) # - # ### Response to a small hill # # Figure 4.3b shows the response of the closed loop system. The figure shows that even if the hill is so steep that the throttle changes from 0.17 to almost full throttle, the largest speed error is less than 1 m/s, and the desired velocity is recovered after 20 s. # + # Compute the equilibrium throttle setting for the desired speed X0, U0, Y0 = ct.find_eqpt( cruise_pi, [vref[0], 0], [vref[0], gear[0], theta0[0]], y0=[0, vref[0]], iu=[1, 2], iy=[1], return_y=True) # Now simulate the effect of a hill at t = 5 seconds plt.figure() plt.suptitle('Car with cruise control encountering sloping road') theta_hill = [ 0 if t <= 5 else 4./180. * pi * (t-5) if t <= 6 else 4./180. * pi for t in T] t, y = ct.input_output_response( cruise_pi, T, [vref, gear, theta_hill], X0) cruise_plot(cruise_pi, t, y); # - # ### Effect of Windup # # The windup effect occurs when a car encounters a hill that is so steep ($6^\circ$) that the throttle saturates when the cruise controller attempts to maintain speed. plt.figure() plt.suptitle('Cruise control with integrator windup') T = np.linspace(0, 50, 101) vref = 20 * np.ones(T.shape) theta_hill = [ 0 if t <= 5 else 6./180. * pi * (t-5) if t <= 6 else 6./180. * pi for t in T] t, y = ct.input_output_response( cruise_pi, T, [vref, gear, theta_hill], X0, params={'kaw':0}) cruise_plot(cruise_pi, t, y, antiwindup=True); # ### PI controller with anti-windup compensation # # Anti-windup can be applied to the system to improve the response. 
Because of the feedback from the actuator model, the output of the integrator is quickly reset to a value such that the controller output is at the saturation limit. plt.figure() plt.suptitle('Cruise control with integrator anti-windup protection') t, y = ct.input_output_response( cruise_pi, T, [vref, gear, theta_hill], X0, params={'kaw':2.}) cruise_plot(cruise_pi, t, y, antiwindup=True);
examples/cruise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # ================================= # Plot graphs' barycenter using FGW # ================================= # # This example illustrates the computation barycenter of labeled graphs using FGW # # Requires networkx >=2 # # .. [18] <NAME>, <NAME>, <NAME>, <NAME> # and <NAME> # "Optimal Transport for structured data with application on graphs" # International Conference on Machine Learning (ICML). 2019. # # # # + # Author: <NAME> <<EMAIL>> # # License: MIT License #%% load libraries import numpy as np import matplotlib.pyplot as plt import networkx as nx import math from scipy.sparse.csgraph import shortest_path import matplotlib.colors as mcol from matplotlib import cm from ot.gromov import fgw_barycenters #%% Graph functions def find_thresh(C, inf=0.5, sup=3, step=10): """ Trick to find the adequate thresholds from where value of the C matrix are considered close enough to say that nodes are connected Tthe threshold is found by a linesearch between values "inf" and "sup" with "step" thresholds tested. The optimal threshold is the one which minimizes the reconstruction error between the shortest_path matrix coming from the thresholded adjency matrix and the original matrix. 
def sp_to_adjency(C, threshinf=0.2, threshsup=1.8):
    """Threshold a structure (shortest-path) matrix into an adjacency matrix.

    Off-diagonal entries whose clipped value lies strictly below
    ``threshsup`` are treated as edges (set to 1); everything else is set
    to 0.

    Parameters
    ----------
    C : ndarray, shape (n_nodes, n_nodes)
        The structure matrix to threshold.
    threshinf : float
        Lower clipping bound for the distances.
    threshsup : float
        Upper clipping bound; entries that saturate here are dropped.

    Returns
    -------
    ndarray, shape (n_nodes, n_nodes)
        Binary matrix with each element in {0, 1}.
    """
    # Zero out the diagonal before thresholding.
    off_diagonal = C - np.diag(np.diagonal(C))
    # Clamp every distance into [threshinf, threshsup] ...
    clipped = np.clip(off_diagonal, threshinf, threshsup)
    # ... drop the entries that saturated at the upper bound ...
    clipped[clipped == threshsup] = 0
    # ... and mark every remaining nonzero entry as an edge.
    clipped[clipped != 0] = 1
    return clipped
np.random.seed(30) X0 = [] for k in range(9): X0.append(build_noisy_circular_graph(np.random.randint(15, 25), with_noise=True, structure_noise=True, p=3)) # - # Plot data # --------- # # # + #%% Plot graphs plt.figure(figsize=(8, 10)) for i in range(len(X0)): plt.subplot(3, 3, i + 1) g = X0[i] pos = nx.kamada_kawai_layout(g) nx.draw(g, pos=pos, node_color=graph_colors(g, vmin=-1, vmax=1), with_labels=False, node_size=100) plt.suptitle('Dataset of noisy graphs. Color indicates the label', fontsize=20) plt.show() # - # Barycenter computation # ---------------------- # # # + #%% We compute the barycenter using FGW. Structure matrices are computed using the shortest_path distance in the graph # Features distances are the euclidean distances Cs = [shortest_path(nx.adjacency_matrix(x)) for x in X0] ps = [np.ones(len(x.nodes())) / len(x.nodes()) for x in X0] Ys = [np.array([v for (k, v) in nx.get_node_attributes(x, 'attr_name').items()]).reshape(-1, 1) for x in X0] lambdas = np.array([np.ones(len(Ys)) / len(Ys)]).ravel() sizebary = 15 # we choose a barycenter with 15 nodes A, C, log = fgw_barycenters(sizebary, Ys, Cs, ps, lambdas, alpha=0.95, log=True) # - # Plot Barycenter # ------------------------- # # # + #%% Create the barycenter bary = nx.from_numpy_matrix(sp_to_adjency(C, threshinf=0, threshsup=find_thresh(C, sup=100, step=100)[0])) for i, v in enumerate(A.ravel()): bary.add_node(i, attr_name=v) #%% pos = nx.kamada_kawai_layout(bary) nx.draw(bary, pos=pos, node_color=graph_colors(bary, vmin=-1, vmax=1), with_labels=False) plt.suptitle('Barycenter', fontsize=20) plt.show()
notebooks/plot_barycenter_fgw.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Variables # ### Variable Assignment # When we generate a result, the answer is displayed, but not kept anywhere. 2 * 3 # If we want to get back to that result, we have to store it. We put it in a box, with a name on the box. This is a **variable**. six = 2 * 3 print(six) # If we look for a variable that hasn't ever been defined, we get an error. print(seven) # That's **not** the same as an empty box, well labeled: nothing = None print(nothing) type(None) # (None is the special python value for a no-value variable.) # *Supplementary Materials*: There's more on variables at [Software Carpentry's Python lesson](http://swcarpentry.github.io/python-novice-inflammation/01-numpy/index.html). # Anywhere we could put a raw number, we can put a variable label, and that works fine: print(5 * six) scary = six * six * six print(scary) # ### Reassignment and multiple labels # But here's the real scary thing: it seems like we can put something else in that box: scary = 25 print(scary) # Note that **the data that was there before has been lost**. # No labels refer to it any more - so it has been "Garbage Collected"! We might imagine something pulled out of the box, and thrown on the floor, to make way for the next occupant. # In fact, though, it is the **label** that has moved. We can see this because we have more than one label refering to the same box: name = "Eric" nom = name print(nom) print(name) # And we can move just one of those labels: nom = "Idle" print(name) print(nom) # So we can now develop a better understanding of our labels and boxes: each box is a piece of space (an *address*) in computer memory. # Each label (variable) is a reference to such a place. 
# When the number of labels on a box ("variables referencing an address") gets down to zero, then the data in the box cannot be found any more. # After a while, the language's "Garbage collector" will wander by, notice a box with no labels, and throw the data away, **making that box # available for more data**. # Old fashioned languages like C and Fortran don't have Garbage collectors. So a memory address with no references to it # still takes up memory, and the computer can more easily run out. # So when I write: name = "Michael" # The following things happen: # 1. A new text **object** is created, and an address in memory is found for it. # 1. The variable "name" is moved to refer to that address. # 1. The old address, containing "James", now has no labels. # 1. The garbage collector frees the memory at the old address. # **Supplementary materials**: There's an online python tutor which is great for visualising memory and references. Try the [scenario we just looked at](http://www.pythontutor.com/visualize.html#code=name%20%3D%20%22Eric%22%0Anom%20%3D%20name%0Aprint%28nom%29%0Aprint%28name%29%0Anom%20%3D%20%22Idle%22%0Aprint%28name%29%0Aprint%28nom%29%0Aname%20%3D%20%22Michael%22%0Aprint%28name%29%0Aprint%28nom%29%0A&cumulative=false&curInstr=0&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false). # # Labels are contained in groups called "frames": our frame contains two labels, 'nom' and 'name'. # ### Objects and types # An object, like `name`, has a type. In the online python tutor example, we see that the objects have type "str". # `str` means a text object: Programmers call these 'strings'. type(name) # Depending on its type, an object can have different *properties*: data fields Inside the object. 
# Consider a Python complex number for example: z = 3 + 1j # We can see what properties and methods an object has available using the `dir` function: dir(z) # You can see that there are several methods whose name starts and ends with `__` (e.g. `__init__`): these are special methods that Python uses internally, and we will discuss some of them later on in this course. The others (in this case, `conjugate`, `img` and `real`) are the methods and fields through which we can interact with this object. type(z) z.real z.imag # A property of an object is accessed with a dot. # The jargon is that the "dot operator" is used to obtain a property of an object. # When we try to access a property that doesn't exist, we get an error: z.wrong # ### Reading error messages. # It's important, when learning to program, to develop an ability to read an error message and find, from in amongst # all the confusing noise, the bit of the error message which tells you what to change! # We don't yet know what is meant by `AttributeError`, or "Traceback". z2 = 5 - 6j print("Gets to here") print(z.wrong) print("Didn't get to here") # But in the above, we can see that the error happens on the **third** line of our code cell. # We can also see that the error message: # > 'complex' object has no attribute 'wrong' # # ...tells us something important. Even if we don't understand the rest, this is useful for debugging! # ### Variables and the notebook kernel # When I type code in the notebook, the objects live in memory between cells. number = 0 print(number) # If I change a variable: number = number + 1 print(number) # It keeps its new value for the next cell. # But cells are **not** always evaluated in order. # If I now go back to Input 31, reading `number = number + 1`, I can run it again, with Shift-Enter. The value of `number` will change from 2 to 3, then from 3 to 4 - but the output of the next cell (containing the `print` statement) will not change unless I rerun that too. Try it! 
# So it's important to remember that if you move your cursor around in the notebook, it doesn't always run top to bottom. # **Supplementary material**: (1) [Jupyter notebook documentation](https://jupyter-notebook.readthedocs.io/en/latest/).
ch00python/015variables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/100rab-S/Tensorflow-Developer-Certificate/blob/main/S%2BP_Week_3_Lesson_2_RNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="zX4Kg8DUTKWO" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="view-in-github" # <a href="https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20In%20Practice/Course%204%20-%20S%2BP/S%2BP%20Week%203%20Lesson%202%20-%20RNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + cellView="both" id="D1J15Vh_1Jih" try: # # %tensorflow_version only exists in Colab. 
def plot_series(time, series, format="-", start=0, end=None):
    """Plot the slice [start:end] of a time series on the current axes."""
    plt.plot(time[start:end], series[start:end], format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.grid(True)


def trend(time, slope=0):
    """Linear trend component: slope * time."""
    return time * slope


def seasonal_pattern(season_time):
    """Arbitrary seasonal shape: a cosine early in the period, then an
    exponentially decaying tail (change it if you wish)."""
    rising = np.cos(season_time * 2 * np.pi)
    decaying = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, rising, decaying)


def seasonality(time, period, amplitude=1, phase=0):
    """Repeat seasonal_pattern once per `period`, scaled by `amplitude`."""
    position_in_period = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position_in_period)


def noise(time, noise_level=1, seed=None):
    """Reproducible Gaussian noise with the same length as `time`."""
    generator = np.random.RandomState(seed)
    return generator.randn(len(time)) * noise_level
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Turn a 1-D series into a shuffled, batched tf.data pipeline.

    Each training example is a pair ``(x, y)`` where ``x`` is a window of
    ``window_size`` consecutive values from the series and ``y`` is the
    value immediately following that window.

    Parameters
    ----------
    series : array_like
        The time series to window.
    window_size : int
        Number of consecutive values used as the model input.
    batch_size : int
        Number of windows per batch.
    shuffle_buffer : int
        Buffer size for ``Dataset.shuffle`` (larger = better shuffling).

    Returns
    -------
    tf.data.Dataset
        Batched dataset of ``(window, label)`` pairs, prefetching one
        batch ahead of training.
    """
    ds = tf.data.Dataset.from_tensor_slices(series)
    # Sliding windows of window_size + 1 values: the first window_size
    # entries become x, the final entry becomes the label y.
    ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size + 1))
    ds = ds.shuffle(shuffle_buffer)
    ds = ds.map(lambda w: (w[:-1], w[-1]))
    ds = ds.batch(batch_size)
    return ds.prefetch(1)
Now the dimension is ([x], y, 1), # where 1 denotes the univariate or multivariate feature of the series. # input_shape = [None] is because, so that the layer can take all kind of input. # ''' tf.keras.layers.SimpleRNN(40, return_sequences=True), # ''' # it is a simple rnn layer which has 40 units and therefore it will provide 40 outputs to the next layer because we have set # return_sequences = True. if this was set to false (which is default) then the rnn layer would output just one output. But since the # next layer is also rnn layer, therefore we can set return_sequences = True and now this layer provides 40 outputs which gets feeded # into each unit of next rnn layer. # ''' tf.keras.layers.SimpleRNN(40), # ''' # this is another simple rnn layer, where each output from the previous layer is fed into each unit in this layer. that is the reason, # the number of units in this layer is equal to units in previous layer. The return_sequences is set to False in this layer because we # just need 1 output because next layer is a dense layer. # ''' tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 100.0) # ''' # we again use lambda layer to do some arbitrary processing. Since in our dataset most of the values of x axis are in multiples of # 100 therefore we multiply each value with 100 so that it can help training. # ''' ]) lr_schedule = tf.keras.callbacks.LearningRateScheduler( lambda epoch: 1e-8 * 10**(epoch / 20)) # ''' # it is used to find a perfect learning rate for our optimizer by using multiple learning rate after each epoch. # ''' optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) # ''' # loss = tf.keras.losses.Huber(), we use this loss function because it gives a nice output when the input might consist of outliers. 
# ''' history = model.fit(train_set, epochs=100, callbacks=[lr_schedule], verbose = 0) # + id="5He3pp-Hj758" colab={"base_uri": "https://localhost:8080/", "height": 290} outputId="5c27242b-a55e-4672-e384-338f00948af4" plt.semilogx(history.history["lr"], history.history["loss"]) plt.axis([1e-8, 1e-4, 0, 30]) # + id="6y1KMowRkHkC" tf.keras.backend.clear_session() tf.random.set_seed(51) np.random.seed(51) dataset = windowed_dataset(x_train, window_size, batch_size=128, shuffle_buffer=shuffle_buffer_size) model = tf.keras.models.Sequential([ tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1), input_shape=[None]), tf.keras.layers.SimpleRNN(40, return_sequences=True), tf.keras.layers.SimpleRNN(40), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 100.0) ]) optimizer = tf.keras.optimizers.SGD(lr=5e-5, momentum=0.9) # from the above graph we found the best learning paramter for the optimizer. Hence we will be using it to fine tune it. model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(dataset,epochs=400) # + id="ejBynEKekaKw" colab={"base_uri": "https://localhost:8080/", "height": 388} outputId="b33dbccb-0437-4dcc-a054-1200f8c6e4f5" # now we will collect prediction on the validation datset. It can be done by passing the window from the validaiton set forecast=[] for time in range(len(series) - window_size): forecast.append(model.predict(series[time:time + window_size][np.newaxis])) #instead of writing np.newaxis we can also write None. 
forecast = forecast[split_time-window_size:] results = np.array(forecast)[:, 0, 0] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, results) # + id="hR2BO0Dai_ZT" colab={"base_uri": "https://localhost:8080/"} outputId="a3510874-3f05-4b10-8c23-1d5c5b9d1ddb" tf.keras.metrics.mean_absolute_error(x_valid, results).numpy() # + id="vXaFHOaJqE6M" colab={"base_uri": "https://localhost:8080/", "height": 607} outputId="92976780-7604-4ee0-f46d-44b2e7adfad3" import matplotlib.image as mpimg import matplotlib.pyplot as plt #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- mae=history.history['mae'] loss=history.history['loss'] epochs=range(len(loss)) # Get number of epochs #------------------------------------------------ # Plot MAE and Loss #------------------------------------------------ plt.plot(epochs, mae, 'r') plt.plot(epochs, loss, 'b') plt.title('MAE and Loss') plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend(["MAE", "Loss"]) plt.figure() epochs_zoom = epochs[200:] mae_zoom = mae[200:] loss_zoom = loss[200:] #------------------------------------------------ # Plot Zoomed MAE and Loss #------------------------------------------------ plt.plot(epochs_zoom, mae_zoom, 'r') plt.plot(epochs_zoom, loss_zoom, 'b') plt.title('MAE and Loss') plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend(["MAE", "Loss"]) plt.figure()
S+P_Week_3_Lesson_2_RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + # Copyright 2021 Google LLC # Use of this source code is governed by an MIT-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/MIT. # Notebook authors: <NAME> (<EMAIL>) # and <NAME> (<EMAIL>) # This notebook reproduces figures for chapter 14 from the book # "Probabilistic Machine Learning: An Introduction" # by <NAME> (MIT Press, 2021). # Book pdf is available from http://probml.ai # - # <a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a> # <a href="https://colab.research.google.com/github/probml/pml-book/blob/main/pml1/figure_notebooks/chapter14_neural_networks_for_images_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # ## Figure 14.1:<a name='14.1'></a> <a name='mlpTranslationInvariance'></a> # # Detecting patterns in 2d images using unstructured MLPs does not work well, because the method is not translation invariant. We can design a weight vector to act as a \bf matched filter for detecting the desired cross-shape. This will give a strong response of 5 if the object is on the left, but a weak response of 1 if the object is shifted over to the right. 
Adapted from Figure 7.16 of <a href='#Stevens2020'>[SAV20]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.1.png" width="256"/> # ## Figure 14.2:<a name='14.2'></a> <a name='imageTemplates'></a> # # We can classify a digit by looking for certain discriminative features (image templates) occuring in the correct (relative) locations. From Figure 5.1 of <a href='#kerasBook'>[Cho17]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.2.png" width="256"/> # ## Figure 14.3:<a name='14.3'></a> <a name='tab:convTable'></a> # # Discrete convolution of $ \bm x =[1,2,3,4]$ with $ \bm w =[5,6,7]$ to yield $ \bm z =[5,16,34,52,45,28]$. 
We see that this operation consists of ``flipping'' $ \bm w $ and then ``dragging'' it over $ \bm x $, multiplying elementwise, and adding up the results #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # ## Figure 14.4:<a name='14.4'></a> <a name='conv1d'></a> # # 1d cross correlation. From Figure 15.3.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.4.png" width="256"/> # ## Figure 14.5:<a name='14.5'></a> <a name='diveConv2d'></a> # # Illustration of 2d cross correlation. 
# To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/conv2d_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.5.png" width="256"/> # ## Figure 14.6:<a name='14.6'></a> <a name='cholletConv2d'></a> # # Convolving a 2d image (left) with a $3 \times 3$ filter (middle) produces a 2d response map (right). The bright spots of the response map correspond to locations in the image which contain diagonal lines sloping down and to the right. From Figure 5.3 of <a href='#kerasBook'>[Cho17]</a> . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.6.png" width="256"/> # ## Figure 14.7:<a name='14.7'></a> <a name='zeroPadding'></a> # # Same-convolution (using zero-padding) ensures the output is the same size as the input. Adapted from Figure 8.3 of <a href='#Stevens2020'>[SAV20]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.7.png" width="256"/> # ## Figure 14.8:<a name='14.8'></a> <a name='CNN2d'></a> # # Illustration of padding and strides in 2d convolution. (a) We apply ``same convolution'' to a $5 \times 7$ input (with zero padding) using a $3 \times 3$ filter to create a $5 \times 7$ output. (b) Now we use a stride of 2, so the output has size $3 \times 4$. 
Adapted from Figures 14.3--14.4 of <a href='#Geron2019'>[Aur19]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.8_A.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.8_B.png" width="256"/> # ## Figure 14.9:<a name='14.9'></a> <a name='convMultiChannel'></a> # # Illustration of 2d convolution applied to an input with 2 channels. # To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/conv2d_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.9.png" width="256"/> # ## Figure 14.10:<a name='14.10'></a> <a name='CNNhypercolumn'></a> # # 
Illustration of a CNN with 2 convolutional layers. The input has 3 color channels. The feature maps at internal layers have multiple channels. The cylinders correspond to hypercolumns, which are feature vectors at a certain location. Adapted from Figure 14.6 of <a href='#Geron2019'>[Aur19]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.10.png" width="256"/> # ## Figure 14.11:<a name='14.11'></a> <a name='conv1x1'></a> # # Mapping 3 channels to 2 using convolution with a filter of size $1 \times 1 \times 3 \times 2$. Adapted from Figure 6.4.2 of <a href='#dive'>[Zha+20]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.11.png" width="256"/> # ## Figure 14.12:<a name='14.12'></a> <a name='maxPool'></a> # # Illustration of maxpooling with a 2x2 filter and a stride of 1. 
Adapted from Figure 6.5.1 of <a href='#dive'>[Zha+20]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.12.png" width="256"/> # ## Figure 14.13:<a name='14.13'></a> <a name='cnnIntro'></a> # # A simple CNN for classifying images. Adapted from https://blog.floydhub.com/building-your-first-convnet/ #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.13.png" width="256"/> # ## Figure 14.14:<a name='14.14'></a> <a name='groupNorm'></a> # # Illustration of different activation normalization methods for a CNN. Each subplot shows a feature map tensor, with N as the batch axis, C as the channel axis, and (H, W) as the spatial axes. The pixels in blue are normalized by the same mean and variance, computed by aggregating the values of these pixels. 
Left to right: batch norm, layer norm, instance norm, and group norm (with 2 groups of 3 channels). From Figure 2 of <a href='#Wu2018GN'>[YK18]</a> . Used with kind permission of Kaiming He #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.14.png" width="256"/> # ## Figure 14.15:<a name='14.15'></a> <a name='lenet'></a> # # LeNet5, a convolutional neural net for classifying handwritten digits. From Figure 6.6.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.15.png" width="256"/> # ## Figure 14.16:<a name='14.16'></a> <a name='alexnet'></a> # # (a) LeNet5. We assume the input has size $1 \times 28 \times 28$, as is the case for MNIST. From Figure 6.6.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME>. (b) AlexNet. 
We assume the input has size $3 \times 224 \times 224$, as is the case for (cropped and rescaled) images from ImageNet. From Figure 7.1.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.16_A.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.16_B.png" width="256"/> # ## Figure 14.17:<a name='14.17'></a> <a name='cnnMnist'></a> # # Results of applying a CNN to some MNIST images (cherry picked to include some errors). Red is incorrect, blue is correct. (a) After 1 epoch of training. (b) After 2 epochs. 
# To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks/cnn_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.17_A.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.17_B.png" width="256"/> # ## Figure 14.18:<a name='14.18'></a> <a name='inception'></a> # # Inception module. The $1 \times 1$ convolutional layers reduce the number of channels, keeping the spatial dimensions the same. The parallel pathways through convolutions of different sizes allows the model to learn which filter size to use for each layer. The final depth concatenation block combines the outputs of all the different pathways (which all have the same spatial size). From Figure 7.4.1 of <a href='#dive'>[Zha+20]</a> . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.18.png" width="256"/> # ## Figure 14.19:<a name='14.19'></a> <a name='googlenet'></a> # # GoogLeNet (slightly simplified from the original). Input is on the left. From Figure 7.4.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.19.png" width="256"/> # ## Figure 14.20:<a name='14.20'></a> <a name='resnetBlock'></a> # # A residual block for a CNN. Left: standard version. Right: version with 1x1 convolution, to allow a change in the number of channels between the input to the block and the output. From Figure 7.6.3 of <a href='#dive'>[Zha+20]</a> . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.20.png" width="256"/> # ## Figure 14.21:<a name='14.21'></a> <a name='resnet18'></a> # # The ResNet-18 architecture. Each dotted module is a residual block shown in \cref fig:resnetBlock . From Figure 7.6.4 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.21.png" width="256"/> # ## Figure 14.22:<a name='14.22'></a> <a name='densenet'></a> # # (a) Left: a residual block adds the output to the input. Right: a densenet block concatenates the output with the input. (b) Illustration of a densenet. From Figures 7.7.1--7.7.2 of <a href='#dive'>[Zha+20]</a> . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.22_A.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.22_B.png" width="256"/> # ## Figure 14.23:<a name='14.23'></a> <a name='dilatedConv'></a> # # Dilated convolution with a 3x3 filter using rate 1, 2 and 3. From Figure 1 of <a href='#Cui2019cnn'>[Xim+19]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.23.png" width="256"/> # ## Figure 14.24:<a name='14.24'></a> <a name='transposedConvDive'></a> # # Transposed convolution with 2x2 kernel. From Figure 13.10.1 of <a href='#dive'>[Zha+20]</a> . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.24.png" width="256"/> # ## Figure 14.25:<a name='14.25'></a> <a name='deconv'></a> # # Convolution, deconvolution and transposed convolution. Here $s$ is the stride and $p$ is the padding. From https://tinyurl.com/ynxcxsut . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.25.png" width="256"/> # ## Figure 14.26:<a name='14.26'></a> <a name='depthwiseSeparable'></a> # # Depthwise separable convolutions: each of the $C$ input channels undergoes a 2d convolution to produce $C$ output channels, which get combined pointwise (via 1x1 convolution) to produce $D$ output channels. From https://bit.ly/2L9fm2o . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.26.png" width="256"/> # ## Figure 14.27:<a name='14.27'></a> <a name='anchorBoxes'></a> # # (a) Illustration of face detection, a special case of object detection. (Photo of author and his wife Margaret, taken at Filoli in California in Feburary, 2018. Image processed by <NAME> using SSD face model.) (b) Illustration of anchor boxes. Adapted from \citep[Sec 12.5] dive #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.27_A.jpg" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.27_B.png" width="256"/> # ## Figure 14.28:<a name='14.28'></a> <a name='maskRCNN'></a> # # Illustration of object detection and instance segmentation using Mask R-CNN. From https://github.com/matterport/Mask_RCNN . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.28.png" width="256"/> # ## Figure 14.29:<a name='14.29'></a> <a name='semanticSeg'></a> # # Illustration of an \bf encoder-decoder (aka \bf U-net ) CNN for semantic segmentation. The encoder uses convolution (which downsamples), and the decoder uses transposed convolution (which upsamples). From Figure 1 of <a href='#segnet'>[VAR17]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.29.png" width="256"/> # ## Figure 14.30:<a name='14.30'></a> <a name='Unet'></a> # # Illustration of the U-Net model for semantic segmentation. Each blue box corresponds to a multi-channel feature map. The number of channels is shown on the top of the box, and the height/width is shown in the bottom left. 
White boxes denote copied feature maps. The different colored arrows correspond to different operations. From Figure 1 from <a href='#Ronneberger2015'>[OPT15]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.30.png" width="256"/> # ## Figure 14.31:<a name='14.31'></a> <a name='densePrediction'></a> # # Illustration of a multi-task dense prediction problem. From Based on Figure 1 of <a href='#Eigen2015'>[DR15]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.31.png" width="256"/> # ## Figure 14.32:<a name='14.32'></a> <a name='openPose'></a> # # Illustration of keypoint detection for body, hands and face using the OpenPose system. From Figure 8 of <a href='#openPose'>[Zhe+18]</a> . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.32.png" width="256"/> # ## Figure 14.33:<a name='14.33'></a> <a name='goose'></a> # # Images that maximize the probability of ImageNet classes ``goose'' and ``ostrich'' under a simple Gaussian prior. From http://yosinski.com/deepvis . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.33.jpg" width="256"/> # ## Figure 14.34:<a name='14.34'></a> <a name='TVnorm'></a> # # Illustration of total variation norm. (a) Input image: a green sea turtle (Used with kind permission of Wikimedia author <NAME>). (b) Horizontal deltas. (c) Vertical deltas. 
Adapted from https://www.tensorflow.org/tutorials/generative/style_transfer #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.34_A.jpg" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.34_B.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.34_C.png" width="256"/> # ## Figure 14.35:<a name='14.35'></a> <a name='deepdreamClassvis'></a> # # Images that maximize the probability of certain ImageNet classes under a TV prior. From https://research.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.35.png" width="256"/> # ## Figure 14.36:<a name='14.36'></a> <a name='cnn-vis'></a> # # We visualize ``optimal stimuli'' for neurons in layers Conv 1, 3, 5 and fc8 in the AlexNet architecture, trained on the ImageNet dataset. For Conv5, we also show retrieved real images (under the column ``data driven'') that produce similar activations. Based on the method in <a href='#Mahendran16ijcv'>[MV16]</a> . Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.36.jpg" width="256"/> # ## Figure 14.37:<a name='14.37'></a> <a name='deepdream'></a> # # Illustration of DeepDream. The CNN is an Inception classifier trained on ImageNet. (a) Starting image of an Aurelia aurita (also called moon jelly). 
(b) Image generated after 10 iterations. (c) Image generated after 50 iterations. From https://en.wikipedia.org/wiki/DeepDream . Used with kind permission of Wikipedia author <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.37_A.jpg" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.37_B.jpg" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.37_C.jpg" width="256"/> # ## Figure 14.38:<a name='14.38'></a> <a name='styleTransfer'></a> # # Example output from a neural style transfer system. (a) Content image: a green sea turtle (Used with kind permission of Wikimedia author <NAME>). (b) Style image: a painting by <NAME> called ``Composition 7''. (c) Output of neural style generation. 
Adapted from https://www.tensorflow.org/tutorials/generative/style_transfer #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.38_A.jpg" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.38_B.jpg" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.38_C.png" width="256"/> # ## Figure 14.39:<a name='14.39'></a> <a name='styleTransferGSOC'></a> # # Neural style transfer applied to photos of the ``production team'', who helped create code and demos for this book and its sequel. From top to bottom, left to right: <NAME> (the author), <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Coco (the team dog). Each content photo used a different artistic style. 
Adapted from https://www.tensorflow.org/tutorials/generative/style_transfer #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_A.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_B.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_C.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_D.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_E.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_F.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_G.png" width="256"/> # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.39_H.png" width="256"/> # ## Figure 14.40:<a name='14.40'></a> <a name='neuralStyleD2L'></a> # # Illustration of how neural style transfer works. 
Adapted from Figure 12.12.2 of <a href='#dive'>[Zha+20]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.40.png" width="256"/> # ## Figure 14.41:<a name='14.41'></a> <a name='featureMaps3Images'></a> # # Schematic representation of 3 kinds of feature maps for 3 different input images. Adapted from Figure 5.16 of <a href='#Foster2019'>[Dav19]</a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # %reload_ext autoreload # %autoreload 2 # !pip install superimport deimport -qqq import superimport def try_deimport(): try: from deimport.deimport import deimport deimport(superimport) except Exception as e: print(e) print('finished!') # <img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_14.41.png" width="256"/> # ## References: # <a name='Geron2019'>[Aur19]</a> <NAME> "Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques for BuildingIntelligent Systems (2nd edition)". (2019). # # <a name='kerasBook'>[Cho17]</a> F. Chollet "Deep learning with Python". (2017). # # <a name='Eigen2015'>[DR15]</a> <NAME> and <NAME>. 
"Predicting Depth, Surface Normals and Semantic Labels with aCommon Multi-Scale Convolutional Architecture". (2015). # # <a name='Foster2019'>[Dav19]</a> <NAME> "Generative Deep Learning: Teaching Machines to Paint, WriteCompose, and Play". (2019). # # <a name='Mahendran16ijcv'>[MV16]</a> <NAME> and <NAME>. "Visualizing Deep Convolutional Neural Networks Using Natural Pre-images". In: ijcv (2016). # # <a name='Ronneberger2015'>[OPT15]</a> <NAME>, <NAME> and <NAME>. "U-Net: Convolutional Networks for Biomedical ImageSegmentation". (2015). # # <a name='Stevens2020'>[SAV20]</a> <NAME>, <NAME> and <NAME>. "Deep Learning with PyTorch". (2020). # # <a name='segnet'>[VAR17]</a> <NAME>, <NAME> and <NAME>. "SegNet: A Deep Convolutional Encoder-Decoder Architecture forImage Segmentation". In: pami (2017). # # <a name='Cui2019cnn'>[Xim+19]</a> <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. "Multiscale Spatial-Spectral Convolutional Network withImage-Based Framework for Hyperspectral Imagery Classification". In: Remote Sensing (2019). # # <a name='Wu2018GN'>[YK18]</a> <NAME> and <NAME>. "Group Normalization". (2018). # # <a name='dive'>[Zha+20]</a> <NAME>, <NAME>, <NAME> and <NAME>. "Dive into deep learning". (2020). # # <a name='openPose'>[Zhe+18]</a> <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. "OpenPose: Realtime Multi-Person 2D Pose Estimationusing Part Affinity Fields". abs/1812.08008 (2018). arXiv: 1812.08008 # #
pml1/figure_notebooks/chapter14_neural_networks_for_images_figures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import datetime as dt import rasterio import numpy as np import pandas as pd import fiona from shapely.geometry import mapping, shape from shapely.geometry import box as shBox import rasterio # from rasterio import windows as win # ### read csv file of the climate data downloaded for each year # to generate the LAI, we downloaded the layer from Copernicus land service portal: https://land.copernicus.eu/global/products/lai if the download is customized the value of the layer will be DN. the number is multiplied by 1/30 to convert to LAI value # the null values are filled with the 0.5 LAI value. The cell size of the data is 300m. We converted all layers (summer and winter) to 30m with the same projectsion to be able to use in the model. # set the year year=2011 # set the csv of the climate file d=pd.read_csv(r'G:\NationalLayer\ClimateData\Raw_CDC_data\{}csv\{}.csv'.format(year,year),header=None) # set the columns of the csv file here d.columns = ["Station", "Date", "PRCP_Type", "prcp","Measurement_Flag","Quality_Flag","Source_Flag","col99"] # set the station shapefile address stationPointShape=r'G:\NationalLayer\ClimateData\Stations\Stations_All_USAl.shp' field_id='StationFID' # set the reasters for the start of seson of the year startSeasonAddress=r'G:\NationalLayer\ClimateData\Phenology\2011\SOST2011_30m_projected.img' endSeasonAddress =r'G:\NationalLayer\ClimateData\Phenology\2011\EOST2011_30m_projected.img' d.sample(10) # set a radious search in meter for creating a box around each station to get the average of season end and start search=1000 def findWindow (shapeBound,mainRasterBnd,mainRasterCellSize): startRow = int((mainRasterBnd[3] - shapeBound[3])/mainRasterCellSize) endRow = int((shapeBound[3] - 
shapeBound[1])/mainRasterCellSize)+1+startRow startCol = int((shapeBound[0] - mainRasterBnd[0])/mainRasterCellSize) endCol = int((shapeBound[2] - shapeBound[0])/mainRasterCellSize)+1+startCol return (startRow,endRow,startCol,endCol) with rasterio.open(startSeasonAddress) as rstNational: kwds = rstNational.meta.copy() mainRasterBnd=rstNational.bounds cellSize= kwds['transform'][0] # thie is a dictionary that the key is station_id and the value is [startS,endS] dictStartEndSeacon={} for stPoint in fiona.open(stationPointShape): st_id=stPoint['properties'][field_id] p=(shape(stPoint['geometry'])) st_coors=(p.x,p.y) # (left,butt,right,top) searchBound=(p.x-search,p.y-search,p.x+search,p.y+search) # let's the bound in the national raster window_use=findWindow(searchBound,mainRasterBnd,cellSize) getWin=((window_use[0],window_use[1]),(window_use[2],window_use[3])) # read the windown in each raster and get the values with rasterio.open(startSeasonAddress) as src: nodata = (src.meta.copy())['nodata'] startSeason_ar=(src.read(1, window=getWin)).astype('float') startSeason_ar[(startSeason_ar==nodata)|(startSeason_ar>365)|(startSeason_ar<0)]=np.nan with rasterio.open(endSeasonAddress) as src: nodata = (src.meta.copy())['nodata'] endSeason_ar=(src.read(1, window=getWin)).astype('float') endSeason_ar[(endSeason_ar==nodata)|(endSeason_ar>365)|(endSeason_ar<0)]=np.nan if (np.nanmean(startSeason_ar)>=0)&(np.nanmean(endSeason_ar)>=0): stSos=np.nanmean(startSeason_ar) stEos=np.nanmean(endSeason_ar) sosDt=(dt.datetime(year, 1, 1) + dt.timedelta(stSos - 1)) eosDt=(dt.datetime(year, 1, 1) + dt.timedelta(stEos - 1)) dictStartEndSeacon[str(st_id)]=[sosDt.strftime('%Y-%m-%d'),eosDt.strftime('%Y-%m-%d')] lstDictValues=[] for key, val in dictStartEndSeacon.items(): lstDictValues.append([key,val[0],val[1]]) dft=pd.DataFrame(lstDictValues,columns=['st_unique_ID','SOS','EOS']) dft['SOS']=pd.to_datetime(dft['SOS'], format='%Y-%m-%d') dft['EOS']=pd.to_datetime(dft['EOS'], format='%Y-%m-%d') 
end_year = pd.Timestamp('{}-12-30'.format(year)) mask=((dft['SOS']<=end_year) & (dft['EOS'] <= end_year)&(dft['SOS']<=dft['EOS'])) dft=dft[mask] dft['st_unique_ID']=dft['st_unique_ID'].astype('int') dft.sample(3) dft.to_csv(r'G:\NationalLayer\ClimateData\DailySummary\Station_Season_{}.csv'.format(year)) dfCSV_US= pd.read_csv(r'G:\NationalLayer\ClimateData\Stations\Stations_All_USA.csv') dfCSV_US['StationFID']=dfCSV_US['StationFID'].astype('int') dft.head(2) dfCSV_US.rename(columns={'StationFID':'st_unique_ID_USCSV'}, inplace=True) dfCSV_US.head(3) dfSeasons=dfCSV_US.set_index('st_unique_ID_USCSV').join(dft.set_index('st_unique_ID')) dfSeasons['st_unique_ID']=(dfSeasons.index).astype('int') dfSeasons.sample(10) intcptRate_on = 0.3 intcptRate_off = 0.1 storageCap_off = 1 storageCap_on = 3 # ### read the stations' csv file. This could be geographically selected. They also come with and StationFID col # #### list of states lststates = ["AL", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"] d.sample(5) # ### join the climate data dframe to the stations' dframe and generate a df that carries all cols df=d.set_index('Station').join(dfSeasons.set_index('Station_ID')) df.rename(columns={'Name': 'State', 'col2': 'Place'}, inplace=True) df['NOAA_st_ID']=df.index df.sample(2) df.columns # create a list of all places of the df lstAll=df['State'] # select only rows that their 'state' col is in the list of our states dfUS=df[lstAll.isin(lststates)] dfUS['st_unique_ID']=(dfUS['st_unique_ID']).astype('int') dfUS.head(4) dfUS=dfUS[(dfUS['PRCP_Type']=='PRCP')] # select only rows that have some rainfall (type and amount>0) dfUS=dfUS[(dfUS['prcp']>0)] len(dfUS.prcp) # convert the date col to a date object lstDates=dfUS['Date'] 
dfUS['Date']=pd.to_datetime(dfUS['Date'], format='%Y%m%d') dfUS.sample(10) dfUS[(dfUS.index=='USC00088051') & (dfUS.Date=='2016-10-07')] # convert the prcp col to float. Also convert that to m. The native unit of data is 10mm; that's why we divided by 10,000 dfUS['prcp'] = dfUS.prcp.astype(float) dfUS['prcp'] = (dfUS['prcp'])/10000.0 dfUS['leaf_on']=0 dfUS.loc[(dfUS['Date'] >= dfUS['SOS'])&(dfUS['Date'] <= dfUS['EOS']), 'leaf_on'] = 1 dfUS.sample(2) # + # dfUS['intercepted']=0 # dfUS.loc[dfUS.leaf_on==1, 'intercepted'] = ((dfUS.loc[dfUS.leaf_on==1])['prcp'])*intcptRate_on # dfUS.loc[dfUS.leaf_on==0, 'intercepted'] = ((dfUS.loc[dfUS.leaf_on==0])['prcp'])*intcptRate_off # dfUS.loc[((dfUS.leaf_on==0) & (dfUS['intercepted'] >storageCap_off)), 'intercepted'] =storageCap_off # dfUS.loc[((dfUS.leaf_on==1) & (dfUS['intercepted'] >storageCap_on)), 'intercepted'] =storageCap_on # - dfUS.to_csv(r'G:\NationalLayer\ClimateData\DailySummary\dfUS{}_processed.csv'.format(year))
ClimateDataAnalysis_rainfall.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from phimal_utilities.data import Dataset_2D
from phimal_utilities.data.diffusion import AdvectionDiffusionGaussian2D
import numpy as np
import matplotlib.pyplot as plt

# %load_ext autoreload
# %autoreload 2
# -

# 2D advection-diffusion dataset: diffusion coefficient D, Gaussian initial
# condition centred at x0 with width sigma, advected with velocity v.
dataset = Dataset_2D(
    AdvectionDiffusionGaussian2D, D=2.0, x0=[0.0, 0.0], sigma=0.5, v=[0.1, 0.1]
)

# +
# Making grid: 100 x 100 spatial samples, 25 time slices.
x = np.linspace(-10, 10, 100)
t = np.linspace(0.0, 1.0, 25)
x_grid, y_grid, t_grid = np.meshgrid(x, x, t, indexing='ij')

# Flatten the two spatial grids into an (N, 2) coordinate array.
X = np.concatenate([x_grid.reshape(-1, 1), y_grid.reshape(-1, 1)], axis=1)
# -

# Solution field evaluated on the flattened grid.
u = dataset.generate_solution(X, t_grid.reshape(-1, 1))

# Show one time slice of the solution.
frame = 10
plt.contourf(u.reshape(x_grid.shape)[:, :, frame])

X.shape

# Time derivative and candidate library terms, reshaped back onto the grid.
# NOTE(review): the (100, 100, 25, 18) reshape assumes 18 library terms —
# this matches the library produced by the dataset class; confirm if changed.
t_deriv = dataset.time_deriv(X, t_grid.reshape(-1, 1)).reshape(x_grid.shape)

library_deriv = dataset.library(X, t_grid.reshape(-1, 1)).reshape(100, 100, 25, 18)

x_grid.shape

t_deriv.shape

plt.contourf(library_deriv[:, :, 2, 2])

plt.contourf(t_deriv[:, :, 3])

# +
from phimal_utilities.data.diffusion import DiffusionGaussian
from DeePyMoD_SBL.deepymod_torch.library_functions import library_2Din_1Dout
from DeePyMoD_SBL.deepymod_torch.DeepMod import DeepMod
import time
from DeePyMoD_SBL.deepymod_torch.output import Tensorboard, progress
from DeePyMoD_SBL.deepymod_torch.losses import reg_loss, mse_loss, l1_loss
from DeePyMoD_SBL.deepymod_torch.sparsity import scaling, threshold
from numpy import pi
# -
paper/figures/Diffusion_2D/.ipynb_checkpoints/testing_data_deepmod-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Test `compare_venus_ctd` Module # # Render figure object produced by the `nowcast.figures.publish.compare_venus_ctd` module. # Provides data for visual testing to confirm that refactoring has not adversely changed figure for web page. # # Set-up and function call replicates as nearly as possible what is done in the `nowcast.workers.make_plots` worker. # Notebooks like this should be developed in a # [Nowcast Figures Development Environment](https://salishsea-nowcast.readthedocs.io/en/latest/figures/fig_dev_env.html) # so that all of the necessary dependency packages are installed. # The development has to be done on a workstation that has the Salish Sea Nowcast system `/results/` parition mounted. # + import io import os from pathlib import Path import arrow import netCDF4 as nc import yaml from salishsea_tools import nc_tools from nowcast.figures import research_VENUS from nowcast.figures.comparison import compare_venus_ctd # - # %matplotlib inline # This figure module needs to access the ONC web services API. # Doing so requires a user token which you can generate on the Web Services API tab of your # [ONC account profile page](http://dmas.uvic.ca/Profile). # The `_prep_plot_data()` function assumes that the token will be stored in an environment variable # named `ONC_USER_TOKEN`. # You can do that using a cell like the following, # but **be careful not to commit the notebook with your token in the cell** # or you will publish it to the world in this notebook. 
import os os.environ['ONC_USER_TOKEN'] = 'my-token' # + config = ''' timezone: Canada/Pacific run_types: nowcast: config name: SalishSea bathymetry: /results/nowcast-sys/grid/bathymetry_201702.nc mesh_mask: /results/nowcast-sys/grid/mesh_mask201702.nc nowcast-green: config name: SOG bathymetry: /results/nowcast-sys/grid/bathymetry_201702.nc mesh_mask: /results/nowcast-sys/grid/mesh_mask201702.nc run: results_archive: nowcast: /results/SalishSea/nowcast/ nowcast-green: /results/SalishSea/nowcast-green/ forecast: /results/SalishSea/forecast/ ''' config = yaml.load(io.StringIO(config)) # + run_date = arrow.get('2018-10-19') run_type = 'nowcast-green' dmy = run_date.format('DDMMMYY').lower() start_day = { 'nowcast-green': run_date.format('YYYYMMDD'), 'forecast': run_date.replace(days=+1).format('YYYYMMDD'), } end_day = { 'nowcast-green': run_date.format('YYYYMMDD'), 'forecast': run_date.replace(days=+2).format('YYYYMMDD'), } ymd = run_date.format('YYYYMMDD') results_home = Path(config['run']['results_archive'][run_type]) results_dir = results_home/dmy dev_results_home = Path(config['run']['results_archive']['nowcast-green']) dev_results_dir = dev_results_home/dmy # - bathy = nc.Dataset(config['run_types'][run_type]['bathymetry']) mesh_mask = nc.Dataset(config['run_types'][run_type]['mesh_mask']) dev_mesh_mask = nc.Dataset(config['run_types']['nowcast-green']['mesh_mask']) grid_T_hr = nc.Dataset( str(results_dir/'SalishSea_1h_{0}_{1}_grid_T.nc' .format(start_day[run_type], end_day[run_type]))) dev_grid_T_hr = nc.Dataset( str(dev_results_dir/'SalishSea_1h_{0}_{1}_grid_T.nc' .format(start_day[run_type], end_day[run_type]))) # + # # %%timeit -n1 -r1 from importlib import reload from nowcast.figures import website_theme, shared from salishsea_tools import places, data_tools reload(compare_venus_ctd) reload(website_theme) reload(shared) reload(places) reload(data_tools) fig = compare_venus_ctd.make_figure( 'East node', grid_T_hr, dev_grid_T_hr, config['timezone'], mesh_mask, 
dev_mesh_mask, theme=website_theme) # + # # %%timeit -n1 -r1 from importlib import reload from nowcast.figures import website_theme, shared from salishsea_tools import places, data_tools reload(compare_venus_ctd) reload(website_theme) reload(shared) reload(places) reload(data_tools) fig = compare_venus_ctd.make_figure( 'Central node', grid_T_hr, dev_grid_T_hr, config['timezone'], mesh_mask, dev_mesh_mask, theme=website_theme) # + # # %%timeit -n1 -r1 from importlib import reload from nowcast.figures import website_theme, shared from salishsea_tools import places, data_tools reload(compare_venus_ctd) reload(website_theme) reload(shared) reload(places) reload(data_tools) fig = compare_venus_ctd.make_figure( 'Delta DDL node', grid_T_hr, dev_grid_T_hr, config['timezone'], mesh_mask, dev_mesh_mask, theme=website_theme, figsize=(8, 10)) # + # # %%timeit -n1 -r1 from importlib import reload from nowcast.figures import website_theme, shared from salishsea_tools import places, data_tools reload(compare_venus_ctd) reload(website_theme) reload(shared) reload(places) reload(data_tools) fig = compare_venus_ctd.make_figure( 'Delta BBL node', grid_T_hr, dev_grid_T_hr, config['timezone'], mesh_mask, dev_mesh_mask, theme=website_theme, figsize=(8, 10)) # -
notebooks/figures/comparison/TestCompareVENUS_CTD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Welcome to the first Beginner Python Workshop # # **Topic: comments, data types, containers, variables** # # This notebooks will give you a basic introduction to the Python world. Each topic mentioned below is also covered in the [tutorials and tutorial videos](https://github.com/GuckLab/Python-Workshops/tree/main/tutorials) # # <NAME>, Guck Division, MPL, 2021 # notebook metadata you can ignore! info = {"workshop": "01", "topic": ["comments", "data types", "containers", "variables"], "version" : "0.0.1"} # ### How to use this notebook # # - Click on a cell (each box is called a cell). Hit "shift+enter", this will run the cell! # - You can run the cells in any order! # - The output of runnable code is printed below the cell. # - Check out this [Jupyter Notebook Tutorial video](https://www.youtube.com/watch?v=HW29067qVWk). # # See the help tab above for more information! # # # What is in this Workshop? # In this notebook we cover: # - Comments in python # - Basic Data Types # - Textual data (strings) # - Numbers (integers and floats) # - booleans (True and False) # - Container data types # - Lists # - Dictionaries # - Variables # # We will also see how to install Python and how to use anaconda prompt (bash) and pip. These are external to the notebook. # ----------- # ## Comments # Comments are used to describe your code. # # Anything following a '#' is a comment and is not runnable code. # + # this is a comment # + active="" # You will also see comments on the same line as code like so: # - 1 + 2 # that was some code, but I am a comment! 
# You can write multi-line comments with three double quotes (""" the comment """) or three single quotes (''' my comment ''') """ This is a multi-line comment """ # ----------- # # Basic data types: strings, numbers, boolean # ### Strings (`str`) # Strings are textual data. We use them to represent words or any textual information. # We can use single quotes or double quotes to make a string print("This is a string") print('This is also a string') # What if I want to put the following into a string: Jona's message # + # this will create an error! 'Jona's message' # - # Instead we use single quotes and double quotes together "Jona's message" # using the special escape character '\' will also work, but isn't as clean looking 'Jona\'s message' # ----------- # ## Numbers (integer `int` and floating point `float`) # Numbers are easy in Python! # this is an integer 3 # now let's see a float 3.14 # + # let's add some numbers together 3 + 3.14 # notice how to output has many trailing zeros! It is called floating point for a reason... # - # #### How to add, divide etc.? # # - \+ addition # - \- subtraction (minus) # - \* multiplication # - \/ division # - \// division (but does not include the remainder - round to whole number) # - \% division (but returns the remainder) # - \** exponent/power print(9 + 3.1) print(9 - 3.1) print(9 * 3.1) print(9 / 4) print(9 // 4) print(9 % 4) print(9 ** 4) # ----------- # ## Boolean (`bool`) # Boolean values are rather ... polarising :). Boolean arrays are fast and work well with NumPy! # # They are written as `True` and `False` in Python. print(True, False) # check what equals True or False in Python print(1 == True) print(0 == False) # + # what happens when we try True == False? True == False # it is indeed False! # - # what about adding booleans together? print(True + True) print(True + 5) print(True + 5.45) # ----------- # # Built-in containers in Python # # #### What is a container? 
# A container is just an object that holds other objects. Lists and dictionaries are two such objects. They can hold other data types such as strings, numbers, bool, and even other containers. So you can have lists with lists in them! # ------------------------- # ## Lists (`list`) # You can imagine a list as, well, a list of things (like a shopping list)! Let's start with that example. # - A list is created with square brackets '[ ]' # - Each item in the list is separated from the next with a comma ',' ["oranges", "bread", "spargel"] # As you can see above, this list is a fake shopping list of food items. It is just made of three strings. # # A list can hold any other data type(s): ["bread", 42, 3.14, True] # We can put lists in lists... ["outer list", ["inner list"]] # we can even put lists in lists in lists ... but this isn't so common and can get confusing. ["outer list", ["inner list 1", ["inner list 2", ["inner list 3"]]]] # ----------- # ## Dictionaries (`dict`) # You can imagine a dict as an actual dictionary. Each word (key) in the dictionary has a corresponding description (value). # - Dictionaries are created with the curly brackets '{}' # - Each item is separated with a comma ',' # - each key:value pair is separated with a colon ':' # - Dictionaries contain keys and values. Each key has a value, just like in an actual dictionary. When starting out, it's best to keep the keys as strings, while the values can be anything. # We want to show how many of each shopping item we need. Dictionaries would be good for this! # + # this was our (shopping) list: ["oranges", "bread", "spargel"] # this would be our dictionary: {"oranges": 4, "bread": 1, "spargel": 2} # we want 4 oranges, 1 bread, and 2 spargel # - # Of course dictionaries are not limited to strings and integers! The keys can be most data types, while the values can contain any other data type. 
{42: "the answer", "True?": True, "my list": ["this", "is", "a", "list"]} # Dictionaries can be written in a more readable way like so: {"number1": 12, "number2": 56, "number3": 3.14, "number4": 18,} # ---------- # # Variables in Python # # #### What is a variable? # A variable is a place to store data, such as a number, a string, a list, a function, a class, a module etc. # # They can be reused, which makes life a lot easier for us! # # Let's look at a simple example... value = 5 # + # imagine we want to create a calculation that reuses an input value many times. # We define this value at the start and assign it to a variable with the equals sign: '=' value = 5 # we then define our "complicated" calculation that uses this value: # the result of this calculation is assigned to the variable "answer": answer = (42/value ** value) + (value + (value * 23)) # print out the answer to see it! print(answer) # - # Now imagine you want to change the input value. This is easy, we just change the 5 above to some other number! Try it out. # # If we had not set this variable, this process would become tedious. You would have to change the number 5 below so many times. # + # example without using a variable for the input value answer = (42/42 ** 42) + (42 + (42 * 23)) # - # ## Using variables for different data types # Python makes this easy. You just use the equals '=' sign to assign any data/data type to the variable. # # Here are some examples for different data types: message = "this is a string" my_number1 = 42 my_number2 = 3.14 shopping_list = ["oranges", "bread", "spargel"] shopping_dict = {"oranges": 4, "bread": 1, "spargel": 2} # these variables are now useable in this Python instance (until you restart this notebook!) print(my_number1 + my_number2) print(shopping_list[0]) # will print out "oranges" # ----------- # ### Strings # Here we will see how to: # - assign a string # - index a string (get a character in the string). 
# - NB: indexing in python starts at 0 # - slice a string (get a part of the string) message = "hello world" print(message) # each string has a length, notice that it returns a number! len(message) # Indexing the string - get a character in the string # indexing: gives you the letter at that index. Indexing starts at 0 print(message[0]) print(message[1]) print(message[2]) # negative indexing starts from the end of the string message[-1] # assigning this indexed string to a new variable will be a string! message_indexed = message[0] print(message_indexed) # Slicing the string - get a part of the string # + # slicing: allows you to slice the string between two indexes # from first index up to second index (but not including the second index!) message[0:5] # + # leaving one index empty just means the slice will go to the start/end of the string. print(message[:3]) print(message[6:]) # + # negative indexes work here too message[-5:] # - # Let's look at an actual example. You want to get the experiment number from a filename. # + file_name = "001_experiment.rtdc" # you want to get the number of the filename file_number = file_name[:3] print(file_number) # - # ----------- # ### Numbers # Here we will see how to: # - assign a number (`int` and `float`) # + # what about variables for numbers? They are the exact same! power_value = 6 Power_value = 54 value_of_pi = 3.14 print(power_value) print(Power_value) # - # we can use these variables together value_of_pi / power_value # ----------- # ### Lists # Here we will see how to: # - assign a list # - index a list (get an element from the list). # - NB: indexing in python starts at 0 # - slice a list (get one or more elements of the list) # + colour_list = ["green", "blue", "red", "cyan"] print(colour_list) # - # We can index and slice lists too! 
# + # indexing print(colour_list[0]) print(colour_list[1]) print(colour_list[2]) print(colour_list[3]) # + colour1 = colour_list[1] print(colour1) # - type(colour1) # + # slicing print(colour_list[1:3]) # + # notice how the sliced list returns a list! colour_slice1 = colour_list[:2] print(colour_slice1) # - # Let's look at an actual example, similar to the one above. You want to get the experiment number from several file names. # + file_names = ["001_experiment.rtdc", "002_experiment.rtdc", "003_experiment.rtdc",] # you can look at the first filename by indexing print(file_names[0]) # you can look at several by slicing print(file_names[1:]) # - # get the numbers from the filenames by using a simple loop. Check out the Loops tutorial! for name in file_names: # name is now just a string # when we slice it, we are slicing the string print(name[:3]) # ----------- # ### Dictionaries # Here we will see how to: # - assign a dict # - index a dict (get an item from the dict). # - NB: indexing in python starts at 0 # - *not covered* iterate over a dictionary to access each key:value pair # + my_dict = {"key1": 7, "key2": 3.14, "key3": "message"} print(my_dict) # + # index the dict with the key my_dict["key3"] # + # index the dict with a number will not work! my_dict[0] # - # ### Excercises # # (hint: use a search engine to look for answers) # # 1. Rename a string variable by slicing. # # Rename "001_experiment_ddmmyy.rtdc" to "001_experiment_070721.rtdc" # + filename = "001_experiment_ddmmyy.rtdc" # - # 2. You have a number variable and want to print it out in the following sentence: # # answer = 42 # # "The answer to life, the universe and everything is: 42" # # Find two ways to do this! # + answer = 42 # - # 3. Replace an item in a list. # # Replace the string with a different string. # + example_items = ["replace me", 55, 4.43, False]
workshops/Python-Beginner-Workshop-01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Watch Me Code 1: List Enumeration & Aggregates

# The quiz scores we will enumerate and aggregate.
quizzes = [5, 5, 4, 2, 2, 4, 5, 3]

# Definite loop: visit each value directly.
for quiz in quizzes:
    print("Grade:", quiz)

# Definite loop with indexes.
print(len(quizzes))
for i, q in enumerate(quizzes):
    print("Quiz %d Grade: %d" % (i, q))

# +
# Indexing examples:
# print(quizzes[0])   # first
# print(quizzes[-1])  # last
print(quizzes)

# Lists are mutable: overwrite the second score in place.
quizzes[1] = 0
print(quizzes)

# Slices:
print(quizzes[0:3])
print(quizzes[3:])
# -

# Aggregations over the whole list.
print("Count", len(quizzes))
print("Total", sum(quizzes))
print("Average", sum(quizzes) / len(quizzes))

grades = ["A", "B+", "A", "C+", "B-"]

# A bare slice expression: the notebook displays it, a plain script discards it.
grades[:2]

print(grades)

# Lists grow in place.
grades.append('')
content/lessons/09/Watch-Me-Code/WMC1-List-Enumeration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# ## **Reading data frame**

# Load the regrex1 data set and pull out the two coordinate columns.
reg_df <- read.csv("regrex1.csv")
x <- reg_df$x
y <- reg_df$y
print(reg_df)

# ## **Plotting the scatter plot**

# Scatter plot of y against x.
plot(x, y, main = "Regex1_scatterplot_R", col = "blue")

# ## **Linear modeling**

# Same scatter plot with an ordinary-least-squares fit line overlaid.
plot(x, y, main = "regrex1_scatterplot_R_linermodeling", col = "blue")
abline(lm(y ~ x))
Scatterplot-nb-R.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Data Parallelism # # 이번 세션에는 데이터 병렬화 기법에 대해 알아보겠습니다. # # ## 1. `torch.nn.DataParallel` # 가장 먼저 우리에게 친숙한 `torch.nn.DataParallel`의 동작 방식에 대해 알아봅시다. `torch.nn.DataParallel`은 single-node & multi-GPU에서 동작하는 multi-thread 모듈입니다. # ### 1) Forward Pass # # 1. 입력된 mini-batch를 **Scatter**하여 각 디바이스로 전송. # 2. GPU-1에 올라와 있는 모델의 파라미터를 GPU-2,3,4로 **Broadcast**. # 3. 각 디바이스로 복제된 모델로 **Forward**하여 Logits을 계산 함. # 4. 계산된 Logits을 **Gather**하여 GPU-1에 모음. # 5. Logits으로부터 **Loss**를 계산함. (with loss reduction) # # ![](../images/dp_forward.png) # # <br> # # 코드로 나타내면 아래와 같습니다. # + import torch.nn as nn def data_parallel(module, inputs, labels, device_ids, output_device): inputs = nn.parallel.scatter(inputs, device_ids) # 입력 데이터를 device_ids들에 Scatter함 replicas = nn.parallel.replicate(module, device_ids) # 모델을 device_ids들에 복제함. logit = nn.parallel.parallel_apply(replicas, inputs) # 각 device에 복제된 모델이 각 device의 데이터를 Forward함. logits = nn.parallel.gather(outputs, output_device) # 모델의 logit을 output_device(하나의 device)로 모음 return logits # - # ### 2) Backward Pass # # 1. 계산된 Loss를 각 디바이스에 **Scatter**함. # 2. 전달받은 Loss를 이용해서 각 디바이스에서 **Backward**를 수행하여 Gradients 계산. # 3. 계산된 모든 Gradient를 GPU-1로 **Reduce**하여 GPU-1에 전부 더함. # 4. 더해진 Gradients를 이용하여 GPU-1에 있는 모델을 업데이트. # # ![](../images/dp_backward.png) # # # #### 혹시나 모르시는 분들을 위해... # - `loss.backward()`: 기울기를 미분해서 Gradient를 계산 # - `optimizer.step()`: 계산된 Gradient를 이용해서 파라미터를 업데이트 # - Computation cost는 `backward()` > `step()`. # # ![](../images/backward_step.png) # # + """ src/data_parallel.py """ from torch import nn from torch.optim import Adam from torch.utils.data import DataLoader from transformers import BertForSequenceClassification, BertTokenizer from datasets import load_dataset # 1. 
create dataset datasets = load_dataset("multi_nli").data["train"] datasets = [ { "premise": str(p), "hypothesis": str(h), "labels": l.as_py(), } for p, h, l in zip(datasets[2], datasets[5], datasets[9]) ] data_loader = DataLoader(datasets, batch_size=128, num_workers=2) # 2. create model and tokenizer model_name = "bert-base-cased" tokenizer = BertTokenizer.from_pretrained(model_name) model = BertForSequenceClassification.from_pretrained(model_name, num_labels=3).cuda() # 3. make data parallel module # device_ids: 사용할 디바이스 리스트 / output_device: 출력값을 모을 디바이스 model = nn.DataParallel(model, device_ids=[0, 1], output_device=0) # 4. create optimizer and loss fn optimizer = Adam(model.parameters(), lr=3e-5) loss_fn = nn.CrossEntropyLoss(reduction="mean") # 5. start training for i, data in enumerate(data_loader): optimizer.zero_grad() tokens = tokenizer( data["premise"], data["hypothesis"], padding=True, truncation=True, max_length=512, return_tensors="pt", ) logits = model( input_ids=tokens.input_ids.cuda(), attention_mask=tokens.attention_mask.cuda(), return_dict=False, )[0] loss = loss_fn(logits, data["labels"].cuda()) loss.backward() optimizer.step() if i % 10 == 0: print(f"step:{i}, loss:{loss}") if i == 100: break # - # !python ../src/data_parallel.py # ![](../images/dp_training.png) # # Multi-GPU에서 학습이 잘 되는군요. 그런데 문제는 0번 GPU에 Logits이 쏠리다보니 GPU 메모리 불균형 문제가 일어납니다. 이러한 문제는 0번 device로 Logits이 아닌 Loss를 Gather하는 방식으로 변경하면 어느정도 완화시킬 수 있습니다. Logits에 비해 Loss는 Scalar이기 때문에 크기가 훨씬 작기 때문이죠. 이 작업은 [당근마켓 블로그](https://medium.com/daangn/pytorch-multi-gpu-%ED%95%99%EC%8A%B5-%EC%A0%9C%EB%8C%80%EB%A1%9C-%ED%95%98%EA%B8%B0-27270617936b)에 소개되었던 [PyTorch-Encoding](https://github.com/zhanghang1989/PyTorch-Encoding)의 `DataParallelCriterion`과 동일합니다. 블로그에 꽤나 복잡하게 설명되어 있는데, 복잡한 방법 대신 간단하게 **forward 함수를 오버라이드 하는 것** 만으로 동일 기능을 쉽게 구현 할 수 있습니다. # # ![](../images/dp_forward_2.png) # # <br> # # 핵심은 Loss Computation과 Loss가 reduction을 multi-thread 안에서 작동 시키는 것입니다. 
# 모델의 forward 함수는 multi-thread에서 작동되고 있기 때문에 Loss Computation 부분을 forward 함수 안에 넣으면 매우 쉽게 구현할 수 있겠죠.
#
# 한가지 특이한 점은 이렇게 구현하면 Loss의 reduction이 2번 일어나게 되는데요. multi-thread에서 batch_size//4개에서 4개로 reduction 되는 과정(그림에서 4번)이 한번 일어나고, 각 디바이스에서 출력된 4개의 Loss를 1개로 Reduction 하는 과정(그림에서 5번)이 다시 일어나게 됩니다. 그렇다고 하더라도 Loss computation 부분을 병렬화 시킬 수 있고, 0번 GPU에 가해지는 메모리 부담이 적기 때문에 훨씬 효율적이죠.

# +
"""
src/custom_data_parallel.py
"""
from torch import nn


# A plain model that returns logits from its forward pass.
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(768, 3)

    def forward(self, inputs):
        outputs = self.linear(inputs)
        return outputs


# A parallel model whose forward pass returns the loss directly, so each
# replica reduces its own shard and only a scalar is gathered to device 0.
class ParallelLossModel(Model):
    def __init__(self):
        super().__init__()

    def forward(self, inputs, labels):
        logits = super(ParallelLossModel, self).forward(inputs)
        loss = nn.CrossEntropyLoss(reduction="mean")(logits, labels)
        return loss
# -

# 운이 좋게도 우리가 자주 사용하는 Huggingface Transformers 모델들은 forward pass에서 곧 바로 Loss를 구하는 기능을 내장하고 있습니다. 따라서 이러한 과정 없이 transformers의 기능을 이용하여 진행하겠습니다. 아래의 코드는 Transformers 모델의 `labels`인자에 라벨을 입력하여 Loss를 바로 출력합니다.

# +
"""
src/efficient_data_parallel.py
"""

# 1 ~ 4까지 생략...

# 5. start training
for i, data in enumerate(data_loader):
    optimizer.zero_grad()
    tokens = tokenizer(
        data["premise"],
        data["hypothesis"],
        padding=True,
        truncation=True,
        max_length=512,
        return_tensors="pt",
    )

    # passing `labels` makes the HF model compute the loss inside forward
    loss = model(
        input_ids=tokens.input_ids.cuda(),
        attention_mask=tokens.attention_mask.cuda(),
        labels=data["labels"],
    ).loss

    loss = loss.mean()  # (4,) -> (1,)
    loss.backward()
    optimizer.step()

    if i % 10 == 0:
        print(f"step:{i}, loss:{loss}")
    if i == 100:
        break
# -

# !python ../src/efficient_data_parallel.py

#
# <br>
#
# ## 2. `torch.nn.DataParallel`의 문제점
#
#
# ### 1) 멀티쓰레드 모듈이기 때문에 Python에서 비효율적임.
# Python은 GIL (Global Interpreter Lock)에 의해 하나의 프로세스에서 동시에 여러개의 쓰레드가 작동 할 수 없습니다. 따라서 근본적으로 멀티 쓰레드가 아닌 **멀티 프로세스 프로그램**으로 만들어서 여러개의 프로세스를 동시에 실행하게 해야합니다.
#
# <br>
#
# ### 2) 하나의 모델에서 업데이트 된 모델이 다른 device로 매 스텝마다 복제되어야 함.
# 현재의 방식은 각 디바이스에서 계산된 Gradient를 하나의 디바이스로 모아서(Gather) 업데이트 하는 방식이기 때문에 업데이트된 모델을 매번 다른 디바이스들로 복제(Broadcast)해야 하는데, 이 과정이 꽤나 비쌉니다. 그러나 Gradient를 Gather하지 않고 각 디바이스에서 자체적으로 `step()`을 수행한다면 모델을 매번 복제하지 않아도 되겠죠. 어떻게 이 것을 구현 할 수 있을까요? # # <br> # # ### Solution? ➝ All-reduce!! 👍 # ![](../images/allreduce.png) # # 정답은 앞서 배웠던 All-reduce 연산입니다. 각 디바이스에서 계산된 Gradients를 모두 더해서 모든 디바이스에 균일하게 뿌려준다면 각 디바이스에서 자체적으로 `step()`을 수행 할 수 있습니다. 그러면 매번 모델을 특정 디바이스로부터 복제해 올 필요가 없겠죠. 따라서 All-reduce를 활용하는 방식으로 기존 방식을 개선해야 합니다. # # <br> # # ### 그러나... 🤔 # 그러나 All-reduce는 매우 비용이 높은 연산에 속합니다. 왜 그럴까요? All-reduce의 세부 구현을 살펴봅시다. # # <br> # # ### Reduce + Broadcast 구현 방식 # ![](../images/allreduce_1.png) # # <br> # # ### All to All 구현 방식 # ![](../images/allreduce_2.png) # # <br><br> # ## 3. `torch.nn.parallel.DistributedDataParallel` (이하 DDP) # # ### Ring All-reduce 💍 # Ring All-reduce는 2017년에 바이두의 연구진이 개발한 새로운 연산입니다. 기존의 방식들에 비해 월등히 효율적인 성능을 보여줬기 때문에 DDP 개발의 핵심이 되었죠. # # - https://github.com/baidu-research/baidu-allreduce # # ![](../images/ring_allreduce.gif) # # <br> # # ![](../images/ring_allreduce.png) # # <br> # # ### DDP란? # DDP는 기존 DataParallel의 문제를 개선하기 위해 등장한 데이터 병렬처리 모듈이며 single/multi-node & multi-GPU에서 동작하는 multi-process 모듈입니다. All-reduce를 활용하게 되면서 마스터 프로세스의 개념이 없어졌기 때문에 학습 과정이 매우 심플하게 변합니다. # # ![](../images/ddp.png) # # <br> # # + """ src/ddp.py """ import torch import torch.distributed as dist from torch.nn.parallel import DistributedDataParallel from torch.optim import Adam from torch.utils.data import DataLoader, DistributedSampler from transformers import BertForSequenceClassification, BertTokenizer from datasets import load_dataset # 1. initialize process group dist.init_process_group("nccl") rank = dist.get_rank() world_size = dist.get_world_size() torch.cuda.set_device(rank) device = torch.cuda.current_device() # 2. 
# create dataset
datasets = load_dataset("multi_nli").data["train"]
# columns 2, 5 and 9 of the arrow table hold premise / hypothesis / label
datasets = [
    {
        "premise": str(p),
        "hypothesis": str(h),
        "labels": l.as_py(),
    }
    for p, h, l in zip(datasets[2], datasets[5], datasets[9])
]

# 3. create DistributedSampler
# DistributedSampler shards the data so each process only sees its own split.
sampler = DistributedSampler(
    datasets,
    num_replicas=world_size,
    rank=rank,
    shuffle=True,
)
data_loader = DataLoader(
    datasets,
    batch_size=32,
    num_workers=2,
    sampler=sampler,
    shuffle=False,  # the sampler already shuffles; the loader must not
    pin_memory=True,
)

# 4. create model and tokenizer
model_name = "bert-base-cased"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name, num_labels=3).cuda()

# 5. make distributed data parallel module
model = DistributedDataParallel(model, device_ids=[device], output_device=device)

# 6. create optimizer
optimizer = Adam(model.parameters(), lr=3e-5)

# 7. start training
for i, data in enumerate(data_loader):
    optimizer.zero_grad()
    tokens = tokenizer(
        data["premise"],
        data["hypothesis"],
        padding=True,
        truncation=True,
        max_length=512,
        return_tensors="pt",
    )

    loss = model(
        input_ids=tokens.input_ids.cuda(),
        attention_mask=tokens.attention_mask.cuda(),
        labels=data["labels"],
    ).loss

    loss.backward()
    optimizer.step()

    # only rank 0 logs, to avoid duplicated output from every process
    if i % 10 == 0 and rank == 0:
        print(f"step:{i}, loss:{loss}")
    if i == 100:
        break
# -

# 멀티프로세스 애플리케이션이기 때문에 `torchrun`를 사용합니다.

# !torchrun --nproc_per_node=2 ../src/ddp.py

# ### 그런데 잠깐, All-reduce를 언제 수행하는게 좋을까요?
# - All-reduce를 `backward()`연산과 함께 하는게 좋을까요?
# - 아니면 `backward()`가 모두 끝나고 `step()` 시작 전에 하는게 좋을까요?
#
# ![](../images/ddp_analysis_1.png)
#
# <br>
#
# ### 결과적으로 `backward()`와 `all-reduce`를 중첩시키는 것이 좋습니다.
#
# 결과적으로 `backward()`와 `all-reduce`를 중첩시키는 것이 가장 효율적인 방식입니다. `all_reduce`는 네트워크 통신, `backward()`, `step()` 등은 GPU 연산이기 때문에 동시에 처리할 수 있죠. 이들을 중첩시키면 즉, computation과 communication이 최대한으로 overlap 되기 때문에 연산 효율이 크게 증가합니다.
#
# ![](../images/ddp_analysis_2.png)
#
# <br>
#
# 분석 결과 `backward()`와 `step()`을 비교해보면 `backward()`가 훨씬 무거운 연산이었습니다.
#
# ![](../images/ddp_analysis_3.png)
#
# <br>
#
# 당연히 더 무거운 연산을 중첩시킬 수록 전체 학습 과정을 수행하는 시간이 짧아집니다. 분석 결과 `backward()`가 끝날때 까지 기다리는 것 보다 `all-reduce`를 함께 수행하는 것이 훨씬 빨랐습니다.
#
# ![](../images/ddp_analysis_4.png)
#
# <br>
#
# ### 이 때, 생길 수 있는 궁금증들...
# - Q1: `backward()` 연산 중에 Gradient가 모두 계산되지 않았는데 어떻게 `all-reduce`를 수행합니까?
# - A1: `backward()`는 뒤쪽 레이어부터 순차적으로 이루어지기 때문에 계산이 끝난 레이어 먼저 전송하면 됩니다.
#
# <br>
#
# - Q2: 그렇다면 언제마다 `all-reduce`를 수행하나요? 레이어마다 이루어지나요?
# - A2: 아닙니다. Gradient Bucketing을 수행합니다. Bucket이 가득차면 All-reduce를 수행합니다.
#
# <br>
#
# ### Gradient Bucketing
# Gradient Bucketing은 Gradient를 일정한 사이즈의 bucket에 저장해두고 가득차면 다른 프로세스로 전송하는 방식입니다. 가장 먼저 `backward()` 연산 도중 뒤쪽부터 계산된 Gradient들을 차례대로 bucket에 저장하다가 bucket의 용량이 가득차면 All-reduce를 수행해서 각 device에 Gradient의 합을 전달합니다. 그림 때문에 헷갈릴 수도 있는데, bucket에 저장되는 것은 모델의 파라미터가 아닌 해당 레이어에서 출력된 Gradient입니다. 모든 bucket은 일정한 사이즈를 가지고 있으며 `bucket_size_mb` 인자를 통해 mega-byte 단위로 용량을 설정 할 수 있습니다.
#
# ![](../images/ddp_analysis_5.png)
#
notebooks/05_data_parallelism.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Medical Diagnosis with Costs
#
# In this exercise, we continue with our running medical diagnosis example, now looking at a decision variant of it where we need to decide whether to treat a patient, and there are associated costs.
#
# We will be using both conditional expectation and the expectation of a function of a random variable. In fact, we need to combine the two! For random variables $X$ and $Y$ for which we know (or have computed) the conditional distribution $p_{X\mid Y}(\cdot \mid y)$, and for any function $f$ such that $f(x)$ is a real number for every $x$ in the alphabet of $X$, then
#
# $$\mathbb {E}[f(X)\mid Y=y] = \sum _ x f(x) p_{X\mid Y}(x\mid y).$$
#
# (See if you can figure out why this is true from our earlier coverage of expectation!)
#
# We previously computed the probability that someone has an infection given that a test for the infection turned out positive. But how do we translate this into a decision of whether or not we should treat the person? Ideally, we should always choose to treat people who, in truth, have the infection and choose “don't treat" for people without the infection. However, it is possible that we may treat actually healthy people, a false positive that leads to unnecessary medical costs. As well, we may fail to treat people with the infection, a false negative that could require more expensive treatment later on.
#
# We can encode these costs into the following “cost table" (or cost function) $C(\cdot , \cdot )$, where $C(x,d)$ is the cost when the patient's true infection state is $x$ (“healthy" or “infected"), and we make decision $d$ (“treat" or “don't treat").
For our problem, here is the cost function: # # | Truth $x$ | Decision $d$ | $C(x,d)$ | # |-----------|--------------|---------:| # | healthy | don't treat | \$0 | # | healthy | treat | \$20000 | # | infected | don't treat | \$50000 | # | infected | treat | \$20000 | # # This cost table corresponds to a situation where the treatment costs \$20000, regardless of whether the patient's true state is healthy or infected. If the “don't treat" choice is made, then there are two potential costs: \$0 if the patient is indeed healthy, or \$50000 if the patient actually has the infection, perhaps corresponding to a more expensive future medical procedure needed if the infection is not treated now. # # Recall the medical diagnosis setup from before, summarized in these tables: # # | $X$ | Probability | $p_{Y\mid X}$ | healthy | infected | # |----------|-------------|---------------|---------|----------| # | healthy | $0.999$ | positive | $0.01$ | $0.99$ | # | infected | $0.001$ | negative | $0.99$ | $0.01$ | # # Let's suppose we've observed $Y=\text {positive}$. To use the cost table to help us make a decision, note that there are two possible realizations of the world given that $Y=\text {positive}$: # # - First, it could be that $X=\text{infected}$ given $Y=\text {positive}$. This realization has probability $p_{X\mid Y}(\text {infected}\mid \text {positive})$. Under this realization of the world, by looking at the last two rows in the above cost table: # # | Decision $d$| Cost | # |-------------|--------:| # | treat | \$20000 | # | don't treat | \$50000 | # # - Second, it could be that $X=\text {healthy}$ given $Y=\text {positive}$. This realization has probability $p_{X\mid Y}(\text {healthy}\mid \text {positive})=1-p_{X\mid Y}(\text {infected}\mid \text {positive})$. 
# Under this realization of the world, by looking at the first two rows in the cost table:
#
# | Decision $d$ | Cost |
# |--------------|--------:|
# | treat | \$20000 |
# | don't treat | \$0 |
#
# Thus, putting together the pieces:
#
# If we decide $d=\text{don't treat}$, then the cost is a random variable and is given by the probability table $p_{C(X, \text {don't treat}) \mid Y}(\cdot \mid \text {positive})$ below:
#
# | Probability | Cost |
# |------------------------------------------|--------:|
# | $p_{X\mid Y}$(infected $\mid$ positive) | \$50000 |
# | $p_{X\mid Y}$(healthy $\mid$ positive) | \$0 |
#
# **Question:** What is $\mathbb {E}[C(X, \text {don't treat}) \mid Y = \text {positive}]$?
#
# Express your answer as a function of $a \triangleq p_{X\mid Y}(\text {infected}\mid \text {positive})$ (unless a does not appear in the answer). Do not plug in the numerical value for a that you determined in a previous exercise, and also do not put a dollar sign in your answer.
#
# **Answer:** The conditional expectation will be
#
# $$\begin{align}
# \mathbb {E}[C(X, \text {don't treat}) \mid Y = \text {positive}]
# =& C(\text{healthy, don't treat}) \times p_{X\mid Y}(\text {healthy}\mid \text {positive}) \\
# & + C(\text{infected, don't treat}) \times p_{X\mid Y}(\text {infected}\mid \text {positive})\\
# =&0\times (1-a) + 50000\times a \\
# =&50000 a
# \end{align}$$

# If we decide $d=\text {treat}$, then the cost is a random variable and is given by the probability table $p_{C(X, \text {treat}) \mid Y}(\cdot \mid \text {positive})$.
#
# **Question:** What is $\mathbb {E}[C(X, \text {treat}) \mid Y = \text {positive}]$? (Hint: Figure out what the probability table $p_{C(X, \text {treat}) \mid Y}(\cdot \mid \text {positive})$ is first. See if you can figure out how we obtained the probability table $p_{C(X, \text {don't treat}) \mid Y}(\cdot \mid \text {positive})$ in the previous part.)
# # Express your answer as a function of $a \triangleq p_{X\mid Y}(\text {infected}\mid \text {positive})$ (unless a does not appear in the answer). Do not plug in the numerical value for a that you determined in a previous exercise. Express your answer as a function of $a$, where $a \triangleq p_{X\mid Y}(\text {infected}\mid \text {positive})$. # # **Answer:** If we treat we have to pay \$20000 in both cases. Hence 20000 is the expectation. # + [markdown] variables={"t": "0.4"} # Given that $Y=\text {positive}$, to choose between which of the two decisions $d=\text {treat}$ and $d=\text {don't treat}$ to make, we can choose whichever decision $d$ that has the lower expected cost $\mathbb {E}[C(X,d)\mid Y=\text {positive}]$ (note that it could be that the expected costs are the same, in which case we could just break the tie arbitrarily). # # Look at your answers to the previous two parts. You should notice that “don't treat" has the lowest possible cost when a is at most some threshold $t$, which you will now determine. # # Phrased in terms of an equation, with variable a defined as before $a \triangleq p_{X\mid Y}(\text {infected}\mid \text {positive})$, there is a threshold $t$ for which # # $$\begin{eqnarray} # \arg\min_{d \in \{\text{treat}, \text{don't treat}\}} # \mathbb{E}[C(X, d) \mid Y = \text{positive}] # &=& # \begin{cases} # \text{don't treat} & \text{if } a \le t, \\ # \text{treat} & \text{if } a > t, # \end{cases} # \end{eqnarray}$$ # # where we are breaking the tie in favor of not treating when $a=t$. # # Determine what the threshold $t$ is. (Please provide an exact answer.) # # $t$= {{t}} # - t=20000/50000;t # + [markdown] variables={"s": "0.4"} # Let's look at what happens if instead we had observed $y = \text {negative}$. 
#
# Determine what variable $b$ and threshold $s$ should be in the below equation:
#
# $$\begin{eqnarray}
# \arg\min_{d \in \{\text{treat}, \text{don't treat}\}}
# \mathbb{E}[C(X, d) \mid Y = \text{negative}]
# &=&
# \begin{cases}
# \text{don't treat} & \text{if } b \le s, \\
# \text{treat} & \text{if } b > s.
# \end{cases}
# \end{eqnarray}$$
#
# $b=$ ?
#
# $[\times ]$ pX∣Y(healthy∣positive) <br>
# $[\times ]$ pX∣Y(infected∣positive)<br>
# $[\times ]$ pX∣Y(healthy∣negative) <br>
# $[\checkmark]$ pX∣Y(infected∣negative)
#
# - In case of $d=$ "treat", we have to spend \$20000 in both cases, hence the expectation will be 20000.
# - In case of $d=$ "don't treat", we have to spend \$0 when the person is healthy and \$50000 when the person is infected.
# <br> Hence $E = 50000b + 0(1-b) = 50000b$.
#
# Threshold $s$ = {{s}}

s = 20000/50000

# + [markdown] variables={"s": "0"}
# $s$= {{s}}
# -

# Putting together the pieces, the decision rule that achieves the lowest cost can be written as:
#
# $$\begin{eqnarray}
# \widehat{x}(y)
# &=&\begin{cases}
# \text{don't treat} & y=\text{positive}\text{ and }a \le 0.4,\\
# \text{treat} & y=\text{positive}\text{ and }a > 0.4,\\
# \text{don't treat} & y=\text{negative}\text{ and }b \le 0.4,\\
# \text{treat} & y=\text{negative}\text{ and }b > 0.4,
# \end{cases}
# \end{eqnarray}$$
#
# where $a$ and $b$ are from the previous parts.
#
# Now finally consider when $a=0.09016$ and $b=0.00001$ (these correspond to values from the previous exercises on this medical diagnosis problem setup). What is the optimal treatment plan given the cost table that we have introduced?
# # If we observe that $Y=\text{positive}$, then the decision with lowest expected cost is: # # [$\times $] treat<br> # [$\checkmark$] don't treat # # If we observe that $Y=\text{negative}$, then the decision with lowest expected cost is: # # [$\times $] treat<br> # [$\checkmark$] don't treat # # Of course, if $a$ and $b$ had been different, then the decisions we would make to minimize expected cost could change! In fact, this problem shows just how good the medical test needs to be designed before we can even justify treating the patient. A bad test would be one where the optimal decision for minimizing expected cost does not use the result of the test at all — we might as well not do the test! # ## Expectations of Multiple Random Variables # # How does expected value work when we have many random variables? In this exercise, we look at some specific cases that draw out some important properties involving expected values. # # Let's look at when there are two random variables $X$ and $Y$ with alphabets $\mathcal{X}$ and $\mathcal{Y}$ respectively. Then how expectation is defined for multiple random variables is as follows: For any function $f:\mathcal{X}×\mathcal{Y}→R$, # # $$\mathbb {E}[f(X, Y)] = \sum _{x \in \mathcal{X}, y\in \mathcal{Y}} f(x, y) p_{X,Y}(x, y).$$ # # (Note that “$\sum _{x \in \mathcal{X}, y\in \mathcal{Y}}$" can also be written as “$\sum _{x \in \mathcal{X}}\sum _{y \in \mathcal{Y}}$".) # # For example: # # $$\mathbb {E}[X + Y] = \sum _{x \in \mathcal{X}, y\in \mathcal{Y}} (x + y) p_{X,Y}(x, y).$$ # # Let's look at some properties of the expected value of the sum of multiple random variables. # # First, as a warm-up, show that: # # $$\mathbb {E}[X + Y] = \mathbb {E}[X] + \mathbb {E}[Y].$$ # # This equality is called linearity of expectation and it holds regardless of whether $X$ and $Y$ are independent. 
# We previously saw that a binomial random variable with parameters $n$ and $p$ can be thought of as how many heads there are in $n$ tosses where the probability of heads is $p$. # # A different way to view a binomial random variable is that it is the sum of $n$ i.i.d. Bernoulli random variables each of parameter $p$. As a reminder, a Bernoulli random variable is $1$ with probability $p$ and $0$ otherwise. Suppose that $X_1,X_2,\ldots,X_n$ are i.i.d. $\text{Bernoulli}(p)$ random variables, and $S=\sum_{i=1}^nX_i$. # # **Question:** What is $\mathbb{E}[X_i]?$ # # **Answer:** $\mathbb{E}[X_i] = 1p + 0(1-p)= p$ # # **Question:** What is $\mathbb{E}[S]?$ # # **Answer:** $\mathbb{E}[S] = \sum_{i=1}^n\mathbb{E}[X_i]=np$ # # A widely useful random variable is the indicator random variable $1(⋅)$, defined as # # $$\begin{eqnarray} # \mathbb{1}(\mathcal{S}) # &=& # \begin{cases} # 1 & \text{if }\mathcal{S}\text{ happens}, \\ # 0 & \text{otherwise} # \end{cases} # \end{eqnarray}$$ # # where $S$ is an event. # # **Question:** Can a Bernoulli random variable corresponding to a coin flip be written as an indicator random variable? # # **Answer:** Yes, we can take $\mathcal{S} = \{\text{'H'}\}$ # **Question:** Suppose a monkey types 1,000,000,000 characters using the 26 letters of the alphabet. The monkey selects each character independently and is equally likely to choose any of the 26 letters. What is the expected number of occurrences of the word “caesar"? # # Hint: First write up the number of occurrences of the word “caesar" as the sum of indicator random variables. # # **Solution:** Let $Z_i$ be the indicator random variable that the word “caesar" starts at position $i$. 
Note that # # $$\begin{align} \mathbb {E}[Z_ i] =& \mathbb {P}(\mbox{the word ``caesar'' starts at position }i) \\ # (\text {by independence})=& \mathbb {P}(i\mbox{-th letter is ``c''})\mathbb {P}((i+1)\mbox{-th letter is ``a''}) \\ # & \quad \cdots \mathbb {P}((i+5)\mbox{ -th letter is ``r''}) \\ # =& \underbrace{\frac{1}{26}\times \frac{1}{26}\times \cdots \times \frac{1}{26}}_{6\text { times}} \\ # =& \frac{1}{(26)^{6}}.\end{align}$$ # # Then the number of occurrences of “caesar" is given by $Y=\sum _{i=1}^{1,000,000,000-5}Z_ i$, so # # $$\mathbb {E}[Y] = \mathbb {E}[\textstyle \sum _{i=1}^{999,999,995}Z_ i] = \textstyle \sum _{i=1}^{999,999,995}\mathbb {E}[Z_ i] = \boxed {999,999,995\times \frac{1}{(26)^{6}}}.$$ # # Note that in the second equality we used the fact that expectation is linear, even though $X_i$'s are dependent. This is because the linearity of expectation doesn't require independence. # **Question:** Consider a standard 52-card deck that has been shuffled. What is the expected number of times for which a king in the deck is followed by another king? For example, if all four kings are next to each other, then we'd say that there are 3 kings that are followed by another king. # # **Solution:** Let $i$ be an index between two cards in the deck, i.e., $i=1$ refers to the index between cards $1$ and $2$. Thus, the range of possible $i$ is $i=1,2,\cdots ,51$. Define an indicator random variable $X_ i$ that is $1$ when the cards at index $i$ and $i+1$ are both kings. Note that # # $$\mathbb {E}[X_ i]=\mathbb {P}(i\text {-th card is a king},(i+1)\mbox{-th card is a king})=\frac{4}{52}\cdot \frac{3}{51}.$$ # # Then the number of kings followed by another king is $Y=\sum _{i=1}^{51}X_ i$, so # $$\mathbb {E}[Y]=51\cdot \frac{4}{52}\cdot \frac{3}{51} =\frac{12}{52}=\boxed {\frac{3}{13}}.$$ # **Question:** If $X$ and $Y$ are independent, is it true that $\mathbb {E}[XY] = \mathbb {E}[X]\mathbb {E}[Y]$? 
# # **Solution:** Yes, # # $$\begin{align} # \mathbb {E}[XY] =& \sum _{x}\sum _{y}xyp_{X,Y}(x,y) \\ # (\text {by independence}) =& \sum _{x}\sum _{y}xyp_{X}(x)p_{Y}(y) \\ # =& \sum _{x}\sum _{y}(xp_{x}(x))(yp_{Y}(y)) \\ # =& \Big(\sum _{x}xp_{x}(x)\Big)\Big(\sum _{y}(yp_{Y}(y))\Big) \\ # =& \mathbb {E}[X]\mathbb {E}[Y]. # \end{align}$$ # Finally, let's look at variance again. As a reminder # # $$\text {var}(X) = \mathbb {E}[(X-\mathbb {E}[X])^2].$$ # # **Question:** If $X$ and $Y$ are independent, is it true that $\text {var}(X + Y) = \text {var}(X) + \text {var}(Y)$? # # **Solution:** Yes. # # To show this. We'll be using linearity of expectation repeatedly. To keep the notation here from getting cluttered with the squaring, let $\mu _{X}=\mathbb {E}[X]$ and $\mu _{Y}=\mathbb {E}[Y]$. First off, note that # # $$\begin{align}\displaystyle \text {var}(X) =& \mathbb {E}[(X-\mu _{X})^{2}]\\ # =& \mathbb {E}[X^{2}-2\mu _{X}X+\mu _{X}^{2}]\\ # =&\mathbb {E}[X^{2}]-2\mu _{X}\underbrace{\mathbb {E}[X]}_{\mu _{X}}+\mu _{X}^{2}\\ # =&\mathbb {E}[X^{2}]-\mu _{X}^{2}.\\ # \end{align}$$ # # Similarly, # # $$\text {var}(Y)=\mathbb {E}[Y^{2}]-\mu _{Y}^{2},$$ # # and # # $$\text {var}(X+Y) =\mathbb {E}[(X+Y)^{2}]-(\mu _{X}+\mu _{Y})^{2}.$$ # # Let's simplify the right-hand side above: # # $$\begin{align}\text {var}(X+Y) =& \mathbb {E}[(X+Y)^{2}]-(\mu _{X}+\mu _{Y})^{2}\\ # =& \mathbb {E}[X^{2}+2XY+Y^{2}]-\mu _{X}^{2}-2\mu _{X}\mu _{Y}-\mu _{Y}^{2} \\ # =& \mathbb {E}[X^{2}]+2\underbrace{\mathbb {E}[XY]}_{=\mu _{X}\mu _{Y}\text { by indep.}}+\mathbb {E}[Y^{2}]-\mu _{X}^{2}-2\mu _{X}\mu _{Y}-\mu _{Y}^{2}\\ # =&\displaystyle \underbrace{\mathbb {E}[X^{2}]-\mu _{X}^{2}}_{\text {var}(X)}+\underbrace{\mathbb {E}[Y^{2}]-\mu _{Y}^{2}}_{\text {var}(Y)}. # \end{align}$$ # # **Question:** What is the variance of a $\text {Ber}(p)$ random variable? 
# # **Solution:** For $X_ i \sim \text {Ber}(p)$, we have # # $$\begin{align}\mathbb {E}[(X_ i-\underbrace{\mathbb {E}[X_ i]}_{p})^{2}]=&(1-p)^{2}\cdot p+(0-p)^{2}\cdot (1-p) =& (1-2p+p^{2})p+p^{2}(1-p)\\ # =&p-2p^{2}+p^{3}+p^{2}-p^{3}\\ # =&p-p^{2}\\ # =&\boxed {p(1-p)}.\end{align}$$ # # **Question:** Recall that a binomial random variable $S$ that has a $\text {Binomial}(n, p)$ distribution can be written as $S = \sum _{i=1}^ n X_ i$ where the $X_ i$'s are i.i.d. $\text {Ber}(p)$ random variables. What is the variance of $S$? # # **Solution:** Since the $X_ i$'s are independent, # # $$\text {var}(S) =\text {var}\Big(\sum _{i=1}^ n X_ i\Big) =\sum _{i=1}^ n \text {var}(X_ i) =\boxed {n p (1-p)}.$$
week04/02 Exercise on Decision Making.ipynb