code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt import matplotlib.patches as mpatches from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from sklearn.model_selection import GridSearchCV RANDOM_STATE = 103783 # - df = pd.read_csv("student-mat.csv") df def preprocesar_data_frame(df): y = df['G3'] X = df.drop(columns=['G1','G2','G3']) return (X, y) (X,y) = preprocesar_data_frame(df) X_prepos = pd.get_dummies(X, drop_first=True) X_train, X_test, y_train, y_test = train_test_split(X_prepos, y, test_size=0.25, random_state=RANDOM_STATE) # + from sklearn.neighbors import KNeighborsRegressor from sklearn.metrics import r2_score parametros = {'n_neighbors':range(3,9), 'weights':['uniform', 'distance'],'p':[1,2]} knn = KNeighborsRegressor() regresor = GridSearchCV(knn, parametros,n_jobs =-1,verbose = 4, cv= 10) regresor.fit(X_train,y_train) print(r2_score(y_test,regresor.predict(X_test))) regresor.best_params_ # - neigh = KNeighborsRegressor(n_neighbors=2) neigh.fit(X_train,y_train)
.ipynb_checkpoints/M-KNN-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Evaluation # # Evaluating Metrics: # # - Prediction Metrics (Similar to a Regression Problem) # - RMSE # - R2 # - MAE # - Explained Variance # # # ### Hit Metrics (Similar to Classification Metrics) # **Hit** - defined by relevancy, a hit usually means whether the recommended "k" items hit the "relevant" items of the user. For example, a user may have clicked, viewed, or purchased an item many times, and a hit in the recommended items indicates that the recommender performs well. Metrics like "precision", "recall", etc. measure the performance of such hitting accuracy. # # - Precision@k # - Recall@k # # # ### Ranking Metrics # # **Ranking** - ranking metrics give more explanations about, for the hit items, whether they are ranked in a way that is preferred by the users to whom the items will be recommended. Metrics like "mean average precision", "ndcg", etc., evaluate whether the relevant items are ranked higher than the less-relevant or irrelevant items. 
# # set the environment path to find reco import sys sys.path.append("../") import keras import numpy as np import pandas as pd # + df_true = pd.DataFrame( { "USER": [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], "ITEM": [1, 2, 3, 1, 4, 5, 6, 7, 2, 5, 6, 8, 9, 10, 11, 12, 13, 14], "RATING": [5, 4, 3, 5, 5, 3, 3, 1, 5, 5, 5, 4, 4, 3, 3, 3, 2, 1], } ) df_pred = pd.DataFrame( { "USER": [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], "ITEM": [3, 10, 12, 10, 3, 5, 11, 13, 4, 10, 7, 13, 1, 3, 5, 2, 11, 14], "RATING_PRED": [14, 13, 12, 14, 13, 12, 11, 10, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5] } ) # - df_true.head() common_users = set(df_true.USER).intersection(set(df_pred.USER)) from reco.evaluate import get_top_k_items def get_hit_df(rating_true, rating_pred, k): # Make sure the prediction and true data frames have the same set of users common_users = set(rating_true["USER"]).intersection(set(rating_pred["USER"])) rating_true_common = rating_true[rating_true["USER"].isin(common_users)] rating_pred_common = rating_pred[rating_pred["USER"].isin(common_users)] n_users = len(common_users) df_hit = get_top_k_items(rating_pred_common, "USER", "RATING_PRED", k) df_hit = pd.merge(df_hit, rating_true_common, on=["USER", "ITEM"])[ ["USER", "ITEM", "rank"] ] # count the number of hits vs actual relevant items per user df_hit_count = pd.merge( df_hit.groupby("USER", as_index=False)["USER"].agg({"hit": "count"}), rating_true_common.groupby("USER", as_index=False)["USER"].agg( {"actual": "count"} ), on="USER", ) return df_hit, df_hit_count, n_users def precision_at_k(rating_true, rating_pred, k): df_hit, df_hit_count, n_users = get_hit_df(rating_true, rating_pred, k) if df_hit.shape[0] == 0: return 0.0 return (df_hit_count["hit"] / k).sum() / n_users def recall_at_k(rating_true, rating_pred, k): df_hit, df_hit_count, n_users = get_hit_df(rating_true, rating_pred, k) if df_hit.shape[0] == 0: return 0.0 return (df_hit_count["hit"] / df_hit_count["actual"]).sum() / n_users 
precision_at_k(df_true, df_pred, 3) recall_at_k(df_true, df_pred, 3) def ndcg_at_k(rating_true, rating_pred, k): df_hit, df_hit_count, n_users = get_hit_df(rating_true, rating_pred, k) if df_hit.shape[0] == 0: return 0.0 # calculate discounted gain for hit items df_dcg = df_hit.copy() # relevance in this case is always 1 df_dcg["dcg"] = 1 / np.log1p(df_dcg["rank"]) # sum up discount gained to get discount cumulative gain df_dcg = df_dcg.groupby("USER", as_index=False, sort=False).agg({"dcg": "sum"}) # calculate ideal discounted cumulative gain df_ndcg = pd.merge(df_dcg, df_hit_count, on=["USER"]) df_ndcg["idcg"] = df_ndcg["actual"].apply( lambda x: sum(1 / np.log1p(range(1, min(x, k) + 1))) ) # DCG over IDCG is the normalized DCG return (df_ndcg["dcg"] / df_ndcg["idcg"]).sum() / n_users ndcg_at_k(df_true, df_pred, 3)
MovieLens/05-Evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import pydub import glob mp3=glob.glob('*.mp3') mp3_file=mp3[0] # print(mp3_file) # print(mp3_file.) wav='fff'+'.wav' sound=pydub.AudioSegment.from_mp3(mp3_file) sound.export(wav,format="wav") print(type(sound)) # mp3_to_wav() # -
pythoncode/fff/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/sanfernoronha/manual_twitter_sentiment_analysis/blob/master/Twitter_Sentiment_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="H2mzLVACNgcq" colab_type="code" colab={} # !pip install pandas # !pip install tweepy # !pip install vaderSentiment # + id="lZFAtTiIPINn" colab_type="code" colab={} import tweepy from nltk.sentiment.vader import SentimentIntensityAnalyzer import pandas as pd # + id="Fol_WD2cPlRc" colab_type="code" colab={} consumer_key = 'b8mSwzHCe4xEtBfsCphURD3r7' consumer_secret = '<KEY>' access_token = '<KEY>' access_token_secret = '<KEY>' # + id="nxJgnIH9QiMT" colab_type="code" colab={} auth = tweepy.OAuthHandler(consumer_key,consumer_secret) auth.set_access_token(access_token,access_token_secret) api = tweepy.API(auth) # + id="VVS4v-47RLVV" colab_type="code" colab={} import re def clean_tweet(tweet): return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",tweet).split()) # + id="iNFJQuv6UGEa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="36890adf-ab0b-4a2b-f744-e9574af34ab7" tweets = api.search('bullettrainindia', count=1000) data = pd.DataFrame(data=[clean_tweet(tweet.text) for tweet in tweets], columns=['Tweets']) display(data.head(10)) print(tweets[0].id) print(tweets[0].created_at) print(tweets[0].source) print(tweets[0].favorite_count) print(tweets[0].retweet_count) print(tweets[0].geo) print(tweets[0].coordinates) print(tweets[0].entities) # + id="QoPYCFv0VYPW" colab_type="code" colab={} import nltk nltk.download('vader_lexicon') # + id="NXvUySVhWZOn" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 1000} outputId="ead02725-e4b9-45c7-f524-ab90254f1e8c" sid = SentimentIntensityAnalyzer() list = [] for index,row in data.iterrows(): ss = sid.polarity_scores(row["Tweets"]) list.append(ss) se = pd.Series(list) data['polarity'] = se.values display(data.head(300)) # + id="ra4J0VFhZw8V" colab_type="code" colab={}
Twitter_Sentiment_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Querying data and building charts from Business Automation Insights data # # This notebook shows how to retrieve and use data stored by Business Automation Insights for Business Automation Workflow. The notebook uses Spark SQL to retrieve process and task data and uses also pixiedust to display charts. # # The notebook is configured to retrieve data from the HDFS data lake where Business Automation Insights stores that data. <strong>Specify the HDFS root path in the code below</strong>. # # ## Table Of Content # # + [Number of Completed Processes](#completeprocesses) # + [Completed Processes By Applications](#completeprocessesperapp) # + [Average Process Duration Per Application](#avgprocessdurationperapp) # + [Average Duration of Processes in Minutes](#avgprocessduration) # + [Total Number of Completed Activities](#totalcompleteactivities) # + [Number of Completed Activities](#completeactivities) # + [Average Duration per Activity](#avgactivityduration) # + [Average Duration of Activities](#avgactivitydurationglobal) # + [Completed Activities Per Users](#activitiesperuser) # First install PixieDust: # + # # !pip install -U --no-deps pixiedust # # !pip install -U --no-deps astunparse # - import pixiedust # + from pyspark.sql import SQLContext, Row from pyspark.sql.types import IntegerType from datetime import datetime hdfs_root = 'hdfs://namenode/user/bai' # - # <a id='completeprocesses'></a> # ## Number of Completed Processes # This section extracts the completed processes from the BAI summaries data and displays the number of completed processes. The 'completed summary' data is covering both processes that are completed and the activities in a completed process. In HDFS, the data is stored as a set of JSON files. 
For a process, the information is stored within the following path: # # <pre> # [root-directory]/ibm-bai/bpmn-summaries-completed/[process application id]/[process application version id]/process/[process id]/[date] # </pre> # In the code below we use wildcard (\*) to read all the json data for all process applications and all processes. Please refer to the BAI documentation to learn how to use the IBM BPM REST API to retrieve the process application ids and versions. # # # + from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() spark.conf.set("dfs.client.use.datanode.hostname", "true") summaries = spark.read.json(hdfs_root +'/ibm-bai/bpmn-summaries-completed/*/*/process/*/*') print ('The data contains ' + str(summaries.count()) + ' completed process') # + from pyspark.sql.functions import col summaries = summaries.withColumn("duration_in_minutes", col("duration")/60000) summaries.createOrReplaceTempView("bpmnSummaries") summaries.cache(); # - # <a id='completeprocessesperapp'></a> # # ## Completed Processes By Applications # This section uses the summary data for completed processes to display the number of completed processes for each process application. # + pixiedust={"displayParams": {"aggregation": "SUM", "handlerId": "barChart", "keyFields": "processApplicationName", "legend": "false", "mpld3": "false", "orientation": "horizontal", "sortby": "Values ASC", "title": "Process Completed By Applications"}} display(summaries) # - # With Spark SQL, you can use: spark.sql("select processApplicationName, count(id) from bpmnSummaries group by processApplicationName").show() # <a id='avgprocessdurationperapp'></a> # ## Average Process Duration Per Application # This sections uses the summary data for completed processes to display the average process duration in minutes for each process application. It uses the 'duration' property of the 'completed process' data. 
# + pixiedust={"displayParams": {"aggregation": "AVG", "handlerId": "barChart", "keyFields": "name", "orientation": "horizontal", "sortby": "Values ASC", "timeseries": "false", "title": "Average Process Duration by Process", "valueFields": "duration_in_minutes"}} display(summaries) # - # With Spark SQL, you can use: spark.sql("select name, avg(duration_in_minutes) from bpmnSummaries group by name").show() # <a id='avgprocessduration'></a> # ## Average Duration of Processes in Minutes # This section uses the summary data for completed processes to display the average duration of each process. spark.sql("select avg(duration_in_minutes) from bpmnSummaries").show() # <a id='totalcompleteactivities'></a> # ## Number of Completed Activities # Reading activity information is similar to reading the process information. The path in HDFS is similar: # <pre> [root-directory]/ibm-bai/bpmn-summaries-completed/workflow/[process application id]/[process application version id]/activity/[process id]/[activity id]/[date]</pre> # # The code lines below read all the completed activities by using wildcards in the path, then display the number of activities. # + activitysummaries = spark.read.json(hdfs_root + "/ibm-bai/bpmn-summaries-completed/*/*/activity/*/*/*") activitysummaries = activitysummaries.withColumn("duration_in_minutes", col("duration")/60000) activitysummaries.createOrReplaceTempView("activitysummaries") activitysummaries.cache(); # - print ('The data containts ' + str(activitysummaries.count()) + ' completed activities') # <a id='completeactivities'></a> # # ## Number Of Completed Activities # This section displays the number of completed activities per activity type. 
# + pixiedust={"displayParams": {"aggregation": "COUNT", "clusterby": "processApplicationName", "handlerId": "barChart", "keyFields": "name", "orientation": "horizontal", "sortby": "Values ASC"}} display(activitysummaries) # - # With Spark SQL, you can use: spark.sql("select name, count(*) from activitysummaries group by name").show() # <a id='avgactivityduration'></a> # ## Average Duration per Activity # In this section, the average duration is displayed per activity. # + pixiedust={"displayParams": {"aggregation": "AVG", "chartsize": "100", "clusterby": "processApplicationName", "handlerId": "barChart", "keyFields": "name", "orientation": "horizontal", "valueFields": "duration_in_minutes"}} display(activitysummaries) # - # In Spark SQL, you can use: spark.sql("select name, avg(duration_in_minutes) from activitysummaries group by name").show() # <a id='avgactivitydurationglobal'></a> # ## Average Duration of Activities # This section computes the average duration of all the activities. spark.sql("select avg(duration_in_minutes) from activitysummaries").show() # <a id='activitiesperuser'></a> # ## Completed Activities By Users # This section displays the activities that are completed by the various individuals or teams. # + pixiedust={"displayParams": {"aggregation": "COUNT", "handlerId": "pieChart", "keyFields": "performerName", "legend": "true"}} display(activitysummaries) # - # With Spark SQL, you can use: spark.sql("select performerName, count(*) from activitysummaries group by performerName").show() # Author: <NAME> is a Senior Technical Staff Member and architect in the Business Automation team in the IBM France Lab.
labs/ai/notebooks/BAI dashboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from datetime import datetime from sqlalchemy import create_engine # + engine = create_engine("mysql+pymysql://capMaster:#jackpot<EMAIL> <EMAIL>:23306/credit_model") con = engine.connect() df = pd.read_sql("select * from debt_score", con) con.close() # - df[(df['probabilidade']==1) & (df['segmento']=='credito')] df[df['cnpj']=='08992524000118']
Modelagem/Score Divida Justa/valiadando_resultados.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Hierarchical Binomial Model: Rat Tumor Example # + # %matplotlib inline import pymc3 as pm import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd # import pymc3.distributions.transforms as tr # import theano.tensor as tt from scipy.special import gammaln plt.style.use('seaborn-darkgrid') # print('Running on PyMC3 v{}'.format(pm.__version__)) # - # This short tutorial demonstrates how to use pymc3 to do inference for the rat tumour example found in chapter 5 of *Bayesian Data Analysis 3rd Edition*. Readers should already be familiar with the pymc3 api. # # Suppose we are interested in the probability that a lab rat develops endometrial stromal polyps. We have data from 71 previously performed trials and would like to use this data to perform inference. # # The authors of BDA3 choose to model this problem hierarchically. Let $y_i$ be the number of lab rats which develop endometrial stromal polyps out of a possible $n_i$. We model the number of rodents which develop endometrial stromal polyps as binomial # # $$ y_i \sim \operatorname{Bin}(\theta_i;n_i)$$ # # allowing the probability of developing an endometrial stromal polyp (i.e. $\theta_i$) to be drawn from some population distribution. For analytical tractability, we assume that $\theta_i$ has a Beta distribution # # $$ \theta_i \sim \operatorname{Beta}(\alpha, \beta)$$ # # We are free to specify a prior distribution for $\alpha, \beta$. We choose a weakly informative prior distribution to reflect our ignorance about the true values of $\alpha, \beta$. The authors of BDA3 choose the joint hyperprior for $\alpha, \beta$ to be # # $$ p(\alpha, \beta) \propto (\alpha + \beta) ^{-5/2}$$ # # For more information, please see *Bayesian Data Analysis 3rd Edition* pg. 
110. # ## A Directly Computed Solution # # Our joint posterior distribution is # # $$p(\alpha,\beta,\theta \lvert y) # \propto # p(\alpha, \beta) # p(\theta \lvert \alpha,\beta) # p(y \lvert \theta)$$ # # which can be rewritten in such a way so as to obtain the marginal posterior distribution for $\alpha$ and $\beta$, namely # # $$ p(\alpha, \beta \lvert y) = # p(\alpha, \beta) # \prod_{i = 1}^{N} \dfrac{\Gamma(\alpha+\beta)}{\Gamma(\alpha)\Gamma(\beta)} # \dfrac{\Gamma(\alpha+y_i)\Gamma(\beta+n_i - y_i)}{\Gamma(\alpha+\beta+n_i)}$$ # # # See BDA3 pg. 110 for more information on deriving the marginal posterior distribution. With a little determination, we can plot the marginal posterior and estimate the means of $\alpha$ and $\beta$ without having to resort to MCMC. We will see, however, that this requires considerable effort. # # The authors of BDA3 choose to plot the surface under the parameterization $(\log(\alpha/\beta), \log(\alpha+\beta))$. We do so as well. Through the remainder of the example let $x = \log(\alpha/\beta)$ and $z = \log(\alpha+\beta)$. # # # + # rat data (BDA3, p. 
102) y = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2, 5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4, 10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15, 15, 9, 4 ]) n = np.array([ 20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20, 20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19, 46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20, 48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46, 47, 24, 14 ]) N = len(n) # - data = pd.DataFrame({"nm_tumors" : y, "nm_rats": n}) data.head() # + theta_mean = data.iloc[:-1, :].eval("nm_tumors / nm_rats").mean() theta_var = data.iloc[:-1, :].eval("nm_tumors / nm_rats").var() a_plus_b = (theta_mean * (1 - theta_mean)) / theta_var - 1 alpha = theta_mean * a_plus_b beta = (1 - theta_mean) * a_plus_b print(f"Only using moments in beta distribution, alpha: {alpha} and beta : {beta} means.") # - # Create space for the parameterization in which we wish to plot X, Z = np.meshgrid(np.arange(-2.3, -1.3, 0.01), np.arange(1, 5, 0.01)) param_space = np.c_[X.ravel(), Z.ravel()] df = pd.DataFrame(param_space, columns=['X', 'Z']) # First we have a dataframe with $x$ and $z$ values. 
If $x = \log(\alpha/\beta)$ and $z = \log(\alpha+\beta)$, so $\beta = e^z \, / \, (1 + e^x)$ and $\alpha = e^{x + z} \, / \, (1 + e^x)$ # To create the plot, we first compute the logarithm of the density function $(5.8)$ with prior density $(5.9)$, multiplying by the Jacobian to obtain the density $p(\log(\alpha / \beta), \log (\alpha + \beta) \, \vert \, y)$ # $$ \log p(\alpha, \beta, \lvert y) = # \log \left[ # (\alpha + \beta)^{-5/2} # \prod_{i = 1}^{N} \dfrac{\Gamma(\alpha+\beta)}{\Gamma(\alpha)\Gamma(\beta)} # \dfrac{\Gamma(\alpha+y_i)\Gamma(\beta+n_i - y_i)}{\Gamma(\alpha+\beta+n_i)} # \right] # $$ # + def trans_to_beta(x, z): return np.exp(z) / (np.exp(x) + 1) def trans_to_alpha(x, z): return np.exp(x) * trans_to_beta(x, z) # Compute on log scale because products turn to sums def log_likelihood(alpha, beta, y, n): LL = 0 # Summing over data for Y, N in zip(y, n): LL += ( gammaln(alpha + beta) - gammaln(alpha) - gammaln(beta) + gammaln(alpha + Y) + gammaln(beta + N - Y) - gammaln(alpha + beta + N) ) return LL def log_prior(alpha, beta): return - 2.5 * np.log(alpha + beta) # - df = df.assign( alpha=lambda x: trans_to_alpha(x["X"], x["Z"]), # Transform the space back to alpha beta to compute the log-posterior beta=lambda x: trans_to_beta(x["X"], x["Z"]), log_posterior=lambda x: log_prior(x["alpha"], x["beta"]) + log_likelihood(x["alpha"], x["beta"], y, n), log_jacobian=lambda x: np.log(x["alpha"]) + np.log(x["beta"]), transformed=lambda x: x["log_posterior"] + x["log_jacobian"], exp_trans=lambda x: np.exp(x["transformed"] - x["transformed"].max()), normed_exp_trans=lambda x: x["exp_trans"] / x["exp_trans"].sum() # This will ensure the density is normalized ) df.head() surface = df.set_index(['X', 'Z']).exp_trans.unstack().values.T # + fig, ax = plt.subplots(figsize=(8, 8)) ax.contourf(X, Z, surface) ax.set_xlabel(r'$\log(\alpha/\beta)$', fontsize=16) ax.set_ylabel(r'$\log(\alpha+\beta)$', fontsize=16) ix_z, ix_x = np.unravel_index(np.argmax(surface, axis=None), 
surface.shape) ax.scatter([X[0, ix_x]], [Z[ix_z, 0]], color='red') text = r"$({a},{b})$".format(a=np.round( X[0, ix_x], 2), b=np.round(Z[ix_z, 0], 2)) ax.annotate(text, xy=(X[0, ix_x], Z[ix_z, 0]), xytext=(-1.6, 3.5), ha='center', fontsize=16, color='black', arrowprops={'facecolor':'white'} ); # - x_hat=np.round(X[0, ix_x], 2) z_hat=np.round(Z[ix_z, 0], 2) alpha_hat = trans_to_alpha(x_hat, z_hat) beta_hat = trans_to_beta(x_hat, z_hat) print(alpha_hat) print(beta_hat) # The plot shows that the posterior is roughly symetric about the mode (-1.79, 2.74). This corresponds to $\alpha = 2.21$ and $\beta = 13.27$. We can compute the marginal means as the authors of BDA3 do, using # # $$ \operatorname{E}(\alpha \lvert y) \text{ is estimated by } # \sum_{x,z} \alpha p(x,z\lvert y) $$ # # $$ \operatorname{E}(\beta \lvert y) \text{ is estimated by } # \sum_{x,z} \beta p(x,z\lvert y) $$ #Estimated mean of alpha (df.alpha*df.normed_exp_trans).sum().round(3) #Estimated mean of beta (df.beta*df.normed_exp_trans).sum().round(3)
hierarchical-binominal-model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # # 파이프라인 만들기 # # Azure ML SDK를 사용해 스크립트 기반 실험을 실행하면 데이터를 수집하고 모델을 학습시킨 다음 개별적으로 등록하는 데 필요한 여러 단계를 수행할 수 있습니다. 그러나 엔터프라이즈 환경에서는 보통 기계 학습 솔루션을 빌드하려면 수행해야 하는 개별 단계 순서를 *파이프라인*에 캡슐화합니다. 이 파이프라인은 사용자의 요청 시 컴퓨팅 대상 하나 이상에서 실행하거나, 자동화된 빌드 프로세스에서 실행하거나 일정에 따라 실행할 수 있습니다. # # 이 Notebook에서는 이러한 모든 요소를 취합하여 데이터를 전처리한 다음 모델 학습과 등록을 진행하는 간단한 파이프라인을 만들어 보겠습니다. # ## 작업 영역에 연결 # # 이 Notebook의 작업을 시작하려면 먼저 작업 영역에 연결합니다. # # > **참고**: Azure 구독에 인증된 세션을 아직 설정하지 않은 경우에는 링크를 클릭하고 인증 코드를 입력한 다음 Azure에 로그인하여 인증하라는 메시지가 표시됩니다. # + import azureml.core from azureml.core import Workspace # 저장된 구성 파일에서 작업 영역 로드 ws = Workspace.from_config() print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name)) # - # ## 데이터 준비 # # 파이프라인에서는 당뇨병 환자의 세부 정보가 포함된 데이터 세트를 사용합니다. 아래 셀을 실행하여 이 데이터 세트를 만듭니다. 이전에 데이터 세트를 만든 경우, 코드가 기존 버전을 찾습니다. 
# + from azureml.core import Dataset default_ds = ws.get_default_datastore() if 'diabetes dataset' not in ws.datasets: default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data target_path='diabetes-data/', # Put it in a folder path in the datastore overwrite=True, # Replace existing files of the same name show_progress=True) #Create a tabular dataset from the path on the datastore (this may take a short while) tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv')) # Register the tabular dataset try: tab_data_set = tab_data_set.register(workspace=ws, name='diabetes dataset', description='diabetes data', tags = {'format':'CSV'}, create_new_version=True) print('Dataset registered.') except Exception as ex: print(ex) else: print('Dataset already registered.') # - # ## 파이프라인 단계용 스크립트 만들기 # # 파이프라인은 *단계* 하나 이상으로 구성됩니다. 이러한 단계는 Python 스크립트일 수도 있고, 특정 위치 간에 데이터를 복사하는 데이터 전송 단계 등의 특수 단계일 수도 있습니다. 각 단계는 자체 컴퓨팅 컨텍스트에서 실행할 수 있습니다. 이 연습에서는 Python 스크립트 단계 2개가 포함된 간단한 파이프라인을 작성합니다. 한 단계에서는 학습 데이터를 전처리하며, 다른 단계에서는 전처리된 데이터를 사용하여 모델 학습과 등록을 진행합니다. # # 먼저 파이프라인 단계에서 사용할 스크립트 파일용 폴더를 만듭니다. # + import os # 파이프라인 단계 파일용 폴더 만들기 experiment_folder = 'diabetes_pipeline' os.makedirs(experiment_folder, exist_ok=True) print(experiment_folder) # - # 이제 첫 번째 스크립트를 작성합니다. 이 스크립트는 당뇨병 데이터 세트의 데이터를 읽은 후 간단한 전처리 작업을 적용해 데이터가 누락된 행을 제거하고 숫자 특징을 크기가 비슷하도록 정규화합니다. # # 이 스크립트에 포함된 **--prepped-data** 인수는 결과 데이터를 저장해야 하는 폴더를 참조합니다. 
# + # %%writefile $experiment_folder/prep_diabetes.py # 라이브러리 가져오기 import os import argparse import pandas as pd from azureml.core import Run from sklearn.preprocessing import MinMaxScaler # 매개 변수 가져오기 parser = argparse.ArgumentParser() parser.add_argument("--input-data", type=str, dest='raw_dataset_id', help='raw dataset') parser.add_argument('--prepped-data', type=str, dest='prepped_data', default='prepped_data', help='Folder for results') args = parser.parse_args() save_folder = args.prepped_data # 실험 실행 컨텍스트 가져오기 run = Run.get_context() # 데이터 로드(입력 데이터 세트로 전달됨) print("Loading Data...") diabetes = run.input_datasets['raw_data'].to_pandas_dataframe() # 원시 행 수 로깅 row_count = (len(diabetes)) run.log('raw_rows', row_count) # Null 제거 diabetes = diabetes.dropna() # 숫자 열 정규화 scaler = MinMaxScaler() num_cols = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree'] diabetes[num_cols] = scaler.fit_transform(diabetes[num_cols]) # 처리된 행 로깅 row_count = (len(diabetes)) run.log('processed_rows', row_count) # 준비된 데이터 저장 print("Saving Data...") os.makedirs(save_folder, exist_ok=True) save_path = os.path.join(save_folder,'data.csv') diabetes.to_csv(save_path, index=False, header=True) # 실행 종료 run.complete() # - # 이제 두 번째 단계(모델 학습)용 스크립트를 만들 수 있습니다. 이 스크립트에 포함된 **--training-folder** 인수는 이전 단계에서 준비한 데이터가 저장된 폴더를 참조합니다. 
# + # %%writefile $experiment_folder/train_diabetes.py # 라이브러리 가져오기 from azureml.core import Run, Model import argparse import pandas as pd import numpy as np import joblib import os from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve import matplotlib.pyplot as plt # 매개 변수 가져오기 parser = argparse.ArgumentParser() parser.add_argument("--training-folder", type=str, dest='training_folder', help='training data folder') args = parser.parse_args() training_folder = args.training_folder # 실험 실행 컨텍스트 가져오기 run = Run.get_context() # 학습 폴더에 준비된 데이터 파일 로드 print("Loading Data...") file_path = os.path.join(training_folder,'data.csv') diabetes = pd.read_csv(file_path) # 기능 및 레이블 분리 X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values # 데이터를 학습 세트와 테스트 세트로 분할 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0) # 의사 결정 트리 모델 학습 진행 print('Training a decision tree model...') model = DecisionTreeClassifier().fit(X_train, y_train) # 정확도 계산 y_hat = model.predict(X_test) acc = np.average(y_hat == y_test) print('Accuracy:', acc) run.log('Accuracy', np.float(acc)) # AUC 계산 y_scores = model.predict_proba(X_test) auc = roc_auc_score(y_test,y_scores[:,1]) print('AUC: ' + str(auc)) run.log('AUC', np.float(auc)) # ROC 곡선 그리기 fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1]) fig = plt.figure(figsize=(6, 4)) # 대각선 50% 선 그리기 plt.plot([0, 1], [0, 1], 'k--') # 모델의 FPR 및 TPR 그리기 plt.plot(fpr, tpr) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC Curve') run.log_image(name = "ROC", plot = fig) plt.show() # outputs 폴더에 학습된 모델 저장 print("Saving model...") os.makedirs('outputs', exist_ok=True) model_file = os.path.join('outputs', 'diabetes_model.pkl') joblib.dump(value=model, 
filename=model_file) # 모델 등록 print('Registering model...') Model.register(workspace=run.experiment.workspace, model_path = model_file, model_name = 'diabetes_model', tags={'Training context':'Pipeline'}, properties={'AUC': np.float(auc), 'Accuracy': np.float(acc)}) run.complete() # - # ## 파이프라인 단계용 컴퓨팅 환경 준비 # # 이 연습에서는 두 단계에 같은 컴퓨팅을 사용하지만 각 단계는 독립적으로 실행됩니다. 따라서 해당하는 경우 각 단계에 서로 다른 컴퓨팅 컨텍스트를 지정할 수 있습니다. # # 먼저, 이전 랩에서 만든 컴퓨팅 대상을 가져옵니다. 없는 경우에는 생성됩니다. # # > **중요**: 컴퓨팅 클러스터를 실행하기 전에 아래 코드에서 *your-compute-cluster*를 컴퓨팅 클러스터의 이름으로 변경하세요! 클러스터 이름은 2~16자 사이의 전역으로 고유한 이름이어야 합니다. 유효한 문자는 영문자, 숫자 및 - 문자입니다. # + from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException cluster_name = "your-compute-cluster" try: # Check for existing compute target pipeline_cluster = ComputeTarget(workspace=ws, name=cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: # If it doesn't already exist, create it try: compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2) pipeline_cluster = ComputeTarget.create(ws, cluster_name, compute_config) pipeline_cluster.wait_for_completion(show_output=True) except Exception as ex: print(ex) # - # 컴퓨팅에는 필요한 패키지 종속성이 설치된 Python 환경이 필요하므로 실행 구성을 만들어야 합니다. 
# + from azureml.core import Environment from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration # 실험용 Python 환경 만들기 diabetes_env = Environment("diabetes-pipeline-env") # 패키지 종속성 집합 만들기 diabetes_packages = CondaDependencies.create(conda_packages=['scikit-learn','ipykernel','matplotlib','pandas','pip'], pip_packages=['azureml-defaults','azureml-dataprep[pandas]','pyarrow']) # 환경에 종속성 추가 diabetes_env.python.conda_dependencies = diabetes_packages # 환경 등록 diabetes_env.register(workspace=ws) registered_env = Environment.get(ws, 'diabetes-pipeline-env') # 파이프라인용 새 runconfig 개체 만들기 pipeline_run_config = RunConfiguration() # 위에서 만든 컴퓨팅을 사용합니다. pipeline_run_config.target = pipeline_cluster # 실행 구성에 환경 할당 pipeline_run_config.environment = registered_env print ("Run configuration created.") # - # ## 파이프라인 작성 및 실행 # # 이제 파이프라인을 만들고 실행할 수 있습니다. # # 먼저 파이프라인용 단계와 파이프라인 간에 전달해야 하는 데이터 참조를 정의해야 합니다. 이 연습의 첫 번째 단계는 두 번째 단계에서 읽을 수 있는 폴더에 준비된 데이터를 써야 합니다. 이 두 단계는 원격 컴퓨팅에서 실행되며 각기 다른 컴퓨팅에서 실행할 수 있으므로, 작업 영역 내 데이터 저장소의 특정 위치에 대한 데이터 참조로 폴더 경로를 전달해야 합니다. **PipelineData** 개체는 중간 스토리지 위치에 사용되며 파이프라인 단계 간에 전달할 수 있는 특수한 종류의 데이터 참조입니다. 여기서는 PipelineData 개체를 만들어 첫 번째 단계의 출력/두 번째 단계의 입력으로 사용할 것입니다. 또한 데이터 참조를 통해 참조하는 데이터 저장소 위치에 코드가 액세스할 수 있도록 이 개체를 스크립트 인수로 전달해야 합니다. 
# + from azureml.pipeline.core import PipelineData from azureml.pipeline.steps import PythonScriptStep # 학습 데이터 세트 가져오기 diabetes_ds = ws.datasets.get("diabetes dataset") # 모델 폴더용 PipelineData(임시 데이터 참조) 만들기 prepped_data_folder = PipelineData("prepped_data_folder", datastore=ws.get_default_datastore()) # 1단계: 데이터 준비 스크립트 실행 prep_step = PythonScriptStep(name = "Prepare Data", source_directory = experiment_folder, script_name = "prep_diabetes.py", arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'), '--prepped-data', prepped_data_folder], outputs=[prepped_data_folder], compute_target = pipeline_cluster, runconfig = pipeline_run_config, allow_reuse = True) # 2단계: 학습 스크립트 실행 train_step = PythonScriptStep(name = "Train and Register Model", source_directory = experiment_folder, script_name = "train_diabetes.py", arguments = ['--training-folder', prepped_data_folder], inputs=[prepped_data_folder], compute_target = pipeline_cluster, runconfig = pipeline_run_config, allow_reuse = True) print("Pipeline steps defined") # - # 이제 정의한 단계에서 파이프라인을 작성하여 실험으로 실행할 준비가 되었습니다. # + from azureml.core import Experiment from azureml.pipeline.core import Pipeline from azureml.widgets import RunDetails # 파이프라인 생성 pipeline_steps = [prep_step, train_step] pipeline = Pipeline(workspace=ws, steps=pipeline_steps) print("Pipeline is built.") # 실험 작성 및 파이프라인 실행 experiment = Experiment(workspace=ws, name = 'mslearn-diabetes-pipeline') pipeline_run = experiment.submit(pipeline, regenerate_outputs=True) print("Pipeline submitted for execution.") RunDetails(pipeline_run).show() pipeline_run.wait_for_completion(show_output=True) # - # 파이프라인이 실행되면 그래픽 형식 파이프라인 실험이 위젯에 표시됩니다. 페이지 오른쪽 위에 있는 커널 표시기를 주의 깊게 살펴보세요. **&#9899;**에서 **&#9711;**로 바뀌면 코드 실행이 완료된 것입니다. [Azure Machine Learning Studio](https://ml.azure.com)의 **실험** 페이지에서 파이프라인 실행을 모니터링할 수도 있습니다. # # 파이프라인이 완료되고 나면 하위 실행에서 기록된 메트릭을 검사할 수 있습니다. 
# Print the metrics logged by each child run of the pipeline.
for run in pipeline_run.get_children():
    print(run.name, ':')
    metrics = run.get_metrics()
    for metric_name in metrics:
        print('\t',metric_name, ":", metrics[metric_name])

# When the pipeline run completes successfully, a new model is registered with
# a *Training context* tag indicating that it was trained in a pipeline. Use
# the following code to verify that the model was registered.

# +
from azureml.core import Model

for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print ('\t',tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print ('\t',prop_name, ':', prop)
    print('\n')
# -

# ## Publish the pipeline
#
# Once the pipeline has been built and tested, it can be published as a REST
# service.

# +
# Publish the pipeline from the run
published_pipeline = pipeline_run.publish_pipeline(
    name="diabetes-training-pipeline", description="Trains diabetes model", version="1.0")

published_pipeline
# -

# The published pipeline has an endpoint, which you can see on the
# **Endpoints** page (under the **Pipeline Endpoints** tab) in
# [Azure Machine Learning Studio](https://ml.azure.com). You can also find the
# endpoint URI as a property of the published pipeline object.

rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)

# ## Call the pipeline endpoint
#
# To use the endpoint, client applications make a REST call over HTTP. The
# request must be authenticated, so an authorization header is required. A
# real application would use a service principal for authentication; to test
# it here, we use the authorization header of the current connection to the
# Azure workspace, which the following code obtains.

# +
from azureml.core.authentication import InteractiveLoginAuthentication

interactive_auth = InteractiveLoginAuthentication()
auth_header = interactive_auth.get_authentication_header()
print("Authentication header ready.")
# -

# Now we can call the REST interface. The pipeline runs asynchronously, so we
# get an identifier back that can be used to track the pipeline experiment as
# it runs.

# +
import requests

experiment_name = 'mslearn-diabetes-pipeline'

rest_endpoint = published_pipeline.endpoint
response = requests.post(rest_endpoint,
                         headers=auth_header,
                         json={"ExperimentName": experiment_name})
run_id = response.json()["Id"]
run_id
# -

# Since we have the run ID, we can use it to wait for the run to complete.
# # > **참고**: 각 단계는 출력을 재사용할 수 있도록 구성되었으므로 파이프라인 실행은 빠르게 완료됩니다. 이 구성은 기본적으로 과정 진행 시간을 절약하기 위해 편의상 적용된 것입니다. 실제로는 데이터가 변경될 때마다 첫 단계를 실행하고 1단계의 출력이 변경되는 경우에만 후속 단계를 트리거할 수 있습니다. # + from azureml.pipeline.core.run import PipelineRun published_pipeline_run = PipelineRun(ws.experiments[experiment_name], run_id) published_pipeline_run.wait_for_completion(show_output=True) # - # ## 파이프라인 예약 # # 당뇨병 환자를 진료하는 병원에서 매주 새 데이터를 수집하여 데이터 세트에 추가한다고 가정해 보겠습니다. 이 경우 매주 파이프라인을 실행하여 새 데이터로 모델을 다시 학습시킬 수 있습니다. # + from azureml.pipeline.core import ScheduleRecurrence, Schedule # 매주 월요일 00:00 UTC에 파이프라인 제출 recurrence = ScheduleRecurrence(frequency="Week", interval=1, week_days=["Monday"], time_of_day="00:00") weekly_schedule = Schedule.create(ws, name="weekly-diabetes-training", description="Based on time", pipeline_id=published_pipeline.id, experiment_name='mslearn-diabetes-pipeline', recurrence=recurrence) print('Pipeline scheduled.') # - # 다음과 같이 작업 영역에 정의된 일정을 검색할 수 있습니다. schedules = Schedule.list(ws) schedules # 다음과 같이 최신 실행을 확인할 수 있습니다. # + pipeline_experiment = ws.experiments.get('mslearn-diabetes-pipeline') latest_run = list(pipeline_experiment.get_runs())[0] latest_run.get_details() # - # 이 연습은 파이프라인 작성 원칙을 보여 주는 간단한 예제입니다. 실제로는 더 복잡한 논리를 작성하여 파이프라인 단계에 포함할 수 있습니다. 예를 들어 특정 테스트 데이터를 기준으로 모델을 평가해 AUC, 정확도 등의 성능 메트릭을 계산하고, 이 메트릭을 이전에 등록한 모델 버전의 메트릭과 비교한 다음 성능이 더 우수한 경우에만 새 모델을 등록할 수 있습니다. # # [Azure DevOps용 Azure Machine Learning 확장](https://marketplace.visualstudio.com/items?itemName=ms-air-aiagility.vss-services-azureml)을 사용하여 Azure ML 파이프라인을 Azure DevOps 파이프라인과 결합한 다음 *CI/CD(연속 통합/연속 배포)* 프로세스에 모델 재학습 과정을 통합할 수 있습니다. 이 두 파이프라인은 이름이 같아* *혼동할 수 있으므로 주의하세요. 예를 들어 Azure DevOps *빌드* 파이프라인을 사용해 모델 학습과 등록을 수행하는 Azure ML 파이프라인을 트리거할 수 있습니다. 모델이 등록되면 빌드 파이프라인은 Azure DevOps *릴리스* 파이프라인을 트리거할 수 있습니다. 릴리스 파이프라인은 모델을 사용하는 애플리케이션이나 서비스와 함께 모델을 웹 서비스로 배포합니다.
08 - Create a Pipeline.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Now You Code 4: Mad Libs!
#
# Write a Python program which creates your own unique Mad-Libs! story.
#
# If you are not familar with a Mad-Libs! story, check out:
# http://www.madlibs.com and http://www.madtakes.com
#
# Your story should take at least 5 inputs, and should include more than
# once sentence.
#
# ## Step 1: Problem Analysis
#
# Inputs:
# verb
# noun
# adjective
# place
# past tense verb
#
# Outputs:
# The madlib
#
# Algorithm (Steps in Program):
# input verb
# input noun
# input adjective
# input place
# input past tense verb
# print madlib sentences

# Collect the five fill-in words from the user.
verb = input('Enter verb ')
noun = input('Enter noun ')
adjective = input('Enter adjective ')
place = input('Enter place ')
past_verb = input('Enter past tense verb ')
# Print the story. A period is appended after the noun so the first sentence
# ends properly instead of running straight into "Unfortunately ...".
print('Sally wanted to', verb, 'at the', noun + '.', 'Unfortunately her', adjective, 'friend wanted to go to', place, 'for fun. So they', past_verb, 'there.')

# ## Step 3: Questions
#
# 1. What happens when neglect to follow the instructions and enter any inputs we desire? Does the code still run? Why?
#
# Yes it will still run because there are no syntax errors.
#
# 2. What type of error occurs when the program runs but does not handle bad input?
#
# This is called a logic error or bug. Meaning there isn't a problem with the code just the logic of the program.
#
# 3. Is there anything you can do in code to correct this type of error? Why or why not?
#
# There's not really anything you can do except check your work and make sure the input is correct so it will produce the proper output.

# ## Reminder of Evaluation Criteria
#
# 1. What the problem attempted (analysis, code, and answered questions) ?
# 2. What the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
#
content/lessons/02/Now-You-Code/NYC4-Mad-Libs.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:PROJ_irox_oer] *
#     language: python
#     name: conda-env-PROJ_irox_oer-py
# ---

# ### Import Modules

# +
import os
import sys

import pickle

import numpy as np

import plotly.graph_objs as go

from proj_data import (
    scatter_shared_props,
    shared_axis_dict,
    layout_shared,
    )
# -

# #########################################################
# Read the featurized targets dataframe produced by the featurize_data step.
directory = os.path.join(
    os.environ["PROJ_irox_oer"],
    "workflow/seoin_irox_data/featurize_data",
    "out_data")
path_i = os.path.join(
    directory,
    "df_features_targets.pickle")
with open(path_i, "rb") as fle:
    df_features_targets = pickle.load(fle)
# #########################################################

# +
layout = go.Layout(
    xaxis=go.layout.XAxis(
        title=dict(
            text="Ir Effective Oxidation State",
            ),
        ),
    yaxis=go.layout.YAxis(
        title=dict(
            text="ΔG<sub>O</sub>",
            ),
        ),
    )

tmp = layout.update(layout_shared)

# +
data = []

trace = go.Scatter(
    x=df_features_targets.effective_ox_state,
    y=df_features_targets.g_o,
    mode="markers",
    )
tmp = trace.update(
    scatter_shared_props
    )
data.append(trace)

fig = go.Figure(data=data, layout=layout)
fig.show()
# -

# ### Violin Plot

# +
import plotly.express as px

fig = px.violin(
    df_features_targets,
    y="g_o",
    x="effective_ox_state",
    box=True,
    points="all",
    )

# +
layout = go.Layout(
    xaxis=go.layout.XAxis(
        title=dict(
            text="Ir Effective Oxidation State",
            ),
        ),
    yaxis=go.layout.YAxis(
        title=dict(
            text="ΔG<sub>O</sub>",
            ),
        ),
    )

tmp = fig.update_layout(layout)
tmp = fig.update_layout(layout_shared)
# -

fig.show()


# ### Computing MAE from trivial Eff. Ox. State model

# +
def _mae_of_groupwise_mean(df, target_col):
    """MAE of the trivial model that predicts, for every row, the mean of its
    `effective_ox_state` group for `target_col`.

    This replaces three previously copy-pasted groupby loops (one per
    adsorbate target) with a single implementation; the numbers are identical.
    """
    group_cols = ["effective_ox_state", ]
    grouped = df.groupby(group_cols)

    abs_errors = []
    for name, group in grouped:
        abs_errors_i = np.abs(group[target_col] - group[target_col].mean()).tolist()
        abs_errors.extend(abs_errors_i)
    return np.mean(abs_errors)


# One MAE per adsorbate target
mae_o = _mae_of_groupwise_mean(df_features_targets, "g_o")
mae_oh = _mae_of_groupwise_mean(df_features_targets, "g_oh")
mae_ooh = _mae_of_groupwise_mean(df_features_targets, "g_ooh")
# -

np.round(
    (mae_o + mae_oh + mae_ooh) / 3,
    3)

print(
    "Average MAE: ",
    np.round(
        (mae_o + mae_oh + mae_ooh) / 3,
        4),
    sep="")

# ### G_OH vs G_O-OH

# +
layout = go.Layout(
    xaxis=go.layout.XAxis(
        title=dict(
            text="ΔG<sub>O</sub>-ΔG<sub>OH</sub>",
            ),
        ),
    yaxis=go.layout.YAxis(
        title=dict(
            text="ΔG<sub>OH</sub>",
            ),
        ),
    )

tmp = layout.update(layout_shared)

# +
data = []

trace = go.Scatter(
    x=df_features_targets["g_o"] - df_features_targets["g_oh"],
    y=df_features_targets["g_oh"],
    mode="markers",
    )
tmp = trace.update(
    scatter_shared_props
    )
data.append(trace)

fig = go.Figure(data=data, layout=layout)
fig.show()

# + active=""
#
#

# + jupyter={}
# # Pickling data ###########################################
# directory = os.path.join(
#     os.environ["PROJ_irox_oer"],
#     "workflow/seoin_irox_data/featurize_data",
#     "out_data")
# if not os.path.exists(directory):
#     os.makedirs(directory)
# path_i = os.path.join(directory, "df_features_targets.pickle")
# with open(path_i, "wb") as fle:
#     pickle.dump(df_features_targets, fle)
# # #########################################################

# + jupyter={}
# df_features_targets

# + jupyter={}
# scatter_shared_props,
# shared_axis_dict,
# layout_shared,
workflow/seoin_irox_data/plot_data/eff_ox_vs_oer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # K近邻法 # # 作者:杨岱川 # # 时间:2019年9月 # # github:https://github.com/DrDavidS/basic_Machine_Learning # # 开源协议:[MIT](https://github.com/DrDavidS/basic_Machine_Learning/blob/master/LICENSE) # # ## K近邻法简介 # # $k$近邻法($k$-nearest neighbor, $k$-NN)是一种基本分类与回归方法,于1968年由 Cover 和 Hart 提出。 # # $k$近邻法的输入为实例的特征向量,输出为实例的类别,可以取多个类。分类时,对于新的实例,根据其 $k$ 个最近邻的训练实例的类别,通过多数表决等方式进行预测。$k$ 值的选择、距离度量及分类决策规则是$k$近邻法的三个基本要素。 # # ## K近邻算法 # # $k$近邻法的基本做法是:对给定的训练实例点和输入实例点,首先确定输入实例点的$k$个最近邻训练实例点,然后利用这$k$个训练实例点的类的多数来预测输入实例点的类。 # # ### 算法描述 # # **输入**:训练数据集 # # $$\large T=\left \{(x_1,y_1),(x_2,y_2),\cdots,(x_N,y_N) \right \} $$ # # 其中: # # $i = 1,2,3,\cdots,N$; # # $x_i \in \Bbb X \subseteq \mathbf R^n$ 为实例的特征向量,可以类比 iris 数据集的 features(特征),有多个维度; # # $ y_i \in \Bbb Y = \left \{ c_1,c_2,\cdots,c_K \right \}$,可以类比为 iris 数据集的 labels(标签),可以是多分类的。 # # **输出**:实例 $x$ 所属的类 $y$,相当于是一个预测。 # # 1. 根据给定的距离度量,在训练集 $T$ 中找出与 $x$ 最邻近的 $k$ 个点,涵盖这 $k$ 个点 $x$ 的邻域记作 $N_k(x)$; # 2. 
在 $N_k(x)$ 中根据分类决策规则(如多数表决)决定 $x$ 的类别 $y$:
#
# $$\large y={\rm argmax}_{c_j}\sum_{x_i\in N_k(x)} I(y_i=c_j) \qquad,i=1,2,\cdots,N;j=1,2,\cdots,K $$
#
# 其中 $I$ 为指示函数,当 $y_i=c_j$ 时 $I$ 为1,否则 $I$ 为0。
#
# <img src="https://github.com/DrDavidS/basic_Machine_Learning/blob/master/back_up_images/KNN.png?raw=true" width="500" alt="K近邻算法" align=center>
#
# >$k$近邻算法中,当训练集、距离度量、$k$ 值即分类决策规则(如多数表决)确定后,对于任何一个新输入的实例,它所属的类是唯一确定的。

# ### 距离度量
#
# 上面讲了,需要计算**最邻近**的 $k$ 个点,而如何确定点到点之间的距离很重要。
#
# 特征空间中的两个实例点的距离是两个实例点相似程度的反映。$k$近邻模型的特征空间一般是 $n$ 维度实数向量空间 $\mathbf R^n$。使用的距离是欧氏距离,但也可以是其他距离,这里我们将介绍 $L_p$ 距离。
#
# 设特征空间 $\Bbb X$ 是 $n$ 维实数向量空间 $\mathbf R^n$, $x_i,x_j \in \Bbb X,x_i=(x_i^{(1)},x_i^{(2)},\cdots,x_i^{(n)})^{\rm T}$ ,$ x_j=(x_j^{(1)},x_j^{(2)},\cdots,x_j^{(n)})^{\rm T} $,$x_i,x_j$ 的 $L_p$距离定义为:
#
# $$\large L_p(x_i,x_j)=\left( \sum_{l=1}^n \left| x_i^{(l)} - x_j^{(l)} \right |^p \right)^{\frac{1}{p}} $$
#
# 其中,$p \ge 1$。
#
# 举例说明:类比到iris数据集,我们有:花萼长度、花萼宽度、花瓣长度和花瓣宽度四种特征(不算ID列),则 $ x_i^{(l)}$ 就是第 $i$ 朵鸢尾花实例 $x_i$ 的第 $l$ 个特征(一共四个)
#
# #### $p=1$
#
# 当 $p=1$ 时,称为曼哈顿距离(Manhattan distance),公式变为:
#
# $$\large L_1(x_i,x_j)=\sum_{l=1}^n \left| x_i^{(l)} - x_j^{(l)} \right | $$
#
# #### $p=2$
#
# 当 $p=2$ 时,称为欧氏距离(Euclidean distance),公式变为:
#
# $$\large L_2(x_i,x_j)=\left( \sum_{l=1}^n \left| x_i^{(l)} - x_j^{(l)} \right |^2 \right)^{\frac{1}{2}} $$
#
# <img src="https://github.com/DrDavidS/basic_Machine_Learning/blob/master/back_up_images/%E6%9B%BC%E5%93%88%E9%A1%BF%E8%B7%9D%E7%A6%BB%E4%B8%8E%E6%AC%A7%E6%B0%8F%E8%B7%9D%E7%A6%BB.jpg?raw=true" width="400" alt="曼哈顿距离与欧氏距离" align=center>
# <center>曼哈顿距离与欧氏距离</center>
#
# 如上图,图中绿色线段的长度就是两个黑点的欧氏距离,通俗地说就是直线距离。其余线段则表示曼哈顿距离(出租车路线)。在日常生活中,房地产商的广告总是喜欢宣传“本小区到核心商圈和交通枢纽只需要10分钟车程”,也许“10分钟”是按照欧氏距离,时速120千米计算出来的。
#
# >杭电到西湖边的欧氏距离大概19千米,按行车时速120千米计算真的只要差不多10分钟。
#
# #### $p=\infty$
#
# 当 $p=\infty$ 时,称为切比雪夫距离(Chebyshev distance),公式变为:
#
# $$\large L_{\infty}(x_i,x_j)=\max_l \left| x_i^{(l)} - x_j^{(l)} \right | $$
#
切比雪夫距离有在国际象棋上有一种有趣的应用:若将国际象棋棋盘放在二维直角座标系中,格子的边长定义为1,座标的x轴及y轴和棋盘方格平行,原点恰落在某一格的中心点,则王从一个位置走到其他位置需要的步数恰为二个位置的切比雪夫距离,因此切比雪夫距离也称为棋盘距离。 # # <img src="https://github.com/DrDavidS/basic_Machine_Learning/blob/master/back_up_images/%E6%9B%BC%E5%93%88%E9%A1%BF%E8%B7%9D%E7%A6%BB%E4%B8%8E%E6%AC%A7%E6%B0%8F%E8%B7%9D%E7%A6%BB2.jpg?raw=true" width="400" alt="曼哈顿距离与欧氏距离" align=center> # # # 见扩展阅读:[切比雪夫距离](https://zh.wikipedia.org/wiki/切比雪夫距离) # ### $k$ 值的选择 # # $k$ 值的选择会对$k$近邻法的结果产生重大影响。 # # 具体地: # # **$k$ 值的减小意味着整体模型变得复杂,容易发生过拟合。** # # $k$ 值较小意味着预测结果对临近的实例点更加敏感,如果临近实例点恰巧是噪声,预测很可能会出错。 # # **$k$ 值的增大意味着整体模型变得简单。** # # $k$ 值较大意味着与待遇测实例较远(不相似)的训练实例也会对预测起作用,使预测发生错误。 # # > 特别地: # > # >当 $k=N$ 时,相当于取所有点的label投票,无论输入实例是什么,都将简单地预测它在训练实例中最多的类。属于模型最简单的那种情况。 # ## SKlearn中的$k$近邻算法 # # 在SKlearn中,也有现成的$k$近邻法(分类)供我们调用。 # # 参考: # - [Nearest Neighbors Classification](https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-classification)。 # - [sklearn.neighbors.KNeighborsClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier) # - [《统计学习方法》的代码实现](https://github.com/fengdu78/lihang-code) # # ### 数据准备 # # 之前我们都是从 iris.csv 导入数据集,实际上SKlearn也有很多内置数据集,包括 iris 数据集。这次我们使用 SKlearn 自带的 iris 数据集。 # + import matplotlib.pyplot as plt # 绘图 import numpy as np # numpy import pandas as pd from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier # %matplotlib inline # 导入iris数据集,转换为 dataframe 格式 iris = load_iris() df = pd.DataFrame(iris.data, columns=iris.feature_names) df['label'] = iris.target df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label'] # - df # 接下来绘制出数据的分布。在这里我们实际使用的是四维数据,但是只绘制出其中两个维度,即花萼长度和花萼宽度。 # + # 画布大小 plt.figure(figsize=(10,10)) # 中文标题 plt.rcParams['font.sans-serif']=['SimHei'] plt.rcParams['axes.unicode_minus'] = False plt.title('鸢尾花数据示例(二维)') # 点的绘制 plt.scatter(df[:50]['sepal 
length'], df[:50]['sepal width'], label='0')
plt.scatter(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1', marker='x')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend()
# -

# Select the two iris classes that are (roughly) linearly separable:

# +
# Keep only the first two iris classes
data = np.array(df.iloc[:100, [0, 1, -1]])
# Split into features and labels
X, y = data[:,:-1], data[:,-1]
# Random train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # 80/20 split
# -

# ### Model training

clf = KNeighborsClassifier()
clf.fit(X_train, y_train)

# `clf` echoes its parameters automatically; the important ones are:
#
# - algorithm='auto': which concrete implementation is used. The **kd-tree**
#   described in the book is built in and can be selected with
#   algorithm='kd_tree'; there are others such as BallTree. Further reading:
#   [Ball tree](https://en.wikipedia.org/wiki/Ball_tree)
# - n_jobs: how many jobs are used to execute the algorithm.
# - n_neighbors=5: corresponds to the value of $k$ discussed earlier.
# - metric='minkowski': the Minkowski distance, i.e. the $L_p$ distance above.
# - p=2: with $p=2$ the Minkowski formula becomes the Euclidean distance.
#
# The meaning of the remaining parameters is described under **Parameters** in
# [sklearn.neighbors.KNeighborsClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier).
#
# ### Model prediction
#
# Next, look at the results on the test set, along with the mean accuracy on
# the test data:

y_predict = clf.predict(X_test)
y_predict

y_test

clf.score(X_test, y_test)

# ### Visualisation
#
# Set up a single extra point to demonstrate the model's decision.
#
# Suppose the test point data is:

test_point = [[6.0, 3.0]]  # note: this is a nested (2-D) list
print('Test Point: {}'.format(clf.predict(test_point)))

# +
# Figure size
plt.figure(figsize=(10,10))

# Use a Chinese-capable font for the title
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('k近邻模型单点测试')

# Draw the points
plt.scatter(df[:50]['sepal length'], df[:50]['sepal width'], label='0')
plt.scatter(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1', marker='x')
plt.plot(test_point[0][0], test_point[0][1], c='r', marker='^', label='test_point')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend()
# -
02机器学习基础/2.03 K近邻法.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="3V8u8Ux1Y8P0" colab_type="text"
# ![QUORA](https://i.imgur.com/wJqNOG4.jpg)

# + [markdown] id="s1e5iVxWZ0Mt" colab_type="text"
# <h2>Business Problem</h2>
# <h3>About Quora</h3>
# <p>Quora is a place to gain and share knowledge—about anything. It’s a platform to ask questions and connect with people who contribute unique insights and quality answers. This empowers people to learn from each other and to better understand the world.</p>
# <p>
# Over 100 million people visit Quora every month, so it's no surprise that many people ask similarly worded questions. Multiple questions with the same intent can cause seekers to spend more time finding the best answer to their question, and make writers feel they need to answer multiple versions of the same question. Quora values canonical questions because they provide a better experience to active seekers and writers, and offer more value to both of these groups in the long term.
# </p>
# <br>
# </p>
# <h3>Problem Statement</h3>
# * We have to identify if two questions are semantically similar.
# * We do this because when a user posts a new question we have to verify if a similar question has already been asked.
# * If a similar question has already been asked we can suggest the same answers, enabling an instant solution for the question.
#
#
# <h3>Real world objectives</h3>
# * Misclassification is of utmost importance. Because imagine there are two questions Q1-how to get rid of rats? and Q2- how to get rid of life ? one of the answers for Q1 is **Use ammonia** — now if we misclassify Q1 and Q2 as similar, you know what happens next.
# <br>
# Even if the user is not satisfied with answers he/she might not visit again so classifying properly is crucial.
#
# * Since this is a text similarity problem we can use probabilities and a threshold to classify them as same or not.
#
# * Solution interpretability is actually not important when deployed but it's good to know.
# * Not a real time problem so no strict low latency constraint is demanded.

# + [markdown] id="HaSogrM9hzAA" colab_type="text"
# <h2>Machine learning Problem</h2>
#
# <h3>Source: [kaggle](https://www.kaggle.com/c/quora-question-pairs/)</h3>
# <h3>Data overview</h3>
#
# * Train.csv (only) contains the below mentioned fields
#   * id - the id of a training set question pair
#   * qid1, qid2 - unique ids of each question (only available in train.csv)
#   * question1, question2 - the full text of each question
#   * is_duplicate - the target variable, set to 1 if question1 and question2 have essentially the same meaning, and 0 otherwise.
#
# * Size of Train.csv is 60MB
# * No of rows in Train.csv is 404k
#
# <h3>Example instance</h3>
# ![alt text](https://i.imgur.com/ZdUy5Z4.png)

# + [markdown] id="SiopDQawnHj2" colab_type="text"
# <h2>Mapping the real world problem to a Machine learning problem</h2>
#
# <h3>Type of ML problem </h3>
# Since we have to predict whether Q1 and Q2 are similar or not, it is a binary classification problem.
# # # <h3>Performance metric</h3> # * log-loss # # + [markdown] id="cGaQzSe-pMnO" colab_type="text" # <h2>Exploratory Data Analysis</h2> # + id="sk-YBKsVqLPG" colab_type="code" colab={} import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline import re from nltk.corpus import stopwords from nltk.stem import PorterStemmer # + id="TjW1YLWxroFh" colab_type="code" outputId="9463b539-bac4-437c-f22e-cba85d189b10" executionInfo={"status": "ok", "timestamp": 1557478270283, "user_tz": -330, "elapsed": 86521, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 132} from google.colab import drive drive.mount('/content/gdrive') # + id="kN5BhMDlsCI2" colab_type="code" outputId="fc2ebe26-636b-443a-b1e7-e38664edc66a" executionInfo={"status": "ok", "timestamp": 1557478270289, "user_tz": -330, "elapsed": 4617, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 36} import os os.chdir('/content/gdrive/My Drive/Quora questions pair') os.listdir() # + id="rqUIxhOxvlf5" colab_type="code" outputId="febc2506-9ead-4acc-c8e5-250b84b614a9" executionInfo={"status": "ok", "timestamp": 1557478274326, "user_tz": -330, "elapsed": 8138, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 36} df = pd.read_csv("train.csv") print("Number of data points:",df.shape[0]) # + id="kIKUzfnkv32R" colab_type="code" outputId="c3c2fb8d-b016-49a6-997b-488cb3d1c12f" executionInfo={"status": "ok", "timestamp": 1557478274327, "user_tz": -330, "elapsed": 7738, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 204} df.head() # + id="wfQopnLcv6pJ" colab_type="code" 
outputId="cc97c554-7201-4e6a-bb59-3f92f709ffd5" executionInfo={"status": "ok", "timestamp": 1557478274713, "user_tz": -330, "elapsed": 7819, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 226} #missing values df.info() # + id="lBaJ9uwFw2Eg" colab_type="code" outputId="ce73f8fb-4d7e-47a8-b956-c4aa54b31c40" executionInfo={"status": "ok", "timestamp": 1557478274715, "user_tz": -330, "elapsed": 7701, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 299} # df.groupby("is_duplicate").count().plot.bar() df.groupby("is_duplicate")['id'].count().plot.bar() # + id="B0o-1lHpxM0y" colab_type="code" outputId="fb8b7e91-a784-4ade-a61c-ebe215677600" executionInfo={"status": "ok", "timestamp": 1557478274716, "user_tz": -330, "elapsed": 7425, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 55} non_duplicate = df['is_duplicate'].value_counts()[0] duplicate = df.shape[0]-non_duplicate print("Duplicate : {:0.2f}%".format((duplicate/df.shape[0])*100)) print("Non duplicate : {:0.2f}%".format((non_duplicate/df.shape[0])*100)) # + id="OR2gUletx9_I" colab_type="code" outputId="205f97a0-9598-4225-b91b-9b22f3a71d16" executionInfo={"status": "ok", "timestamp": 1557478275100, "user_tz": -330, "elapsed": 7682, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 74} l = df['qid1'].tolist() + df['qid2'].tolist() unique_quest = pd.Series(l).value_counts() print('Most repeated question count :',unique_quest.iloc[0]) print('Total number of unique questions {} : '.format(len(unique_quest))) print('Number of questions which appeared more than once : ',len(unique_quest[unique_quest > 1])) # + id="ox3gy1IY4rhs" colab_type="code" 
outputId="4c877ae6-09cc-4452-c08c-ff7310080f96" executionInfo={"status": "ok", "timestamp": 1557478275883, "user_tz": -330, "elapsed": 8204, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 390} x = ["Unique Questions" , "Repeated Questions"] y = [len(unique_quest),len(unique_quest[unique_quest > 1])] plt.figure(figsize=(10, 6)) plt.title ("Plot representing unique and repeated questions ") sns.barplot(x,y) plt.show() # + id="JmHhOYk6OI9L" colab_type="code" outputId="90192367-f70d-4e71-ea19-b87031b3186c" executionInfo={"status": "ok", "timestamp": 1557478275888, "user_tz": -330, "elapsed": 8019, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 36} #duplicates presence df[df.duplicated(['qid1','qid2'])].shape[0] # + id="GRSfS2_DOUsO" colab_type="code" outputId="79c8212c-9051-444b-b4cb-3814ff77828b" executionInfo={"status": "ok", "timestamp": 1557478277729, "user_tz": -330, "elapsed": 9717, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 640} plt.figure(figsize=(20, 10)) plt.hist(unique_quest, bins=160) plt.yscale('log', nonposy='clip') plt.title('Log-Histogram of question appearance counts') plt.xlabel('Number of occurences of question') plt.ylabel('Number of questions') # + id="_P8lFVYHOzlO" colab_type="code" outputId="8bb99cb6-452c-4bc3-d2b5-48ad652d012b" executionInfo={"status": "ok", "timestamp": 1557478277732, "user_tz": -330, "elapsed": 9580, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17140425108724862216"}} colab={"base_uri": "https://localhost:8080/", "height": 142} df[df.isnull().any(axis=1)] # + id="rjaUZ2LFPW-5" colab_type="code" colab={} df.dropna(inplace=True) # + [markdown] id="Yqjgk9J5QYMt" colab_type="text" # <h2>Trying out new features</h2> # # * freq_qid1 = 
# Frequency of qid1's<br>
# * freq_qid2 = Frequency of qid2's <br>
# * q1len = Length of q1<br>
# * q2len = Length of q2<br>
# * q1_n_words = Number of words in Question 1<br>
# * q2_n_words = Number of words in Question 2<br>
# * word_Common = (Number of common unique words in Question 1 and Question 2)<br>
# * word_Total = (Total num of words in Question 1 + Total num of words in Question 2)<br>
# * word_share = (word_common)/(word_Total)<br>
# * freq_q1+freq_q2 = sum total of frequency of qid1 and qid2 <br>
# * freq_q1-freq_q2 = absolute difference of frequency of qid1 and qid2 <br>

# + id="u2rHVWQi_V5Z" colab_type="code"
# All are pretty self explanatory — see the list above.
df['freq_qid1'] = df.groupby('qid1')['qid1'].transform('count')
df['freq_qid2'] = df.groupby('qid2')['qid2'].transform('count')
df['q1len'] = df['question1'].str.len()
df['q2len'] = df['question2'].str.len()
df['q1_n_words'] = df['question1'].apply(lambda row: len(row.split(" ")))
df['q2_n_words'] = df['question2'].apply(lambda row: len(row.split(" ")))

def normalized_word_Common(row):
    # Count of unique lower-cased words shared by the two questions.
    w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" ")))
    w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" ")))
    return 1.0 * len(w1 & w2)
df['word_Common'] = df.apply(normalized_word_Common, axis=1)

def normalized_word_Total(row):
    # Total unique-word count over both questions (set sizes added).
    w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" ")))
    w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" ")))
    return 1.0 * (len(w1) + len(w2))
df['word_Total'] = df.apply(normalized_word_Total, axis=1)

def normalized_word_share(row):
    # Shared-word ratio: |w1 & w2| / (|w1| + |w2|) — lies in [0, 0.5].
    w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" ")))
    w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" ")))
    return 1.0 * len(w1 & w2)/(len(w1) + len(w2))
df['word_share'] = df.apply(normalized_word_share, axis=1)

df['freq_q1+q2'] = df['freq_qid1']+df['freq_qid2']
df['freq_q1-q2'] = abs(df['freq_qid1']-df['freq_qid2'])

# Persist the frame with the engineered (pre-preprocessing) features.
df.to_csv("df_fe_without_preprocessing_train.csv", index=False)

df.head()

# + id="ySkrFVmtGr7D" colab_type="code"
print ("Minimum length of the questions in question1 : " , min(df['q1_n_words']))

print ("Minimum length of the questions in question2 : " , min(df['q2_n_words']))

print ("Number of Questions with minimum length [question1] :", df[df['q1_n_words']== 1].shape[0])
print ("Number of Questions with minimum length [question2] :", df[df['q2_n_words']== 1].shape[0])

# NOTE(review): q1len/q2len are character lengths, so these "average number
# of words" figures are actually average character counts — verify intent.
print('Average number of words in q1 :',df['q1len'].sum()//df.shape[0])
print('Average number of words in q2 :',df['q2len'].sum()//df.shape[0])

# + [markdown] id="cAGsFJy2DHKZ" colab_type="text"
# word_share feature exploration

# + id="q0tFkr0dESRD" colab_type="code"
plt.figure(figsize=(13, 6))

# This creates 1 row x 2 columns and selects the 1st plot
plt.subplot(1,2,1)
sns.violinplot(x = 'is_duplicate', y = 'word_share', data = df[:])

# Select the 2nd plot
plt.subplot(1,2,2)

# The plot below shows that word_share carries signal which helps
# distinguish the classes
sns.distplot(df[df['is_duplicate'] == 1.0]['word_share'][:] , label = "1", color = 'red')
sns.distplot(df[df['is_duplicate'] == 0.0]['word_share'][:] , label = "0" , color = 'blue' )
plt.legend()
plt.show()

# + [markdown] id="Y6tFG8OyFmag" colab_type="text"
# <h3>Observations</h3>
#
# * As word_share increases, the proportion of duplicates increases (the red histogram).
# * In the violin plot the centre part is like a box plot showing the 25/50/75 percentiles; the mean for the duplicate class has a greater word_share value than the non-duplicate class.

# + id="-hwIkr0jNvTE" colab_type="code"
plt.figure(figsize=(12, 8))

plt.subplot(1,2,1)
sns.violinplot(x = 'is_duplicate', y = 'word_Common', data = df[0:])

plt.subplot(1,2,2)
sns.distplot(df[df['is_duplicate'] == 1.0]['word_Common'][0:] , label = "1", color = 'red')
sns.distplot(df[df['is_duplicate'] == 0.0]['word_Common'][0:] , label = "0" , color = 'blue' )
plt.show()

# + [markdown] id="bXguuyYWN41J" colab_type="text"
# As we can see the distributions are overlapping, so word_Common might not be the best feature — it cannot separate the classes well.

# + [markdown] id="k8SrJ29eOVtf" colab_type="text"
# <h2>Preprocessing</h2>

# + id="lljcmhqN9ApS" colab_type="code"
# !pip install fuzzywuzzy
# !pip install Distance

# + id="BjLjJWQ18g7w" colab_type="code"
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from bs4 import BeautifulSoup

import re
from nltk.corpus import stopwords
# This package is used for finding longest common subsequence between two strings
import distance
from nltk.stem import PorterStemmer
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
from sklearn.manifold import TSNE
from wordcloud import WordCloud, STOPWORDS
from os import path
from PIL import Image

# + id="DcPeiLvY8lbH" colab_type="code" colab={}
Quora_question_pairs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Observations and Insights # + # 1. According to data given the best treatment results were achieved with Ramocane and Capomulin drug regimens. However, these two drug regimens have more data points than other drug regimens. When we check Standard Variantion and SEM we observe more stable results with Capomulin and Ramocane regimens. # 2. For further analysis we used two the best treatment results and another two from bottom of our list. Mouse gender were equally separated, 50.96% Male and 49.04% Female. While analysing of rpossible data outliers we found only one, which is definitely tells us the good quality of our data. # 4. Correlation between mouse weight and average tumor volume is 0.96. With increased mouse weight tthe average tumor volume also increases. # 5. Results for a203 by Infubinol Regimen table shows ineffective treatment regimen where tumor volume increased. # 6. I would also consider Metastatic Sites quantity as a data to analyse, we can find that even successful treatment regimens were not able to prevent new metastatic sites appear. 
# - # # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import scipy.stats as st from scipy.stats import linregress # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset comb_data = pd.merge(mouse_metadata, study_results, on="Mouse ID", how="outer") comb_data.rename(columns={"Tumor_Volume_mm3": "Tumor Volume (mm3)", "Age_months": "Age (months)"}, inplace=True) # Display the data table for preview comb_data.head() # - # Checking the number of mice. print(f"Total mice number in merged dataframe: {len(comb_data['Mouse ID'].value_counts())}") # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. duplicate_mice = comb_data.loc[comb_data.duplicated(subset=['Mouse ID', 'Timepoint',]),'Mouse ID'].unique() len(duplicate_mice) # Optional: Get all the data for the duplicate mouse ID. duplicate_mouse_id=pd.DataFrame(duplicate_mice) duplicate_mouse_id.head() # Create a clean DataFrame by dropping the duplicate mouse by its ID. clean_mice = comb_data[comb_data['Mouse ID'].isin(duplicate_mice)==False] clean_mice.head() # Checking the number of mice in the clean DataFrame. print(f"Total mice number in cleaned dataframe: {len(clean_mice['Mouse ID'].value_counts())}") # ## Summary Statistics # + # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # This method is the most straighforward, creating multiple series and putting them all together at the end. 
regimen_mean = clean_mice.groupby('Drug Regimen').mean()["Tumor Volume (mm3)"] regimen_med = clean_mice.groupby('Drug Regimen').median()["Tumor Volume (mm3)"] regimen_var = clean_mice.groupby('Drug Regimen').var()["Tumor Volume (mm3)"] regimen_std = clean_mice.groupby('Drug Regimen').std()["Tumor Volume (mm3)"] regimen_sem = clean_mice.groupby('Drug Regimen').sem()["Tumor Volume (mm3)"] sum_stats_table = pd.DataFrame({"Tumor Volume Mean (mm3)": regimen_mean, "Tumor Volume Med (mm3)":regimen_med, "Tumor Volume Var (mm3)":regimen_var, "Tumor Volume Std Dev (mm3)": regimen_std, "Tumor Volume SEM (mm3)": regimen_sem}) sum_stats_table = sum_stats_table.sort_values(by="Tumor Volume Mean (mm3)", ascending=True) sum_stats_table # - # ## Bar and Pie Charts # + # Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas. data_points = clean_mice['Drug Regimen'].value_counts() data = pd.DataFrame(data_points) data = data.reset_index(drop=False) data = data.sort_values(by="Drug Regimen", ascending=True) x_axis= 'index' y_axis = 'Drug Regimen' pandas_bar = data.plot.barh(x_axis, y_axis, xlim=(0,240), ylim=(0,10), color='green', alpha=0.75, figsize = (10,5), width = 0.75, stacked=True) pandas_bar.set_title("Mice per Treatment", fontsize = 20) pandas_bar.set_ylabel("Drug Regimen", fontsize = 14) pandas_bar.set_xlabel("Mice Number", fontsize = 14) pandas_bar.grid(True, linestyle='-', which='major', color='grey', alpha=.5) # + # Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot. 
x_axis= np.arange(len(data)) y_axis = data["Drug Regimen"] plt.figure(figsize=(10,5)) bar_chart = plt.barh(x_axis, y_axis, color = 'green', alpha=0.75, align='center', label='Drug Regimen') plt.xlim(0,240) plt.ylim(-0.75,9.75) plt.title('Mice per Treatment', fontsize = 20) plt.xlabel("Mice Number", fontsize = 14) plt.ylabel("Drug Regimen", fontsize = 14) plt.legend(loc='best') tick_locations = [value for value in x_axis] plt.yticks(tick_locations, data["index"], rotation="horizontal") plt.grid(True, linestyle='-', which='major', color='grey', alpha=.5) plt.show() # + # Generate a pie plot showing the distribution of female versus male mice using pandas male = (len(clean_mice.loc[clean_mice["Sex"] == "Male",:])/len(clean_mice["Sex"]))*100 female = (len(clean_mice.loc[clean_mice["Sex"] == "Female",:])/len(clean_mice["Sex"]))*100 gender_df = pd.DataFrame({"Sex": [male, female], "Mice":["Male", "Female"]}) gender_df = gender_df.set_index('Mice') plot = gender_df.plot.pie(y="Sex", figsize =(6, 6), explode=(0.05,0), colors = 'br', autopct='%1.2f%%', startangle = 30) plt.title('Gender Distribution',fontsize = 20) plt.ylabel('Sex',fontsize = 14) # + # Generate a pie plot showing the distribution of female versus male mice using pyplot male = (len(clean_mice.loc[clean_mice["Sex"] == "Male",:])/len(clean_mice["Sex"]))*100 female = (len(clean_mice.loc[clean_mice["Sex"] == "Female",:])/len(clean_mice["Sex"]))*100 gender_df = pd.DataFrame({"Sex": [male, female], "Mice":["Male", "Female"]}) gender_df = gender_df.set_index('Mice') labels = ["Male","Female"] sizes = [male,female] colors = ['blue', 'red'] explode = (0.05, 0) fig1, ax1 = plt.subplots(figsize=(6, 6)) plt.pie(sizes, explode=explode,labels=labels, colors=colors, autopct="%1.2f%%", shadow=True, startangle=30,) plt.title('Gender Distribution',fontsize = 20) plt.ylabel('Sex',fontsize = 14) # - # ## Quartiles, Outliers and Boxplots # + # Calculate the final tumor volume of each mouse across four of the treatment regimens: 
Capomulin, Ramicane, Infubinol, and Ceftamin regimen_treatment=clean_mice[["Mouse ID","Drug Regimen","Tumor Volume (mm3)"]]\ .groupby(["Mouse ID", "Drug Regimen" ]).last()\ .sort_values(by = "Drug Regimen", ascending = True).reset_index() regimen_treatment.set_index(["Drug Regimen", "Mouse ID"], inplace =True) capomulin = regimen_treatment.loc['Capomulin']["Tumor Volume (mm3)"] quartiles_capomulin = capomulin.quantile([.25,.5,.75]) lower_capomulin = quartiles_capomulin[.25] upper_capomulin = quartiles_capomulin[.75] iqr_capomulin = upper_capomulin-lower_capomulin capomulin_outliers = [] for value in capomulin: if value > upper_capomulin + 1.5*iqr_capomulin: capomulin_outliers.append(value) elif value < lower_capomulin - 1.5*iqr_capomulin: capomulin_outliers.append(value) print(f"There is(are) " + str(len(capomulin_outliers)) + " outlier(s) in Campomulin and here is a list: " + str(capomulin_outliers)) ramicane = regimen_treatment.loc['Ramicane']["Tumor Volume (mm3)"] quartiles_ramicane = ramicane.quantile([.25,.5,.75]) lower_ramicane = quartiles_ramicane[.25] upper_ramicane = quartiles_ramicane[.75] iqr_ramicane = upper_ramicane-lower_ramicane ramicane_outliers = [] for value in ramicane: if value > upper_ramicane + 1.5*iqr_ramicane: ramicane_outliers.append(value) elif value < lower_ramicane - 1.5*iqr_ramicane: ramicane_outliers.append(value) print(f"There is(are) " + str(len(ramicane_outliers)) + " outlier(s) in Ramicane and here is a list: " + str(ramicane_outliers)) infubinol = regimen_treatment.loc['Infubinol']["Tumor Volume (mm3)"] quartiles_infubinol = infubinol.quantile([.25,.5,.75]) lower_infubinol = quartiles_infubinol[.25] upper_infubinol = quartiles_infubinol[.75] iqr_infubinol = upper_infubinol-lower_infubinol infubinol_outliers = [] for value in infubinol: if value > upper_infubinol + 1.5*iqr_infubinol: infubinol_outliers.append(value) elif value < lower_infubinol - 1.5*iqr_infubinol: infubinol_outliers.append(value) print(f"There is(are) " + 
str(len(infubinol_outliers)) + " outlier(s) in Infubinol and here is a list: " + str(infubinol_outliers)) ceftamin = regimen_treatment.loc['Ceftamin']["Tumor Volume (mm3)"] quartiles_ceftamin = ceftamin.quantile([.25,.5,.75]) lower_ceftamin = quartiles_ceftamin[.25] upper_ceftamin = quartiles_ceftamin[.75] iqr_ceftamin = upper_ceftamin-lower_ceftamin ceftamin_outliers = [] for value in ceftamin: if value > upper_ceftamin + 1.5*iqr_ceftamin: ceftamin_outliers.append(value) elif value < lower_ceftamin - 1.5*iqr_ceftamin: ceftamin_outliers.append(value) print(f"There is(are) " + str(len(ceftamin_outliers)) + " outlier(s) in Ceftamin and here is a list: " + str(ceftamin_outliers)) regimen_treatment.head() # + # Generate a box plot of the final tumor volume of each mouse across four regimens of interest data_to_plot = [capomulin, ramicane, infubinol, ceftamin] Regimen = ['Capomulin', 'Ramicane', 'Infubinol','Ceftamin'] fig1, ax1 = plt.subplots(figsize=(10, 5)) ax1.set_title('Treatment Results on Box Plot',fontsize =20) ax1.set_ylabel('Final Tumor Volume (mm3)',fontsize = 14) ax1.set_xlabel('Drug Regimen',fontsize =14) ax1.boxplot(data_to_plot, labels=Regimen, widths = 0.4, patch_artist=True,vert=True) plt.ylim(10, 80) # - # ## Line and Scatter Plots # + # Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin capomulin_data =clean_mice[["Drug Regimen","Timepoint", "Mouse ID","Tumor Volume (mm3)"]] capomulin_data.set_index(["Drug Regimen"], inplace =True) capomulin_dat = capomulin_data.loc["Capomulin", :] capomulin_dat = capomulin_dat.sort_values(by="Mouse ID", ascending = True) capomulin_dat = capomulin_dat.head(10) capomulin_dat = capomulin_dat.sort_values(by="Timepoint", ascending = True) line = capomulin_dat.plot.line(x="Timepoint", y="Tumor Volume (mm3)", xlim=(-1,46), ylim=(37,46),color="green", figsize = (10,5), fontsize = 14, grid=True) line.set_ylabel("Tumor Volume", fontsize = 14) line.set_xlabel("Timepoint", fontsize = 14) 
line.set_title(' Results for b128 by Capomulin Regimen', fontsize=20) capomulin_dat.head() # + # Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen mass_df = clean_mice[["Drug Regimen","Tumor Volume (mm3)","Weight (g)"]]\ .groupby(["Weight (g)"]).mean() mass_df = mass_df.reset_index(drop=False) plt.figure(figsize=(10,5)) plt.xlim(14,31) plt.ylim(35.5,60) plt.scatter(mass_df['Weight (g)'], mass_df['Tumor Volume (mm3)'], marker="o", facecolors="green", edgecolors="black") plt.ylabel("Average Tumor Volume (mm3)", fontsize = 14) plt.xlabel("Weight (g)", fontsize = 14) plt.title("Mouse Weight vs Average Tumor Volume", fontsize=20) plt.plot(mass_df['Weight (g)'], mass_df['Tumor Volume (mm3)'], color='green', label="Weight (g)") plt.legend(loc="best") plt.grid(True, linestyle='-', which='major', color='grey', alpha=.25) mass_df.head() # - ## Correlation and Regression x_values = mass_df["Weight (g)"] y_values = mass_df["Tumor Volume (mm3)"] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values) regress_values = x_values*slope + intercept line_eq ="y= " + str(round(slope,2)) + "x + " +str(round(intercept,2)) coef = x_values.corr(y_values) plt.figure(figsize=(10,5)) plt.xlim(14,31) plt.ylim(32,60) plt.scatter(x_values, y_values, marker="o", facecolors="green", edgecolors="green" ) plt.plot(x_values, regress_values,"r-", label="Regression") plt.annotate(line_eq,(18, 40), fontsize=15, color="red", rotation = "15") plt.ylabel("Average Tumor Volume (mm3)", fontsize = 14) plt.xlabel("Weight (g)", fontsize = 14) plt.title("Mouse weight vs Average Tumor Volume", fontsize=20) plt.plot(mass_df['Weight (g)'], mass_df['Tumor Volume (mm3)'], color='green', label="Weight (g)") plt.legend(loc="best") plt.grid(True, linestyle='-', which='major', color='grey', alpha=.25) plt.show() print(f" Correlation coefficient is equal to: " + str(coef))
Pymaceuticals/pymaceuticals_starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Command to download all NCEP data from MetOffice # + language="bash" # wget -r -np -k -A nc http://192.168.3.11/LAURA/NCEP-NCAR/ # - iris.load_cube('/home/jovyan/data/mon.air.2m.gauss.1980.nc') # + import iris from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import matplotlib.pyplot as plt import iris import iris.quickplot as qplt cube_2015 = iris.load('/home/jovyan/data/mon.air.2m.gauss.2015.nc')[0] print(cube_2015[1]) qplt.pcolormesh(cube_2015[1]) plt.show() # - # ## Code to fix monthly NCEP data from 2015 to 2020 # # Do the same for all the data from 2015 to 2020 cube_list = iris.load('/home/jovyan/data/mon.air.2m.gauss.2020.nc') cube_list_new = iris.cube.CubeList([cube_list[0]]) iris.fileformats.netcdf.save(cube_list_new, '/home/jovyan/data/mon.air.2m.gauss.2020.new.nc') # To load all cubes in a specific folder # + from pathlib import Path import iris paths = Path('/home/jovyan/data/').glob('*.nc') filenames = [str(path) for path in paths] cubes = iris.load(filenames) # - for i, fn in enumerate(filenames): print(f'{i}: {fn}') # Code to concatenate monthly NCEP data from iris.experimental.equalise_cubes import equalise_attributes equalise_attributes(cubes) concatenated_cubes = cubes.concatenate_cube() concatenated_cubes iris.fileformats.netcdf.save(concatenated_cubes, '/home/jovyan/data/mon.air.2m.gauss.concatenated.nc') print(concatenated_cubes.coord('time'))
notebooks/ncep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: d1-spark2python3 # language: python # name: spark-python-d1-spark2python3 # --- # Global data variables SANDBOX_NAME = ''# Sandbox Name DATA_PATH = "/data/sandboxes/" + SANDBOX_NAME + "/data/data/" from pyspark.sql import functions as F # # # # Creación o modificación de columnas # # En Spark hay un único método para la creación o modificación de columnas y es `withColumn`. Este método es de nuevo una transformación y toma dos parámetros: el nombre de la columna a crear (o sobreescribir) y la operación que crea la nueva columna. # # Para una ejecución más óptima se recomienda utilizar únicamente las funciones de PySpark cuando se define la operación, pero como se detallará más adelante se pueden utilizar funciones propias. movies_df = spark.read.csv(DATA_PATH + 'movie-ratings/movies.csv', sep=',', header=True, inferSchema=True) ratings_df = spark.read.csv(DATA_PATH + 'movie-ratings/ratings.csv', sep=',', header=True, inferSchema=True) ratings_movies_df = ratings_df.join(movies_df, on='movieId', how='inner') ratings_movies_df.cache() # # # ## Funciones de Spark # # # __valor fijo__ # # El ejemplo más sencillo es crear una columna con un valor fijo, en este caso, columna `now` con valor '2019/01/21 14:08', y columna `rating2`con valor 4.0. 
# # Hint: `withColumn` ratings_movies_df = ratings_movies_df.withColumn('now', F.lit('2019/01/21 14:08')) ratings_movies_df.show(3) ratings_movies_df = ratings_movies_df.withColumn('rating2', F.lit(4.0)) ratings_movies_df.show(3) # # # __duplicar columna__ ratings_movies_df.withColumn('title2', F.col('title'))\ .select('title', 'title2')\ .show(10) # # # __operaciones aritmeticas__ ratings_movies_df.withColumn('rating_10', F.col('rating') * 2)\ .select('rating', 'rating_10')\ .show(10) ratings_movies_df.withColumn('rating_avg', (F.col('rating') + F.col('rating2')) / 2)\ .select('rating', 'rating2', 'rating_avg')\ .show(10) # # # __if/else__ # # Crea la columna `kind_rating`, que sea 'high' en caso de que rating sea mayor que 4, y 'low' en caso contrario. ratings_movies_df.withColumn('kind_rating', F.when(F.col('rating') >= 4, 'high').otherwise('low')).show(10) # # # Se pueden concatenar multiples sentencias _when_. Esta vez, sobreescribe la columna `kind_rating` para crear un nivel intermedio, donde si es mayor que dos y menor que 4, `kind_rating` sea 'med'. 
ratings_movies_df.withColumn('kind_rating', F.when(F.col('rating') >= 4, 'high')\ .when(F.col('rating') >= 2, 'med')\ .otherwise('low')).show(20) # # # __operaciones con strings__ # # Pon en mayúsculas todos los títulos de las películas ratings_movies_df.withColumn('title', F.upper(F.col('title'))).show(3) # # # Extrae los 10 primeros caracteres de la columna `title` ratings_movies_df.withColumn('short_title', F.substring(F.col('title'), 0, 10))\ .select('title', 'short_title')\ .show(10) # # # Separa los diferentes géneros de la columna `genres` para obtener una lista, usando el separador '|' ratings_movies_df.withColumn('genres', F.split(F.col('genres'), '\|')).show(4) # # # Crea una nueva columna `1st_genre` seleccionando el primer elemento de la lista del código anterior ratings_movies_df.withColumn('1st_genre', F.split(F.col('genres'), '\|')[0])\ .select('genres', '1st_genre')\ .show(10) # # # Reemplaza el caracter '|' por '-' en la columna `genres` ratings_movies_df.withColumn('genres', F.regexp_replace(F.col('genres'), '\|', '-'))\ .select('title', 'genres')\ .show(10, truncate=False) # # # _Con expresiones regulares_ # # https://regexr.com/ ratings_movies_df.withColumn('title', F.regexp_replace(F.col('title'), ' \(\d{4}\)', '')).show(5, truncate=False) # + ratings_movies_df = ratings_movies_df.withColumn('year', F.regexp_extract(F.col('title'), '\((\d{4})\)', 1)) ratings_movies_df.show(5) # - # # # ## Casting # # Con el método `withColumn` también es posible convertir el tipo de una columna con la función `cast`. Es importante saber que en caso de no poder convertirse (por ejemplo una letra a número) no saltará error y el resultado será un valor nulo. ratings_movies_df.printSchema() # # # Cambia el formato de `year` a entero, y `movieId` a string. 
ratings_movies_df = ratings_movies_df.withColumn('year', F.col('year').cast('int')) ratings_movies_df.show(5) ratings_movies_df = ratings_movies_df.withColumn('movieId', F.col('movieId').cast('string')) ratings_movies_df.printSchema() ratings_movies_df.withColumn('error', F.col('title').cast('int')).show(5) # # # ## UDF (User Defined Functions) # # Cuando no es posible definir la operación con las funciones de spark se pueden crear funciones propias usando la UDFs. Primero se crea una función de Python normal y posteriormente se crea la UDFs. Es necesario indicar el tipo de la columna de salida en la UDF. from pyspark.sql.types import StringType, IntegerType, DoubleType, DateType # # # _Aumenta el rating en un 15% para cada película más antigua que 2000 (el máximo siempre es 5)._ def increase_rating(year, rating): if year < 2000: rating = min(rating * 1.15, 5.0) return rating increase_rating_udf = F.udf(increase_rating, DoubleType()) ratings_movies_df.withColumn('rating_inc', increase_rating_udf(F.col('year'), F.col('rating')))\ .select('title', 'year', 'rating', 'rating_inc')\ .show(20) # # # Extrae el año de la película sin usar expresiones regulares. title = 'Trainspotting (1996)' title.replace(')', '').replace('(', '') year = title.replace(')', '').replace('(', '').split(' ')[-1] year = int(year) year def get_year(title): year = title.replace(')', '').replace('(', '').split(' ')[-1] if year.isnumeric(): year = int(year) else: year = -1 return year get_year_udf = F.udf(get_year, IntegerType()) ratings_movies_df.withColumn('year2', get_year_udf(F.col('title')))\ .select('title', 'year', 'year2').show(10, truncate=False) # # # # Datetimes # # Hay varias funciones de _pyspark_ que permiten trabajar con fechas: diferencia entre fechas, dia de la semana, año... Pero para ello primero es necesario transformar las columnas a tipo fecha. 
Se permite la conversion de dos formatos de fecha: # * timestamp de unix: una columna de tipo entero con los segundos trascurridos entre la medianoche del 1 de Enero de 1990 hasta la fecha. # * cadena: la fecha representada como una cadena siguiendo un formato específico que puede variar. ratings_movies_df.select('title', 'timestamp', 'now').show(5) # # # ## unix timestamp a datetime ratings_movies_df = ratings_movies_df.withColumn('datetime', F.from_unixtime(F.col('timestamp'))) ratings_movies_df.select('datetime', 'timestamp').show(10) # # # ## string a datetime # + ratings_movies_df = ratings_movies_df.withColumn('now_datetime', F.from_unixtime(F.unix_timestamp(F.col('now'), 'yyyy/MM/dd HH:mm'))) ratings_movies_df.select('now', 'now_datetime').show(10) # - # # # ## funciones con datetimes ratings_movies_df.select('now_datetime', 'datetime', F.datediff(F.col('now_datetime'), F.col('datetime'))).show(10) ratings_movies_df.select('datetime', F.date_add(F.col('datetime'), 10)).show(10) ratings_movies_df.withColumn('datetime_plus_4_months', F.add_months(F.col('datetime'), 4))\ .select('datetime', 'datetime_plus_4_months').show(5) ratings_movies_df.select('datetime', F.month(F.col('datetime')).alias('month')).show(10) ratings_movies_df.select('datetime', F.last_day(F.col('datetime')).alias('last_day')).show(10) ratings_movies_df.select('datetime', F.dayofmonth(F.col('datetime')).alias('day'), F.dayofyear(F.col('datetime')).alias('year_day'), F.date_format(F.col('datetime'), 'E').alias('weekday')).show(10) # # # Para filtrar por fechas se pueden comparar directamente con una cadena en el formato YYYY-MM-DD hh:mm:ss ya que será interpretada como una fecha. 
ratings_movies_df.filter(F.col('datetime') >= "2015-09-30 20:00:00").select('datetime', 'title', 'rating').show(10) ratings_movies_df.filter(F.col('datetime').between("2003-01-31", "2003-02-10"))\ .select('datetime', 'title', 'rating').show(5) ratings_movies_df.filter(F.year(F.col('datetime')) >= 2012)\ .select('datetime', 'title', 'rating').show(5) # + [markdown] id="RQQqo7LCY1GE" colab_type="text" # # # # Ejercicio 1 # # 1) Cree una función que acepte un DataFrame y un diccionario. La función debe usar el diccionario para renombrar un grupo de columnas y devolver el DataFrame ya modificado. # # Use el siguiente DataFrame y diccionario: # + colab={} id="OZcBXSoEY1GG" colab_type="code" pokemon_df = spark.read.csv(DATA_PATH + 'pokemon.csv', sep=',', header=True, inferSchema=True) rename_dict = {'Sp. Atk': 'sp_atk', 'Sp. Def': 'sp_def'} # + colab={"height": 173, "base_uri": "https://localhost:8080/"} id="_gFkYDfbodna" colab_type="code" outputId="6ecc769f-077d-432e-99e4-c3bac2d74a7c" pokemon_df.show(3) # + colab={} id="bFegy2Nsogs7" colab_type="code" # Respuesta def rename_df(df, rename_dict): if any(['.' in c for c in rename_dict.keys()]): # withColumnRenamed method for old, new in rename_dict.items(): df = df.withColumnRenamed(old, new) else: # Select method df = df.select([col(c).alias(rename_dict.get(c, c)) for c in df.columns]) return df # + [markdown] id="Xy7_B4HbY1GL" colab_type="text" # # # 2) Use la función definida en el punto anterior para cambiar los nombres del DF usando el diccionario dado. # # 3) Modifique la función de tal forma que también acepte una función en lugar de un diccionario. Use la función para renombrar las columnas. # # 4) Estandarice según las buenas prácticas los nombres de las columnas usando la función que acaba de definir. # # 5) Cree otra función que acepte un DataFrame y una lista con un subconjunto de columnas. El objetivo de esta función es determinar el número de filas duplicadas del DF. 
# # 6) Use la función creada para obtener el número de duplicados del DataFrame pokemon_df en todas las columnas excepto el nombre (`name`) # + colab={"height": 72, "base_uri": "https://localhost:8080/"} id="f8VKWgPuY1GO" colab_type="code" outputId="dd44e9f9-14d9-4a18-b819-7eafd0a86f85" # Respuesta print(pokemon_df.columns) pokemon_df = rename_df(pokemon_df, rename_dict) print(pokemon_df.columns) # + colab={} id="QJlJcyBKqQf0" colab_type="code" # Respuesta def rename_df(df, rename_object): if isinstance(rename_object, dict): if any(['.' in c for c in rename_object.keys()]): # withColumnRenamed method for old, new in rename_object.items(): df = df.withColumnRenamed(old, new) else: # Select method df = df.select([col(c).alias(rename_object.get(c, c)) for c in df.columns]) elif isinstance(rename_object, type(lambda x: x)): for c in df.columns: df = df.withColumnRenamed(c, rename_object(c)) else: raise Exception('Not implemented') return df # + colab={"height": 52, "base_uri": "https://localhost:8080/"} id="eDV-hsD_r5Yu" colab_type="code" outputId="6f03bb83-2962-482c-e28b-86f95b7a0184" # Respuesta print(pokemon_df.columns) pokemon_df = rename_df(pokemon_df, lambda c: c.strip().lower().replace('.', '').replace(' ', '_')) print(pokemon_df.columns) # + colab={} id="VHOh4-1Btc6O" colab_type="code" # Respuesta def show_duplicates(df, subset): assert isinstance(subset, (list, tuple)), 'Subset is not a list neither a tuple' agg_count = df.groupBy(subset).count().filter(F.col('count') > 1) df = df.join(agg_count, on=subset, how='inner') return df # Note: this could be done more efficiently and with less potential bugs using a window function. 
# + colab={"height": 139, "base_uri": "https://localhost:8080/"} id="mMji9U1gtdEG" colab_type="code" outputId="bd7ec08a-ca36-4f6f-cd03-a42017615803" # Respuesta subset = [c for c in pokemon_df.columns if c != 'name'] show_duplicates(pokemon_df, subset).show() # - # # # # Ejercicio 2 # # Crea la misma lógica definida en el siguiente UDF, pero sin usar UDFs, es decir, usando exclusivamente funciones de SparkSQL. # + movies_df = spark.read.csv(DATA_PATH + 'movie-ratings/movies.csv', sep=',', header=True, inferSchema=True) movies_df = movies_df.withColumn('genres', F.split(F.col('genres'), '\|')) from pyspark.sql.types import StringType, IntegerType, DoubleType, BooleanType def value_in_col(col, value): return value in col value_in_col_udf = F.udf(value_in_col, BooleanType()) # - # # # *Pista*: Mira la función *explode*. # + # Respuesta def filter_by_array_value(df, array_col, value): df = df.withColumn('element', F.explode(F.col(array_col))) df = df.filter(F.col('element') == value) df = df.drop('element') return df filter_by_array_value(movies_df, 'genres', 'Drama').show(10, truncate=False)
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/04_dw_formatting_con_respuestas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Accessing Array Elements # # - [Download the lecture notes](https://philchodrow.github.io/PIC16A/content/np_plt/numpy_3.ipynb). # # Often, we want to access the data contained inside an array, for the purpose of either modifying the data or using it in later computation. For 1d arrays, the simplest way to do this is very similar to how we would do so for a list: import numpy as np a = np.arange(10) a a[5] a[-1] # first four elements a[:4] # "fancy" indexing # passing a list lets you single out individual elements a[[1,7]] # An extremely useful technique is boolean indexing. The important points about boolean indexing is that boolean comparisons in `numpy` are vectorized, and generate arrays of boolean values. # + # More useful: boolean indexing # passing an array of True/False lets you grab elements according to a criterion a > 5 # > is vectorized!! # - # We can also compare two arrays of the same size: b = np.random.randint(0, 10, 10) # 10 random integers between 0 and 10 b ix = a > 5 ix # We can then pass these boolean arrays to select only the elements of an array for which the boolean array has value True. 
a[ix] # Usually, we would write this a bit more compactly: a[a > 5] # We can also use the bitwise `&` and `|` operators to combine multiple conditions: # large AND even entries using bitwise & a[(a > 5) & (a % 2 == 0)] # large OR even entries using bitwise | a[(a > 5) | (a % 2 == 0)] # ## Modifying Array Entries # # After having accessed array entries, it's easy to modify them: a = np.arange(10) a # modifying a single value a[5] = 50 a # setting multiple values using a single number a[a > 5] = 5 a # using an array to set multiple values a[a == 5] = np.array([0, 1, 2, 3, 4]) a # will lead to errors if the sizes don't match a[a == 4] = np.array([5, 5, 5]) # ## Indexing Multidimensional Arrays # # What about when we're dealing with multidimensional arrays? A = np.reshape(np.arange(15), (3, 5)) A # In this case, we use commas to separate indices along different dimensions. In the case of 2d arrays, it's often convenient to think about the first dimension as representing "rows" and the second as representing "columns." A[0, 2] # zeroth row, second column A[-1, 0] # last row, zeroth column # We can also extract entire rows or columns at a time. The `:` here means "everything along the dimension". A[0,:] # entire zeroth row A[:,1] # first column A[:,:] # same as A A[0,:] # all the odd numbers A[A % 2 == 1] # + # add 2 to all the odd numbers A[A % 2 == 1] += 2 # - A # Indexing gets more complex in higher dimensions, but we generally won't work with higher-dimensional arrays in this course.
content/np_plt/numpy_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import torch
import sys
import yaml
from torchvision import transforms, datasets
import torchvision
import numpy as np
import os
from sklearn import preprocessing
from torch.utils.data.dataloader import DataLoader

# +
# From Yao load_and_convert
import torch
from torchvision import models


class ResNet(torch.nn.Module):
    """Torchvision ResNet backbone with the classification head removed.

    Exposes globally pooled features; optionally projects them 2048 -> 512
    through a single linear layer when ``use_fc=True``.
    """

    def __init__(self, net_name, pretrained=False, use_fc=False):
        super().__init__()
        base_model = models.__dict__[net_name](pretrained=pretrained)
        # Drop the final FC layer; keep conv stem through the avgpool.
        self.encoder = torch.nn.Sequential(*list(base_model.children())[:-1])

        self.use_fc = use_fc
        if self.use_fc:
            # assumes the backbone's pooled feature width is 2048 (ResNet-50 family)
            self.fc = torch.nn.Linear(2048, 512)

    def forward(self, x):
        x = self.encoder(x)
        x = torch.flatten(x, 1)  # (N, C, 1, 1) -> (N, C)
        if self.use_fc:
            x = self.fc(x)
        return x


device = torch.device('cuda:5')
model = ResNet('resnet50', pretrained=False, use_fc=False).to(device)
model_path = "/home/akash/detconb/ckpt/byol/04_22_05-45/04_22_05-45_resnet50_300.pth.tar"
checkpoint = torch.load(model_path, map_location=device)['model']#['online_backbone']

# Strip the BYOL online-network prefix so the keys match the bare encoder,
# and drop every key that does not belong to the encoder.
state_dict = checkpoint
for k in list(state_dict.keys()):
    # retain only encoder_q up to before the embedding layer
    if k.startswith('module.online_network.encoder.'):
        # remove prefix
        new_k = k[len("module.online_network.encoder."):]
        state_dict[new_k] = state_dict[k]
    # delete renamed or unused k
    del state_dict[k]

msg = model.encoder.load_state_dict(state_dict, strict=True)
print(msg)
# -

batch_size = 512
data_transforms = torchvision.transforms.Compose([
    transforms.Resize((224, 224)), #FIXME: They only did smallest side resize to 224
    transforms.ToTensor(),
    # ImageNet channel statistics
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])

# +
# config = yaml.load(open("../config/config.yaml", "r"), Loader=yaml.FullLoader)

# +
train_dataset = datasets.ImageFolder('/home/kkallidromitis/data/imagenet/images/train/', transform=data_transforms)
test_dataset = datasets.ImageFolder('/home/kkallidromitis/data/imagenet/images/val/', transform=data_transforms)
# -

print("Input shape:", train_dataset.__getitem__(0)[0].shape)
print("Input shape:", test_dataset.__getitem__(0)[0].shape)

# +
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8, drop_last=False, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8, drop_last=False, shuffle=True)
# -

# device = 'cpu' #'cuda' if torch.cuda.is_available() else 'cpu'
# encoder = ResNet18(**config['network'])
encoder = model
output_feature_dim = 2048 #encoder.projection.net[0].in_features


class LogisticRegression(torch.nn.Module):
    """Single linear layer used as a linear probe on frozen features."""

    def __init__(self, input_dim, output_dim):
        super(LogisticRegression, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)


logreg = LogisticRegression(output_feature_dim, 1000)
logreg = logreg.to(device)

import tqdm


def get_features_from_encoder(encoder, loader):
    """Run ``encoder`` over every batch in ``loader`` and return stacked features/labels.

    Gradients are disabled — the encoder is frozen during linear evaluation.
    Returns a ``(features, labels)`` pair of CPU tensors.
    """
    x_train = []
    y_train = []

    # get the features from the pre-trained model
    for (x, y) in tqdm.notebook.tqdm((loader)):
        with torch.no_grad():
            feature_vector = encoder(x.to(device))
            x_train.extend(feature_vector.cpu())
            y_train.extend(y.cpu().numpy())

    x_train = torch.stack(x_train)
    y_train = torch.tensor(y_train)
    return x_train, y_train


# +
encoder.eval()
print("Getting Train Features")
x_train, y_train = get_features_from_encoder(encoder, train_loader)
print("Getting Test Features")
x_test, y_test = get_features_from_encoder(encoder, test_loader)

# If the encoder returned spatial maps (N, C, H, W), global-average-pool them.
if len(x_train.shape) > 2:
    x_train = torch.mean(x_train, dim=[2, 3])
    x_test = torch.mean(x_test, dim=[2, 3])

print("Training data shape:", x_train.shape, y_train.shape)
print("Testing data shape:", x_test.shape, y_test.shape)
# -


def create_data_loaders_from_arrays(X_train, y_train, X_test, y_test):
    """Wrap pre-extracted feature tensors in shuffled/ordered DataLoaders."""
    train = torch.utils.data.TensorDataset(X_train, y_train)
    train_loader = torch.utils.data.DataLoader(train, batch_size=100, shuffle=True)

    test = torch.utils.data.TensorDataset(X_test, y_test)
    test_loader = torch.utils.data.DataLoader(test, batch_size=512, shuffle=False)
    return train_loader, test_loader


# +
# They didn't do this!!!
# scaler = preprocessing.StandardScaler()
# scaler.fit(x_train)
# x_train = scaler.transform(x_train).astype(np.float32)
# x_test = scaler.transform(x_test).astype(np.float32)
# -

train_loader, test_loader = create_data_loaders_from_arrays(x_train, y_train, x_test, y_test)
#train_loader, test_loader = create_data_loaders_from_arrays(torch.from_numpy(x_train), y_train, torch.from_numpy(x_test), y_test)

# +
optimizer = torch.optim.Adam(logreg.parameters(), lr=3e-4)
criterion = torch.nn.CrossEntropyLoss()
eval_every_n_epochs = 10

for epoch in tqdm.notebook.tqdm((range(100))):
#     train_acc = []
    logreg.train()
    for x, y in train_loader:

        x = x.to(device)
        y = y.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        logits = logreg(x)
        loss = criterion(logits, y)

        loss.backward()
        optimizer.step()

    if epoch % eval_every_n_epochs == 0:
        # Evaluate the probe; no_grad avoids building the autograd graph
        # (the original version kept gradients alive during evaluation).
        logreg.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            for x, y in test_loader:
                x = x.to(device)
                y = y.to(device)

                logits = logreg(x)
                predictions = torch.argmax(logits, dim=1)

                total += y.size(0)
                correct += (predictions == y).sum().item()

        acc = 100 * correct / total
        print(f"Testing accuracy: {acc}")
# -
eval/linear_feature_eval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vladiant/MachineLearningUtils/blob/main/Pix2Pix/Pix2PixGAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="abpscqW1cicK"
# # Pix2Pix GAN for Image-to-Image Translation
# https://machinelearningmastery.com/how-to-develop-a-pix2pix-gan-for-image-to-image-translation/

# + [markdown] id="qm7c3Ksi7fpd"
# ## Setup the GPU

# + id="HL8eI1ih7jhS"
import tensorflow as tf

# + colab={"base_uri": "https://localhost:8080/"} id="-4vXVYqy7nu7" outputId="d37038a3-fbf6-4e9e-fe26-389222a64237"
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

# + colab={"base_uri": "https://localhost:8080/"} id="sFA0h7_j7tyJ" outputId="3f30a670-7938-4d5d-975d-2e0318531d8f"
# gpu = tf.config.experimental.list_physical_devices('GPU')[0]
# tf.config.experimental.set_memory_growth(gpu, True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    # Currently, memory growth needs to be the same across GPUs
    for gpu in gpus:
      tf.config.experimental.set_memory_growth(gpu, True)
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Memory growth must be set before GPUs have been initialized
    print(e)

# + [markdown] id="CMeZj8XPc05d"
# ## Dataset
# http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/maps.tar.gz

# + id="fhglEr0bcC64"
# Folder where the image datasets are downloaded/extracted to.
WORKPLACE_FOLDER = "/tmp"

# + colab={"base_uri": "https://localhost:8080/"} id="dyaUOmPaeDGH" outputId="10b7d73b-ca81-4a1c-e149-6a564a32cb3e"
# https://stackabuse.com/download-files-with-python/
import requests
from os import path

print('Beginning file download with requests')

url = 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/maps.tar.gz'
local_tar_file = 'maps.tar.gz'
r = requests.get(url)
# Fail loudly instead of silently writing an error page to the archive.
r.raise_for_status()

with open(path.join(WORKPLACE_FOLDER, local_tar_file), 'wb') as f:
    f.write(r.content)

# Retrieve HTTP meta-data
print("Request status code", r.status_code)
print("Request content type", r.headers['content-type'])
print("Request encode", r.encoding)

# + id="lcSvX3ucdPe3"
# # !tar -xf /tmp/maps.tar.gz --directory /tmp

# + id="d3ZZgqSEdfp7"
import tarfile

# Context manager guarantees the archive handle is closed even on error.
with tarfile.open(path.join(WORKPLACE_FOLDER, local_tar_file)) as my_tar:
    my_tar.extractall(WORKPLACE_FOLDER)

# + [markdown] id="udOOaP1zfx6M"
# ## Load datafiles and process the data

# + id="CB-QQIiNogvp"
from os import listdir, path
from numpy import asarray, vstack, savez_compressed
from tensorflow.keras.preprocessing.image import img_to_array, load_img

# + id="sNDyI9WLoAXi"
# Load all images in a directory into memory
def load_images(file_path, size=(256, 512)):
    """Load the paired maps images, returning [satellite_array, map_array].

    Each file is a 256x512 image: the left half is the satellite photo and
    the right half is the corresponding map tile.
    """
    src_list, tar_list = list(), list()
    # Enumerate filenames in directory, assume all are images.
    # Sorted for a deterministic sample order (os.listdir order is arbitrary).
    for filename in sorted(listdir(file_path)):
        # Load and resize the image
        pixels = load_img(path.join(file_path, filename), target_size=size)
        # Convert to numpy array
        pixels = img_to_array(pixels)
        # Split into satellite and map
        sat_img, map_img = pixels[:, :256], pixels[:, 256:]
        src_list.append(sat_img)
        tar_list.append(map_img)
    return [asarray(src_list), asarray(tar_list)]

# + id="ybvREsKup1mj"
# Dataset path
file_path = path.join(WORKPLACE_FOLDER, 'maps/train')

# + colab={"base_uri": "https://localhost:8080/"} id="iCraK1ybp-Xl" outputId="d13f4953-005c-42d6-f192-a0a2725bb882"
# Load dataset
[src_images, tar_images] = load_images(file_path)
src_images.shape, tar_images.shape

# + id="ScIMz_ZDq-cm"
# Save as compressed numpy array
filename = path.join(WORKPLACE_FOLDER, 'maps_256.npz')

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="NOqw0bsXrLWu" outputId="a2a3a56f-16b3-422a-bb0d-cd6b8788e42f"
savez_compressed(filename, src_images, tar_images)
filename

# + [markdown] id="xpE24v0IiXp1"
# ## Load prepared dataset

# + id="SJGHGq8QhvCk"
from numpy import load
from matplotlib import pyplot

# + colab={"base_uri": "https://localhost:8080/"} id="5bMrs2KdhqYQ" outputId="bec4b9c8-78a3-4d1b-d129-95c64422f56d"
# Load the prepared dataset
data = load(filename)
src_images, tar_images = data['arr_0'], data['arr_1']
src_images.shape, tar_images.shape

# + colab={"base_uri": "https://localhost:8080/", "height": 129} id="RYGCxmbyinuX" outputId="60fa8b10-d2ee-449d-e6e2-7150ed01fe81"
# Plot source images
n_samples = 3
for i in range(n_samples):
    pyplot.subplot(2, n_samples, 1 + i)
    pyplot.axis('off')
    pyplot.imshow(src_images[i].astype('uint8'))

# + colab={"base_uri": "https://localhost:8080/", "height": 129} id="KAUO82QVjDQE" outputId="df658a5a-5a22-4db2-a67c-c3157130d6ee"
# Plot target images
n_samples = 3
for i in range(n_samples):
    pyplot.subplot(2, n_samples, 1 + i)
    pyplot.axis('off')
    pyplot.imshow(tar_images[i].astype('uint8'))

# + id="DyWtdL3sm2Kx"
from numpy import load, zeros, ones
from numpy.random import randint
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, LeakyReLU, Activation, Concatenate, Dropout, BatchNormalization, Input
from matplotlib import pyplot

# + [markdown] id="PbwsUExnjPsp"
# ## Define the discriminator model

# + id="F16AUmZFjOuP"
def define_discriminator(image_shape):
    """Build and compile the 70x70 PatchGAN discriminator.

    Takes a (source, target) image pair and emits a grid of real/fake
    probabilities, one per receptive-field patch.
    """
    # Weight initialization
    init = RandomNormal(stddev=0.02)
    # Source image input
    in_src_image = Input(shape=image_shape)
    # Target image input
    in_target_image = Input(shape=image_shape)
    # Concatenate images channel-wise
    merged = Concatenate()([in_src_image, in_target_image])
    # C64
    d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(merged)
    d = LeakyReLU(alpha=0.2)(d)
    # C128
    d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C256
    d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C512
    d = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # Second last output layer
    d = Conv2D(512, (4,4), padding='same', kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # Patch output
    d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
    patch_out = Activation('sigmoid')(d)
    # Define model
    model = Model([in_src_image, in_target_image], patch_out)
    # Compile model ('lr' is deprecated in tf.keras; use 'learning_rate')
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    # loss_weights=[0.5] halves the discriminator updates relative to the generator
    model.compile(loss='binary_crossentropy', optimizer=opt, loss_weights=[0.5])
    return model

# + id="3qmV4CWzmClG"
# Define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
    """Conv-BatchNorm-LeakyReLU downsampling block (stride 2) for the U-Net encoder."""
    # Weight initialization
    init = RandomNormal(stddev=0.02)
    # Add downsampling layer
    g = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
    # Conditionally add batch normalization
    if batchnorm:
        # training=True keeps batch statistics active, per the pix2pix recipe
        g = BatchNormalization()(g, training=True)
    # Leaky ReLu activation
    g = LeakyReLU(alpha=0.2)(g)
    return g

# + id="H6mjfY_6nKBr"
# Define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
    """Transposed-conv upsampling block with skip connection for the U-Net decoder."""
    # Weight initialization
    init = RandomNormal(stddev=0.02)
    # Add upsampling layer
    g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
    # Add batch normalization
    g = BatchNormalization()(g, training=True)
    # Conditionally add dropout (training=True => dropout also at inference, as noise source)
    if dropout:
        g = Dropout(0.5)(g, training=True)
    # Merge with skip connection
    g = Concatenate()([g, skip_in])
    # Relu activation
    g = Activation('relu')(g)
    return g

# + [markdown] id="00RmI9EKvkp8"
# ## Define the standalone generator model

# + id="slht65DCoKVU"
def define_generator(image_shape=(256,256,3)):
    """Build the U-Net generator mapping a source image to a translated image in [-1, 1]."""
    # Weight initialization
    init = RandomNormal(stddev=0.02)
    # Image input
    in_image = Input(shape=image_shape)
    # Encoder model
    e1 = define_encoder_block(in_image, 64, batchnorm=False)
    e2 = define_encoder_block(e1, 128)
    e3 = define_encoder_block(e2, 256)
    e4 = define_encoder_block(e3, 512)
    e5 = define_encoder_block(e4, 512)
    e6 = define_encoder_block(e5, 512)
    e7 = define_encoder_block(e6, 512)
    # Bottleneck, no batch norm and relu
    b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e7)
    b = Activation('relu')(b)
    # Decoder model
    d1 = decoder_block(b, e7, 512)
    d2 = decoder_block(d1, e6, 512)
    d3 = decoder_block(d2, e5, 512)
    d4 = decoder_block(d3, e4, 512, dropout=False)
    d5 = decoder_block(d4, e3, 256, dropout=False)
    d6 = decoder_block(d5, e2, 128, dropout=False)
    d7 = decoder_block(d6, e1, 64, dropout=False)
    # Output
    g = Conv2DTranspose(3, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
    out_image = Activation('tanh')(g)
    # Define model
    model = Model(in_image, out_image)
    return model

# + [markdown] id="Xc6gOuvVvoFx"
# ## Define the combined generator and disciminator model

# + id="eJb107CIvakE"
def define_gan(g_model, d_model, image_shape):
    """Compose generator + frozen discriminator into the compiled GAN training model.

    Optimizes adversarial loss plus L1 reconstruction loss weighted 1:100.
    """
    # Make weights in the discriminator not trainable
    d_model.trainable = False
    # Define the source image
    in_src = Input(shape=image_shape)
    # Connect the source image to the generator input
    gen_out = g_model(in_src)
    # Connect the source input and generator output to the discriminator input
    dis_out = d_model([in_src, gen_out])
    # src image as input, generated image and classification output
    model = Model(in_src, [dis_out, gen_out])
    # Compile model ('lr' is deprecated in tf.keras; use 'learning_rate')
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
    return model

# + [markdown] id="9JLwOAi7xQlq"
# ## Load and prepare train images

# + id="ueHu8Qn1xT2R"
# To translate Google Maps to Satellite Images:
# change the order of the datasets returned
def load_real_samples(filename):
    """Load the compressed dataset and scale pixels from [0,255] to [-1,1]."""
    # Load compressed arrays
    data = load(filename)
    # Unpack arrays
    X1, X2 = data['arr_0'], data['arr_1']
    # Scale from [0,255] to [-1,1]
    X1 = (X1 - 127.5) / 127.5
    X2 = (X2 - 127.5) / 127.5
    return [X1, X2]

# + id="6ohkxazMx1mH"
# Select a batch of random samples, return images and target
def generate_real_samples(dataset, n_samples, patch_shape):
    """Draw a random batch of real (source, target) pairs with 'real' (=1) patch labels."""
    # Unpack dataset
    trainA, trainB = dataset
    # Choose random instances
    ix = randint(0, trainA.shape[0], n_samples)
    # Retrieve selected images
    X1, X2 = trainA[ix], trainB[ix]
    # Generate 'real' class labels (1)
    y = ones((n_samples, patch_shape, patch_shape, 1))
    return [X1, X2], y

# + id="VQb9tPBbyrgO"
# Generate a batch of images, return images and targets
def generate_fake_samples(g_model, samples, patch_shape):
    """Run the generator on source images and pair the output with 'fake' (=0) patch labels."""
    # Generate fake instance
    X = g_model.predict(samples)
    # Create 'fake' class labels (0)
    y = zeros((len(X), patch_shape, patch_shape, 1))
    return X, y

# + id="tScyAGlJzNIj"
# Generate samples, save as a plot and save the model
def summarize_performance(step, g_model, dataset, n_samples=3):
    """Save a source/generated/target comparison plot and a generator checkpoint.

    NOTE(review): outputs are written to the current working directory,
    not WORKPLACE_FOLDER — the later load_model call relies on this.
    """
    # Select a sample of input images
    [X_realA, X_realB], _ = generate_real_samples(dataset, n_samples, 1)
    # Generate a batch of fake samples
    X_fakeB, _ = generate_fake_samples(g_model, X_realA, 1)
    # Scale all pixels from [-1,1] to [0,1]
    X_realA = (X_realA + 1) / 2.0
    X_realB = (X_realB + 1) / 2.0
    X_fakeB = (X_fakeB + 1) / 2.0
    # Plot real source images
    for i in range(n_samples):
        pyplot.subplot(3, n_samples, 1 + i)
        pyplot.axis('off')
        pyplot.imshow(X_realA[i])
    # Plot generated target image
    for i in range(n_samples):
        pyplot.subplot(3, n_samples, 1 + n_samples + i)
        pyplot.axis('off')
        pyplot.imshow(X_fakeB[i])
    # Plot real target image
    for i in range(n_samples):
        pyplot.subplot(3, n_samples, 1 + n_samples*2 + i)
        pyplot.axis('off')
        pyplot.imshow(X_realB[i])
    # Save plot to file
    filename1 = 'plot_%06d.png' % (step+1)
    pyplot.savefig(filename1)
    pyplot.close()
    # Save the generator model
    filename2 = 'model_%06d.h5' % (step+1)
    g_model.save(filename2)
    print('>Saved: %s and %s' % (filename1, filename2))

# + [markdown] id="xFOWni3V4q12"
# ## Train pix2pix model

# + id="jKKNGdlb4nnm"
def train(d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1):
    """Alternate discriminator/generator updates for n_epochs over the dataset."""
    # determine the output square shape of the discriminator
    n_patch = d_model.output_shape[1]
    # unpack dataset
    trainA, trainB = dataset
    # calculate the number of batches per training epoch
    bat_per_epo = int(len(trainA) / n_batch)
    # calculate the number of training iterations
    n_steps = bat_per_epo * n_epochs
    # manually enumerate epochs
    for i in range(n_steps):
        # select a batch of real samples
        [X_realA, X_realB], y_real = generate_real_samples(dataset, n_batch, n_patch)
        # generate a batch of fake samples
        X_fakeB, y_fake = generate_fake_samples(g_model, X_realA, n_patch)
        # update discriminator for real samples
        d_loss1 = d_model.train_on_batch([X_realA, X_realB], y_real)
        # update discriminator for generated samples
        d_loss2 = d_model.train_on_batch([X_realA, X_fakeB], y_fake)
        # update the generator
        g_loss, _, _ = gan_model.train_on_batch(X_realA, [y_real, X_realB])
        # summarize performance
        print('>%d, d1[%.3f] d2[%.3f] g[%.3f]' % (i+1, d_loss1, d_loss2, g_loss))
        # summarize model performance every 10 epochs
        if (i+1) % (bat_per_epo * 10) == 0:
            summarize_performance(i, g_model, dataset)

# + colab={"base_uri": "https://localhost:8080/"} id="eGRMqXMD7yFx" outputId="92e46d98-0d04-4b90-e98b-8183d7e638dd"
# Load image data
# filename = path.join(WORKPLACE_FOLDER,'maps_256.npz')
dataset = load_real_samples(filename)
dataset[0].shape, dataset[1].shape

# + id="6PZwVmH48hMi"
# Define input shape based on loaded dataset
image_shape = dataset[0].shape[1:]

# + id="lrg8M29C8rph"
# Define the models
d_model = define_discriminator(image_shape)
g_model = define_generator(image_shape)

# + id="Y2kNN7xq9OX8"
# Define the composite model
gan_model = define_gan(g_model, d_model, image_shape)

# + colab={"base_uri": "https://localhost:8080/"} id="ayzx2DrR9Z45" outputId="4d663841-17ba-4b37-930a-313aa11f4afb"
# Train model
train(d_model, g_model, gan_model, dataset)

# + [markdown] id="kwgXuZvHg-Bp"
# ## Load a model to translate images

# + id="Z559ykJrg7Cy"
from tensorflow.keras.models import load_model
from numpy import load
from numpy import vstack
from matplotlib import pyplot
from numpy.random import randint

# + id="zbth1UNrhQCz"
# load and prepare training images
# To translate Google Maps to Satellite Images:
# change the order of the datasets returned
def load_real_samples(filename):
    """Load the compressed dataset and scale pixels from [0,255] to [-1,1]."""
    # load compressed arrays
    data = load(filename)
    # unpack arrays
    X1, X2 = data['arr_0'], data['arr_1']
    # scale from [0,255] to [-1,1]
    X1 = (X1 - 127.5) / 127.5
    X2 = (X2 - 127.5) / 127.5
    return [X1, X2]

# + id="C-asghw7hUjz"
# plot source, generated and target images
def plot_images(src_img, gen_img, tar_img):
    """Show source, generated, and expected images side by side."""
    images = vstack((src_img, gen_img, tar_img))
    # scale from [-1,1] to [0,1]
    images = (images + 1) / 2.0
    titles = ['Source', 'Generated', 'Expected']
    # plot images row by row
    for i in range(len(images)):
        # define subplot
        pyplot.subplot(1, 3, 1 + i)
        # turn off axis
        pyplot.axis('off')
        # plot raw pixel data
        pyplot.imshow(images[i])
        # show title
        pyplot.title(titles[i])
    pyplot.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 199} id="SRlqRSZ6hYUU" outputId="bbb48fa4-890d-4dab-ec0a-1090632e6ae6"
# load dataset
# filename = path.join(WORKPLACE_FOLDER,'maps_256.npz')
[X1, X2] = load_real_samples(filename)
print('Loaded', X1.shape, X2.shape)
# load model — the step count must match a checkpoint saved by summarize_performance
model = load_model('model_109600.h5')
# select random example
ix = randint(0, len(X1), 1)
src_image, tar_image = X1[ix], X2[ix]
# generate image from source
gen_image = model.predict(src_image)
# plot all three images
plot_images(src_image, gen_image, tar_image)
Pix2Pix/Pix2PixGAN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=false editable=false # Initialize OK from client.api.notebook import Notebook ok = Notebook('lab03.ok') # + [markdown] nbgrader={"grade": false, "grade_id": "intro", "locked": true, "schema_version": 2, "solution": false} # # Lab 3: Data Cleaning and Visualization # # In this lab, you will be working with a dataset from the City of Berkeley containing data on calls to the Berkeley Police Department. Information about the dataset can be found [at this link](https://data.cityofberkeley.info/Public-Safety/Berkeley-PD-Calls-for-Service/k2nh-s5h5). # # **This assignment should be completed and submitted before Wednesday April 24, 2019 at 11:59 PM.** # # ### Collaboration Policy # # Data science is a collaborative activity. While you may talk with others about the labs, we ask that you **write your solutions individually**. If you do discuss the assignments with others, please **include their names** at the top of this notebook. # + [markdown] nbgrader={"grade": false, "grade_id": "setup", "locked": true, "schema_version": 2, "solution": false} # ## Setup # # Note that after activating matplotlib to display figures inline via the IPython magic `%matplotlib inline`, we configure a custom default figure size. Virtually every default aspect of matplotlib [can be customized](https://matplotlib.org/users/customizing.html). 
# + nbgrader={"grade": false, "grade_id": "imports", "locked": true, "schema_version": 2, "solution": false} import pandas as pd import numpy as np import zipfile import matplotlib import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # + nbgrader={"grade": false, "grade_id": "imports2", "locked": true, "schema_version": 2, "solution": false} plt.rcParams['figure.figsize'] = (12, 9) # + [markdown] nbgrader={"grade": false, "grade_id": "part1", "locked": true, "schema_version": 2, "solution": false} # # Part 1: Cleaning and Exploring the Data # # To retrieve the dataset, we will use the `ds100_utils.fetch_and_cache` utility. # + nbgrader={"grade": false, "grade_id": "download-data", "locked": true, "schema_version": 2, "solution": false} import ds100_utils data_dir = 'data' data_url = 'http://www.ds100.org/sp19/assets/datasets/lab03_data_sp19.zip' file_name = 'lab03_data_sp19.zip' dest_path = ds100_utils.fetch_and_cache(data_url=data_url, file=file_name, data_dir=data_dir) print(f'Located at {dest_path}') # + [markdown] nbgrader={"grade": false, "grade_id": "unzip-data-1", "locked": true, "schema_version": 2, "solution": false} # We will now directly unzip the ZIP archive and start working with the uncompressed files. # # Note: There is no single right answer regarding whether to work with compressed files in their compressed state or to uncompress them on disk permanently. If you for example need to work with multiple tools on the same files, or write many notebooks to analyze them, and they are not too large, it may be more convenient to uncompress them once. But you may also have situations where you find it preferable to work with the compressed data directly. # # Python gives you tools for both approaches, and you should know how to perform both tasks in order to choose the one that best suits the problem at hand. # # --- # # Run the cell below to extract the zip file into the data directory. 
# + nbgrader={"grade": false, "grade_id": "unzip-data-2", "locked": true, "schema_version": 2, "solution": false} my_zip = zipfile.ZipFile(dest_path, 'r') my_zip.extractall(data_dir) # + [markdown] nbgrader={"grade": false, "grade_id": "q0", "locked": true, "schema_version": 2, "solution": false} # Now, we'll use a method of the `Pathlib.Path` class called `glob` to list all files in the `data` directory. You will find useful information in pathlib [docs](https://docs.python.org/3/library/pathlib.html). # # Below, we use pathlib's `glob` method to store the list of all files' names from the `data_dir` directory in the variable `file_names`. These names should be strings that contain only the file name (e.g. `dummy.txt` not `data/dummy.txt`). The asterisk (*) character is used with the `glob` method to match any string. # + nbgrader={"grade": false, "grade_id": "q0-answer", "locked": true, "schema_version": 2, "solution": false} from pathlib import Path data_dir_path = Path('data') # creates a Path object that points to the data directory file_names = [x.name for x in data_dir_path.glob('*') if x.is_file()] file_names # + [markdown] nbgrader={"grade": false, "grade_id": "explore-0", "locked": true, "schema_version": 2, "solution": false} # Let's now load the CSV file we have into a `pandas.DataFrame` object. # + nbgrader={"grade": false, "grade_id": "explore-1", "locked": true, "schema_version": 2, "solution": false} calls = pd.read_csv("data/Berkeley_PD_-_Calls_for_Service.csv") calls.head() # + [markdown] nbgrader={"grade": false, "grade_id": "explore-2", "locked": true, "schema_version": 2, "solution": false} # We see that the fields include a case number, the offense type, the date and time of the offense, the "CVLEGEND" which appears to be related to the offense type, a "CVDOW" which has no apparent meaning, a date added to the database, and the location spread across four fields. 
# # Let's also check some basic information about these files using the `DataFrame.describe` and `DataFrame.info` methods. # + nbgrader={"grade": false, "grade_id": "explore-3", "locked": true, "schema_version": 2, "solution": false} calls.info() calls.describe() # + [markdown] nbgrader={"grade": false, "grade_id": "explore-4", "locked": true, "schema_version": 2, "solution": false} # Notice that the functions above reveal type information for the columns, as well as some basic statistics about the numerical columns found in the DataFrame. However, we still need more information about what each column represents. Let's explore the data further in Question 1. # # Before we go over the fields to see their meanings, the cell below will verify that all the events happened in Berkeley by grouping on the `City` and `State` columns. You should see that all of our data falls into one group. # + nbgrader={"grade": false, "grade_id": "explore-5", "locked": true, "schema_version": 2, "solution": false} calls.groupby(["City","State"]).count() # + [markdown] nbgrader={"grade": false, "grade_id": "q1", "locked": true, "schema_version": 2, "solution": false} # ## Question 1 # Above, when we called `head`, it seemed like `OFFENSE` and `CVLEGEND` both contained information about the type of event reported. What is the difference in meaning between the two columns? One way to probe this is to look at the `value_counts` for each Series. 
# + nbgrader={"grade": false, "grade_id": "offense-val-counts", "locked": true, "schema_version": 2, "solution": false} calls['OFFENSE'].value_counts().head(10) # + nbgrader={"grade": false, "grade_id": "cvlegend-val-counts", "locked": true, "schema_version": 2, "solution": false} calls['CVLEGEND'].value_counts().head(10) # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q1a", "locked": true, "schema_version": 2, "solution": false} # ### Question 1a # # Above, it seems like `OFFENSE` is more specific than `CVLEGEND`, e.g. "LARCENY" vs. "THEFT FELONY (OVER $950)". For those of you who don't know the word "larceny", it's a legal term for theft of personal property. # # To get a sense of how many subcategories there are for each `OFFENSE`, set `calls_by_cvlegend_and_offense` equal to a multi-indexed series where the data is first indexed on the `CVLEGEND` and then on the `OFFENSE`, and the data is equal to the number of offenses in the database that match the respective `CVLEGEND` and `OFFENSE`. For example, calls_by_cvlegend_and_offense["LARCENY", "THEFT FROM PERSON"] should return 24. # # <!-- # BEGIN QUESTION # name: q1a # --> # + nbgrader={"grade": false, "grade_id": "q1a-answer", "locked": false, "schema_version": 2, "solution": true} calls_by_cvlegend_and_offense = ... ... print(calls_by_cvlegend_and_offense) # + deletable=false editable=false ok.grade("q1a"); # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q1b", "locked": true, "schema_version": 2, "solution": false} # ### Question 1b # # In the cell below, set `answer1b` equal to a list of strings corresponding to the possible values for `OFFENSE` when `CVLEGEND` is "LARCENY". You can type the answer manually, or you can create an expression that automatically extracts the names. 
# # <!-- # BEGIN QUESTION # name: q1b # --> # + nbgrader={"grade": false, "grade_id": "q1b-answer", "locked": false, "schema_version": 2, "solution": true} tags=["student"] # You may use this cell for your scratch work as long as you enter # in your final answers in the answer1 variable. answer1b = ... ... # + deletable=false editable=false ok.grade("q1b"); # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q2", "locked": true, "schema_version": 2, "solution": false} # ## Question 2 # # What are the five crime types of CVLEGEND that have the most crime events? You may need to use `value_counts` to find the answer. # Save your results into `answer2` as a list of strings. # # **Hint:** *The `keys` method of the Series class might be useful.* # # <!-- # BEGIN QUESTION # name: q2 # --> # + nbgrader={"grade": false, "grade_id": "q2-answer", "locked": false, "schema_version": 2, "solution": true} answer2 = ... ... print(answer2) # + deletable=false editable=false ok.grade("q2"); # + [markdown] nbgrader={"grade": false, "grade_id": "part2", "locked": true, "schema_version": 2, "solution": false} # --- # # Part 2: Visualizing the Data # # ## Pandas vs. Seaborn Plotting # # Pandas offers basic functionality for plotting. For example, the `DataFrame` and `Series` classes both have a `plot` method. However, the basic plots generated by pandas are not particularly pretty. While it's possible to manually use matplotlib commands to make pandas plots look better, we'll instead use a high level plotting library called Seaborn that will take care of most of this for us. # # As you learn to do data visualization, you may find the [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html) and [Seaborn documentation](https://seaborn.pydata.org/api.html) helpful! 
# + [markdown] nbgrader={"grade": false, "grade_id": "plot-demo", "locked": true, "schema_version": 2, "solution": false} # As an example of the built-in plotting functionality of pandas, the following example uses `plot` method of the `Series` class to generate a `barh` plot type to visually display the value counts for `CVLEGEND`. # + nbgrader={"grade": false, "grade_id": "plot-demo1", "locked": true, "schema_version": 2, "solution": false} ax = calls['CVLEGEND'].value_counts().plot(kind='barh') ax.set_ylabel("Crime Category") ax.set_xlabel("Number of Calls") ax.set_title("Number of Calls By Crime Type"); # + [markdown] nbgrader={"grade": false, "grade_id": "plot-demo2", "locked": true, "schema_version": 2, "solution": false} # By contrast, the Seaborn library provides a specific function `countplot` built for plotting counts. It operates directly on the DataFrame itself i.e. there's no need to call `value_counts()` at all. This higher level approach makes it easier to work with. Run the cell below, and you'll see that the plot is much prettier (albeit in a weird order). # + nbgrader={"grade": false, "grade_id": "plot-demo3", "locked": true, "schema_version": 2, "solution": false} ax = sns.countplot(data=calls, y="CVLEGEND") ax.set_ylabel("Crime Category") ax.set_xlabel("Number of Calls") ax.set_title("Number of Calls By Crime Type"); # + [markdown] nbgrader={"grade": false, "grade_id": "plot-demo4", "locked": true, "schema_version": 2, "solution": false} # If we want the same ordering that we had in the pandas plot, we can use the order parameter of the `countplot` method. It takes a list of strings corresponding to the axis to be ordered. By passing the index of the `value_counts`, we get the order we want. 
# + nbgrader={"grade": false, "grade_id": "plot-demo5", "locked": true, "schema_version": 2, "solution": false} ax = sns.countplot(data=calls, y="CVLEGEND", order=calls["CVLEGEND"].value_counts(ascending=True).index); ax.set_ylabel("Crime Category") ax.set_xlabel("Number of Calls") ax.set_title("Number of Calls By Crime Type"); # + [markdown] nbgrader={"grade": false, "grade_id": "plot-demo6", "locked": true, "schema_version": 2, "solution": false} # Voilà! Now we have a pretty bar plot with the bars ordered by size. Though `seaborn` appears to provide a superior plot from a aesthetic point of view, the `pandas` plotting library is also good to understand. You'll get practice using both libraries in the following questions. # # ## An Additional Note on Plotting in Jupyter Notebooks # # You may have noticed that many of our code cells involving plotting end with a semicolon (;). This prevents any extra output from the last line of the cell that we may not want to see. Try adding this to your own code in the following questions! # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q3", "locked": true, "schema_version": 2, "solution": false} # ## Question 3 # # Now it is your turn to make some plots using `pandas` and `seaborn`. Let's start by looking at the distribution of calls over days of the week. # # The CVDOW field isn't named helpfully and it is hard to see the meaning from the data alone. According to the website linked at the top of this notebook, CVDOW is actually indicating the day that events happened. 0->Sunday, 1->Monday ... 6->Saturday. # # ### Question 3a # # Add a new column `Day` into the `calls` dataframe that has the string weekday (eg. 'Sunday') for the corresponding value in CVDOW. For example, if the first 3 values of `CVDOW` are `[3, 6, 0]`, then the first 3 values of the `Day` column should be `["Wednesday", "Saturday", "Sunday"]`. 
# # **Hint:** *Try using the [Series.map](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.map.html) function on `calls["CVDOW"]`. Can you assign this to the new column `calls["Day"]`?* # # <!-- # BEGIN QUESTION # name: q3a # --> # + nbgrader={"grade": false, "grade_id": "q3a-answer", "locked": false, "schema_version": 2, "solution": true} days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"] day_indices = range(7) indices_to_days_dict = dict(zip(day_indices, days)) # Should look like {0:"Sunday", 1:"Monday", ..., 6:"Saturday"} ... ... # + deletable=false editable=false ok.grade("q3a"); # + [markdown] nbgrader={"grade": false, "grade_id": "q3b", "locked": true, "schema_version": 2, "solution": false} # ### Question 3b # # Run the cell below to create a `seaborn` plot. This plot shows the number of calls for each day of the week. Notice the use of the `rotation` argument in `ax.set_xticklabels`, which rotates the labels by 90 degrees. # + nbgrader={"grade": false, "grade_id": "q3b-ex", "locked": true, "schema_version": 2, "solution": false} ax = sns.countplot(data=calls, x='Day', order=days) ax.set_xticklabels(ax.get_xticklabels(), rotation=90) ax.set_ylabel("Number of Calls") ax.set_title("Number of Calls For Each Day of the Week"); # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q3b-instructions", "locked": true, "schema_version": 2, "solution": false} # Now, let's make the same plot using `pandas`. Construct a vertical bar plot with the count of the number of calls (entries in the table) for each day of the week **ordered by the day of the week** (eg. `Sunday`, `Monday`, ...). Do not use `sns` for this plot. Be sure that your axes are labeled and that your plot is titled. 
# # **Hint:** *Given a series `s`, and an array `coolIndex` that has the same entries as in `s.index`, `s[coolIndex]` will return a copy of the series in the same order as `coolIndex`.* # # <!-- # BEGIN QUESTION # name: q3b # --> # + nbgrader={"grade": false, "grade_id": "q3b-answer", "locked": false, "schema_version": 2, "solution": true} ... # Leave this for grading purposes ax_3b = plt.gca() # + deletable=false editable=false ok.grade("q3b"); # + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q4", "locked": true, "schema_version": 2, "solution": false} # ## Question 4 # # It seems weekdays generally have slightly more calls than Saturday or Sunday, but the difference does not look significant. # # We can break down into some particular types of events to see their distribution. For example, let's make a bar plot for the CVLEGEND "NOISE VIOLATION". Which day is the peak for "NOISE VIOLATION"? # # ### Question 4a # # This time, use `seaborn` to create a vertical bar plot of the number of total noise violations reported on each day of the week, again ordered by the days of the week starting with Sunday. Do not use `pandas` to plot. # # **Hint:** *If you're stuck, use the code for the seaborn plot in Question 3b as a starting point.* # # <!-- # BEGIN QUESTION # name: q4a # --> # + nbgrader={"grade": false, "grade_id": "q4a-answer", "locked": false, "schema_version": 2, "solution": true} ... # Leave this for grading purposes ax_4a = plt.gca() # + deletable=false editable=false ok.grade("q4a"); # + [markdown] nbgrader={"grade": false, "grade_id": "q4b", "locked": true, "schema_version": 2, "solution": false} # ### Question 4b # # Do you realize anything interesting about the distribution of NOISE VIOLATION calls over a week? Type a 1-sentence answer in the cell below. 
# + [markdown] nbgrader={"grade": true, "grade_id": "q4b-answer", "locked": false, "points": 1, "schema_version": 2, "solution": true}
#

# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q5", "locked": true, "schema_version": 2, "solution": false}
# ## Question 5
#
# Let's look at a similar distribution but for a crime we have much more calls data about. In the cell below, create the same plot as you did in Question 4, but now looking at instances of the CVLEGEND "FRAUD" (instead of "NOISE VIOLATION"). Use either `pandas` or `seaborn` plotting as you desire.
#
# <!--
# BEGIN QUESTION
# name: q5
# -->

# + nbgrader={"grade": false, "grade_id": "q5-answer", "locked": false, "schema_version": 2, "solution": true}
...

# Leave this for grading purposes
ax_5 = plt.gca()

# + deletable=false editable=false
ok.grade("q5");

# + [markdown] deletable=false editable=false nbgrader={"grade": false, "grade_id": "q6", "locked": true, "schema_version": 2, "solution": false}
# ## Question 6
#
# ### Question 6a
#
# Now let's look at the EVENTTM column which indicates the time for events. Since it contains hour and minute information, let's extract the hour info and create a new column named `Hour` in the `calls` dataframe. You should save the hour as an `int`. Then plot the frequency of each hour in the table (i.e., `value_counts()`) sorted by the hour of the day (i.e., `sort_index()`).
#
# You will want to look into how to use:
#
# * [Series.str.slice](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.slice.html#pandas.Series.str.slice) to select the substring.
# * [Series.astype](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.astype.html) to change the type.
#
# **Hint:** *The `str` helper member of a series can be used to grab substrings. For example, `calls["EVENTTM"].str.slice(3,5)` returns the minute of each hour of the `EVENTTM`.*
#
# <!--
# BEGIN QUESTION
# name: q6
# -->

# + nbgrader={"grade": false, "grade_id": "q6a-answer", "locked": false, "schema_version": 2, "solution": true} tags=["solution"]
...

# + deletable=false editable=false
ok.grade("q6");

# + [markdown] nbgrader={"grade": false, "grade_id": "pandas-fraud-plot", "locked": true, "schema_version": 2, "solution": false}
# The code in the cell below creates a pandas bar plot showing the number of FRAUD crimes committed at each hour of the day.

# + nbgrader={"grade": false, "grade_id": "pandas-fraud-plot-code", "locked": true, "schema_version": 2, "solution": false}
# Count FRAUD calls per hour and plot them in hour order (0..23).
ax = calls[calls["CVLEGEND"] == "FRAUD"]['Hour'].value_counts().sort_index().plot(kind='bar')
ax.set_xlabel("Hour of the Day")
ax.set_ylabel("Number of Calls")
# Fixed copy-paste bug: the title previously read "For Each Day of the Week"
# although the x-axis is the hour of the day.
ax.set_title("Number of Calls Reporting Fraud For Each Hour of the Day");

# + [markdown] nbgrader={"grade": false, "grade_id": "q6b", "locked": true, "schema_version": 2, "solution": false}
# The cell below contains a seaborn plot of the same data.
# + nbgrader={"grade": false, "grade_id": "q6b-answer", "locked": false, "schema_version": 2, "solution": true} ax = sns.countplot(calls[calls["CVLEGEND"] == "FRAUD"]['Hour']) ax.set_xlabel("Hour of the Day") ax.set_ylabel("Number of Calls") ax.set_title("Number of Calls Reporting Fraud For Each Day of the Week"); #alternate solution: sns.countplot(data=calls[calls["CVLEGEND"] == "FRAUD"], x = 'Hour'); # + [markdown] nbgrader={"grade": false, "grade_id": "q6c", "locked": true, "schema_version": 2, "solution": false} # # + [markdown] nbgrader={"grade": true, "grade_id": "q6c-answer", "locked": false, "points": 1, "schema_version": 2, "solution": true} # # + [markdown] nbgrader={"grade": false, "grade_id": "q7", "locked": true, "schema_version": 2, "solution": false} # ## Question 7 (OPTIONAL) # # In the cell below, we generate a boxplot which examines the hour of day of each crime broken down by the `CVLEGEND` value. To construct this plot we used the [DataFrame.boxplot](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.boxplot.html) documentation. # + nbgrader={"grade": false, "grade_id": "q7-pandas-boxplot", "locked": true, "schema_version": 2, "solution": false} tags=["student"] calls.boxplot(column="Hour", by='CVLEGEND', rot=90); # + [markdown] nbgrader={"grade": false, "grade_id": "q7-instructions", "locked": true, "schema_version": 2, "solution": false} # While the pandas boxplot is informative, we can use seaborn to create a more visually-appealing plot. Using seaborn, regenerate a better box plot. See either the [ds 100 textbook](https://www.textbook.ds100.org/ch/06/viz_quantitative.html) or the [seaborn boxplot documentation](https://seaborn.pydata.org/generated/seaborn.boxplot.html). # # Looking at your plot, which crime type appears to have the largest interquartile range? Put your results into `answer7` as a string. 
# # + nbgrader={"grade": false, "grade_id": "q7-answer", "locked": false, "schema_version": 2, "solution": true} answer7 = ... #your answer here # Todo: Make a boxplot with seaborn ... # - # # + [markdown] nbgrader={"grade": false, "grade_id": "finish", "locked": true, "schema_version": 2, "solution": false} # ## Congratulations # # Congrats! You are finished with this assignment. # + [markdown] deletable=false editable=false # # Submit # Make sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. # **Please save before submitting!** # + deletable=false editable=false # Save your notebook first, then run this cell to submit. ok.submit()
lab/lab03/lab03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.6 64-bit (''advent'': venv)'
#     metadata:
#       interpreter:
#         hash: 0cc350383af1a3ab72b32736f19c7624e2a417239ea40fc71ef184bed3bdfde8
#     name: python3
# ---

# Advent of Code 2020, days 1-7.  Each day's cells fetch the puzzle input via
# the `aocd` helper and solve both parts inline, notebook-style.  `data` is
# deliberately re-bound at the start of every day.

from aocd.models import Puzzle

# ### day 1

puzzle = Puzzle(year=2020, day=1)
inp = puzzle.input_data

data = list(map(int, inp.split('\n')))

import itertools

# Part 1: the pair summing to 2020; part 2: the triple.
for x, y in itertools.combinations(data, 2):
    if x + y == 2020:
        print(x, y, x * y)

for x, y, z in itertools.combinations(data, 3):
    if x + y + z == 2020:
        print(x, y, z, x * y * z)

# ### day 2

import numpy as np
import pandas as pd

puzzle = Puzzle(year=2020, day=2)
inp = puzzle.input_data

# + tags=[]
data = inp.split('\n')

# + tags=[]
print(data[:10])
# -

# Scratch: check how a policy line splits into its pieces.
'5-10 b: bhbjlkbbbbbbb'.split("-")[1].split(" ")[2]


# Part 1: the letter's occurrence count must lie within [min_cnt, max_cnt].
def parse(x):
    min_cnt = int(x.split("-")[0])
    max_cnt = int(x.split("-")[1].split(" ")[0])
    letter = x.split("-")[1].split(" ")[1][0]
    password = x.split("-")[1].split(" ")[2]
    cnt = np.sum([char == letter for char in password])
    return cnt >= min_cnt and cnt <= max_cnt


np.sum([parse(x) for x in data])


# Part 2: exactly one of the two (1-based) positions must hold the letter.
def parse(x):
    min_cnt = int(x.split("-")[0])
    max_cnt = int(x.split("-")[1].split(" ")[0])
    letter = x.split("-")[1].split(" ")[1][0]
    password = x.split("-")[1].split(" ")[2]
    return (password[min_cnt - 1] == letter) + (password[max_cnt - 1] == letter) == 1


np.sum([parse(x) for x in data])

[parse(x) for x in data[:10]]

data[:10]

# a more elegant solution with regex groups
import re


def parse(x):
    min_cnt, max_cnt, letter, password = re.search('(.*)-(.*) (.*): (.*)', x).groups()
    cnt = np.sum([char == letter for char in password])
    return cnt >= int(min_cnt) and cnt <= int(max_cnt)


np.sum([parse(x) for x in data])

# ### day 3

puzzle = Puzzle(year=2020, day=3)
inp = puzzle.input_data

data = inp.split('\n')

test_data = """..##.........##.........##.........##.........##.........##.......
#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..
.#....#..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.
..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#
.#...##..#..#...##..#..#...##..#..#...##..#..#...##..#..#...##..#.
..#.##.......#.##.......#.##.......#.##.......#.##.......#.##.....
.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#
.#........#.#........#.#........#.#........#.#........#.#........#
#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...
#...##....##...##....##...##....##...##....##...##....##...##....#
.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#
""".split("\n")

test_data


def score(r, d, inp_data=None):
    """Count trees ('#') hit when sledding down the grid with slope (right r, down d).

    The map repeats horizontally, hence the modulo on the column index.

    BUG FIX: the default used to be ``inp_data=data``, which is evaluated once
    at definition time -- this notebook re-binds ``data`` for every puzzle, so
    the function silently kept whichever grid existed when the cell ran.  The
    None sentinel defers the lookup of the notebook-global ``data`` to call
    time.
    """
    if inp_data is None:
        inp_data = data
    width = len(inp_data[0])
    hits = 0
    for i, row in enumerate(inp_data):
        if i % d != 0:  # the sled only visits every d-th row
            continue
        col = int((i / d) * r) % width
        hits += row[col] == "#"
    return hits


# + tags=[]
score(1, 1) * score(3, 1) * score(5, 1) * score(7, 1) * score(1, 2)
# -

# ## day 4

import numpy as np
import re
import pandas as pd

puzzle = Puzzle(year=2020, day=4)
inp = puzzle.input_data

data = inp.split('\n\n')

data[:10]

# Required passport fields; 'cid' is deliberately optional.
fields = [
    'byr',
    'iyr',
    'eyr',
    'hgt',
    'hcl',
    'ecl',
    'pid',
    # 'cid',
]


# Part 1: a passport is valid when every required field name appears in it.
def valid(x):
    return np.sum([field in x for field in fields]) == len(fields)


np.sum([valid(x) for x in data])

# Flatten each valid passport onto a single line for easier tokenising.
valids = [x.replace("\n", " ") for x in data if valid(x)]

ecls = 'amb blu brn gry grn hzl oth'.split(" ")

# +
from collections import ChainMap


def parser(x):
    """Turn one 'key:value' token into a single-entry dict."""
    k, v = re.search('(.*):(.*)', x).groups()
    return {k: v}


def parser_full(x):
    """Parse a whole passport line into one mapping of field -> value."""
    z = [parser(y) for y in x.split(" ")]
    return ChainMap(*z)


parsed_list = [parser_full(x) for x in valids]
# -

df = pd.DataFrame(parsed_list)
df

df['byr'] = df['byr'].apply(int)
df['eyr'] = df['eyr'].apply(int)
df['iyr'] = df['iyr'].apply(int)

valid_byr = [1920 <= x <= 2002 for x in df['byr']]
valid_eyr = [2020 <= x <= 2030 for x in df['eyr']]
valid_iyr = [2010 <= x <= 2020 for x in df['iyr']]

valid_years = [(x + y + z) == 3 for x, y, z in list(zip(valid_byr, valid_eyr,
                                                        valid_iyr))]

df2 = df[valid_years]


def parse_height(v):
    """Valid heights are 150-193 cm or 59-76 in; anything else (or no unit) fails."""
    dig, unit = re.search('([0-9]*)([a-z]*)', v).groups()
    if unit == "cm":
        return (int(dig) >= 150) & (int(dig) <= 193)
    if unit == "in":
        return (int(dig) >= 59) & (int(dig) <= 76)
    return False


df2['hgt_p'] = df2['hgt'].apply(parse_height)

chars = "abcdef0123456789"


def parse_hcl(v):
    """'#' followed by exactly six lowercase hex digits.

    BUG FIX: the length test used to be ``len(v) > 7``, which also accepted
    too-short codes such as '#abc'; the rules require exactly six digits.
    """
    if v[0] != "#":
        return False
    if len(v) != 7:
        return False
    for char in v[1:]:
        if char not in chars:
            return False
    return True


df2['hcl_p'] = df2['hcl'].apply(parse_hcl)


def parse_pid(v):
    """A nine-character string consisting of digits only.

    BUG FIX: the old check only rejected alphabetic characters, so a
    nine-character value containing punctuation (e.g. '#23456789') passed.
    """
    s = str(v)
    return len(s) == 9 and s.isdigit()


df2['pid_p'] = df2['pid'].apply(parse_pid)

df2['valid_ecl'] = [x in ecls for x in df2['ecl']]

df2.query('pid_p == True and hcl_p == True and hgt_p == True and valid_ecl == True')

print(1)

puzzle = Puzzle(year=2020, day=5)
inp = puzzle.input_data

data = inp.split('\n')

data[:10]


# +
def _sc(x, upper, lower, upper_char):
    """Binary-space partition: narrow (lower, upper] one character at a time.

    `upper_char` is the character that selects the lower half (it pulls the
    upper bound down); any other character raises the lower bound.
    """
    for char in x[:-1]:
        diff = (upper - lower)
        split = int(lower + (diff / 2))
        if char == upper_char:
            upper = split
        else:
            lower = split
    if x[-1] == upper_char:
        return lower + 1
    else:
        return upper


def sc(x):
    """Seat ID: first 7 chars pick the row (F = lower), last 3 the column (L = lower)."""
    row = _sc(x[:-3], 127, -1, "F")
    col = _sc(x[-3:], 7, -1, "L")
    return row * 8 + col


sc("BBFFBBFRLL")
# -

max([sc(x) for x in data])

ids = [sc(x) for x in data]

ids_sorted = sorted(ids)

# The lowest ID on the plane is 46, so our (single missing) seat is where
# `id - index` first deviates from that offset.
for i, x in enumerate(ids_sorted):
    if x - i != 46:
        print(i, x)
        break

ids_sorted[480:500]

puzzle = Puzzle(year=2020, day=6)
inp = puzzle.input_data

data = inp.split('\n\n')

import collections


def parse_line(x):
    """Sum, over groups, of the questions answered 'yes' by everyone in the group.

    Groups are separated by blank lines; each person is one line.

    Rewritten with a set intersection: the previous Counter-based version
    (count of a letter == number of people) could miscount if one person ever
    listed the same letter twice, and carried unused scaffolding.
    """
    total = 0
    for group in x.split("\n\n"):
        people = group.strip().split("\n")
        common = set.intersection(*(set(p) for p in people))
        total += len(common)
    return total


# + tags=[]
parse_line("""
abc

a
b
c

ab
ac

a
a
a
a

b
""")

# + tags=[]
parse_line(inp)
# -

puzzle = Puzzle(year=2020, day=7)
inp = puzzle.input_data

data = inp.split('\n')

data[:10]

test_data = """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.""".split("\n")

import networkx as nx

# + tags=[]
# Build a directed graph: edge container -> contained, with the multiplicity
# stored as the edge weight.
G = nx.DiGraph()
for row in data:
    bag_content = row.split(" bag")[0]
    # G.add_node(bag_content)
    elements = " ".join(row.split("contain")[1:]).strip().split(", ")
    element_list = []
    for ele in elements:
        if "no other" in ele:
            pass
        elif ele == "":
            pass
        else:
            # NOTE(review): int(ele[0]) reads only one digit, so a count >= 10
            # would be truncated -- confirm the real input never has one.
            n = int(ele[0])
            base_str = ele[2:]
            base_str = base_str.replace(" bags", "")
            base_str = base_str.replace(" bag", "")
            base_str = base_str.replace(".", "")
            element_list.append((base_str, n))
            # Edges *into* "shiny gold" are skipped; part 1 (which would need
            # them) is handled by the commented-out block below instead.
            if bag_content != base_str and base_str != "shiny gold":
                G.add_edge(bag_content, base_str)
                G.edges[bag_content, base_str]['weight'] = n

# +
# SOLVE ONE
# paths = []
# for node in G.nodes:
#     for x in nx.all_simple_paths(G, source=node, target="shiny gold"):
#         paths.append(x)
# finals = list(set([item for sublist in paths for item in sublist]))
# len(finals) - 1

# alternatively:
# len(nx.predecessor(G, "shiny gold")) - 1
# -


def look_inside(x):
    """Recursively count bag `x` itself plus every bag it must contain."""
    cnt = 1
    if G.out_degree(x) == 0:
        return cnt
    for n in G.neighbors(x):
        cnt += G[x][n]['weight'] * look_inside(n)
    return cnt


list(G.neighbors("shiny gold"))

look_inside("shiny gold") - 1
2020/2020_d01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Are High School Equivalency Test Passing Rates Correlated to Length of Test (short vs long version)?

import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from sqlalchemy import create_engine
from datetime import datetime
import matplotlib.pyplot as plt

# Read and store the XLSX file into a DataFrame
NWEA_Data_df = pd.read_excel('Resources/NWEAGrowthReportYear2019.xlsx', sheet_name='2019')
print("Column headings:")
print(NWEA_Data_df.columns)

# +
# Drop every row whose "Term Tested" is a season label (Summer/Fall/Winter/
# Spring); the rows that remain carry an actual date string in that column.
# NOTE(review): the original comment here said "Remove Inactive CMs", which
# does not match what this code does -- confirm the intent with the data owner.
NWEA1_df = NWEA_Data_df[~NWEA_Data_df["Term Tested"].str.contains("Summer", na=False)]
NWEA2_df = NWEA1_df[~NWEA1_df["Term Tested"].str.contains("Fall", na=False)]
NWEA3_df = NWEA2_df[~NWEA2_df["Term Tested"].str.contains("Winter", na=False)]
NWEA_df = NWEA3_df[~NWEA3_df["Term Tested"].str.contains("Spring", na=False)]
print(NWEA_df)

# +
# Convert 'Term Tested' from object to datetime and store it as 'Test Date'
NWEA_df[["Test Date"]] = NWEA_df[["Term Tested"]].astype('datetime64[ns]')
NWEA_df = NWEA_df.drop('Term Tested', axis=1)
NWEA_df.head()

# +
# Rename columns to shorter, analysis-friendly names
renamed_df = NWEA_df.rename(columns={
    "Assessment Name": "Version",
    "Math: Operations and Algebraic Thinking": "Math: Algebra",
    "Math: The Real and Complex Number Systems": "Math: Numbers",
    "Reading: Vocabulary Acquisition and Use": "Reading: Vocab",
    "Language: Language: Understand, Edit for Grammar, Usage": "Language: Grammar",
    "Language: Language: Understand, Edit Mechanics": "Language: Mechanics",
    "Language: Writing: Plan, Organize, Develop, Revise, Research": "Language: Writing",
})
renamed_df.head()
# -

# Reorganizing the columns using double brackets
organized_df = renamed_df[["Student ID", "Student Last", "Student First",
                           "Test Date", "Subject", "Version",
                           "Test RIT Score", "Rapid-Guessing %"]]
organized_df.head()

# +
# Parse out each subject of test
Language_df = organized_df[organized_df["Subject"].str.contains("Language", na=False)]
Math_df = organized_df[organized_df["Subject"].str.contains("Math", na=False)]
Reading_df = organized_df[organized_df["Subject"].str.contains("Reading", na=False)]
Math_df.head()

# +
# Math Tests Given Analysis
Total_Math_Tests = len(Math_df["Student ID"])
Total_MStudents_Tested = len(Math_df["Student ID"].unique())

Short_MVersions = Math_df[Math_df["Version"].str.contains("Screening")]
Short_MVersions_Count = len(Short_MVersions)
# BUG FIX: a stray "th" token sat here in the original and would have raised
# a NameError as soon as this cell ran.

Long_MVersions = Math_df[Math_df["Version"].str.contains("Growth")]
Long_MVersions_Count = len(Long_MVersions)

Math_summary_table = pd.DataFrame({
    "Total Math Tests": [Total_Math_Tests],
    "Total Students Tested": [Total_MStudents_Tested],
    "Short Versions": [Short_MVersions_Count],
    "Long Versions": [Long_MVersions_Count],
    "Short Version as % of Total Tests": [round(((Short_MVersions_Count / Total_Math_Tests) * 100), 1)]
})
Math_summary_table

# +
# Language Tests Given Analysis
Total_Lang_Tests = len(Language_df["Student ID"])
Total_LStudents_Tested = len(Language_df["Student ID"].unique())

Short_LVersions = Language_df[Language_df["Version"].str.contains("Screening")]
Short_LVersions_Count = len(Short_LVersions)

Long_LVersions = Language_df[Language_df["Version"].str.contains("Growth")]
Long_LVersions_Count = len(Long_LVersions)

Lang_summary_table = pd.DataFrame({
    "Total Language Tests": [Total_Lang_Tests],
    "Total Students Tested": [Total_LStudents_Tested],
    "Short Versions": [Short_LVersions_Count],
    "Long Versions": [Long_LVersions_Count],
    "Short Version as % of Total Tests": [round(((Short_LVersions_Count / Total_Lang_Tests) * 100), 1)]
})
Lang_summary_table

# +
# Reading Tests Given Analysis
Total_Reading_Tests = len(Reading_df["Student ID"])
Total_RStudents_Tested = len(Reading_df["Student ID"].unique())

Short_RVersions = Reading_df[Reading_df["Version"].str.contains("Screening")]
Short_RVersions_Count = len(Short_RVersions)

Long_RVersions = Reading_df[Reading_df["Version"].str.contains("Growth")]
Long_RVersions_Count = len(Long_RVersions)

Reading_summary_table = pd.DataFrame({
    "Total Reading Tests": [Total_Reading_Tests],
    "Total Students Tested": [Total_RStudents_Tested],
    "Short Versions": [Short_RVersions_Count],
    "Long Versions": [Long_RVersions_Count],
    "Short Version as % of Total Tests": [round(((Short_RVersions_Count / Total_Reading_Tests) * 100), 1)]
})
Reading_summary_table
# -

# Find Average Score for Short vs. Long Test (Language)
Language_mean = Language_df["Test RIT Score"].mean()
Language_mean

Language_Smean = Language_df.loc[Language_df["Version"] == "Screening (Short)", :]
Language_short = Language_Smean["Test RIT Score"].describe()
Language_short

Language_Lmean = Language_df.loc[Language_df["Version"] == "Growth (Long)", :]
Language_long = Language_Lmean["Test RIT Score"].describe()
Language_long

# Find Average Score for Short vs. Long Test (Math)
Math_mean = Math_df["Test RIT Score"].mean()
Math_mean

Math_Smean = Math_df.loc[Math_df["Version"] == "Screening (Short)", :]
Math_short = Math_Smean["Test RIT Score"].describe()
Math_short

# CONSISTENCY FIX: the long-version frame was previously (mis)named
# Math_Smean, clobbering the short-version frame above; renamed to
# Math_Lmean to match the Language_/Reading_ naming pattern.
Math_Lmean = Math_df.loc[Math_df["Version"] == "Growth (Long)", :]
Math_long = Math_Lmean["Test RIT Score"].describe()
Math_long

# Find Average Score for Short vs. Long Test (Reading)
Reading_mean = Reading_df["Test RIT Score"].mean()
Reading_mean

Reading_Smean = Reading_df.loc[Reading_df["Version"] == "Screening (Short)", :]
Reading_short = Reading_Smean["Test RIT Score"].describe()
Reading_short

Reading_Lmean = Reading_df.loc[Reading_df["Version"] == "Growth (Long)", :]
Reading_long = Reading_Lmean["Test RIT Score"].describe()
Reading_long
TestData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dimi-fn/Spotify-Songs/blob/master/Spotify_Songs_Popularity_Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="X8ob6Ybn_Xdq" # importing libraries for data analysis and manipulation import pandas as pd import numpy as np # + id="AgzmyUx-_Xdt" # To plot pretty figures # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # To ignore potential useless warnings from Scipy (SciPy issue #5998) import warnings warnings.filterwarnings(action="ignore", message="^internal gelsd") # to make this notebook's output identical at every run np.random.seed(42) # + id="BcsFBSXC_9np" # Code to read csv file into colaboratory: # !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # + id="qFZA-tYQAZfK" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) # + [markdown] id="aiMfmnZw_Xdw" # > Inserting the dataset for regression and classification (the split between train and test set is already done). 
# + id="IMfQgMjgAjE6" downloaded = drive.CreateFile({'id':'1faxDrc-DqcciP0PghJbJJts9CRa0bL82'}) downloaded.GetContentFile('CS98XClassificationTest.csv') classification_test = pd.read_csv('CS98XClassificationTest.csv') downloaded = drive.CreateFile({'id':'1TQq8nRRdowS_aULA1SKviqAisw9NWIUd'}) downloaded.GetContentFile('CS98XClassificationTrain.csv') classification_train = pd.read_csv('CS98XClassificationTrain.csv') downloaded = drive.CreateFile({'id':'1tmBD1ct5ig0UF7iIPmcM0MqDwJpYvtpo'}) downloaded.GetContentFile('CS98XRegressionTest.csv') regression_test = pd.read_csv('CS98XRegressionTest.csv') downloaded = drive.CreateFile({'id':'1Zgg12bV-xXozSRI5Kj4631gwbBe2K8By'}) downloaded.GetContentFile('CS98XRegressionTrain.csv') regression_train = pd.read_csv('CS98XRegressionTrain.csv') # + [markdown] id="xSZWksmZ_Xdy" # # Regression Task # ### The regression task is about building a model which predicts the popularity of a song, hence: # + [markdown] id="e9Hdfw_z_Xdz" # > Model should learn from the data and be able to predict the popularity of a song given other metrics. It is a supervised learning task since we are given labelled training examples. It is also a multiple regression problem, since we are going to use multiple features which will help the model make predictions about the popularity of a song. Furthermore, it s a univariate regression problem (not multivariate) because we are only trying to predict a single feature (i.e. popularity) for each song. Lastly, this is batch learning (not online learning) since we do not have flow of data coming from external source. 
# # + [markdown] id="Kn3LGT1I_Xdz" # ## Exploration of the dataset # + [markdown] id="Tvl8xAkO_Xd0" # > Getting the shape and the 5 first rows: # + id="9VWDo18U_Xd0" outputId="8f46ab43-977c-4037-88b9-c7f1794f15f0" colab={"base_uri": "https://localhost:8080/", "height": 34} regression_train.shape # + id="9Z9Xld5E_Xd5" outputId="32c63243-b5b8-4012-c5bb-382fa756ee73" colab={"base_uri": "https://localhost:8080/", "height": 34} regression_test.shape # we can understand that the split of test and test set was 80/20 (453+114=567 rows totally) # + id="rTKe5V-N_Xd8" outputId="9bc342e5-6807-426b-b88d-280e4825d1a6" colab={"base_uri": "https://localhost:8080/", "height": 195} regression_train.head() # + id="lgL_d8i1_XeA" outputId="c1b76b24-2349-490f-c1a8-7fa96e02f287" colab={"base_uri": "https://localhost:8080/", "height": 195} regression_test.head() # + [markdown] id="yR1NPS-3_XeC" # > Information about the features of the dataset: # + [markdown] id="DL3WbEfL_XeD" # >> We notice there are 15 columns: # - 3 of them (title, artist, top genre) with categorical values # - the rest 12 columns contain numemical values # - 'pop', i.e. 
song's popularity, is the target value (label) # - the other 14 attributes can be used as predictors # + id="KQE87IP-_XeD" outputId="08367fb5-c5f0-450b-e33e-3f887b1d3d39" colab={"base_uri": "https://localhost:8080/", "height": 386} regression_train.info() # + id="CGYzmmyC_XeG" outputId="da0bb686-ed36-4b19-a0b0-2df0e9c4c8de" colab={"base_uri": "https://localhost:8080/", "height": 284} # summary statistics for numerical features regression_train.describe() # + [markdown] id="Dg6I0YEF_XeJ" # > Exploration of the categorical attributes (title, artist, top genre): # + id="cXi4G61V_XeN" outputId="d831da1b-c3b8-4851-f89e-7ff83e4f0fe5" colab={"base_uri": "https://localhost:8080/", "height": 34} regression_train['title'].nunique() # + id="Iwf4rG20_XeX" outputId="ea2ddba3-db92-4bff-8168-e183597db796" colab={"base_uri": "https://localhost:8080/", "height": 34} regression_train['artist'].nunique() # totally 345 artists # + id="WOMhU6dy_Xea" outputId="19f0c83d-4539-4ae9-852e-274f967d2b93" colab={"base_uri": "https://localhost:8080/", "height": 34} regression_train['top genre'].nunique() # totally 86 top genres # + id="qQVzWBp0_Xec" # top 10 most popular songs top_10_popular=regression_train.sort_values(by=["pop"], ascending=False).head(10) # + id="tZwg1l0a_Xeh" outputId="c27a4cc2-834e-43ed-e90f-04dd91d18f89" colab={"base_uri": "https://localhost:8080/", "height": 343} top_10_popular # + [markdown] id="y9XmMaWu_Xek" # ## Visualisation of the dataset # + id="PXoCLrdu_Xel" outputId="3cc65944-9b85-4f3e-ecb9-6ad78e584557" colab={"base_uri": "https://localhost:8080/", "height": 70} # For visualising distributional values import seaborn as sns import matplotlib.pyplot as plt # + id="qJMUNPgL_Xeo" outputId="ed3938f0-b5b6-4a17-f8bc-3dc1e5124bd5" colab={"base_uri": "https://localhost:8080/", "height": 302} regression_train["top genre"].value_counts().nlargest(5).sort_values(ascending=True).plot.barh() plt.ylabel("Top genre") plt.xlabel("Count") plt.title("Top Genres (top 5)") plt.show() 
# + id="XTsQnDFv_Xer" outputId="55bf4b72-87ea-4c96-913f-3941208032bc" colab={"base_uri": "https://localhost:8080/", "height": 447} # Plot histograms # %matplotlib inline import matplotlib.pyplot as plt regression_train.hist(bins=50, figsize=(12,7)) plt.show() # + id="0IS_Xp7p_Xet" outputId="81126ffb-362c-4334-f998-ec91570732b7" colab={"base_uri": "https://localhost:8080/", "height": 286} # Plot boxplot of particular feautures regression_train_boxplot= regression_train[['bpm', 'nrgy', 'dnce', 'dB', 'live', 'val', 'spch', 'pop']] sns.boxplot(data=regression_train_boxplot) plt.xlabel('Features') plt.ylabel('Value') plt.show() # + id="KPh6J1C9_Xev" outputId="c31c0d3e-6144-4ad2-ce4b-85ab23ce9f5c" colab={"base_uri": "https://localhost:8080/", "height": 613} # violin plot between popularity and loudness regarding the top 10 popular songs plt.figure(figsize=(15,10)) sns.violinplot(x= "pop", y="dB", data= top_10_popular) plt.show() # + [markdown] id="WudLZrTC_Xey" # >Heatmap of the dataset: # # + [markdown] id="D0GqzpDH_Xey" # >>The darker the colour(closest to 1), the stronger is the correlation. 
Here we notice that popularity has strong correlation with energy(nrgy), danceability(dnce), loudness(dB) (and duration, but probably not important): # + id="CGROjypo_Xey" outputId="3f264ada-a824-410f-87c1-2854afa02721" colab={"base_uri": "https://localhost:8080/", "height": 611} # Plot linear correlation matrix fig, ax = plt.subplots(figsize=(15,10)) sns.heatmap(regression_train.corr(), annot=True, cmap='YlGnBu', vmin=-1, vmax=1, center=0, ax=ax) plt.title('Linear Correlation Matrix') plt.show() # + [markdown] id="g3N3KCXb_Xe1" # > Fitting linear lines between the numerical attributes which seemed to have strong correlation in heatmap # + id="OVL6jgWh_Xe1" outputId="7c492b72-58f7-43e9-c713-02ba0f520e05" colab={"base_uri": "https://localhost:8080/", "height": 1000} ''' Fit line between popoluarity and energy ''' fig, ax = plt.subplots() fit = np.polyfit(regression_train['nrgy'], regression_train['pop'], deg=1) print(fit) ax.plot(regression_train['nrgy'], fit[0] * regression_train['nrgy'] + fit[1], color='green') ax.scatter(regression_train['nrgy'],regression_train['pop']) plt.xlabel("Energy") plt.ylabel("Popularity") plt.title('Fitting the line between popularity and energy') plt.show() ''' Fit line between popoluarity and loudness ''' fig, ax = plt.subplots() fit = np.polyfit(regression_train['dB'], regression_train['pop'], deg=1) print(fit) ax.plot(regression_train['dB'], fit[0] * regression_train['dB'] + fit[1], color='green') ax.scatter(regression_train['dB'],regression_train['pop']) plt.xlabel("Loudness") plt.ylabel("Popularity") plt.title('Fitting the line between popularity and loudness') plt.show() ''' Fit line between popoluarity and danceability ''' fig, ax = plt.subplots() fit = np.polyfit(regression_train['dnce'], regression_train['pop'], deg=1) print(fit) ax.plot(regression_train['dnce'], fit[0] * regression_train['dnce'] + fit[1], color='green') ax.scatter(regression_train['dnce'],regression_train['pop']) plt.xlabel("Danceability") 
plt.ylabel("Popularity") plt.title('Fitting the line between popularity and danceability') plt.show() ''' Fit line between popoluarity and duration ''' fig, ax = plt.subplots() fit = np.polyfit(regression_train['dur'], regression_train['pop'], deg=1) print(fit) ax.plot(regression_train['dur'], fit[0] * regression_train['dur'] + fit[1], color='green') ax.scatter(regression_train['dur'],regression_train['pop']) plt.xlabel("Duration") plt.ylabel("Popularity") plt.title('Fitting the line between popularity and duration') plt.show() # + [markdown] id="E_Ix7WbW_Xe3" # > Exploring the attribute "energy" more in depth: # + id="LWadvo28_Xe4" outputId="fb8cf2f7-6bb0-4797-8ffa-5492f6f028b4" colab={"base_uri": "https://localhost:8080/", "height": 291} import seaborn as sns color= sns.color_palette() sns.set_style('darkgrid') sns.distplot(regression_train.nrgy) plt.show() # + [markdown] id="xHVkdYXR_Xe7" # ### Further Correlations # + [markdown] id="JLYVEF-B_Xe7" # > Below we will use the standard correlation coefficient (Pearson's r) between every pair of attributes compared to 'popularity', with the corr() method: # + [markdown] id="AKd-_IKr_Xe8" # > Standard correlation coefficient calculates strength of linear relationship: # * close to +1 it indicates a strong positive correlation # * close to -1 means a strong negative one # + id="kAAlryGZ_Xe8" outputId="c5cc6840-cbc8-437c-874a-4c16be515224" colab={"base_uri": "https://localhost:8080/", "height": 235} corr_matrix= regression_train.corr() corr_matrix corr_matrix['pop'].sort_values(ascending=False) # + [markdown] id="mY37Pdma_XfD" # > So from above, we can notice that popularity has strong positive linear relationship with: duration, loudness (dB), energy and danceability (and in lower degree with speechiness) # + id="rLZi2jag_XfD" outputId="8017da53-51bc-40a5-e8cf-8e4f64c9491f" colab={"base_uri": "https://localhost:8080/", "height": 285} # histogram of loudness regression_train['dB'].hist() # + id="-2H1VW-x_XfG" 
outputId="cf36f020-23b3-4b3f-8f4d-34452ae81339" colab={"base_uri": "https://localhost:8080/", "height": 285} # Histogram of energy regression_train['nrgy'].hist() # + id="w_W7oBIh_XfI" outputId="097eb7e1-5738-4547-918c-da1288ba384d" colab={"base_uri": "https://localhost:8080/", "height": 307} #scatter plot between energy and loudness which seem to be important indicators for song's popularity regression_train.plot(kind='scatter', x='nrgy', y='dB', alpha=0.4) plt.show # + id="xDou9Ier_XfK" outputId="866f2220-c6e5-4549-d475-09a6a3f10499" colab={"base_uri": "https://localhost:8080/", "height": 286} # alpha for transparency regression_train.plot(kind='scatter', x= 'dB', y='pop', alpha=0.6) plt.show() # + [markdown] id="ueo4G79n_XfN" # ## Preparing the data for machine learning algorithms # + [markdown] id="iZuqiPw6_XfO" # > Calling the isolated dataframe as: 'spotify' # + id="cJ7gzRyo_XfO" spotify= regression_train.drop('pop', axis=1) # isolating the target which is 'pop' spotify_labels= regression_train['pop'].copy() # target variable/label is 'pop' # + [markdown] id="MW0MKPhX_XfR" # ### Data Cleaning # # ### Feature Engineering: feature selection # + id="9CcGZUsF_XfR" spotify = spotify[['top genre', 'nrgy', 'dnce', 'dB', 'dur']] # the final train predictos we are keeping # + id="LeaFT4J3_XfT" outputId="896d327a-c791-4532-e7c2-bceaa07f6381" colab={"base_uri": "https://localhost:8080/", "height": 34} spotify.shape # + [markdown] id="P31bSZKP_XfV" # > Most ML algorithms cannot work with missing features so this is an important step: # + id="XFZUcUbw_XfV" outputId="c992234e-975f-4c86-d5c1-1242cc80ffa5" colab={"base_uri": "https://localhost:8080/", "height": 118} spotify.isnull().any() # + id="ScNV8PXv_XfX" outputId="028ded43-a513-4777-e09b-d209c2a174e1" colab={"base_uri": "https://localhost:8080/", "height": 118} spotify.isnull().sum() # + [markdown] id="MZYURdJi_XfZ" # > We can view the 15 missing values in the column 'top genre' # + id="NhJfT0SB_XfZ" 
outputId="91417894-5668-4095-dd14-c0b3e9ef490d" colab={"base_uri": "https://localhost:8080/", "height": 491} missing_values_regr= spotify[spotify.isnull().any(axis=1)] missing_values_regr # + [markdown] id="HhrdEnRM_Xfb" # > Replacing those with 'adult standards', as in exploration of the dataset it was the most famous top genre and, thus, it seems to be one good choice: # + id="JllA8rg2_Xfb" spotify['top genre'].fillna(value='adult standards',inplace=True) # + id="Kd2RmTuN_Xfd" outputId="e8f863a1-74f7-435f-c21e-df6821a5f350" colab={"base_uri": "https://localhost:8080/", "height": 118} spotify.isnull().sum() # + [markdown] id="v2QlSr-__Xfe" # > Below we are going to use 'SimpleImputer' which can handle the missing values. Based on the way we going to fill its parameters, we are going to specify how we want to handle the missing values in the unseed data (test data): # + [markdown] id="XCpAAi0R_Xff" # Although in this dataset we can know that we have missing values only in top genre in test data (as we will see below), we are continuing this way for the generalisation of the model. 
If the test data had missing numerical values, then they would be filled by the median value: # + id="mYRoSxb1_Xfg" from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="median") # + id="h-wGBYoE_Xfi" outputId="746e909e-59af-41b7-f01d-816f92a214ad" colab={"base_uri": "https://localhost:8080/", "height": 218} spotify.info() # + id="q2M8cE1L_Xfk" spotify_num= spotify.drop(['top genre'], axis=1) #the numerical attributes # + id="4rkBrsos_Xfo" outputId="086217fc-2f31-41f4-ddcf-f34e230b7e3b" colab={"base_uri": "https://localhost:8080/", "height": 402} spotify_num # + id="Tka4IJOV_Xfr" outputId="a47cf000-1cea-4645-94f9-eab01211579c" colab={"base_uri": "https://localhost:8080/", "height": 218} spotify.info() # + id="WsL-a2w4_Xfu" outputId="4421aae8-5a8c-40a3-cb01-c0ac70bc1242" colab={"base_uri": "https://localhost:8080/", "height": 50} imputer.fit(spotify_num) # + id="hzYcgqyz_Xfw" outputId="0e30c667-c7e2-4357-960d-9c8cc2c61dd7" colab={"base_uri": "https://localhost:8080/", "height": 34} imputer.statistics_ # + id="vGEugcFV_Xfy" outputId="cac0fa48-0508-40ce-be62-658af18c38c9" colab={"base_uri": "https://localhost:8080/", "height": 34} spotify_num.median().values # + id="oJpYUsj2_Xf1" X = imputer.transform(spotify_num) # + id="vX1d4_WW_Xf2" spotify_tr = pd.DataFrame(X, columns=spotify_num.columns, index=spotify.index) # + id="nTRa3Kcd_Xf6" outputId="a43ceb99-91c8-4e01-dc43-79aa0a1f6b7d" colab={"base_uri": "https://localhost:8080/", "height": 35} imputer.strategy # + id="-hSulKf7_Xf8" outputId="9149157d-a7ad-4fee-876b-0393a3a3303e" colab={"base_uri": "https://localhost:8080/", "height": 106} spotify_tr = pd.DataFrame(X, columns=spotify_num.columns, index=spotify_num.index) spotify_tr.head(2) # a sample of how inputer works # + id="7cajfXzw_Xf_" outputId="0a9be92f-3218-491e-978c-a3e543d8029d" colab={"base_uri": "https://localhost:8080/", "height": 218} spotify.info() # + id="GOTuGZDn_XgB" # categorical attributes which we will hold for the construction 
of the model categorical_attribute= spotify[['top genre']] # + [markdown] id="rWrwpjbF_XgF" # > Process for using OneHoTencoder for converting categorical into numerical values: # + id="OuSKAR7o_XgF" outputId="15638311-077c-45d2-ce70-d52e61233959" colab={"base_uri": "https://localhost:8080/", "height": 101} from sklearn.preprocessing import OrdinalEncoder ordinal_encoder = OrdinalEncoder() spotify_cat_encoded = ordinal_encoder.fit_transform(categorical_attribute) spotify_cat_encoded[:5] # + id="-MMXAw_w_XgI" outputId="a06c516b-b1c1-494b-ccdb-bcd77d7ccc09" colab={"base_uri": "https://localhost:8080/", "height": 134} from sklearn.preprocessing import OneHotEncoder cat_encoder = OneHotEncoder(sparse=False, handle_unknown='ignore') spotify_cat_hot1 = cat_encoder.fit_transform(categorical_attribute) spotify_cat_hot1 # + id="CBtk3nCb_XgM" from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler, OneHotEncoder # + [markdown] id="ilSLSr-n_XgN" # > Machine Learning Algorithms do not perform well when the input numerical attributes have different scales. 
Below we are going to scale the numerical attributes, and construct a pipeline which can link the tranformations both of numerical and categorical values in the test set: # + id="F5IJFsgI_XgO" # We create the preprocessing pipelines for both numerical and categorical data # credits to: https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html # we are using the 'most frequent' strategy with the rational that out categorical attributes are # very possible to be repeated as the song becomes more popular (one popular singer might own many popular songs in the test set) numeric_features = list(spotify_num) numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())]) categorical_features = list(categorical_attribute) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='most_frequent', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) full_pipeline = ColumnTransformer( transformers=[ ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) spotify_final= full_pipeline.fit_transform(spotify) #spofify_final : final and transformed train predictors-features # + [markdown] id="w0GdtUCr_XgP" # > OneHotEncoder returns a sparse matrix while num_pipeline a dense matrix. In this case of mixing the numerical with categorical transformations, ColumnTransformer returns either a sparse or a dense matrix. 
Here it returned a sparse matrix: # + id="z7sMwkbq_XgP" outputId="3610c9d2-bf5e-4e6b-fb15-bb9738c7ba90" colab={"base_uri": "https://localhost:8080/", "height": 50} spotify_final # + id="Lf69CipM_XgS" # we can convert to an array if needed: spotify_final= spotify_final.toarray() # + [markdown] id="Aa4Qd9rF_XgU" # # Linear Regression # + id="FyfD07pe_XgU" from sklearn.linear_model import LinearRegression # + id="f85FGUqq_XgW" outputId="abc21b75-7854-4e36-b1ce-78284caa285a" colab={"base_uri": "https://localhost:8080/", "height": 34} lin_reg= LinearRegression() lin_reg.fit(spotify_final, spotify_labels) # + [markdown] id="8RZVGIU8_XgX" # ### Root Mean Square Error (RMSE) # # > Measures the standard deviation of the errors the systems makes in its predictions: # + id="gObbMZ8U_XgX" from sklearn.metrics import mean_squared_error spotify_predictions= lin_reg.predict(spotify_final) # + [markdown] id="s1njC23d_XgZ" # >> The interval of popularity scores (from the data exploration) was between 26 and 84. Here RMSE is 9.27. We could say it not a bad result, but neither a good one: # + id="WV5RXbuE_XgZ" outputId="1fd1eb2a-278a-4ba2-d2bb-10d5b32406ef" colab={"base_uri": "https://localhost:8080/", "height": 34} lin_mse = mean_squared_error(spotify_labels, spotify_predictions) lin_rmse = np.sqrt(lin_mse) lin_rmse # + [markdown] id="y9LO7gVv_Xgb" # > Using r2_score to conclude if the dataset is linear. If close to 1, the dataset is linear by nature. 
Here it is not negative, but it has not a high value neither: # + id="fmhg81R8_Xgb" outputId="0d6834dc-e907-44c5-80e0-712596d1ef9f" colab={"base_uri": "https://localhost:8080/", "height": 34} from sklearn.metrics import r2_score r2_score(spotify_predictions, spotify_labels) # + [markdown] id="WQIecPqE_Xgc" # ### MAE # + [markdown] id="BDDK1HEL_Xgc" # > Calculating the Mean Absolute Error (MAE), which is smaler than that of the RMSE, since RMSE raises the errors to its square values # + id="fn5oV-rJ_Xgc" outputId="66813ebc-de49-4bc7-e32d-0706c4d3da48" colab={"base_uri": "https://localhost:8080/", "height": 34} from sklearn.metrics import mean_absolute_error lin_mae = mean_absolute_error(spotify_labels, spotify_predictions) lin_mae # + [markdown] id="xmY7CfGx_Xgd" # ## Training a Decision Tree Regressor # + id="QdghWD6c_Xgd" outputId="f19500f1-b60c-4122-95ff-c49c3d8737cc" colab={"base_uri": "https://localhost:8080/", "height": 0} from sklearn.tree import DecisionTreeRegressor tree_reg= DecisionTreeRegressor(random_state=42) tree_reg.fit(spotify_final, spotify_labels) # + id="NJBKjTyI_Xgf" outputId="fc8a60d4-4846-4a73-d826-21132424ed4d" colab={"base_uri": "https://localhost:8080/", "height": 0} spotify_predictions = tree_reg.predict(spotify_final) tree_mse = mean_squared_error(spotify_labels, spotify_predictions) tree_rmse = np.sqrt(tree_mse) tree_rmse # + [markdown] id="d0D0NYNH_Xgg" # > Obviously this cannot be a valid result and this model is extremely overfitting the data # + [markdown] id="4Uehrw41_Xgg" # >> Evaluation of Decision tree using Cross-Validation with K-folds = 10: # + id="8BlkjpBg_Xgi" outputId="f208be37-4a9f-4462-b615-119435ae7e88" colab={"base_uri": "https://localhost:8080/", "height": 0} from sklearn.model_selection import cross_val_score scores = cross_val_score(tree_reg, spotify_final, spotify_labels, scoring="neg_mean_squared_error", cv=10) tree_rmse_scores = np.sqrt(-scores) # the scoring function is the opposite of the MSE so that's why we 
use '-scores' tree_rmse_scores # + id="5eombRx-_Xgj" outputId="f5fdfa96-fee0-468b-8474-71514ae5e5ff" colab={"base_uri": "https://localhost:8080/", "height": 0} def display_tree_scores(tree_scores): print("Scores:", tree_scores) print("Mean:", tree_scores.mean()) print("Standard deviation:", tree_scores.std()) display_tree_scores(tree_rmse_scores) # + [markdown] id="UW58K_5Y_Xgk" # > Here we can see than DecisionTreeRegressor is performing much worse than linear regression # + [markdown] id="kWAJGJna_Xgk" # > Below it is an effort to change decision tree's parameters. Decision Trees are prone to overfitting when dealing with regression tasks, for this reason we set min_samples_leaf=10, and max_depth=1 which gave better results than the default. # # >> It performs better than the previous DecisionTreeRegressor, however the final result is still not satisfactory. # + id="3O6Gxnyb_Xgk" outputId="6e502ae9-b872-4e97-b74d-fe81cfc70e86" colab={"base_uri": "https://localhost:8080/", "height": 0} from sklearn.tree import DecisionTreeRegressor tree_reg= DecisionTreeRegressor( random_state=42, min_samples_leaf=10, max_depth=1) tree_reg.fit(spotify_final, spotify_labels) # + id="OC6glgIX_Xgm" outputId="21d911ef-59fd-4e8c-f5ca-93cbe9f4799a" colab={"base_uri": "https://localhost:8080/", "height": 0} from sklearn.model_selection import cross_val_score scores = cross_val_score(tree_reg, spotify_final, spotify_labels, scoring="neg_mean_squared_error", cv=10) tree_rmse_scores = np.sqrt(-scores) # the scoring function is the opposite of the MSE so that's why we use '-scores' tree_rmse_scores # + id="IjzSKu8u_Xgn" outputId="a3a27adf-4278-440f-b673-33915b1ec310" colab={"base_uri": "https://localhost:8080/", "height": 0} def display_tree_scores(tree_scores): print("Scores:", tree_scores) print("Mean:", tree_scores.mean()) print("Standard deviation:", tree_scores.std()) display_tree_scores(tree_rmse_scores) # + [markdown] id="AV0SmCoM_Xgo" # ## Evaluation in the Test Set # + 
id="bBEmhjK9_Xgo" outputId="f40ecd22-b680-416f-c1e9-b6141b394107" colab={"base_uri": "https://localhost:8080/", "height": 370} regression_test.info() # + [markdown] id="2_BJS8o3_Xgt" # > Sychronizing the test's shape with that of the train set: # + id="BpAo2Cv3_Xgu" x= regression_test[['top genre','nrgy', 'dnce', 'dB', 'dur']] # test predictors # + id="v-YeY6lH_Xgv" outputId="2e7edf86-f2ca-4d85-c41a-ade97f1213e5" colab={"base_uri": "https://localhost:8080/", "height": 218} x.info() # + id="lsz-XxrT_Xgx" outputId="e02cb331-30bd-4b48-eeb0-34540ef15f95" colab={"base_uri": "https://localhost:8080/", "height": 118} x.isnull().any() # + [markdown] id="qu-i8ZSL_Xgy" # > For example this command: "x['top genre'].fillna(value='adult standards',inplace=True)" is not needed to be executed because of the aforementioned pipeline transformations. If executed then the warning is: "A value is trying to be set on a copy of a slice from a DataFrame". # + [markdown] id="DGgHCgmU_Xgy" # > Running the full pipeline to tranform the test data. # >> Before we used 'fit_transform', now we are calling only 'transform' for the successfull transition: # + id="HzUhXFa1_Xgy" x_final=full_pipeline.transform(x) # + [markdown] id="1tGxHbvs_Xg1" # >The number of rows is normal to be different and it depends on the initial split which was 80%-20% between train and test set. 
The number of columns are transformed because of the previous steps, but the important here is that both train and test predictors to have the same number of columns (the given inputs compared to the unseen data of test set): # + id="467EXHPl_Xg1" outputId="e932567a-2be8-4705-928d-b4d92f460a75" colab={"base_uri": "https://localhost:8080/", "height": 34} x_final.shape # + id="x8rNOCmE_Xg2" outputId="26339cd2-2164-4360-d09a-63785e5f0d83" colab={"base_uri": "https://localhost:8080/", "height": 34} spotify_final.shape # + id="M6GI8URU_Xg3" pop_predictions= lin_reg.predict(x_final) # + id="6utXNfxJ_Xg5" outputId="1e36ef54-6afd-4e12-a871-8e24193570b3" colab={"base_uri": "https://localhost:8080/", "height": 403} pop_predictions # + [markdown] id="xslFhBKV_Xg6" # >Exporting the results of 'pop' and 'Id' into a csv file: # + id="o8PNTiCt_Xg7" outputId="80bb177b-86d0-4114-9b5a-31d14c2bce9c" colab={"base_uri": "https://localhost:8080/", "height": 34} len(list(regression_test['Id'])) # + id="ol7VhhoN_Xg9" outputId="93049ab6-a926-478a-8953-6efdd8b4a314" colab={"base_uri": "https://localhost:8080/", "height": 34} len(list(pop_predictions)) # + id="NlB_DpGI_Xg-" regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_predictions}) #regr_results # + id="FOAtnnfk_Xg_" #regr_results.to_csv('GroupN.csv', index = False) # + id="hZdCss3y_XhA" outputId="cbdaa764-895e-4f91-e1f5-17a8709cd364" colab={"base_uri": "https://localhost:8080/", "height": 34} len(regr_results) # + [markdown] id="6I2uDowt_XhC" # ### Random Forest Regressor # + id="aidm58MC_XhC" outputId="faaba0d5-14cd-4cb4-c0eb-d2d682f97a7a" colab={"base_uri": "https://localhost:8080/", "height": 134} from sklearn.ensemble import RandomForestRegressor forest_reg= RandomForestRegressor() forest_reg.fit(spotify_final, spotify_labels) # + id="fsUQnd3__XhD" outputId="0e641e67-f073-41a1-d7d9-93d10a960114" colab={"base_uri": "https://localhost:8080/", "height": 34} pop_predictions = forest_reg.predict(spotify_final) 
forest_mse = mean_squared_error(spotify_labels, pop_predictions) forest_rmse = np.sqrt(forest_mse) forest_rmse # + [markdown] id="wsTaimD2_XhF" # > The ForestRegressor is overfitting the test data, since the RMSE in test set was approximately double reaching 8.45 (the model is performing better in training that in test data) # + id="Qu9xg1sG_XhF" outputId="462799de-099b-4ea4-ba93-92acc2f21ec4" colab={"base_uri": "https://localhost:8080/", "height": 84} from sklearn.model_selection import cross_val_score scores = cross_val_score(forest_reg, spotify_final, spotify_labels, scoring="neg_mean_squared_error", cv=10) forest_reg_scores = np.sqrt(-scores) # the scoring function is the opposite of the MSE so that's why we use '-scores' forest_reg_scores def display_forest_reg_scores(forest_reg_scores): print("Scores:", forest_reg_scores) print("Mean:", forest_reg_scores.mean()) print("Standard deviation:", forest_reg_scores.std()) display_forest_reg_scores(forest_reg_scores) # + id="SGIz0YBF_XhH" outputId="c547b505-14c6-456a-81ed-c099e2840062" colab={"base_uri": "https://localhost:8080/", "height": 134} model_rfr= RandomForestRegressor(n_estimators=500, max_leaf_nodes=16, n_jobs=-1) model_rfr.fit(spotify_final,spotify_labels) # + id="eGTOazTg_XhI" pop_predictions_rfr= model_rfr.predict(x_final) #pop_predictions_rfr # + id="q1IjA697_XhL" regr_results_rfr= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_predictions_rfr}) #regr_results_rfr # + id="0WDeAyDa_XhM" #regr_results_rf= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_predictions_rfr}) #regr_results_rf #regr_results_rf.to_csv('rf.csv', index = False) #### RMSE 8.45 in test set # + [markdown] id="DxjlhhLh_XhN" # > Combining Random Forest Regressor with grid search for detecting the best estimator: # + id="UCecr0aX_XhO" outputId="9684b046-c4cb-4655-af0e-084a5e1546cc" colab={"base_uri": "https://localhost:8080/", "height": 370} from sklearn.model_selection import GridSearchCV param_grid = [ # try 12 (3×4) 
combinations of hyperparameters {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]}, # then try 6 (2×3) combinations with bootstrap set as False {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, ] forest_reg = RandomForestRegressor(random_state=42) # train across 5 folds, that's a total of (12+6)*5=90 rounds of training grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', return_train_score=True) grid_search.fit(spotify_final, spotify_labels) # + id="JDqxrY43_XhP" outputId="30d5a1ac-d497-49f7-d81a-e6dc317edd97" colab={"base_uri": "https://localhost:8080/", "height": 34} #best hyperparameter grid_search.best_params_ # + id="0O0Ssasi_XhQ" outputId="317b69b6-71ce-4ef4-afce-c159fc50714f" colab={"base_uri": "https://localhost:8080/", "height": 134} grid_search.best_estimator_ # + id="b7IKQ1yL_XhS" # score of each hyperparameter combination tested during the grid search # + id="ld6sg7q9_XhT" outputId="560ff0ad-00d4-4c75-92f3-010f414ed4fa" colab={"base_uri": "https://localhost:8080/", "height": 319} cvres = grid_search.cv_results_ for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]): print(np.sqrt(-mean_score), params) # + id="QWfcwR1D_XhU" outputId="a44efd01-b3ba-47a2-b9f8-2e86bd5af5a9" colab={"base_uri": "https://localhost:8080/", "height": 386} from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint param_distribs = { 'n_estimators': randint(low=1, high=200), 'max_features': randint(low=1, high=8), } forest_reg = RandomForestRegressor(random_state=42) rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs, n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42) rnd_search.fit(spotify_final, spotify_labels) # + id="pDUpPo47_XhV" outputId="655ddb1e-e367-4641-809a-cb6006c90860" colab={"base_uri": "https://localhost:8080/", "height": 185} cvres = rnd_search.cv_results_ for mean_score, params in 
zip(cvres["mean_test_score"], cvres["params"]): print(np.sqrt(-mean_score), params) # + id="Qnv6xtzy_XhW" outputId="bf1a6d88-6222-4add-8e66-1a7493c30b47" colab={"base_uri": "https://localhost:8080/", "height": 403} feature_importances = grid_search.best_estimator_.feature_importances_ feature_importances # + id="VGuwpu8z_XhX" grid_model = grid_search.best_estimator_ grid_model = grid_search.best_estimator_ x_final=full_pipeline.transform(x) pop_predictions_grid= grid_model.predict(x_final) # + id="FgQmgcVd_XhY" #pop_predictions_grid # + id="yYnpJdzW_XhZ" regr_results_grid= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_predictions_grid}) #regr_results_grid #**************************************************************************************************** #**************************************************************************************************** ## 8.11 #regr_results_grid.to_csv('gr.csv', index = False) # + [markdown] id="gjyffVbZ_Xha" # ### Stochastic Gradient Descent (SGD) Regressor # # > Gradient Descent is a different way of training a linear regression model, which is better suited for cases where there is a large number of features or too many training instances to fit in memory. Here, none of these hold true, however, we are going to try it: # # > The main idea here is to tweak SGD's parameters iteratively in order to minimise the cost function. 
# # >> Performing SGD linear regression using SGDRegressor(): maximum epochs= 1000, or until the loss drops less than 0.001 (eta=1e-3), default learning scedule, does not use regularisation (penalty=none) and learning rate "η"(eta)=0.1 # + id="FXePiMPw_Xha" outputId="bf11333f-e99f-46d6-efed-256f170f4b2a" colab={"base_uri": "https://localhost:8080/", "height": 118} from sklearn.linear_model import SGDRegressor sgd_regressor= SGDRegressor(max_iter=1000, tol=1e-3, penalty=None, eta0=0.1, random_state=42) sgd_regressor.fit(spotify_final, spotify_labels) # + id="_NJHuKXX_Xhd" sgd_pop_predictions= sgd_regressor.predict(x_final) # + [markdown] id="GyUgrBUa_Xhd" # > As we can notice (rmse in test data=16.88) SGD is not appropriate here # + id="caZcYvmp_Xhe" #regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': sgd_pop_predictions}) #regr_results #regr_results.to_csv('-&-.csv', index = False) #rmse = 16.88 # + [markdown] id="owQpX3ms_Xhg" # ### Polynomial Features # + id="Q9po6-CU_Xhg" from sklearn.preprocessing import PolynomialFeatures poly_features= PolynomialFeatures(degree=2, include_bias=False) X_poly= poly_features.fit_transform(spotify_final) # + id="HCYF-K7C_Xhh" outputId="247608ee-db2b-4649-8607-4b996147fb32" colab={"base_uri": "https://localhost:8080/", "height": 34} lin_reg=LinearRegression() lin_reg.fit(X_poly, spotify_labels) # + id="rInlpHdn_Xhj" Y_poly= poly_features.fit_transform(x_final) # + id="RBSVo3kz_Xhj" poly_pop_predictions= lin_reg.predict(Y_poly) # + [markdown] id="33DyJwhD_Xhk" # > As exprected, using high-degree polynomial would not prevent the training data from overfitting the test data # + id="xukKBsCl_Xhk" #regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': poly_pop_predictions}) #regr_results #regr_results.to_csv('dwe.csv', index = False) # rmse = 16.23 # + [markdown] id="UJeLODMz_Xhl" # ### SVM Linear Regression # # > SVM in general can perform linear and non-linear regression, classification and outlier detection. 
# # >> C: hyperparameter of the model # # Using Support Vector Regressor with kerner='linear': # # + id="g2fV0x-C_Xhl" outputId="0003b3ef-e89c-44d4-878e-282408de3617" colab={"base_uri": "https://localhost:8080/", "height": 34} from sklearn.svm import SVR svm_reg = SVR(kernel="linear") svm_reg.fit(spotify_final, spotify_labels) pop_predictions = svm_reg.predict(spotify_final) svm_mse = mean_squared_error(spotify_labels, pop_predictions) svm_rmse = np.sqrt(svm_mse) svm_rmse # + [markdown] id="Y7v2sa5f_Xhm" # Setting epsilon=1.5.Epsilon specifies the epsilon-tube within which no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value # + id="gRN5RYyZ_Xhm" outputId="5e27dc28-45b8-4c0e-c73f-727c1a170b4d" colab={"base_uri": "https://localhost:8080/", "height": 67} from sklearn.svm import LinearSVR svm_reg= LinearSVR(epsilon=1.5, random_state=42) svm_reg.fit(spotify_final, spotify_labels) # + id="uvn-LdWX_Xho" svm_pop_predictions= svm_reg.predict(x_final) # + id="G2nfiIft_Xho" regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': svm_pop_predictions}) #regr_results #regr_results.to_csv('-&-.csv', index = False) #rmse = 8.84!! 
but still is < 8.11 of grid search # + id="pCBVkBRp_Xhp" outputId="f2fbdc4c-0817-47b5-9b52-cb427a5fe262" colab={"base_uri": "https://localhost:8080/", "height": 84} from sklearn.model_selection import cross_val_score scores = cross_val_score(svm_reg, spotify_final, spotify_labels, scoring="neg_mean_squared_error", cv=10) svm_reg_scores = np.sqrt(-scores) # the scoring function is the opposite of the MSE so that's why we use '-scores' svm_reg_scores def display_svm_reg_scores(svm_reg_scores): print("Scores:", svm_reg_scores) print("Mean:", svm_reg_scores.mean()) print("Standard deviation:", svm_reg_scores.std()) display_svm_reg_scores(svm_reg_scores) # + id="m9oXEwLH_Xhr" svm_reg= LinearSVR(epsilon=1.5, random_state=42, C=12) svm_reg.fit(spotify_final, spotify_labels) svm_pop_predictions= svm_reg.predict(x_final) # + [markdown] id="DaUUlLsM_Xht" # > Cross Evaluation using cross_val_svore # + id="RfEUAe5__Xhu" outputId="4964f7a2-0d85-439d-df28-61ac1853af9f" colab={"base_uri": "https://localhost:8080/", "height": 84} from sklearn.model_selection import cross_val_score scores = cross_val_score(svm_reg, spotify_final, spotify_labels, scoring="neg_mean_squared_error", cv=10) svm_reg_scores = np.sqrt(-scores) # the scoring function is the opposite of the MSE so that's why we use '-scores' svm_reg_scores def display_svm_reg_scores(svm_reg_scores): print("Scores:", svm_reg_scores) print("Mean:", svm_reg_scores.mean()) print("Standard deviation:", svm_reg_scores.std()) display_svm_reg_scores(svm_reg_scores) # + id="0QLTVC05_Xhw" regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': svm_pop_predictions}) #regr_results #regr_results.to_csv('-&-.csv', index = False) #*************************************************************************************************************** #rmse = 7.86967 So far the 2nd best model #*************************************************************************************************************** # + [markdown] id="i4QVD_Vr_Xhx" # ### 
LinearSVR with an RBF kernel # > Using randomised search with cross validation to find the appropriate hyperparameter values for C and gamma: # + id="C3gahI91_Xhx" outputId="d09288b8-c015-49fc-c78e-6312ac9f8ab8" colab={"base_uri": "https://localhost:8080/", "height": 67} from sklearn.svm import LinearSVR lin_svr = LinearSVR(random_state=42) lin_svr.fit(spotify_final, spotify_labels) # + id="0qAiGSfY_Xhy" pop_pred=lin_svr.predict(x_final) #len(pop_pred) # + id="cLMJ3mWF_Xhz" outputId="9aa611fb-b192-4c30-8f2b-7d85f7c46b3e" colab={"base_uri": "https://localhost:8080/", "height": 1000} from sklearn.svm import SVR from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciprocal, uniform param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 15)} rnd_search_cv = RandomizedSearchCV(SVR(), param_distributions, n_iter=10, verbose=2, cv=3, random_state=42) rnd_search_cv.fit(spotify_final, spotify_labels) # + id="qn2hREOh_Xh0" outputId="72381bc2-7ea6-41b7-ce83-92c331004e84" colab={"base_uri": "https://localhost:8080/", "height": 67} rnd_search_cv.best_estimator_ # + id="uVykcJXi_Xh1" pop_pred = rnd_search_cv.best_estimator_.predict(x_final) # + id="GmkfSG5d_Xh2" regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_pred}) #regr_results #regr_results.to_csv('cc.csv', index = False) #*************************************************************************************************************** # RMSE in test: 7.83639. 
So far the best model #*************************************************************************************************************** # + id="D6lBOv7K_Xh5" spotify_final_dense= spotify_final.toarray() x_final_dense= x_final.toarray() # + [markdown] id="bB62sUme_Xh5" # ### Gradient Boosting Regressor # # > Boosting hypothesis refers to any Ensemble method that can combine several weak learners into a strong learner: It works by sequentially adding predictors to an ensemble, each one correcting its predecessor (popular methods: Adaptive Boost, Gradient Boosting). They are both suitable for regression and classification. # # > Gradient Boosting Regressor works by adding predictos to an ensemble, each one correcting its predecessor, by fitting the new predictor to the residual errors made by the previous predictor (and not by increasing the relative weight of misclassified training instances as in Adaboost). # >> n_estimators= numbers of trees # # + [markdown] id="RIzajjX0_Xh6" # >> max_depth, min_samples_leaf: hyperparameters controlling the growth of Desicion Trees # + id="R0YfUHD__Xh6" from sklearn.ensemble import GradientBoostingRegressor gbr= GradientBoostingRegressor(max_depth=2, n_estimators=100, learning_rate=0.1, random_state=42) gbr.fit(spotify_final_dense, spotify_labels) pop_predictions_gbr= gbr.predict(x_final_dense) # + id="-BjfAwMR_Xh-" regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_predictions_gbr}) #regr_results #regr_results.to_csv('gb.csv', index = False) #rmse= 8.9 # + [markdown] id="4xU1EBm2_Xh_" # ### XGBoost # > Extreme Gradient Boosting (Optimised Implementation of Gradient Boosting) # + id="hGBsM0D1_Xh_" import xgboost # + [markdown] id="vfKfEfgh_XiA" # > It requires dense arrays: # + id="_E6e_hOc_XiB" outputId="19797fa1-611e-44b7-b6ee-e025ec3ef4c8" colab={"base_uri": "https://localhost:8080/", "height": 34} xgb_reg = xgboost.XGBRegressor(random_state=42) xgb_reg.fit(spotify_final_dense, spotify_labels) pop_predictions_gb= 
xgb_reg.predict(x_final_dense) # + id="nBmsKyuc_XiC" regr_results= pd.DataFrame({'Id': regression_test['Id'], 'pop': pop_predictions_gb}) #regr_results #regr_results.to_csv('cc.csv', index = False) # RMSE= 8.1 # + [markdown] id="PaxXRt7Q_XiD" # As we can see above, indeed it performed quite better than Gradient Boosting, but still is not the best model overall. # + [markdown] id="QKT43fV-_XiD" # ### Reflection on Regression: # # Overall, the best models seemed to be: Linear SVM using best estimator (RMSE=7.836 in kaggle's test set) which gave us the best parametres (such as gamma and C) for this model. Xboost Regressor follows reaching 8.1 while Random Forest Regressor along with grid search gave RMSE 8.11. # # In many cases the models were overfitting the test data. This can be solved with several ways such as: # - gathering more training data (indeed, this dataset can be considered quite small) # - implementing better feature engineering such as proceeding to better feature selection and/or introducing new columns in the dataset which would reflect potentially important feature relationships. # - Undersampling and removal of outliers and noise # #
Spotify_Songs_Popularity_Regression/Spotify_Songs_Popularity_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Padding and Stride # In the example in the previous section,we used an input with a height and width of 3 and a convolution kernel with a height and width of 2 to get an output with a height and a width of 2. In general, assuming the input shape is n<sub>h</sub> × n<sub>w</sub> and the convolution kernel window shape is k<sub>h</sub> × k<sub>w</sub> , then the output shape will be (n<sub>h</sub> − k<sub>h</sub> + 1) × (n<sub>w</sub> − k<sub>w</sub> + 1). # Therefore, the output shape of the convolutional layer is determined by the shape of the input and the shape of the convolution kernel window. In several cases we might want to change the dimensionality of # the output: # # • Multiple layers of convolutions reduce the information available at the boundary, often by much # more than what we would want. If we start with a 240x240 pixel image, 10 layers of 5x5 convo- # lutions reduce the image to 200x200 pixels, effectively slicing off 30% of the image and with it # obliterating anything interesting on the boundaries. Padding mitigates this problem. # • In some cases we want to reduce the resolution drastically, e.g. halving it if we think that such # a high input dimensionality is not required. In this case we might want to subsample the output. # Strides address this. # # • In some cases we want to increase the resolution, e.g. for image superresolution or for audio # generation. Again, strides come to our rescue. # # • In some cases we want to increase the length gently to a given size (mostly for sentences of variable # length or for filling in patches). Padding addresses this. # ## Padding # As we saw so far, convolutions are quite useful. Alas, on the boundaries we encounter the problem # that we keep on losing pixels. 
For any given convolution it’s only a few pixels but this adds up as we # discussed above. If the image was larger things would be easier - we could simply record one that’s # larger. Unfortunately, that’s not what we get in reality. One solution to this problem is to add extra # pixels around the boundary of the image, thus increasing the effective size of the image (the extra pixels # typically assume the value 0). In the figure below we pad the 3×5 to increase to 5×7. The corresponding # output then increases to a 4×6 matrix. from IPython.display import Image Image(filename="img/padding.png") # Fig. 6.2: Two-dimensional cross-correlation with padding. The shaded portions are the input and kernel # array elements used by the first output element: 0×0 + 0×1 + 0×2 + 0×3 = 0. # # In general, if a total of p<sub>h</sub> rows are padded on both sides of the height and a total of p<sub>w</sub> columns are # padded on both sides of width, the output shape will be (n<sub>h</sub> − k<sub>h</sub> + p<sub>h</sub> + 1) × (n<sub>w</sub> − k<sub>w</sub> + p<sub>w</sub> + 1), # This means that the height and width of the output will increase by p<sub>h</sub> and p<sub>w</sub> respectively. # # In many cases, we will want to set p<sub>h</sub> = k<sub>h</sub> − 1 and p<sub>w</sub> = k<sub>w</sub> − 1 to give the input and output the same # height and width. This will make it easier to predict the output shape of each layer when constructing the # network. Assuming that k<sub>h</sub> is odd here, we will pad p<sub>h</sub> /2 rows on both sides of the height. If k<sub>h</sub> is even, # one possibility is to pad ⌈p<sub>h</sub> /2⌉ rows on the top of the input and ⌊p<sub>h</sub> /2⌋ rows on the bottom. We will pad # both sides of the width in the same way. # # Convolutional neural networks often use convolution kernels with odd height and width values, such as # 1, 3, 5, and 7, so the number of padding rows or columns on both sides are the same. 
For any two-
# dimensional array X, assume that the element in its ith row and jth column is X[i,j]. When the
# number of padding rows or columns on both sides are the same so that the input and output have the same
# height and width, we know that the output Y[i,j] is calculated by cross-correlation of the input and
# convolution kernel with the window centered on X[i,j].
#
# In the following example we create a two-dimensional convolutional layer with a height and width of 3,
# and then assume that the padding number on both sides of the input height and width is 1. Given an input
# with a height and width of 8, we find that the height and width of the output is also 8.

import torch
import torch.nn as nn


# We define a convenience function to calculate the convolutional layer. This
# function initializes the convolutional layer weights and performs
# corresponding dimensionality elevations and reductions on the input and
# output
def comp_conv2d(conv2d: nn.Conv2d, X: torch.Tensor) -> torch.Tensor:
    """Apply `conv2d` to the 2-D tensor `X` and return a 2-D result.

    `nn.Conv2d` expects a (batch, channel, height, width) input, so the
    2-D input is wrapped in singleton batch/channel dimensions before the
    forward pass, and those two dimensions are stripped from the output.
    """
    # (1,1) indicates that the batch size and the number of channels
    # (described in later chapters) are both 1
    X = X.reshape((1, 1) + X.shape)
    Y = conv2d(X)
    # Exclude the first two dimensions that do not interest us: batch and
    # channel
    return Y.reshape(Y.shape[2:])


# Note that here 1 row or column is padded on either side, so a total of 2
# rows or columns are added
conv2d = nn.Conv2d(in_channels=1,out_channels=1,kernel_size=3,padding=1)
X = torch.rand(size=(8, 8))
# Expected spatial shape: (8 - 3 + 2 + 1, 8 - 3 + 2 + 1) == (8, 8)
comp_conv2d(conv2d, X).shape

# When the height and width of the convolution kernel are different, we can make the output and input have
# the same height and width by setting different padding numbers for height and width.

# Here, we use a convolution kernel with a height of 5 and a width of 3.
The # padding numbers on both sides of the height and width are 2 and 1, # respectively conv2d = nn.Conv2d(in_channels=1,out_channels=1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape # ## Stride # When computing the cross-correlation the convolution window starts from the top-left of the input array, # and slides in the input array from left to right and top to bottom. We refer to the number of rows and # columns per slide as the stride. # # In the current example, the stride is 1, both in terms of height and width. We can also use a larger stride. # The figure below shows a two-dimensional cross-correlation operation with a stride of 3 vertically and # 2 horizontally. We can see that when the second element of the first column is output, the convolution # window slides down three rows. The convolution window slides two columns to the right when the second # element of the first row is output. When the convolution window slides two columns to the right on the # input, there is no output because the input element cannot fill the window (unless we add padding). Image(filename="img/stride.png") # Fig.6.3: Cross-correlation with strides of 3 and 2 for height and width respectively. The shaded portions # are the output element and the input and core array elements used in its computation: 0×0 + 0×1 + 1×2 + 2×3 = 8, 0×0 + 6×1 + 0×2 + 0×3 = 6. # # In general, when the stride for the height is s<sub>h</sub> and the stride for the width is s<sub>w</sub>,the output shape is ⌊(n<sub>h</sub> − k<sub>h</sub> + p<sub>h</sub> + s<sub>h</sub> ) / s<sub>h</sub> ⌋ × ⌊(n<sub>w</sub> − k<sub>w</sub> + p<sub>w</sub> + s<sub>w</sub> ) / s<sub>w</sub> ⌋. # # If we set p<sub>h</sub> = k<sub>h</sub> − 1 and p<sub>w</sub> = k<sub>w</sub> − 1, # then the output shape will be simplified to <br> ⌊(n<sub>h</sub> + s<sub>h</sub> − 1) / s<sub>h</sub> ⌋ × ⌊(n<sub>w</sub> + s<sub>w</sub> − 1) / s<sub>w</sub> ⌋. 
#
# Going a step further, if the input height and width are divisible by the strides on
# the height and width, then the output shape will be (n<sub>h</sub> / s<sub>h</sub> ) × (n<sub>w</sub> / s<sub>w</sub> ).
#
# Below, we set the strides on both the height and width to 2, thus halving the input height and width.

conv2d = nn.Conv2d(in_channels=1,out_channels=1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape

# Next, we will look at a slightly more complicated example.

conv2d = nn.Conv2d(in_channels=1,out_channels=1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape

# For the sake of brevity, when the padding number on both sides of the input height and width are p<sub>h</sub> and
# p<sub>w</sub> respectively, we call the padding (p<sub>h</sub> , p<sub>w</sub> ). Specifically, when p<sub>h</sub> = p<sub>w</sub> = p, the padding is p. When
# the strides on the height and width are s<sub>h</sub> and s<sub>w</sub> , respectively, we call the stride (s<sub>h</sub>,s<sub>w</sub>). Specifically,
# when s<sub>h</sub> = s<sub>w</sub> = s, the stride is s. By default, the padding is 0 and the stride is 1. In practice we rarely
# use inhomogeneous strides or padding, i.e. we usually have p<sub>h</sub> = p<sub>w</sub> and s<sub>h</sub> = s<sub>w</sub> .

# ## Summary
#
# • Padding can increase the height and width of the output. This is often used to give the output the same height and width as the input.
#
# • The stride can reduce the resolution of the output, for example reducing the height and width of the output to only 1/n of the height and width of the input (n is an integer greater than 1).
#
# • Padding and stride can be used to adjust the dimensionality of the data effectively.
#
# ## Exercises
# 1. For the last example in this section, use the shape calculation formula to calculate the output shape to see if it is consistent with the experimental results.
#
#
# 2.
Try other padding and stride combinations on the experiments in this section. # # # 3. For audio signals, what does a stride of 2 correspond to? # # # 4. What are the computational benefits of a stride larger than 1.
Ch08_Convolutional_Neural_Networks/Padding_and_Stride.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="exDsOocHOWui" colab_type="code" colab={}
import numpy as np
import struct as st

# + id="ZLGw7b4LOaWl" colab_type="code" colab={}
import matplotlib.pyplot as plt

# + id="1O10Ip8iPe5x" colab_type="code" colab={}
import time
import math

# Download the four MNIST archives (train/test images and labels).
# + id="c9WwVqo4Ocan" colab_type="code"
# !wget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz

# + id="gdFBjlKrOghU" colab_type="code"
# !wget http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz

# + id="DyIdseZoOi7u" colab_type="code"
# !wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz

# + id="LAAPrRCBOljY" colab_type="code"
# !wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz

# + id="IclAgrFUOngh" colab_type="code" colab={}
# !gzip -d /content/train-images-idx3-ubyte.gz

# + id="5lpS81rMOz4C" colab_type="code" colab={}
# !gzip -d /content/train-labels-idx1-ubyte.gz

# + id="GWm6_69ZO28h" colab_type="code" colab={}
# !gzip -d /content/t10k-labels-idx1-ubyte.gz

# + id="cM4lmx-xO53-" colab_type="code" colab={}
# !gzip -d /content/t10k-images-idx3-ubyte.gz

# + id="i_i-gjgbO8jE" colab_type="code"
trainingfilenames = {'images': '/content/train-images-idx3-ubyte',
                     'labels': '/content/train-labels-idx1-ubyte'}
testfilenames = {'images': '/content/t10k-images-idx3-ubyte',
                 'labels': '/content/t10k-labels-idx1-ubyte'}

# IDX magic byte 2 -> (numpy dtype name, struct format char, size in bytes).
data_types = {
    0x08: ('ubyte', 'B', 1),
    0x09: ('byte', 'b', 1),
    0x0B: ('>i2', 'h', 2),
    0x0C: ('>i4', 'i', 4),
    0x0D: ('>f4', 'f', 4),
    0x0E: ('>f8', 'd', 8)}


def load_idx_pair(images_path, labels_path, batch_size=10000, verbose=True):
    """Parse an MNIST-style IDX image file together with its label file.

    This replaces the two near-identical training/test parsing cells; it
    also removes the hidden dependency of the test cell on `dataFormat` /
    `dataSize` globals that leaked out of the training cell's loop.

    Parameters
    ----------
    images_path : str
        Path to an unpacked IDX3 image file (magic number, #images,
        #rows, #cols, then pixel data).
    labels_path : str
        Path to the matching unpacked IDX1 label file.
    batch_size : int
        Maximum number of images unpacked per chunk, bounding the size of
        the intermediate tuples produced by struct.unpack.
    verbose : bool
        Print header fields and per-chunk progress, as the original cells did.

    Returns
    -------
    (images, labels) : tuple of np.ndarray
        `images` has shape (n_img, n_rows, n_cols); `labels` (n_img, 1).

    Raises
    ------
    ValueError
        If the magic number does not describe a supported IDX file.
    """
    stime = time.time()
    # `with` guarantees both files are closed (the originals never were).
    with open(images_path, 'rb') as images_file, \
            open(labels_path, 'rb') as labels_file:
        magic = st.unpack('>4B', images_file.read(4))
        # A valid IDX magic number starts with two zero bytes; byte 2 selects
        # the element type and byte 3 the rank. (The previous check
        # `magic[0] and magic[1]` only rejected files where BOTH bytes were
        # non-zero.)
        if magic[0] != 0 or magic[1] != 0 or magic[2] not in data_types:
            raise ValueError("File Format not correct")
        n_dim = magic[3]
        data_type, data_format, data_size = data_types[magic[2]]
        if verbose:
            print("Data is " + str(n_dim) + "-D")
            print("Data Type :: ", data_type)
            print("Data Format :: ", data_format)
            print("Data Size :: " + str(data_size) + " byte\n")

        # Image header: three big-endian 32-bit unsigned ints after the magic.
        n_img = st.unpack('>I', images_file.read(4))[0]   # number of images
        n_rows = st.unpack('>I', images_file.read(4))[0]  # rows per image
        n_cols = st.unpack('>I', images_file.read(4))[0]  # columns per image
        # The label file repeats the item count (== n_img), so just skip its
        # 8-byte header.
        labels_file.seek(8)
        if verbose:
            print("no. of images :: ", n_img)
            print("no. of rows :: ", n_rows)
            print("no. of columns :: ", n_cols)

        labels = np.asarray(
            st.unpack('>' + data_format * n_img,
                      labels_file.read(n_img * data_size))).reshape((n_img, 1))

        # Unpack the pixel data in chunks of at most `batch_size` images and
        # stack once at the end: calling np.vstack inside the loop re-copied
        # the whole accumulated array on every iteration. min() also handles
        # a final partial chunk (the old code assumed n_img % nBatch == 0).
        chunks = []
        done = 0
        while done < n_img:
            batch = min(batch_size, n_img - done)
            n_values = batch * n_rows * n_cols
            chunks.append(np.asarray(
                st.unpack('>' + data_format * n_values,
                          images_file.read(n_values * data_size))
            ).reshape((batch, n_rows, n_cols)))
            done += batch
            if verbose:
                print("Time taken :: " + str(time.time() - stime) + " seconds\n")
                print(str((float(done) / n_img) * 100) + "% complete...\n")
    return np.vstack(chunks), labels


# ............................. For training dataset .............................
print("Training Dataset.......")
stime = time.time()
train_images_array, train_labels_array = load_idx_pair(
    trainingfilenames['images'], trainingfilenames['labels'])
print("Training Set Labels shape ::", train_labels_array.shape)
print("Training Set Image shape ::", train_images_array.shape)
print("Time of execution :: " + str(time.time() - stime) + " seconds\n")

# + id="tfBt9-QrPaKT" colab_type="code"
# ............................. For test dataset .............................
print("Test Dataset.......")
stime = time.time()
test_images_array, test_labels_array = load_idx_pair(
    testfilenames['images'], testfilenames['labels'])
print("Test Set Labels shape ::", test_labels_array.shape)
print("Test Set Image shape ::", test_images_array.shape)
print("Time of execution : %s seconds" % str(time.time() - stime))

# + id="SuDBSzGwVenC" colab_type="code"
# Sanity check: display the first training digit.
plt.imshow(train_images_array[0], cmap='gray')

# + id="_RnmW72NWJGS" colab_type="code" colab={}
mnist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Programming with Python # # ## Episode 2 - Repeating Actions with Loops # # Teaching: 30 min, # Exercises: 30 min # # # ### Objectives # - Explain what a for loop does. # - Correctly write for loops to repeat simple calculations. # - Trace changes to a loop variable as the loop runs. # - Trace changes to other variables as they are updated by a for loop. # ### How can I do the same operations on many different values? # # In the last episode, we wrote some code that plots some values of interest from our first inflammation dataset (`inflammation-01.csv`, and revealed some suspicious features in it. # # # We have a dozen data sets right now, though, and more on the way. We want to create plots for all of our data sets with a single statement. To do that, we'll have to teach the computer how to repeat things. # # An example simple task that we might want to repeat is printing each character in a word on a line of its own. For example the if the variable `word` contains the string `lead`, we would like to print: # # ``` # l # e # a # d # ``` # # In Python, a string is just an ordered collection of characters. In our example `l` `e` `a` `d`. Every character has a unique number associated with it – its index. This means that we can access characters in a string using their indices. For example, we can get the first character of the word `lead`, by using `word[0]`. One way to print each character is to use four print statements: # # ``` # word = 'lead' # print(word[0]) # print(word[1]) # print(word[2]) # print(word[3]) # ``` word = 'lead' print(word[0]) print(word[1]) print(word[2]) print(word[3]) # This is a bad approach for three reasons: # # - Not scalable. Imagine you need to print characters of a string that is hundreds of letters long. 
It might be easier just to type them in manually. # # - Difficult to maintain. If we want to decorate each printed character with an asterisk or any other character, we would have to change four lines of code. While this might not be a problem for short strings, it would definitely be a problem for longer ones. # # - Fragile. If we use it with a word that has more characters than what we initially envisioned, it will only display part of the word's characters. A shorter string, on the other hand, will cause an error because it will be trying to display part of the string that don't exist. # # ``` # word = 'tin' # print(word[0]) # print(word[1]) # print(word[2]) # print(word[3]) # ``` word = 'tin' print(word[0]) print(word[1]) print(word[2]) # Here's a better approach: # ``` # word = 'lead' # for char in word: # print(char) # ``` # + active="" # word = 'lead' # for char in word: # print (char) # - # This is shorter — certainly shorter than something that prints every character in a hundred-letter string — and more robust as well: # ``` # word = 'oxygen' # for char in word: # print(char) # ``` # # The improved version uses a `for` loop to repeat an operation — in this case, printing letters — once for each thing in a sequence. word = 'oxygen' for char in word: print(char) # The general form of a `for` loop is: # # ``` # for variable in collection: # # do things using variable, such as print # ``` # # In our example, `char` is the variable, `word` is the collection being looped through and `print(char)` is the thing we want to do. # # We can call the loop variable anything we like, but there must be a colon `:` at the end of the line starting the loop, and we must *indent* anything we want to run inside the loop. Unlike many other languages, there is no syntax to signify the end of the loop body (e.g. `endfor`) - a loop ends when you stop indenting. 
# # ``` # word = 'oxygen' # for char in word: # print(char) # print(char) # print(word) # ``` # word = 'oxygen' for char in word: print(char) print(char) print(word) # #### What's in a name? # In the example above, the loop variable was given the name `char` as a mnemonic; it is short for *character*. We can choose any name we want for variables. We might just as easily have chosen the name `banana` for the loop variable, as long as we use the same name when we use the variable inside the loop: # # word = 'oxygen' # for banana in word: # print(banana) # # It is a good idea to choose variable names that are meaningful, otherwise it would be more difficult to understand what the loop is doing. word = 'oxygen' for banana in word: print(banana) # Here's another loop that repeatedly updates a variable: # ``` # length = 0 # for vowel in 'aeiou': # length = length + 1 # print('There are', length, 'vowels') # ``` length = 0 for vowel in 'aeiou': length = length + 1 print('There are', length, 'vowels') # It's worth tracing the execution of this little program step by step. Since there are five characters in `'a'` `'e'` `'i'` `'o'` `'u'`, the statement on line 3 will be executed five times. # At the start of the loop, `length` is `0` (zero) (the value assigned to it on line 1) and `vowel` is `'a'`. # The statement *inside* the loop adds `1` to the old value of `length`, producing `1`, and assigns `length` the new value. The next time around, `vowel` is `'e'` and `length` is 1, so `length` is updated to be 2. After three more updates, 'length' is '5'; since there is nothing left in 'aeiou' for Python to process, the loop finishes and the `print` statement on line 4 tells us our final answer. # Note that a loop variable is just a variable that's being used to record progress in a loop. 
It still exists after the loop is over, and we can re-use variables previously defined as loop variables as well: # # ``` # letter = 'z' # for letter in 'abc': # print(letter) # print('after the loop, letter is', letter) # ``` letter = 'z' for letter in 'abc': print(letter) print('after the loop, letter is', letter) # Note also that finding the length of a string is such a common operation that Python actually has a built-in function to do it called `len`: # # ``` # print(len('aeiou')) # ``` # # `len` is much faster than any function we could write ourselves, and much easier to read than a two-line loop; it will also give us the length of many other things that we haven't met yet, so we should always use it when we can. print(len('aeiou')) print(len('abcdefg')) # #### From 1 to n # Python has a built-in function called `range` that generates a sequence of numbers. `range` can accept 1, 2, or 3 parameters: # # - if one parameter is given, `range` generates a sequence of that length, starting at zero and incrementing by 1. For example, `range(3)` produces the numbers 0, 1, 2. # - if two parameters are given, `range` starts at the first and ends just before the second, incrementing by one. For example, `range(2, 5)` produces 2, 3, 4. # - if 'range' is given 3 parameters, it starts at the first one, ends just before the second one, and increments by the third one. For example, 'range(3, 10, 2)' produces 3, 5, 7, 9. for i in range(3, 10, 2): print(i) # ## Exercises # #### Using ranges ... # # Using `range`, write a loop that uses range to print the first 3 natural numbers: # # ``` # 1 # 2 # 3 # ``` for i in range(1, 4): print(i) for i in range(1, 4): print(i, end = ' ') print() # #### Understanding loops # # Given the following loop: # ``` # word = 'oxygen' # for char in word: # print(char) # ``` # # How many times is the body of the loop executed? 
print('6 times')

# #### Computing Powers With Loops
#
# Exponentiation is built into Python:
# ```
# print(5 ** 3)
# ```
# produces 125.
#
# Write a loop that calculates the same result as `5 ** 3` using multiplication (and without exponentiation - i.e. 5 * 5 * 5).
#
# #### Reverse a String
# Knowing that two strings can be concatenated using the `+` operator:
# ```
# print('a' + 'b')
# ```
# write a loop that takes a string and produces a new string with the characters in reverse order, so 'Newton' becomes 'notweN'.

newstring = ''
oldstring = 'Newton'
for char in oldstring:
    newstring = char + newstring
    print(newstring)
print(newstring)

# #### Computing the Value of a Polynomial
#
# The built-in function `enumerate` takes a sequence (e.g. a list) and generates a new sequence of the same length. Each element of the new sequence is a pair composed of the index and the value from the original sequence:
# ```
# for i, j in enumerate([2.22, 4.44, 3.33]):
#     print('i =',i, 'j =', j)
# ```

for i, j in enumerate([2.22, 4.44, 3.33]):
    print('i =',i, 'j =', j)

# Suppose you have encoded a polynomial as a list of coefficients in the following way:
#
# The first element is the constant term (x^0), the second element is the coefficient of the linear term (x^1), the third is the coefficient of the quadratic term (x^2), etc.
#
# So to evaluate:
#
# ```
# y = 2 + 4x + 3x^2
# ```
# where x = 5, we could use the following code:
# ```
# x = 5
# coefficients = [2, 4, 3]
# y = coefficients[0] * x**0 + coefficients[1] * x**1 + coefficients[2] * x**2
# print(y)
# ```
#
# Try it - you should get the answer `97`.

x = 5
coefficients = [2, 4, 3]
y = coefficients[0] * x**0 + coefficients[1] * x**1 + coefficients[2] * x**2
print(y)

# Now, write a loop using `enumerate` which computes the value y of any polynomial, given x and any coefficients. Here's a starting template ...
y = 0 x = 5 coefficients = [2, 4, 3] for idx, coef in enumerate(coefficients): y = y + coef * x**idx print(y) print(y) # ## Key Points # # - Use `for variable in sequence` to process the elements of a sequence one at a time. # - The body of a `for` loop must be indented. # - Use `len(thing)` to determine the length of something that contains other values. # # # ### Save, and version control your changes # # - save your work: `File -> Save` # - add all your changes to your local repository: `Terminal -> git add .` # - commit your updates a new Git version: `Terminal -> git commit -m "End of Episode 2"` # - push your latest commits to GitHub: `Terminal -> git push`
lessons/python/ep2-loops.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Lu0faHHR0KfI" # ### Deep Kung-Fu with advantage actor-critic # # In this notebook you'll build a deep reinforcement learning agent for Atari [Kung-Fu Master](https://gym.openai.com/envs/KungFuMaster-v0/) and train it with Advantage Actor-Critic. # # Note that, strictly speaking, this will be neither [A3C](https://arxiv.org/abs/1602.01783) nor [A2C](https://openai.com/blog/baselines-acktr-a2c/), but rather a simplified version of the latter. # # Special thanks to <NAME> for making an [initial version](https://www.coursera.org/learn/practical-rl/discussions/all/threads/6iDjkbhPQoGg45G4T7KBHQ/replies/5eM_hA7vEeuoKgpcLmLqdw) of the PyTorch port of this assignment. # # ![https://upload.wikimedia.org/wikipedia/en/6/66/Kung_fu_master_mame.png](https://upload.wikimedia.org/wikipedia/en/6/66/Kung_fu_master_mame.png) # + id="RvNTg4NS0KfM" colab={"base_uri": "https://localhost:8080/"} outputId="b66b2655-fd50-455d-abdf-ab3e19da68a3" import sys, os if 'google.colab' in sys.modules: # %tensorflow_version 1.x if not os.path.exists('.setup_complete'): # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week5_policy_based/submit.py # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week5_policy_based/atari_util.py # !touch .setup_complete # If you are running on a server, launch xvfb to record game videos # Please make sure you have xvfb installed if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: # !bash ../xvfb start os.environ['DISPLAY'] = ':1' # + 
id="hTIYI4b-0KfN" import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from IPython.display import display # + [markdown] id="EGBx42ik0KfO" # For starters, let's take a look at the game itself: # # * Image resized to 42x42 and converted to grayscale to run faster # * Agent sees last 4 frames of game to account for object velocity # + id="pmJ5QnFD0KfO" colab={"base_uri": "https://localhost:8080/"} outputId="631f9843-1d15-43d9-b359-fa6aff0cf1bc" import gym from atari_util import PreprocessAtari def make_env(): env = gym.make("KungFuMasterDeterministic-v0") env = PreprocessAtari( env, height=42, width=42, crop=lambda img: img[60:-30, 5:], dim_order='pytorch', color=False, n_frames=4) return env env = make_env() obs_shape = env.observation_space.shape n_actions = env.action_space.n print("Observation shape:", obs_shape) print("Num actions:", n_actions) print("Action names:", env.env.env.get_action_meanings()) # + id="k5Xif5ZS0KfO" colab={"base_uri": "https://localhost:8080/", "height": 412} outputId="4547d65c-48e0-42bb-bd12-27da7a3ac7c5" s = env.reset() for _ in range(100): s, _, _, _ = env.step(env.action_space.sample()) plt.title('Game image') plt.imshow(env.render('rgb_array')) plt.show() plt.title('Agent observation (4-frame buffer)') plt.imshow(s.transpose([1, 0, 2]).reshape([42, -1]), cmap='gray') plt.show() # + [markdown] id="ecf6x4xS0KfP" # ### Build a network # # We now have to build an agent for actor-critic training — a convolutional neural network that converts states into action probabilities $\pi$ and state values $V$. # # Your assignment here is to build and apply a neural network. You can use any framework you want, but in this notebook we prepared for you a template in PyTorch. # # For starters, we want you to implement this architecture: # # ![https://s17.postimg.cc/orswlfzcv/nnet_arch.png](https://s17.postimg.cc/orswlfzcv/nnet_arch.png) # # Notes: # * This diagram was originally made for Tensorflow. 
In PyTorch, the input shape is `[batch_size, 4, 42, 42]`. # * Use convolution kernel size 3x3 throughout. # * After your agent gets mean reward above 5000, we encourage you to experiment with model architecture to score even better. # + id="f2atikAH0KfP" import torch import torch.nn as nn import torch.nn.functional as F # + id="LnFLQ3Yj0KfQ" colab={"base_uri": "https://localhost:8080/"} outputId="afb1459e-32d6-41ce-ec67-a00ce5e43492" device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') device # + id="E4UUTs4B0KfQ" def conv2d_size_out(size, kernel_size, stride): """ Helper function to compute the spatial dimensions of the output of a convolutional layer, copied from Week 4. Common use case: cur_layer_img_w = conv2d_size_out(cur_layer_img_w, kernel_size, stride) cur_layer_img_h = conv2d_size_out(cur_layer_img_h, kernel_size, stride) This can be used to understand the shape for dense layer's input. """ return (size - (kernel_size - 1) - 1) // stride + 1 # + id="DzgEKd1r0KfQ" class Agent(nn.Module): def __init__(self, input_dims, n_actions, lr): super(Agent, self).__init__() self.input_dims = input_dims # Initialize layers as shown in image above # <YOUR CODE> self.conv2d_relu_stack = nn.Sequential( nn.Conv2d(4, 32, (3,3), stride=2), nn.ReLU(), nn.Conv2d(32, 32, (3,3), stride=2), nn.ReLU(), nn.Conv2d(32, 32, (3,3), stride=2), nn.ReLU(), nn.Flatten(), ) self.Dense_State_Values = nn.Linear(in_features=512, out_features=1) self.Dense_Logits = nn.Linear(in_features=512, out_features=n_actions) self.optimizer = torch.optim.Adam(self.parameters(), lr=lr) self.device = device self.to(self.device) def forward(self, state): # Compute logits and values using network. # Note that if you do so naively, your state_values will have shape # ending in 1, since they come from a Linear(..., 1) layer. It is useful # to .squeeze(dim=-1) them, since this will help avoid shape conflicts # in the loss function part, after we add multiple environments. 
# If you don't do this here, don't forget to do that in the # loss function! # <YOUR CODE> forward = self.conv2d_relu_stack(state) logits = self.Dense_Logits(forward) state_values = self.Dense_State_Values(forward).squeeze(dim=-1) return logits, state_values def choose_action(self, observation): # PyTorch wants a batch dimension, so if we feed only a single observation we need to wrap it with an extra layer. # This line will allow the network to handle both single and multi-environment tests. if observation.ndim == 3: observation = [observation] observation = torch.tensor(observation, dtype=torch.float32, device=device) # Pass states into agent network and get back logits and states logits, _ = self.forward(observation) # <YOUR CODE> policy = F.softmax(logits, dim=-1) actions = np.array([np.random.choice(len(p), p=p) for p in policy.detach().cpu().numpy()]) return actions # + id="cOYpMh6W0KfR" colab={"base_uri": "https://localhost:8080/"} outputId="81d86125-3e78-4d09-dde6-d160999150d1" # Test network agent = Agent(input_dims=obs_shape, n_actions=n_actions, lr=1e-4) state = env.reset() state = torch.tensor([state], dtype=torch.float32, device=device) logits, state_values = agent(state) assert isinstance(logits, torch.Tensor) and len(logits.shape) == 2, \ "Please return a 2D Torch tensor of logits with shape (batch_size, n_actions). You returned %s" % repr(logits) assert isinstance(state_values, torch.Tensor) and len(state_values.shape) == 1, \ "Please return a 1D Torch tensor of state values with shape (batch_size,). You returned %s" % repr(state_values) # + [markdown] id="9E6hnFA00KfR" # ###Actor-Critic # # Here we define loss functions and learning algorithms as usual. 
# + id="cDAHIUjf0KfR" def compute_actor_critic_loss(agent, state, action, reward, next_state, done, gamma=0.99): # Infer batch_size from shape of state tensor: batch_size = state.shape[0] # Convert everything to a tensor, send to GPU if available state = torch.tensor(state, dtype=torch.float32, device=device) next_state = torch.tensor(next_state, dtype=torch.float32, device=device) reward = torch.tensor(reward, dtype=torch.float32, device=device) done = torch.tensor(done, dtype=torch.bool, device=device) # logits[n_envs, n_actions] and state_values[n_envs, n_actions] logits, state_value = agent(state) next_logits, next_state_value = agent(next_state) # Probabilities and log-probabilities for all actions probs = F.softmax(logits, dim=-1) #[n_envs, n_actions] logprobs = F.log_softmax(logits, dim=-1) #[n_envs, n_actions] # Set new state values with done == 1 to be 0.0 (no future rewards if done!) next_state_value[done] = 0.0 # Compute target state values using temporal difference formula. # Use reward, gamma, and next_state_value. target_state_value = reward + gamma * next_state_value # <YOUR CODE> # Compute advantage using reward, gamma, state_value, and next_state_value. advantage = reward + gamma * next_state_value - state_value # <YOUR CODE> # We need to slice out only the actions we took for actor loss -- we can use # the actions themselves as indexes, but we also need indexes on the batch dim batch_idx = np.arange(batch_size) logp_actions = logprobs[batch_idx, action] # Compute policy entropy given logits_seq. Mind the "-" sign! entropy = - torch.sum(logprobs * probs, axis=1) # <YOUR CODE> actor_loss = -(logp_actions * advantage.detach()).mean() - 0.001 * entropy.mean() critic_loss = F.mse_loss(target_state_value.detach(), state_value) total_loss = actor_loss + critic_loss # Never forget to zero grads in PyTorch! 
    agent.optimizer.zero_grad()
    total_loss.backward()
    agent.optimizer.step()

    return actor_loss.cpu().detach().numpy(), critic_loss.cpu().detach().numpy(), entropy.cpu().detach().numpy()

# + id="v7KXdb3J0KfR" colab={"base_uri": "https://localhost:8080/"} outputId="dfa3ab84-00ec-4217-9ecb-74fd533e4c92"
state = env.reset()
state = torch.tensor([state], dtype=torch.float32).to(device)
logits, value = agent(state)
print("action logits:\n", logits)
print("state values:\n", value)

# + [markdown] id="3A7sUUSr0KfS"
# ### Let's play!
# Let's build a function that measures the agent's average reward.

# + id="1ZNbFwdE0KfS"
def evaluate(agent, env, n_games=1):
    """Plays `n_games` full games from start till done, returns per-game total rewards."""
    game_rewards = []
    for _ in range(n_games):
        state = env.reset()

        total_reward = 0
        while True:
            action = agent.choose_action(state)
            state, reward, done, info = env.step(action)
            total_reward += reward
            if done:
                break

        game_rewards.append(total_reward)
    return game_rewards

# + id="2TDZlWSs0KfS" colab={"base_uri": "https://localhost:8080/"} outputId="ede4d0a8-31cc-447c-c0df-e8b8b67354ed"
import gym.wrappers

# Monitor records each evaluation episode as an .mp4 under ./videos
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
    rewards = evaluate(agent, env_monitor, n_games=3)

print(rewards)

# + id="NdWoQ-k-0KfS" colab={"resources": {"http://localhost:8080/videos/openaigym.video.0.2309.video000001.mp4": {"data":
"CjwhRE9DVFlQRSBodG1sPgo8aHRtbCBsYW5nPWVuPgogIDxtZXRhIGNoYXJzZXQ9dXRmLTg+CiAgPG1ldGEgbmFtZT12aWV3cG9ydCBjb250ZW50PSJpbml0aWFsLXNjYWxlPTEsIG1pbmltdW0tc2NhbGU9MSwgd2lkdGg9ZGV2aWNlLXdpZHRoIj4KICA8dGl0bGU+RXJyb3IgNDA0IChOb3QgRm91bmQpISExPC90aXRsZT4KICA8c3R5bGU+CiAgICAqe21hcmdpbjowO3BhZGRpbmc6MH1odG1sLGNvZGV7Zm9udDoxNXB4LzIycHggYXJpYWwsc2Fucy1zZXJpZn1odG1se2JhY2tncm91bmQ6I2ZmZjtjb2xvcjojMjIyO3BhZGRpbmc6MTVweH1ib2R5e21hcmdpbjo3JSBhdXRvIDA7bWF4LXdpZHRoOjM5MHB4O21pbi1oZWlnaHQ6MTgwcHg7cGFkZGluZzozMHB4IDAgMTVweH0qID4gYm9keXtiYWNrZ3JvdW5kOnVybCgvL3d3dy5nb29nbGUuY29tL2ltYWdlcy9lcnJvcnMvcm9ib3QucG5nKSAxMDAlIDVweCBuby1yZXBlYXQ7cGFkZGluZy1yaWdodDoyMDVweH1we21hcmdpbjoxMXB4IDAgMjJweDtvdmVyZmxvdzpoaWRkZW59aW5ze2NvbG9yOiM3Nzc7dGV4dC1kZWNvcmF0aW9uOm5vbmV9YSBpbWd7Ym9yZGVyOjB9QG1lZGlhIHNjcmVlbiBhbmQgKG1heC13aWR0aDo3NzJweCl7Ym9keXtiYWNrZ3JvdW5kOm5vbmU7bWFyZ2luLXRvcDowO21heC13aWR0aDpub25lO3BhZGRpbmctcmlnaHQ6MH19I2xvZ297YmFja2dyb3VuZDp1cmwoLy93d3cuZ29vZ2xlLmNvbS9pbWFnZXMvbG9nb3MvZXJyb3JwYWdlL2Vycm9yX2xvZ28tMTUweDU0LnBuZykgbm8tcmVwZWF0O21hcmdpbi1sZWZ0Oi01cHh9QG1lZGlhIG9ubHkgc2NyZWVuIGFuZCAobWluLXJlc29sdXRpb246MTkyZHBpKXsjbG9nb3tiYWNrZ3JvdW5kOnVybCgvL3d3dy5nb29nbGUuY29tL2ltYWdlcy9sb2dvcy9lcnJvcnBhZ2UvZXJyb3JfbG9nby0xNTB4NTQtMngucG5nKSBuby1yZXBlYXQgMCUgMCUvMTAwJSAxMDAlOy1tb3otYm9yZGVyLWltYWdlOnVybCgvL3d3dy5nb29nbGUuY29tL2ltYWdlcy9sb2dvcy9lcnJvcnBhZ2UvZXJyb3JfbG9nby0xNTB4NTQtMngucG5nKSAwfX1AbWVkaWEgb25seSBzY3JlZW4gYW5kICgtd2Via2l0LW1pbi1kZXZpY2UtcGl4ZWwtcmF0aW86Mil7I2xvZ297YmFja2dyb3VuZDp1cmwoLy93d3cuZ29vZ2xlLmNvbS9pbWFnZXMvbG9nb3MvZXJyb3JwYWdlL2Vycm9yX2xvZ28tMTUweDU0LTJ4LnBuZykgbm8tcmVwZWF0Oy13ZWJraXQtYmFja2dyb3VuZC1zaXplOjEwMCUgMTAwJX19I2xvZ297ZGlzcGxheTppbmxpbmUtYmxvY2s7aGVpZ2h0OjU0cHg7d2lkdGg6MTUwcHh9CiAgPC9zdHlsZT4KICA8YSBocmVmPS8vd3d3Lmdvb2dsZS5jb20vPjxzcGFuIGlkPWxvZ28gYXJpYS1sYWJlbD1Hb29nbGU+PC9zcGFuPjwvYT4KICA8cD48Yj40MDQuPC9iPiA8aW5zPlRoYXTigJlzIGFuIGVycm9yLjwvaW5zPgogIDxwPiAgPGlucz5UaGF04oCZcyBhbGwgd2Uga25vdy48L2lucz4K", "ok": false, "headers": [["content-length", "1449"], 
["content-type", "text/html; charset=utf-8"]], "status": 404, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 501} outputId="2a0176d0-0089-4d9f-8af3-52d2d0faea51" # Show video. This may not work in some setups. If it doesn't # work for you, you can download the videos and view them locally. from pathlib import Path from IPython.display import HTML video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4']) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format(video_names[-1])) # You can also try other indices # + [markdown] id="rsMj1Bop0KfS" # ### Training on parallel games # # ![img](https://s7.postimg.cc/4y36s2b2z/env_pool.png) # # # To make actor-critic training more stable, we shall play several games in parallel. This means ya'll have to initialize several parallel gym envs, send agent's actions there and .reset() each env if it becomes terminated. To minimize learner brain damage, we've taken care of them for ya - just make sure you read it before you use it. 
# + id="Clo5P2uG0KfS" class EnvBatch: def __init__(self, n_envs = 10): """ Creates n_envs environments and babysits them for ya' """ self.envs = [make_env() for _ in range(n_envs)] def reset(self): """ Reset all games and return [n_envs, *obs_shape] observations """ return np.array([env.reset() for env in self.envs]) def step(self, actions): """ Send a vector[batch_size] of actions into respective environments :returns: observations[n_envs, *obs_shape], rewards[n_envs], done[n_envs,], info[n_envs] """ results = [env.step(a) for env, a in zip(self.envs, actions)] new_obs, rewards, done, infos = map(np.array, zip(*results)) # reset environments automatically for i in range(len(self.envs)): if done[i]: new_obs[i] = self.envs[i].reset() return new_obs, rewards, done, infos # + [markdown] id="8ehCUlRz0KfT" # __Let's try it out:__ # + id="fLCPQcfg0KfT" colab={"base_uri": "https://localhost:8080/"} outputId="8e8a1a6e-acd6-4fe3-8eb2-fb41e32ada7a" env_batch = EnvBatch(10) batch_states = env_batch.reset() batch_actions = agent.choose_action(batch_states) batch_next_states, batch_rewards, batch_done, _ = env_batch.step(batch_actions) print("State shape:", batch_states.shape) print("Actions:", batch_actions) print("Rewards:", batch_rewards) print("Done:", batch_done) # + [markdown] id="E2X0rCok0KfT" # # Sanity Check # + id="IHroOPSR0KfT" colab={"base_uri": "https://localhost:8080/"} outputId="8593ca05-b986-4482-f3d9-595548f0dcff" agent = Agent(lr=1e-4, n_actions=n_actions, input_dims=obs_shape) state = env_batch.reset() action = agent.choose_action(state) next_state, reward, done, info = env_batch.step(action) l_act, l_crit, ent = compute_actor_critic_loss(agent, state, action, reward, next_state, done) assert abs(l_act) < 100 and abs(l_crit) < 100, "losses seem abnormally large" assert 0 <= ent.mean() <= np.log(n_actions), "impossible entropy value, double-check the formula pls" if ent.mean() < np.log(n_actions) / 2: print("Entropy is too low for an untrained agent") 
print("You just might be fine!") # + [markdown] id="WZasbrVJ0KfT" # # Train # # Just the usual - play a bit, compute loss, follow the graidents, repeat a few million times. # # ![<NAME> training the Karate Kid](https://media.giphy.com/media/W4uQMqlKVoiXK89T5j/giphy.gif) # + id="hH66b76R0KfT" import pandas as pd def ewma(x, span=100): return pd.DataFrame({'x':np.asarray(x)}).x.ewm(span=span).mean().values env_batch = EnvBatch(10) batch_states = env_batch.reset() rewards_history = [] entropy_history = [] # + [markdown] id="jCFBENus0KfT" # Please pay extra attention to how we scale rewards in training. We do that for multiple reasons. # # 1. All rewards are multiples of 100, and even an untrained agent can get a score of 800. Therefore, even in the beginning of training, the critic will have to predict pretty large numbers. Neural networks require extra tinkering to output large numbers reliably. In this case, the easiest workaround is just to scale back those numbers. # 2. We have already tweaked the hyperparameters (loss coefficients) to work well with this scaling. # # Please note however that we would not have needed to do this in plain REINFORCE without entropy regularization but with Adam optimizer. # # In REINFORCE, there is only actor and no critic. Without entropy regularization, actor loss is just policy gradient. It is proportional to rewards, but it only affects the scale of the gradient. However, Adam maintains a running average of the variance of the gradient for each parameter it optimizes, and normalizes the gradient by its variance in each optimization step. This will negate any scaling of the gradient. # # If your implementation works correctly, you can comment out the `batch_rewards = batch_rewards * 0.01` line, restart training, and see it explode. 
# + id="Q0GyR1Ap0KfT" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="ed102deb-74e7-4a26-840b-d0ac8eb4f6a5" import tqdm from IPython.display import clear_output with tqdm.trange(len(entropy_history), 100000) as progress_bar: for i in progress_bar: batch_actions = agent.choose_action(batch_states) batch_next_states, batch_rewards, batch_done, _ = env_batch.step(batch_actions) # Reward scaling. See above for explanation. batch_rewards = batch_rewards * 0.01 agent_loss, critic_loss, entropy = compute_actor_critic_loss( agent, batch_states, batch_actions, batch_rewards, batch_next_states, batch_done) entropy_history.append(np.mean(entropy)) batch_states = batch_next_states if i % 500 == 0: if i % 2500 == 0: rewards_history.append(np.mean(evaluate(agent, env, n_games=3))) if rewards_history[-1] >= 5000: print("Your agent has earned the yellow belt") clear_output(True) plt.figure(figsize=[8, 4]) plt.subplot(1, 2, 1) plt.plot(rewards_history, label='rewards') plt.plot(ewma(np.array(rewards_history), span=10), marker='.', label='rewards ewma@10') plt.title("Session rewards") plt.grid() plt.legend() plt.subplot(1, 2, 2) plt.plot(entropy_history, label='entropy') plt.plot(ewma(np.array(entropy_history), span=1000), marker='.', label='entropy ewma@1000') plt.title("Policy entropy") plt.grid() plt.legend() plt.show() # + [markdown] id="ro0a95-Z0KfU" # Relax and grab some refreshments while your agent is locked in an infinite loop of violence and death. # # __How to interpret plots:__ # # The session reward is the easy thing: it should in general go up over time, but it's okay if it fluctuates ~~like crazy~~. It's also OK if it doesn't increase substantially before some 10-20k initial steps, and some people who tried this assignment [told us](https://www.coursera.org/learn/practical-rl/discussions/all/threads/3OnFNVxEEemLZA644RFX2A) they didn't see improvements until around 60k steps. 
However, if reward reaches zero and doesn't seem to get up over 2-3 evaluations, there's something wrong happening. # # Since we use a policy-based method, we also keep track of __policy entropy__ — the same one you used as a regularizer. The only important thing about it is that your entropy shouldn't drop too low (`< 0.1`) before your agent gets the yellow belt. Or at least it can drop there, but _it shouldn't stay there for long_. # # If it does, the culprit is likely: # * Some bug in entropy computation. Remember that it is $-\sum p(a_i) \cdot \log p(a_i)$. # * Your model architecture is broken in some way: for example, if you create layers in `Agent.symbolic_step()` rather than in `Agent.__init__()`, then effectively you will be training two separate agents: one for `logits, state_values` and another one for `next_logits, next_state_values`. # * Your architecture is different from the one we suggest and it converges too quickly. Change your architecture or increase entropy coefficient in actor loss. # * Gradient explosion: just [clip gradients](https://stackoverflow.com/a/43486487) and maybe use a smaller network # * Us. Or TF developers. Or aliens. Or lizardfolk. Contact us on forums before it's too late! # # If you're debugging, just run `logits, values = agent.step(batch_states)` and manually look into logits and values. This will reveal the problem 9 times out of 10: you'll likely see some NaNs or insanely large numbers or zeros. Try to catch the moment when this happens for the first time and investigate from there. 
# + [markdown] id="w5VGAIHs0KfU" # ### "Final" evaluation # + id="hygMl5NI0KfU" colab={"base_uri": "https://localhost:8080/"} outputId="9a54e497-36e4-4910-8a3f-b8953c207400" import gym.wrappers with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor: final_rewards = evaluate(agent, env_monitor, n_games=3) print("Final mean reward:", np.mean(final_rewards)) # + id="R9LfxiL50KfU" colab={"resources": {"http://localhost:8080/videos/openaigym.video.1.2309.video000001.mp4": {"data": "<KEY>", "ok": false, "headers": [["content-length", "1449"], ["content-type", "text/html; charset=utf-8"]], "status": 404, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 501} outputId="ec352deb-31c6-49b3-c90e-d69ee582795f" # Show video. This may not work in some setups. If it doesn't # work for you, you can download the videos and view them locally. from pathlib import Path from IPython.display import HTML video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4']) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format(video_names[-1])) # + id="kM4j8-PC0KfU" colab={"resources": {"http://localhost:8080/videos/openaigym.video.1.2309.video000000.mp4": {"data": 
"<KEY>QRSBodG1sPgo8aHRtbCBsYW5nPWVuPgogIDxtZXRhIGNoYXJzZXQ9dXRmLTg+CiAgPG1ldGEgbmFtZT12aWV3cG9ydCBjb250ZW50PSJpbml0aWFsLXNjYWxlPTEsIG1pbmltdW0tc2NhbGU9MSwgd2lkdGg9ZGV2aWNlLXdpZHRoIj4KICA8dGl0bGU+RXJyb3IgNDA0IChOb3QgRm91bmQpISExPC90aXRsZT4KICA8c3R5bGU+CiAgICAqe21hcmdpbjowO3BhZGRpbmc6MH1odG1sLGNvZGV7Zm9udDoxNXB4LzIycHggYXJpYWwsc2Fucy1zZXJpZn1odG1se2JhY2tncm91bmQ6I2ZmZjtjb2xvcjojMjIyO3BhZGRpbmc6MTVweH1ib2R5e21hcmdpbjo3JSBhdXRvIDA7bWF4LXdpZHRoOjM5MHB4O21pbi1oZWlnaHQ6MTgwcHg7cGFkZGluZzozMHB4IDAgMTVweH0qID4gYm9keXtiYWNrZ3JvdW5kOnVybCgvL3d3dy5nb29nbGUuY29tL2ltYWdlcy9lcnJvcnMvcm9ib3QucG5nKSAxMDAlIDVweCBuby1yZXBlYXQ7cGFkZGluZy1yaWdodDoyMDVweH1we21hcmdpbjoxMXB4IDAgMjJweDtvdmVyZmxvdzpoaWRkZW59aW5ze2NvbG9yOiM3Nzc7dGV4dC1kZWNvcmF0aW9uOm5vbmV9YSBpbWd7Ym9yZGVyOjB9QG1lZGlhIHNjcmVlbiBhbmQgKG1heC13aWR0aDo3NzJweCl7Ym9keXtiYWNrZ3JvdW5kOm5vbmU7bWFyZ2luLXRvcDowO21heC13aWR0aDpub25lO3BhZGRpbmctcmlnaHQ6MH19I2xvZ297YmFja2dyb3VuZDp1cmwoLy93d3cuZ29vZ2xlLmNvbS9pbWFnZXMvbG9nb3MvZXJyb3JwYWdlL2Vycm9yX2xvZ28tMTUweDU0LnBuZykgbm8tcmVwZWF0O21hcmdpbi1sZWZ0Oi01cHh9QG1lZGlhIG9ubHkgc2NyZWVuIGFuZCAobWluLXJlc29sdXRpb246MTkyZHBpKXsjbG9nb3tiYWNrZ3JvdW5kOnVybCgvL3d3dy5nb29nbGUuY29tL2ltYWdlcy9sb2dvcy9lcnJvcnBhZ2UvZXJyb3JfbG9nby0xNTB4NTQtMngucG5nKSBuby1yZXBlYXQgMCUgMCUvMTAwJSAxMDAlOy1tb3otYm9yZGVyLWltYWdlOnVybCgvL3d3dy5nb29nbGUuY29tL2ltYWdlcy9sb2dvcy9lcnJvcnBhZ2UvZXJyb3JfbG9nby0xNTB4NTQtMngucG5nKSAwfX1AbWVkaWEgb25seSBzY3JlZW4gYW5kICgtd2Via2l0LW1pbi1kZXZpY2UtcGl4ZWwtcmF0aW86Mil7I2xvZ297YmFja2dyb3VuZDp1cmwoLy93d3cuZ29vZ2xlLmNvbS9pbWFnZXMvbG9nb3MvZXJyb3JwYWdlL2Vycm9yX2xvZ28tMTUweDU0LTJ4LnBuZykgbm8tcmVwZWF0Oy13ZWJraXQtYmFja2dyb3VuZC1zaXplOjEwMCUgMTAwJX19I2xvZ297ZGlzcGxheTppbmxpbmUtYmxvY2s7aGVpZ2h0OjU0cHg7d2lkdGg6MTUwcHh9CiAgPC9zdHlsZT4KICA8YSBocmVmPS8vd3d3Lmdvb2dsZS5jb20vPjxzcGFuIGlkPWxvZ28gYXJpYS1sYWJlbD1Hb29nbGU+PC9zcGFuPjwvYT4KICA8cD48Yj40MDQuPC9iPiA8aW5zPlRoYXTigJlzIGFuIGVycm9yLjwvaW5zPgogIDxwPiAgPGlucz5UaGF04oCZcyBhbGwgd2Uga25vdy48L2lucz4K", "ok": false, "headers": [["content-length", "1449"], ["content-type", 
"text/html; charset=utf-8"]], "status": 404, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 501} outputId="d866f6cc-bff1-4433-f61b-3a412cf82531" HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format(video_names[-2])) # You can also try other indices # + [markdown] id="cvTOpUpe0KfU" # If you don't see videos above, just navigate to `./videos` and download `.mp4` files from there. # + id="6j7jp_cp0KfU" colab={"base_uri": "https://localhost:8080/"} outputId="b63cf9a4-7011-4284-bbd2-1e3fbdfd1063" from submit import submit_kungfu env = make_env() submit_kungfu(agent, env, evaluate, '<EMAIL>', 'fkJIOqirZ2R3YlF6') # + [markdown] id="2BNKtYXf0KfU" # ### Now what? # Well, 5k reward is [just the beginning](https://www.buzzfeed.com/mattjayyoung/what-the-color-of-your-karate-belt-actually-means-lg3g). Can you get past 200? With recurrent neural network memory, chances are you can even beat 400! # # * Try n-step advantage and "lambda"-advantage (aka GAE) - see [this article](https://arxiv.org/abs/1506.02438) # * This change should improve early convergence a lot # * Try recurrent neural network # * RNN memory will slow things down initially, but in will reach better final reward at this game # * Implement asynchronuous version # * Remember [A3C](https://arxiv.org/abs/1602.01783)? The first "A" stands for asynchronuous. It means there are several parallel actor-learners out there. 
# * You can write custom code for synchronization, but we recommend using [redis](https://redis.io/) # * You can store full parameter set in redis, along with any other metadate # * Here's a _quick_ way to (de)serialize parameters for redis # ``` # import joblib # from six import BytesIO # ``` # ``` # def dumps(data): # "converts whatever to string" # s = BytesIO() # joblib.dump(data,s) # return s.getvalue() # ``` # ``` # def loads(string): # "converts string to whatever was dumps'ed in it" # return joblib.load(BytesIO(string)) # ```
week_5/practice_actorcritic_pytorch_gpu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # **Bioinformatics with Jupyter Notebooks for WormBase:**
# ## **Utilities 1 - Genome Data Viewer**

# Welcome to the thirteenth jupyter notebook in the WormBase tutorial series. Over this series of tutorials, we will write code in Python that allows us to retrieve and perform simple analyses with data available on the WormBase sites.
#
# This tutorial will deal with obtaining the right URLs to easily view data on the NCBI Genome Data Viewer. Let's get started!

# We start by importing the required python libraries.

import webbrowser
from IPython.display import HTML

# The NCBI Genome Data Viewer has a neat trick for viewing data in which we can manipulate the URL according to our needs and thereby the webpage will lead to the results of our query.
#
# We will perform different queries in this tutorial to create URLs that can be easily modified to fit any of your needs. This can also be extended to species other than C. elegans.

# ID - Identifier (typically accession) from NCBI resource with associated tracks to be displayed in GDV
id_value = 'GCF_000002985.6'
url = 'https://www.ncbi.nlm.nih.gov/genome/gdv/?id=' + id_value
webbrowser.open_new_tab(url)

# context - NCBI resource context that defines default tracks displayed
context_value = 'genome'
id_value = 'GCF_000002985.6' #RefSeq genome for C. elegans!!
url = 'https://www.ncbi.nlm.nih.gov/genome/gdv/?id='+id_value+'&context='+context_value
webbrowser.open_new_tab(url)

# chr - chromosome number or alternate locus (can be changed based on requirement and organism)
chr_value = 'II'
id_value = 'GCF_000002985.6'
url = 'https://www.ncbi.nlm.nih.gov/genome/gdv/?id='+id_value+'&chr='+chr_value
webbrowser.open_new_tab(url)

# from/to - range start, end (both 1-based)
from_value = '100'
to_value = '1000'
chr_value = 'II'
id_value = 'GCF_000002985.6'
url = 'https://www.ncbi.nlm.nih.gov/genome/gdv/?id='+id_value+'&chr='+chr_value+'&from='+from_value+'&to='+to_value
webbrowser.open_new_tab(url)

# q - search term (You can search for genes, dbSNP IDs, Sequence IDs, chromosome ranges, etc.)
#keyword = 'daf-16'
keyword = 'F52D10.5'
id_value = 'GCF_000002985.6'
url = 'https://www.ncbi.nlm.nih.gov/genome/gdv/?id='+id_value+'&q='+keyword
webbrowser.open_new_tab(url)

# mk - list of marker spec - comma separated list of position|name|color to customise the display on the GDV
position_value = 'chr2:100-1000'
name_value = 'myMarker'
color_value = 'red'
id_value = 'GCF_000002985.6'
url = 'https://www.ncbi.nlm.nih.gov/genome/gdv/?id='+id_value+'&mk='+position_value+'|'+name_value+'|'+color_value
webbrowser.open_new_tab(url)

# This is the end of the short tutorial on manipulating URLs to use NCBI GDV easily!
#
# In the next tutorial, we will look at WormCat, another utility which can be used with WormBase data!
#
# Acknowledgements:
# - NCBI Genome Data Viewer (https://www.ncbi.nlm.nih.gov/genome/gdv/?org=caenorhabditis-elegans)
Tutorial-13-utilities-genome-data-viewer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: web_scrape_fin
#     language: python
#     name: web_scrape_fin
# ---

# # Burning forests
# * topics: Ecology, forest fires and arson, sets, Venn diagrams, intersection of sets, pandas module, quantiles.
#
# Hi, welcome to the solution of the forest fires exercise. We are going to use set operations and venn diagrams to look into possible forest fires caused by arsonists.
#
# Python wise, we are going to use the pandas module, which is a powerful data science tool for working with data organized in tables.
#
#
# ## Task
# * Imagine you are the UN ambassador for climate change.
# * It is a pretty hard position since you cannot really do anything about climate change.
# * Which is also claimed by scientists to be responsible for the more extreme weather patterns.
# * One of your goals is to find low hanging fruits in combating climate change.
# * One immediate problem is the increase of intense fires all over the world.
# * So you got an idea. Some part of the fires must be due to arson.
# * Instead of combating climate change directly, you might be able to identify countries which are 'soft' on arson related crimes.
# * And countries love to combat crime.
# * If you then convince them to do legal adjustments against arson (which would be cheap), you could save lots of CO2 emissions, lives and money.
#
# ## Questions
# * based on the presented tabular data for most countries of the world, can you identify those potentially soft on crime?

# ---
# ---
#
# # In case this is your first Jupyter Notebook:
# * Every cell is executed with `Shift-Enter`, once your cursor is in it.
# * After a successful run, a serial number of the execution will appear to the left of the cell
# * For cell execution and creation of a new cell below, use `Alt-Enter`.
# * Any text after a # symbol is a comment (to annotate your code) and it is ignored by Python # * Caution: If you execute a cell which has a hint in the output, the hint will disappear, therefore it is better to use `Alt-Enter`. # --- # --- # + # imports of modules we need import pandas as pd # probably the most used module for data science working with lots of data organized in tables import wbgapi as wb # module which allows to download and import data from the World Bank using its API import matplotlib.pyplot as plt # Standard plotting module from matplotlib_venn import venn2, venn3 # specific module for plotting venn diagrams plt.rcParams['figure.facecolor'] = 'white' # - # ## Sidenotes: # * In these types of tasks, collecting correct and reliable data is a time and effort demanding work. # * We can never be sure that our data is 100% correct. # * Besides, some data are missing (which we do not tackle here properly), how important do you think it is for our world to flourish to have all the data for all the countries available? # * Such considerations are extremely important, especially with a responsibility and weight of the position of UN ambassador. # --- # # Loading precompiled data table df = pd.read_csv('data_fires.csv') # pandas can read from many datastructures df.set_index('iso', inplace=True) type(df) # To see how our table looks like, we can print first few lines with `head()` method (btw. `tail()` method also exists) # print top of table using df.head() # how many countries do we have data on. len(df) # # Notes on the data in your table. # * Scientifically analysed data from MODIS observations serve as resource for: # 1. MODIS stands for Moderate Resolution Imaging Spectroradiometer (on-board on NASA's Terra and Aqua satellites) # 2. 
Available for example from Global Forest Watch ([GFW](https://www.globalforestwatch.org/dashboards/global/?burnedAreaCumulative=eyJzdGFydEluZGV4IjowLCJlbmRJbmRleCI6NTEsInN0YXJ0RGF0ZUFic29sdXRlIjoiMjAyMS0wMS0wNCIsImVuZERhdGVBYnNvbHV0ZSI6IjIwMjEtMTItMjcifQ%3D%3D&burnedAreaRanked=eyJoaWdobGlnaHRlZCI6ZmFsc2V9&category=fires&dashboardPrompts=eyJzaG93UHJvbXB0cyI6dHJ1ZSwicHJvbXB0c1ZpZXdlZCI6WyJ2aWV3TmF0aW9uYWxEYXNoYm9hcmRzIiwiZG93bmxvYWREYXNoYm9hcmRTdGF0cyIsInNoYXJlV2lkZ2V0Iiwid2lkZ2V0U2V0dGluZ3MiXSwic2V0dGluZ3MiOnsic2hvd1Byb21wdHMiOnRydWUsInByb21wdHNWaWV3ZWQiOlsidmlld05hdGlvbmFsRGFzaGJvYXJkcyIsImRvd25sb2FkRGFzaGJvYXJkU3RhdHMiLCJzaGFyZVdpZGdldCJdLCJzZXR0aW5ncyI6eyJzaG93UHJvbXB0cyI6dHJ1ZSwicHJvbXB0c1ZpZXdlZCI6WyJ2aWV3TmF0aW9uYWxEYXNoYm9hcmRzIiwiZG93bmxvYWREYXNoYm9hcmRTdGF0cyIsInNoYXJlV2lkZ2V0Il0sInNldHRpbmdzIjp7InNob3dQcm9tcHRzIjp0cnVlLCJwcm9tcHRzVmlld2VkIjpbInZpZXdOYXRpb25hbERhc2hib2FyZHMiLCJkb3dubG9hZERhc2hib2FyZFN0YXRzIl0sInNldHRpbmdzIjp7Im9wZW4iOmZhbHNlLCJzdGVwSW5kZXgiOjAsInN0ZXBzS2V5IjoiIn0sIm9wZW4iOnRydWUsInN0ZXBJbmRleCI6MCwic3RlcHNLZXkiOiJzaGFyZVdpZGdldCJ9LCJzdGVwc0tleSI6InNoYXJlV2lkZ2V0Iiwic3RlcEluZGV4IjowLCJvcGVuIjpmYWxzZSwiZm9yY2UiOnRydWV9LCJvcGVuIjp0cnVlLCJzdGVwSW5kZXgiOjAsInN0ZXBzS2V5Ijoid2lkZ2V0U2V0dGluZ3MifSwic3RlcHNLZXkiOiJ3aWRnZXRTZXR0aW5ncyIsImZvcmNlIjp0cnVlfQ%3D%3D&fireAlertStats=eyJjb21wYXJlWWVhciI6MjAxM30%3D&gfwfires=true&location=WyJnbG9iYWwiXQ%3D%3D&map=<KEY>)) # 3. Column `forest_area_ha` for every country, just a single number (not changing over time) for simplicity (in hectares). # 4. Column `burned_area_ha` of forests burned between years 2012-2020 (I sum all years upto 1 number again, in hectares) # * Temperature changes might contribute to the increase fires too, such data are available from Food and Agriculture Organization of the United Nations [FAO](http://www.fao.org/faostat/en/#data/ET) # 1. 
There you can download temperature changes for each country in respect to its base level, which is given as average of temperatures between years 1951-1980. # 2. Data are originally per month each year. # 3. We averaged them over months and years between 2012-2020 into a single number in column `t_change`. # * Lack of precipitations could justify large fires # 1. Therefore we have column `av_prec` in mm per year (again single constant for all years). # 2. Available from [UNdata](http://data.un.org/Data.aspx?d=CLINO&f=ElementCode%3A06) # ## Find fraction of burnt forests # command with similar logic as in excel # new column equals ratio of two columns df['ratio_burn'] = df['burned_area_ha']/df['forest_area_ha'] # check again top of the table df.head() # # The master logic # 1. Find countries which have too high ratio of burned forest # 2. At the same time their temperature rise is not high, so it cannot justify the fires # 3. Precipitations are not relatively low, which could again explain extensive fires # 4. Finally, intersection of these 3 sets will give us countries which have decent rains, temperature is not rising crazily, but have lots of fires: Maybe because of arson? # # Try Europe first # ## Quantiles # * Quantiles are values out of group of values, which split the group into ratio # * Example: `quantile(0.6)` is a value, where 60% of values lie below, and 40% of values lie above # * So quantile is a good way to compare countries relative to each other. # * Note that quantile 0.6 means the same thing as 60th percentile. 
# select european countries
df_europe = df[df.Region == 'Europe']


# +
def fire_risk_sets(df_sub, burn_q=0.7, climate_q=0.5):
    """Return the three country sets the whole analysis is built from.

    Parameters
    ----------
    df_sub : DataFrame indexed by iso code with columns ``ratio_burn``,
        ``t_change`` and ``av_prec_mm_per_year``.
    burn_q : quantile above which the burned-forest ratio counts as "high"
        (0.7 keeps the top 30% of countries).
    climate_q : quantile used for both climate conditions.

    Returns
    -------
    tuple of three sets of index labels:
    countries with a high burned ratio, countries whose temperature rise
    stays below the ``climate_q`` quantile, and countries whose
    precipitation lies above the ``climate_q`` quantile.
    """
    high_burn = set(df_sub[df_sub.ratio_burn > df_sub.ratio_burn.quantile(burn_q)].index)
    low_t_change = set(df_sub[df_sub.t_change < df_sub.t_change.quantile(climate_q)].index)
    not_low_prec = set(
        df_sub[df_sub.av_prec_mm_per_year > df_sub.av_prec_mm_per_year.quantile(climate_q)].index
    )
    return high_burn, low_t_change, not_low_prec


# The 30% of European countries with the highest burned forest ratio, the
# half with the smaller temperature increase, and the half with the higher
# precipitation (remember, we set the index to iso, so these are iso codes).
set_high_burn, set_not_large_t_change, set_not_low_prec = fire_risk_sets(df_europe)
# -

# NOTE: There is no strict rule which quantile to use, feel free to experiment

# ### Now we do intersection of 2 sets, high burn states with not so high temperature rise.

# intersection between two sets
# Countries which have high burns AND temperature rise is below average over the decades
inter2 = set_high_burn.intersection(set_not_large_t_change)

# using .loc on our df, we can select countries of inter2 from the original df
df.loc[inter2]

# ## Temperature is only one factor
# * Extremely low precipitations might justify lots of burned forest
# * Let's add the condition that the precipitations cannot be too low
# * Now our logic says: high burns AND not large temperature increase AND not very low precipitations

# intersection of three sets
inter3 = set_high_burn.intersection(set_not_large_t_change).intersection(set_not_low_prec)

# listing the countries from the original df
df.loc[inter3]

# list all high burning countries
df.loc[set_high_burn].Country

# ## Plotting venn diagram
# * using functions from matplotlib_venn module, we can visualize how our sets share the elements (Countries)

plt.figure(figsize=(6,4))
venn3([set_high_burn, set_not_large_t_change, set_not_low_prec],
      set_labels=('High burn', 'Low Temp change', 'Not low prec'),
      alpha=0.5)
plt.title('European countries with excessive fires', fontsize=16)
plt.show()

# * Our motivation was to identify the intersection of the three sets shown in dark purple here
#
# ---
# ---
# ## Comparing all countries together feels wrong, because of huge differences between them.
#
# * There is no unique answer how to group countries, consider options based on:
#     * development level (GDP)
#     * Geography
#     * Climate
#     * And more
# * Here we measure development level of each country in terms of GDP
#     1. GDP per capita (for 2020) in dollars (column 'gdp_per_capita')
#     2. Data obtained from [World Bank](https://data.worldbank.org/indicator)
# * In order to compare similar countries we could pick continents one by one
# * Above, Europe can be considered fairly homogeneous, thanks to its small size and EU integration.
# * Or alternatively use four level division developed by [<NAME>](https://www.gapminder.org/factfulness-book/) and [Gapminder](https://www.gapminder.org/), debanking the myth of poor south and rich north or in other words US and THEM. # # --- # --- # ## Let's organize countries according to the GDP per capita by adding a column `level` to our table # using wb module, we can load lots of datasets directly into pandas dataframes (tables) gdp = wb.data.DataFrame('NY.GDP.PCAP.CD', time=2020) gdp.index.names = ['iso'] # rename the index df = df.merge(gdp, on='iso') # merging our two tables according to 'iso' index # and rename gdp column df = df.rename(columns={'NY.GDP.PCAP.CD':'gdp_per_capita_dollars'}) df.head() # ## Example # * what is the dollar per year money, people in the 10 percent of poorest countries live on in average? # use quantile again df['gdp_per_capita_dollars'].quantile(0.1) # in dollars per day df['gdp_per_capita_dollars'].quantile(0.1)/365 # ## Extreme poverty is defines as living with less than 2 or 2.5 dollars a day. # * As mentioned in the initial notes, the world is not US and THEM any more, but can be more described by four levels of countries' wealth # * levels go from the poverty line of 2$ a day, and then multiply by 4 to reach the next level # * So we have 2, 8, 32 dollars per day levels # * In which level are you? 
# We need to mark our countries according to these levels in our df

# Upper daily-income bound (in dollars) of levels 1, 2 and 3; everything at or
# above the last bound is level 4.  Level 1 is capped by the extreme poverty
# line (the 2.5 $ variant), each next level multiplies it by roughly 4.
GDP_LEVEL_DAILY_LIMITS = (2.5, 8, 32)


def gdp_level_from_income(annual_income_dollars):
    """Map a yearly GDP per capita (in dollars) to a Gapminder-style level 1-4.

    gdp is per year, so the per-day limits are multiplied by 365.
    Missing values compare False against every limit and therefore end up in
    level 4, exactly as the original column-wide default assignment did
    (those rows are dropped below anyway).
    """
    for level, daily_limit in enumerate(GDP_LEVEL_DAILY_LIMITS, start=1):
        if annual_income_dollars < 365 * daily_limit:
            return level
    return 4


df['gdp_level'] = df['gdp_per_capita_dollars'].map(gdp_level_from_income)
df.head()

# how many countries (rows) do we have in df
len(df)

# leaving out rows with any missing data
# Proper approach is to try to find additional sources for the missing data
df.dropna(inplace=True)

# Out of 195 countries, we are omitting many people of this world.
len(df)

# How many countries we have at each GDP level?
# apply value_counts() method on column gdp_level
df.gdp_level.value_counts()

# # Level 1 countries analysis
# * here we repeat the same steps as we did for Europe
# * Now it includes whole world, grouped by GDP

# similar to selecting european countries above, select level 1 countries.
# The four per-level analyses below were identical up to the selected rows,
# so the shared steps live in one function (see also "Food for thought" #4).
def analyze_fires(df_sub, title, burn_q=0.7, climate_q=0.5):
    """Run the Europe-style fire analysis on ``df_sub`` and draw a venn diagram.

    Builds the three country sets (burned ratio above the ``burn_q``
    quantile, temperature change below the ``climate_q`` quantile,
    precipitation above the ``climate_q`` quantile), prints the rows for the
    two-way and three-way intersections and plots a venn diagram titled
    ``title``.  Returns the three sets for further experiments.
    """
    set_high_burn = set(df_sub[df_sub.ratio_burn > df_sub.ratio_burn.quantile(burn_q)].index)
    set_not_large_t_change = set(df_sub[df_sub.t_change < df_sub.t_change.quantile(climate_q)].index)
    set_not_low_prec = set(
        df_sub[df_sub.av_prec_mm_per_year > df_sub.av_prec_mm_per_year.quantile(climate_q)].index
    )

    # intersection of two sets: high burn AND not large temperature change
    inter2 = set_high_burn.intersection(set_not_large_t_change)
    print(df_sub.loc[sorted(inter2)])

    # finally intersection of three sets: additionally not low precipitation
    inter3 = inter2.intersection(set_not_low_prec)
    print(df_sub.loc[sorted(inter3)])

    plt.figure(figsize=(6, 4))
    venn3([set_high_burn, set_not_large_t_change, set_not_low_prec],
          set_labels=('High burn', 'Low Temp change', 'Not low prec'),
          alpha=0.5)
    plt.title(title, fontsize=16)
    plt.show()
    return set_high_burn, set_not_large_t_change, set_not_low_prec


analyze_fires(df[df['gdp_level'] == 1], 'Level 1 countries with excessive fires')

# # Conclusion for level 1:
# * It seems that no country stands out as for getting too many fires at favourable climate
# * Fire extend for Mali and Chad seems to be justified by low precipitations

# # Level 2 countries analysis

analyze_fires(df[df['gdp_level'] == 2], 'Level 2 countries with excessive fires')

# # Conclusion for level 2:
# * Lack of precipitation is not a reason for the strong fires.
# * UN general law department might want to have a deeper look here for some easy gains.

# # Level 3 countries analysis

analyze_fires(df[df['gdp_level'] == 3], 'Level 3 countries with excessive fires')

# # Conclusion for level 3:
# * Lack of precipitation removed only Mexico from the list.
# * UN general law department might want to have a deeper look into these 7 countries.

# # Level 4 countries analysis

analyze_fires(df[df['gdp_level'] == 4], 'Level 4 countries with excessive fires')

# # Conclusion for level 4:
# * Central America seems to have potential to reduce forest fires.
# * Or maybe there is something more specific going on there?
# * I do not know enough about central America, so let me know your thoughts.

# # Overall conclusions:
# * You have learnt a lot in this exercise, especially about the power of working with tables using pandas module
# * And doing it with real-world data, compiled from various sources.
# * As life itself, it is an open end problem, if you have ides how to improve what we have done, it could actually save lives. Please let me know.
#
# ---
# ---
#
# ## Food for thought:
# 1. Apart from the intersection of three sets, there are other subsets which you can find by using combinations of `union()`, `intersection()` methods. What do they logically represent and are they of any use for the UN secretary?
# 2.
Some data are missing (an issue we do not tackle properly here) — how important do you think it is, for our world to flourish, that all the data are available for all the countries? # 3. How would you improve the logic — in other words, which parameters should we add? # 4. In programming, we define functions whenever pieces of code repeat often. Can you identify which such parts could be put into functions? # # # GOOD job everyone!!!
en/03_sets/ignore/forest_fires_solution_helper.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ### Entrepreneurial Competency Analysis and Predict

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib as mat
import matplotlib.pyplot as plt
from matplotlib import cm
import warnings

warnings.filterwarnings("ignore")

# Load the survey data and take a first look at it.
data = pd.read_csv('entrepreneurial competency.csv')
data.head()

data.describe()

# Correlations only make sense for the numeric columns; selecting them
# explicitly keeps this working on recent pandas versions, where
# DataFrame.corr no longer silently drops non-numeric columns.
data.select_dtypes('number').corr()

list(data)

data.shape

# Frequency of each reported reason for lack of entrepreneurial competency.
data_reasons = data.ReasonsForLack.value_counts().to_frame()
data_reasons

# The single most common reason.
data.ReasonsForLack.value_counts().idxmax()

# Columns that contain missing values.
data.isnull().sum()[data.isnull().sum() > 0]

# Fill the missing reasons with an explicit "unknown" marker.
data['ReasonsForLack'] = data.ReasonsForLack.fillna('Desconhecido')
fill_na = data.ReasonsForLack.value_counts().to_frame()
fill_na.head(5)


def sector_summary(frame):
    """Return a Sector/Amount/Percentual table for frame.EducationSector.

    Percentual is the share of students in each sector, in percent.  The
    original built this table by constructing a DataFrame from a Series and
    overwriting both columns afterwards; this builds it directly.  Rounding
    is done after scaling to percent, which keeps two meaningful decimals
    (rounding the raw fraction first lost precision).
    """
    counts = frame.EducationSector.value_counts().sort_values(ascending=False)
    summary = pd.DataFrame({'Sector': counts.index, 'Amount': counts.values})
    summary['Percentual'] = (counts.values / counts.sum() * 100).round(2)
    return summary


def sector_pie(summary, title):
    """Draw a donut chart of the sector distribution in ``summary``."""
    labels = ["{} [{}%]".format(row.Sector, round(row.Percentual, 2))
              for row in summary.itertuples()]
    colors = cm.Set3(np.arange(len(summary)))
    plt.figure()
    plt.pie(summary['Amount'], labeldistance=1, radius=3,
            colors=colors, wedgeprops=dict(width=0.8))
    plt.legend(labels=labels, loc='center', prop={'size': 12})
    plt.title(title, loc='Center', fontdict={'fontsize': 20, 'fontweight': 20})
    plt.show()


def gender_summary(frame):
    """Return absolute and percentage gender counts for ``frame``."""
    counts = frame.Gender.value_counts()
    return pd.DataFrame({
        'Gender': counts.index,
        'Absolut_Value': counts.values,
        'Percent_Value': (counts.values / counts.sum() * 100).round(2),
    })


edu_sector_pd = sector_summary(data)
edu_sector_pd

sector_pie(edu_sector_pd,
           "Students distribution based on Education Sector - General Analysis")

# Three sectors with the largest number of students.
rank_3 = edu_sector_pd[['Sector', 'Amount']].head(3)
rank_3

fig, ax = plt.subplots(figsize=(8, 5))
colors = ["#00e600", "#ff8c1a", "#a180cc"]
sns.barplot(x="Sector", y="Amount", palette=colors, data=rank_3)
ax.set_title("Sectors with largest students number", fontdict={'size': 12})
ax.xaxis.set_label_text("Sectors", fontdict={'size': 12})
ax.yaxis.set_label_text("Students amount", fontdict={'size': 12})
plt.show()

# Age distribution of the whole cohort.
fig, ax = plt.subplots(figsize=(8, 6))
sns.histplot(data["Age"], color="#33cc33", kde=True, ax=ax)
ax.set_title('Students distribution based on Age', fontsize=15)
plt.ylabel("Density (KDE)", fontsize=15)
plt.xlabel("Age", fontsize=15)
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.boxplot(data.Age)
plt.show()

df_gender = gender_summary(data)
df_gender

fig, ax = plt.subplots(figsize=(8, 6))
sns.histplot(data["Gender"], color="#33cc33", ax=ax)
ax.set_title('Students distribution by gender', fontsize=15)
plt.ylabel("Amount", fontsize=15)
plt.xlabel("Gender", fontsize=15)
plt.show()

# # Education Sector, Gender and Age Analyses, where Target = 1

# Subset of students flagged as entrepreneurially competent (y == 1).
data_y = data[data.y == 1]
data_y.head()

data_y.shape

edu_sector_ypd = sector_summary(data_y)
edu_sector_ypd

sector_pie(edu_sector_ypd,
           "Students distribution based on Education Sector - Target Analysis")

fig, ax = plt.subplots(figsize=(8, 6))
sns.histplot(data_y["Age"], color="#1f77b4", kde=True, ax=ax)
ax.set_title('Students distribution based on Age - Target Analysis', fontsize=15)
plt.ylabel("Density (KDE)", fontsize=15)
plt.xlabel("Age", fontsize=15)
plt.show()

df_gender_y = gender_summary(data_y)
df_gender_y

fig, ax = plt.subplots(figsize=(8, 6))
sns.histplot(data_y["Gender"], color="#9467bd", ax=ax)
ax.set_title('Students distribution by gender', fontsize=15)
plt.ylabel("Amount", fontsize=15)
plt.xlabel("Gender", fontsize=15)
plt.show()

# Share of students with/without an individual project, for y == 1 and overall.
pcy = round(data_y.IndividualProject.value_counts() / data_y.IndividualProject.shape[0] * 100, 2)
pcy

pc = round(data.IndividualProject.value_counts() / data.IndividualProject.shape[0] * 100, 2)
pc

# BUG FIX: the bars were labelled with ``.unique()`` while the heights came
# from ``value_counts()``; those two orderings are unrelated, so labels could
# be attached to the wrong bars.  Plot each series against its own index.
fig = plt.figure(figsize=(15, 5))              # frame size
plt.subplots_adjust(wspace=0.5)                # space between the subplots
plt.suptitle('Comparation between Idividual Project on "y general" and "y == 1"')
plt.subplot(1, 2, 2)
plt.bar(pcy.index.astype(str), pcy.values, color='green')
plt.title("Individual Project Distribution - y==1")
plt.subplot(1, 2, 1)
plt.bar(pc.index.astype(str), pc.values, color='grey')
plt.title("Individual Project Distribution - Full dataset")
plt.show()

# Influence distribution for the whole dataset (percent).
round(data.Influenced.value_counts() / data.Influenced.shape[0], 2) * 100
# Influence distribution for the competent subset (percent).
round(data_y.Influenced.value_counts() / data_y.Influenced.shape[0], 2) * 100

# Here we can observe that the categoric features have no influence on Target.
# Each feature measure has almost no impact when compared on 'y general' and
# 'y == 1'. In other words, we must take the numerical features as predict
# parameters.

data.head()

list(data)

# Keep only the numeric / ordinal survey answers for modelling.
data_num = data.drop(['EducationSector', 'Age', 'Gender', 'City', 'MentalDisorder'], axis=1)
data_num.head()

# Correlate the numeric columns only (data_num still carries the string
# column ReasonsForLack).
data_num.select_dtypes('number').corr()

plt.hist(data_num.GoodPhysicalHealth, bins=30)
plt.title("Good Physical Health distribution")
plt.show()

data_num_fil1 = data_num[data_num.y == 1]

plt.hist(data_num_fil1.GoodPhysicalHealth, bins=30)
plt.title("Good Physical Health distribution, where target == 1")
plt.show()

pers_fil = round(data_num.GoodPhysicalHealth.value_counts() / data_num.GoodPhysicalHealth.shape[0], 2)
pers_fil1 = round(data_num_fil1.GoodPhysicalHealth.value_counts() / data_num_fil1.GoodPhysicalHealth.shape[0], 2)
pers_fil
pers_fil1

list(data_num)


def plot_features(df, df_filtered, columns):
    """Plot, for every column in ``columns``, the value distribution of the
    full dataset (left panel) next to the distribution of the filtered
    y == 1 subset (right panel).

    BUG FIXES vs. the first version:
    * the right panel plotted the FULL dataset under the "y == 1" title;
    * the left panel mixed the two series (a.value_counts() / b.shape[0]);
    * ``unique()`` ordering is unrelated to ``value_counts()`` ordering, so
      bars could get the wrong labels.  Each series is now plotted against
      its own value_counts index.
    """
    full = df.copy()
    filtered = df_filtered.copy()
    for column in columns:
        full_share = round(full[column].value_counts() / len(full), 2)
        filt_share = round(filtered[column].value_counts() / len(filtered), 2)

        plt.figure(figsize=(15, 5))          # frame size
        plt.subplots_adjust(wspace=0.5)      # space between the subplots
        plt.suptitle('Comparation between Different Features on "y general" and "y == 1"')
        plt.subplot(1, 2, 2)
        plt.bar(filt_share.index.astype(str), filt_share.values, color='green')
        plt.title("Comparation between " + column + " on 'y == 1'")
        plt.subplot(1, 2, 1)
        plt.bar(full_share.index.astype(str), full_share.values, color='grey')
        plt.title("Comparation between " + column + " Full dataset")
        plt.show()


plot_features(data_num, data_num_fil1,
              columns=['Influenced', 'Perseverance', 'DesireToTakeInitiative',
                       'Competitiveness', 'SelfReliance', 'StrongNeedToAchieve',
                       'SelfConfidence'])

# ### Data Transformation and Preprocessing

data_num.shape
data_num.dtypes

from sklearn.preprocessing import OneHotEncoder

# NOTE: the original assigned X = data_num.drop([...]) here and immediately
# overwrote it with the one-hot-encoded frame below; the dead assignment has
# been removed.


def ohe_drop(data, columns):
    """One-hot encode each column in ``columns`` and drop the original column.

    The dummy columns are prefixed with the source column name so that
    features sharing the same category values (the 1-5 Likert answers) no
    longer produce duplicated column names in the result.
    """
    df = data.copy()
    ohe = OneHotEncoder()
    for column in columns:
        var_ohe = df[column].values.reshape(-1, 1)
        ohe.fit(var_ohe)
        dummy_names = ["{}_{}".format(column, cat) for cat in ohe.categories_[0].tolist()]
        OHE = pd.DataFrame(ohe.transform(var_ohe).toarray(), columns=dummy_names)
        df = pd.concat([df, OHE], axis=1)
        df = df.drop([column], axis=1)
    return df


X = ohe_drop(data_num, columns=['Perseverance', 'DesireToTakeInitiative',
                                'Competitiveness', 'SelfReliance',
                                'StrongNeedToAchieve', 'SelfConfidence',
                                'GoodPhysicalHealth', 'Influenced', 'KeyTraits'])
X

# Drop the target and the remaining non-encoded columns before modelling.
X = X.drop(['y', 'ReasonsForLack', 'IndividualProject'], axis=1)
y = np.array(data_num.y)

X.shape
y.shape

X = np.array(X)
type(X)

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
X_train.shape
X_test.shape
y_train.shape
y_test.shape

# ### Logistic Regression

from sklearn.linear_model import LogisticRegression

logreg = LogisticRegression()
logreg.fit(X_train, y_train)
logreg.predict(X_train)
logreg.predict(X_train)[:20]
y_train[:20]

# ### Performance metrics calculation

# Accuracy Score
from sklearn.metrics import accuracy_score

accuracy_score(y_true=y_train, y_pred=logreg.predict(X_train))

# Cross Validation
from sklearn.model_selection import KFold

kf = KFold(n_splits=3)
classif = LogisticRegression()

# +
train_accuracy_list = []
val_accuracy_list = []

for train_idx, val_idx in kf.split(X_train, y_train):
    Xtrain_folds = X_train[train_idx]
    ytrain_folds = y_train[train_idx]
    Xval_fold = X_train[val_idx]
    yval_fold = y_train[val_idx]

    classif.fit(Xtrain_folds, ytrain_folds)
    train_pred = classif.predict(Xtrain_folds)
    pred_validacao = classif.predict(Xval_fold)

    train_accuracy_list.append(accuracy_score(y_pred=train_pred, y_true=ytrain_folds))
    val_accuracy_list.append(accuracy_score(y_pred=pred_validacao, y_true=yval_fold))

print("acurácias em treino: \n", train_accuracy_list, " \n| média: ", np.mean(train_accuracy_list))
print()
print("acurácias em validação: \n", val_accuracy_list, " \n| média: ", np.mean(val_accuracy_list))
# -

from sklearn.metrics import confusion_matrix

confusion_matrix(y_true=y_train, y_pred=logreg.predict(X_train))
cm = confusion_matrix(y_true=y_train, y_pred=logreg.predict(X_train))

# Recall of the positive class (true positives over actual positives).
cm[1, 1] / cm[1, :].sum()
# Precision of the positive class (true positives over predicted positives).
cm[1, 1] / cm[:, 1].sum()

from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import f1_score

f1_score(y_true=y_train, y_pred=logreg.predict(X_train))

# ### Y test Predict

logreg.predict(X_test)
f1_score(y_true=y_test, y_pred=logreg.predict(X_test))

# The predict of "y_test" is too low, so I'll optimize the model

# ### Model Optimization

from sklearn.feature_selection import SelectKBest, chi2


def try_k(x, y, n):
    """Select the n best chi2 features of (x, y), refit the global ``logreg``
    on them and return (predictions, f1, precision, recall).

    NOTE(review): this selects features and evaluates on the SAME data it
    fits on (and mutates the global ``logreg``), so the reported scores are
    optimistic; a proper version would fit the selector and the model on the
    training split only.
    """
    the_best = SelectKBest(score_func=chi2, k=n)
    fit = the_best.fit(x, y)
    features = fit.transform(x)
    logreg.fit(features, y)
    preds = logreg.predict(features)
    f1 = f1_score(y_true=y, y_pred=preds)
    precision = precision_score(y_true=y, y_pred=preds)
    recall = recall_score(y_true=y, y_pred=preds)
    return preds, f1, precision, recall


# BUG FIX: ``n_list`` was never defined (NameError) and the bare ``f1``,
# ``precision``, ``recall`` expressions inside the loop displayed nothing.
# The last value (30) matches the k used for the final evaluation below, so
# ``logreg`` ends up fitted on exactly 30 features.
n_list = [5, 10, 15, 20, 25, 30]
for n in n_list:
    preds, f1, precision, recall = try_k(X_test, y_test, n)
    print("k={}: f1={:.3f}  precision={:.3f}  recall={:.3f}".format(n, f1, precision, recall))

from sklearn.metrics import classification_report, plot_confusion_matrix, plot_roc_curve

the_best = SelectKBest(score_func=chi2, k=30)
fit = the_best.fit(X_test, y_test)
feature = fit.transform(X_test)
preds = logreg.predict(feature)

# BUG FIX: these two calls referenced an undefined name ``features``; the
# reduced matrix is called ``feature`` here.
plot_confusion_matrix(logreg, feature, y_test)
plot_roc_curve(logreg, feature, y_test)

print(classification_report(y_test, preds))
Entrepreneurial Competency Analysis and Predict.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Demonstration of GPU Accelerated SigMF Reader # Please note that our work with both SigMF readers and writers is focused on appropriately handling the data payload on GPU. This is similar to our usage of DPDK within the Aerial SDK and cuVNF. # + import json import numpy as np import cupy as cp import cusignal cusignal.precompile_kernels() # - # We are using the Northeastern University Oracle SigMF recordings found [here](http://www.genesys-lab.org/oracle) meta_file = '/data/oracle/KRI-16Devices-RawData/2ft/WiFi_air_X310_3123D7B_2ft_run1.sigmf-meta' data_file = '/data/oracle/KRI-16Devices-RawData/2ft/WiFi_air_X310_3123D7B_2ft_run1.sigmf-data' # # Reader (Binary and SigMF) # For our purposes here, [SigMF](https://github.com/gnuradio/SigMF) data is treated as a JSON header and processed on CPU, while the *binary* payload file is mmaped to GPU and cuSignal uses a CUDA kernel to parse the file. While we've focused on SigMF here, you can use the underlying `cusignal.read_bin` and `cusignal.parse_bin` (and corresponding write functions) for your own datasets. 
# ### Baseline Reader (CPU, Numpy)

# +
with open(meta_file, 'r') as f:
    md = json.loads(f.read())

# BUG FIX: ``data_type`` used to be assigned only for 'cf32' recordings, so
# any other datatype surfaced later as a confusing NameError.  Fail fast
# with an explicit message instead.
datatype = md['_metadata']['global']['core:datatype']
if datatype == 'cf32':
    data_type = np.complex64
else:
    raise ValueError("Unsupported SigMF core:datatype {!r}; only 'cf32' is handled in this notebook".format(datatype))
# -

# %%timeit
data_cpu = np.fromfile(data_file, dtype=data_type)

# ### Baseline Reader (GPU, Numpy)

# %%timeit
data_gpu = cp.fromfile(data_file, dtype=data_type)
cp.cuda.runtime.deviceSynchronize()

# ### cuSignal - Use Paged Memory (Default)
# This method is preferred for offline signal processing and is the easiest to use

# %%timeit
data_cusignal = cusignal.read_sigmf(data_file, meta_file)
cp.cuda.runtime.deviceSynchronize()

# ### cuSignal - Use Pinned Buffer (Pinned)
# This method is preferred for online signal processing tasks when you're streaming data to the GPU with known and consistent data sizes

binary = cusignal.read_bin(data_file)
buffer = cusignal.get_pinned_mem(binary.shape, cp.ubyte)

# %%timeit
data_cusignal_pinned = cusignal.read_sigmf(data_file, meta_file, buffer)
cp.cuda.runtime.deviceSynchronize()

# ### cuSignal - Use Shared Buffer (Mapped)
# This method is preferred for the Jetson line of embedded GPUs. We're showing performance here on a PCIe GPU (which is why it's so slow!)

binary = cusignal.read_bin(data_file)
buffer = cusignal.get_shared_mem(binary.shape, cp.ubyte)

# %%timeit
data_cusignal_shared = cusignal.read_sigmf(data_file, meta_file, buffer)
cp.cuda.runtime.deviceSynchronize()

# # Writer (Binary and SigMF)

# +
import os

sigmf = cusignal.read_sigmf(data_file, meta_file)

test_file_ext = "test-data.sigmf-data"

# Start from a clean slate so the append-free writers below are comparable.
if os.path.exists(test_file_ext):
    os.remove(test_file_ext)
# -

# ### Baseline Writer

# %%timeit
sigmf.tofile(test_file_ext)
cp.cuda.runtime.deviceSynchronize()

# ### cuSignal - Use Paged Memory (Default)

# %%timeit
cusignal.write_sigmf(test_file_ext, sigmf, append=False)
cp.cuda.runtime.deviceSynchronize()

# ### cuSignal - Use Pinned Buffer (Pinned)

binary = cusignal.read_bin(data_file)
buffer = cusignal.get_pinned_mem(binary.shape, cp.ubyte)

# %%timeit
cusignal.write_sigmf(test_file_ext, sigmf, buffer, append=False)
cp.cuda.runtime.deviceSynchronize()

# ### cuSignal - Use Mapped Buffer (Mapped)

binary = cusignal.read_bin(data_file)
buffer = cusignal.get_shared_mem(binary.shape, cp.ubyte)

# %%timeit
cusignal.write_sigmf(test_file_ext, sigmf, buffer, append=False)
cp.cuda.runtime.deviceSynchronize()
notebooks/api_guide/io_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pachterlab/GRNP_2020/blob/master/notebooks/FASTQ_processing/ProcessPBMC_V3_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="UUR5-eTx6o8Z" # **BUTTERFLY - Processing of the FASTQ files for the PBMC_V3_2 dataset.** # # 1. Download and build kallisto and bustools from source. # 2. Download the genome FASTA file and build a kallisto index # 3. Download the FASTQ files and process with kallisto # 4. Process the output from kallisto with bustools (the butterfly branch) # + [markdown] id="638S-dtx-M0q" # **1. Download and build kallisto and bustools from source** # + id="5Gt6rQkSXriM" colab={"base_uri": "https://localhost:8080/"} outputId="fcafcbd4-2b43-41e3-bc66-c72f33d62a85" # Install dependencies needed for build # !apt update # !apt install -y cmake # !apt-get install autoconf # + id="jJ3rQJCdgeJa" colab={"base_uri": "https://localhost:8080/"} outputId="ac22c04e-951e-4c3b-b78b-473d4665d00a" #Need to download and build htslib to be able to build kallisto # !cd /usr/bin && wget https://github.com/samtools/htslib/releases/download/1.9/htslib-1.9.tar.bz2 &&tar -vxjf htslib-1.9.tar.bz2 && cd htslib-1.9 && make # + id="R6kuhOmzZL_X" colab={"base_uri": "https://localhost:8080/"} outputId="867eac47-24b3-4c18-dd7b-41bbff90f1bc" #clone the kallisto repo, build and install # !rm -r temporary #if the code is run more than once # !mkdir temporary # !cd temporary && git clone https://github.com/pachterlab/kallisto.git # !cd temporary/kallisto && git checkout v0.46.2 && mkdir build && cd build && cmake .. 
&& make # !chmod +x temporary/kallisto/build/src/kallisto # !mv temporary/kallisto/build/src/kallisto /usr/local/bin/ # + id="CffgQFeiW2tc" colab={"base_uri": "https://localhost:8080/"} outputId="15df9f66-74a1-46c2-9d6f-d14d83823f4b" #clone the bustools repo, build and install # !cd temporary && rm -r * # !git clone https://github.com/BUStools/bustools.git # !mv bustools/ temporary/ # !cd temporary/bustools && git checkout butterfly && mkdir build && cd build && cmake .. && make # !chmod +x temporary/bustools/build/src/bustools # !mv temporary/bustools/build/src/bustools /usr/local/bin/ # + id="Cb5DifYYcB6g" colab={"base_uri": "https://localhost:8080/"} outputId="0d045a8e-9f62-4a32-eec1-d706c7404c6a" # !kallisto version # + [markdown] id="3w4pMTRzAqlK" # **2. Download the genome FASTA file and build a kallisto index** # + id="mzIUfCIaaUsP" colab={"base_uri": "https://localhost:8080/"} outputId="7e668a1b-4e77-4e63-c213-d86f0e6d1936" #Download fasta and build kallisto index # !wget "ftp://ftp.ensembl.org/pub/release-94/fasta/homo_sapiens/cdna/Homo_sapiens.GRCh38.cdna.all.fa.gz" -O human.fa.gz # !kallisto index -i Homo_sapiens.GRCh38.cdna.all.idx human.fa.gz # + [markdown] id="7wgjBC9OB7-Y" # **3. 
Download the FASTQ files and process with kallisto** # + [markdown] id="UycRnVGXBL_6" # # + id="ylqhA_6Ca2-8" #clean up a bit first # !rm -r sample_data # !rm -r temporary # + id="y5w4WuHIuTP8" colab={"base_uri": "https://localhost:8080/"} outputId="cda88e67-5b01-4b69-fb5d-7232e700a470" #Download fastqs # !wget "http://s3-us-west-2.amazonaws.com/10x.files/samples/cell-exp/3.0.0/pbmc_10k_protein_v3/pbmc_10k_protein_v3_fastqs.tar" # + id="Y1ZMjRqliQ6A" colab={"base_uri": "https://localhost:8080/"} outputId="6bfc0e53-cf84-424a-c927-481656ff65b4" #stream from the tar directly into kallisto # !rm A_R1.gz A_R2.gz B_R1.gz B_R2.gz # in case of running this several times # !mkfifo A_R1.gz A_R2.gz B_R1.gz B_R2.gz # !tar -O --to-stdout -xf pbmc_10k_protein_v3_fastqs.tar pbmc_10k_protein_v3_fastqs/pbmc_10k_protein_v3_gex_fastqs/pbmc_10k_protein_v3_gex_S1_L001_R1_001.fastq.gz > A_R1.gz & tar -O --to-stdout -xf pbmc_10k_protein_v3_fastqs.tar pbmc_10k_protein_v3_fastqs/pbmc_10k_protein_v3_gex_fastqs/pbmc_10k_protein_v3_gex_S1_L001_R2_001.fastq.gz > A_R2.gz & tar -O --to-stdout -xf pbmc_10k_protein_v3_fastqs.tar pbmc_10k_protein_v3_fastqs/pbmc_10k_protein_v3_gex_fastqs/pbmc_10k_protein_v3_gex_S1_L002_R1_001.fastq.gz > B_R1.gz & tar -O --to-stdout -xf pbmc_10k_protein_v3_fastqs.tar pbmc_10k_protein_v3_fastqs/pbmc_10k_protein_v3_gex_fastqs/pbmc_10k_protein_v3_gex_S1_L002_R2_001.fastq.gz > B_R2.gz & kallisto bus -i Homo_sapiens.GRCh38.cdna.all.idx -o bus_output/ -x 10xv3 -t 2 A_R1.gz A_R2.gz B_R1.gz B_R2.gz # + [markdown] id="EAHG4NeYIphe" # **4. Process the output from kallisto with bustools (the butterfly branch)** # + id="dxZfK436D_mo" colab={"base_uri": "https://localhost:8080/"} outputId="a0bb4d10-bb13-429b-fe72-3c3e4a3b0801" #get the whitelist # !rm -r GRNP_2020 #in case the code is run several times # !git clone https://github.com/pachterlab/GRNP_2020.git # !cd GRNP_2020/whitelists && unzip 10xv3_whitelist.zip # !cp GRNP_2020/tr2g/Human/* bus_output/. 
# Sanity check: confirm the unzipped 10x v3 whitelist is present.
# !cd GRNP_2020/whitelists && ls

# + id="gOTj9GRJEfKg" colab={"base_uri": "https://localhost:8080/"} outputId="5205f034-b086-4c65-a8d8-2fd1b1a9304c"
# Barcode-correct against the 10x v3 whitelist, then sort the BUS file
# (corrected records are streamed straight into `bustools sort` via stdin).
# !bustools correct -w GRNP_2020/whitelists/10xv3_whitelist.txt -p bus_output/output.bus | bustools sort -T tmp/ -t 2 -o bus_output/sort.bus -

# + id="8SlZ3p3zWizM" colab={"base_uri": "https://localhost:8080/"} outputId="551101e5-c2a9-4219-8887-40db5de046e4"
#collapse
# Collapse transcript-level records to gene level using the t2g mapping.
# !bustools collapse -o bus_output/coll -t bus_output/transcripts.txt -g bus_output/transcripts_to_genes.txt -e bus_output/matrix.ec bus_output/sort.bus

# + id="c1H1V_yxXXK_" colab={"base_uri": "https://localhost:8080/"} outputId="3d6280ee-3ddf-4a46-dfa9-002c2e0d4ab8"
#umicorrect - this code is not optimized for speed in this branch and may take a while to run, it is much faster in the master branch
# !bustools umicorrect -o bus_output/umicorr.bus bus_output/coll.bus

# + id="5SsVe8r1e6ye" colab={"base_uri": "https://localhost:8080/"} outputId="f12defd9-21c1-4ecc-eb19-771fda8d1211"
#convert to text
# !bustools text -o bus_output/bug.txt bus_output/umicorr.bus

# + id="ihx0UaMtfMW5" colab={"base_uri": "https://localhost:8080/"} outputId="e1b87f48-b184-4bfb-c0ad-957996a40540"
# List the working directory and the bus_output folder to verify the outputs.
# !ls -l
# !cd bus_output && ls -l
notebooks/FASTQ_processing/ProcessPBMC_V3_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: AutoCNet # language: python # name: autocnet # --- # # Extracting Keypoints (Interest Points) # + import math import os import sys sys.path.insert(0, os.path.abspath('/data/autocnet')) import autocnet from autocnet import CandidateGraph # The GPU based extraction library that contains SIFT extraction and matching import cudasift as cs # A method to resize the images on the fly. from scipy.misc import imresize # %pylab inline figsize(16,4) # - # ## Candidate Graph # As before, create the candidate graph object that stores the adjacency between images. # + a = 'AS15-P-0111_CENTER_LRG_CROPPED.png' b = 'AS15-P-0112_CENTER_LRG_CROPPED.png' adj = {a:[b], b:[a]} cg = CandidateGraph.from_adjacency(adj) # - # ## Enable GPU use # # The library can utilize either the CPU or the GPU for a number of computationally expensive functions. One example if [keypoint or correspondence identification](https://en.wikipedia.org/wiki/Correspondence_problem). The process of finding correspondences requires 3 steps: # # - The identification of [interest points](https://en.wikipedia.org/wiki/Interest_point_detection). # - The extraction of said interest points # - Matching of interest points between images to identify correspondences. # # We support this processing flow using: # # - (OpenCV functionality)[http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_table_of_contents_feature2d/py_table_of_contents_feature2d.html] # - (VLFeat)[http://www.vlfeat.org] # - (CUDA SIFT)[https://github.com/USGS-Astrogeology/CudaSift] # # CUDA SIFT is exceptionally fast as it extracts (and matches) keypoints in parallel on 1000s of GPU cores. For all but the smallest images, GPU use is encouraged. 
# # ![gpu](https://upload.wikimedia.org/wikipedia/commons/b/bd/CPU_and_GPU.png) # # In house, my work station is available with 2 M5000 GPUs containing approximately 2500 GPU cores and 8GB of RAM each. The GPU processing node has 4 K80 GPUs with ~5000 GPU cores and 12GB of RAM each. autocnet.cuda(enable=True, gpu=0) # Explicitly select a GPU since the system has 2 and GPU1 is running monitors. # A GPU with 8GB of memory can run the SIFT algorithm for approximately $12500^{2}$ pixels. The CudaSift code is written to support 32-bit floating point numbers (a major improvement over OpenCV for our use case). This is also a limiting factor as the 8-bit Apollo Pan `.png` files are taking up significantly more space than they really need. # # # check the total size of the input image. cg.node[0].geodata.raster_size # ## Aside: AutoCNet as a library # # We have developed the AutoCNet library and not an end-to-end application intentionally. The Apollo Pan data is a prime example of why this decision was made. The images are unique and the order and pre-processing required for successful matching require chaining the AutoCNet functionality in a unique way. The "application" can be taylored to the data as opposed to expanding the application to support all possible processing paths. # # ![autolib](https://github.com/USGS-Astrogeology/autocnet/blob/dev/docs/_static/images/autocnet_modules.png?raw=true) # # #### Modules: # # - `graph`: This module manages the CandidateGraph, Node, and Edge constructs. All of the syntax sugar is embedded in these objects. # - `matcher`: The meat-and-potatoes module with our CPU/GPU feature matchers, subpixel matchers, outlier detection methods, and spatial suppression functions. # - `camera`: Lightweight pinhole camera capabilities for working with epipolar lines, estimating the relationship between an ideal pinhole and non-ideal pinhole using image correspondences, and triangulation. 
# - `transformation`: Decomposition and transformation (fundamental and homography) matrices. # - `control`: ISIS3 style control class (not broadly used). # - `cg`: Computational Geometry module with convex hull and Voronoi diagram functionality. # - `vis`: A tiny visualization module - AutoCNet is not a collection of data views, but a library. This module is designed for quick development peaks at the state of things. # - `plio/io`: The `plio` library is leveraged heavily to support I/O. We also have a lightweight io module within AutoCNet for saving/loading this project. # - `utils`: This module contains an assortment of utility functions for linear algebra operations (aggregating numpy functions), nearest neighbor searches, recursive dict traversal, etc. # ## Images that are too large # # A few options exist for images that are too large for the SIFT algorithm. If geospatial information existed, it would be possible to contrain the extraction to just the overlap between two (or more) images. We could then cross our fingers and hope that the overlap area was small enough to fit onto a GPU. Alternatively, it is possible to downsample the image and work with the reduced resolution initially. Due to these challenges, the [syntax sugar](https://en.wikipedia.org/wiki/Syntactic_sugar) that exists on the `CandidateGraph`, `Node` and `Edge` objects are largely unusable. # # What follows is the result of experimentation with the images. # # **Step I**: Read the input images from a node's geodata object, downsample the image so it will fit in memory and extract keypoints. 
# + # Read the image into memory from disk # Image 1 arr0 = cg.node[0].geodata.read_array() # Check the size of the image total_size = arr0.shape[0] * arr0.shape[1] downsample_amount = math.ceil(total_size / 12500**2) # Compute the new shape of the output and downsample using Lanczos interpolation shape = (int(arr0.shape[0] / downsample_amount), int(arr0.shape[1] / downsample_amount)) arr0 = imresize(arr0, shape, interp='lanczos') # Compute the approximate number of points to extract - we are looking for good coverage without being super dense. This took a bit of trial and error npts = max(arr0.shape) / 3.5 # Create the SiftData object to store the results sd0 = cs.PySiftData(npts) # Extract the keypoints. cs.ExtractKeypoints(arr0, sd0, thresh=1) kp0, des0 = sd0.to_data_frame() kp0 = kp0[['x', 'y', 'scale', 'sharpness', 'edgeness', 'orientation', 'score', 'ambiguity']] kp0['score'] = 0.0 kp0['ambiguity'] = 0.0 # Check the total number returned print(len(kp0)) # - # ## Parameterization & Result Visualization # # The `cs.ExtractKeypoints` function takes the input array (image) and sift data object as mandatory input parameters. We also pass `thresh=1` in. This parameter controls the threshold for pruning Difference of Gaussian (DoG) features. In short - if not enough features are being identified, try reducing the `thresh` parameter. # # # In the above, we got 3412 (or there abouts on a rerun) points. What is important is the spatial distirbution of these. Below, we visualize these to check the distribution. imshow(arr0, cmap='gray') plot(kp0['x'], kp0['y'], 'ro', markersize=3) # ## Repeat for the other array # # The spatial distribution looks good - time to repeat for the next image! 
# + # Image 2 arr1 = cg.node[1].geodata.read_array() shape = (int(arr1.shape[0] / 6), int(arr1.shape[1] / 6)) # 5 because the max number of pixels is 12500^2 arr1 = imresize(arr1, shape, interp='lanczos') npts = max(arr1.shape) / 3.5 sd1 = cs.PySiftData(npts) cs.ExtractKeypoints(arr1, sd1, thresh=1) kp1, des1 = sd1.to_data_frame() kp1 = kp1[['x', 'y', 'scale', 'sharpness', 'edgeness', 'orientation', 'score', 'ambiguity']] kp1['score'] = 0.0 kp1['ambiguity'] = 0.0 imshow(arr1, cmap='gray') plot(kp1['x'], kp1['y'], 'ro', markersize=3) # - # Interesting linear feature on the left, but overall looks okay. It might be nice to get a few more correspondences, but lets try this for now.
docs/users/tutorials/apollopan/2. Extracting Keypoints (Interest Points).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.5 64-bit (''master'': conda)'
#     language: python
#     name: python395jvsc74a57bd0e5d34a57892e62c64696f4f28864301f58fd12f04690d261f74726cd452afbc6
# ---

import numpy as np
import pandas as pd
import pyspark  # importing pyspark module
# from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import col,udf,lit  # udf(), col()
from pyspark.sql.types import FloatType  # FloatType()
from pyspark.ml.classification import RandomForestClassifier,LogisticRegression,DecisionTreeClassifier,LinearSVC
from pyspark.ml.feature import VectorAssembler

spark = SparkSession.builder.appName("PDP").getOrCreate()  # create the PySpark session used throughout.


def train_ml(seed=None):
    """Train three classifiers on the glass dataset.

    Loads ``./Data/glass.csv``, assembles all columns except the trailing
    label column ``type`` into a single ``feature`` vector, splits 70/30 and
    fits RandomForest, LogisticRegression and LinearSVC models.

    :param seed: optional seed forwarded to ``randomSplit`` so the split is
        reproducible. ``None`` (the default) keeps the original random
        behaviour, so existing callers are unaffected.
    :return: tuple ``(rf_model, lr_model, sv_model, feature_column_names)``
        of the three fitted models and the list of feature column names.
    """
    # Read data
    data = spark.read.csv("./Data/glass.csv",header=True,inferSchema=True)
    # Feature column names: drop the row index, keep everything but the label.
    data = data.drop("Index")
    col = data.columns[:-1]
    # Group the feature columns into one vector column.
    feature_assembler = VectorAssembler(inputCols=col,outputCol="feature")
    data = feature_assembler.transform(data)
    # Dataset now contains only the feature vector and the class label.
    data = data.select(["feature", "type"])
    train,test = data.randomSplit([.7,.3], seed)
    # Create the ML classifiers.
    rf_model = RandomForestClassifier(labelCol="type", featuresCol="feature")
    lr_model = LogisticRegression(labelCol="type", featuresCol="feature")
    sv_model = LinearSVC(maxIter=10, regParam=0.1, labelCol="type", featuresCol="feature")
    # Fit and return the trained model objects.
    rf_model = rf_model.fit(train)
    lr_model = lr_model.fit(train)
    sv_model = sv_model.fit(train)
    return rf_model,lr_model,sv_model,col


def get_data(seed=None):
    """Reload the glass dataset and return a fresh 70/30 split.

    NOTE: this repeats the preprocessing in :func:`train_ml` and, unless the
    same ``seed`` is passed to both, draws an *independent* random split -
    the returned ``train``/``test`` frames do not correspond to the split the
    models were trained on.

    :param seed: optional seed forwarded to ``randomSplit``; ``None`` keeps
        the original random behaviour.
    :return: tuple ``(train, test)`` of assembled DataFrames with columns
        ``feature`` and ``type``.
    """
    data = spark.read.csv("./Data/glass.csv",header=True,inferSchema=True)
    data = data.drop("Index")
    col = data.columns[:-1]
    feature_assembler = VectorAssembler(inputCols=col,outputCol="feature")
    data = feature_assembler.transform(data)
    data = data.select(["feature", "type"])
    train,test = data.randomSplit([.7,.3], seed)
    return train,test


target_column = "type"
rf_model,lr_model,sv_model, col = train_ml()
train,test = get_data()

m = rf_model.transform(train)

rf_model

rf_model.featureImportances

# Random-forest feature importances as a pandas frame for plotting.
model = pd.DataFrame(rf_model.featureImportances.values, columns=["values"])
features_col = pd.Series(col)
model["features"] = features_col
model

# +
model = model.sort_values(by=['values'], ascending=False).round(2)
model.iloc[:4]
# -

import numpy as np
import pandas as pd
import plotly as py
import plotly.tools as tls
import plotly.offline as pyo
import plotly.graph_objs as go

lr_model.coefficients

sv_model.coefficients

print("Multinomial coefficients: " + str(lr_model.coefficientMatrix.values))
print("Multinomial intercepts: " + str(lr_model.interceptVector))

print(lr_model.coefficients.values)
print(lr_model.intercept)

coff = lr_model.coefficientMatrix.values
coff

# Logistic-regression coefficients as a pandas frame.
model = pd.DataFrame(lr_model.coefficients.values, columns=["values"])
features_col = pd.Series(col)
model["features"] = features_col
model = model.sort_values(by=['values'], ascending=False).round(2)
model

# Normalise coefficients so they sum to 1 (share of total weight).
model["values"]= model.iloc[:,0]/np.sum(model.iloc[:,0])
model

fi = pd.DataFrame((lr_model.coefficients.values/np.sum(lr_model.coefficients.values)), columns=["values"])
fi

# +
# fi["values"] = fi.iloc[:, 0] / np.sum(fi.iloc[:, 0])
# -

trace = go.Bar(x=model["features"], y=model["values"]*100, marker=dict(color='#32E0C4'))
data = [trace]
layout = go.Layout()
fig = go.Figure(data=data, layout=layout)
fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
# FIX: this chart plots the *logistic regression* coefficient shares built
# above, not the random-forest importances - title corrected accordingly.
fig.update_layout(title_text="<b>LOGISTIC REGRESSION BASED FEATURE IMPORTANCE<b> ", title_x=0.5)
fig.update_xaxes(title_text='Features')
fig.update_yaxes(title_text='Feature Importance %')
fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
pyo.plot(fig)


# +
def feat_imp(model, data, target, depth):
    """Fit the named classifier and return its top-``depth`` feature importances.

    :param model: classifier name, case-insensitive; one of
        ``"RandomForestClassifier"``, ``"LogisticRegression"``, ``"LinearSVC"``.
    :param data: raw Spark DataFrame containing the feature columns and label.
    :param target: name of the label column.
    :param depth: number of top features to keep.
    :return: tuple ``(fi, fig)`` - a pandas frame of importances and a plotly
        bar chart. Returns ``None`` implicitly for an unknown ``model`` name.
    """
    if model.lower() == "RandomForestClassifier".lower():
        features = data.drop(target)
        data = VectorAssembler(inputCols=features.columns, outputCol="feature").transform(data)
        model = RandomForestClassifier(labelCol=target, featuresCol="feature", numTrees=10)
        model = model.fit(data)
        fi = pd.DataFrame(model.featureImportances.values, columns=["values"])
        features_col = pd.Series(features.columns)
        fi["features"] = features_col
        fi = fi.sort_values(by=['values'], ascending=False).round(2)
        fi = fi.iloc[:depth]
        trace = go.Bar(x=fi["features"], y=fi["values"], marker=dict(color='#32E0C4'))
        data = [trace]
        layout = go.Layout()
        fig = go.Figure(data=data, layout=layout)
        fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
        fig.update_layout(title_text="<b>RANDOM FOREST BASED FEATURE IMPORTANCE<b> ", title_x=0.5)
        fig.update_xaxes(title_text='Features')
        fig.update_yaxes(title_text='Feature Importance %')
        fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
        fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
        return fi, fig
    elif model.lower() == "LogisticRegression".lower():
        features = data.drop(target)
        data = VectorAssembler(inputCols=features.columns, outputCol="feature").transform(data)
        model = LogisticRegression(labelCol=target, featuresCol="feature")
        model = model.fit(data)
        # Normalise coefficients so they sum to 1 (share of total weight).
        fi = pd.DataFrame(model.coefficients.values/np.sum(model.coefficients.values), columns=["values"])
        features_col = pd.Series(features.columns)
        fi["features"] = features_col
        fi = fi.sort_values(by=['values'], ascending=False).round(2)
        fi = fi.iloc[:depth]
        trace = go.Bar(x=fi["features"], y=fi["values"]*100, marker=dict(color='#32E0C4'))
        data = [trace]
        layout = go.Layout()
        fig = go.Figure(data=data, layout=layout)
        fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
        fig.update_layout(title_text="<b>Logistic Regression BASED FEATURE IMPORTANCE<b> ", title_x=0.5)
        fig.update_xaxes(title_text='Features')
        fig.update_yaxes(title_text='Feature Importance %')
        fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
        fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
        return fi, fig
    elif model.lower() == "LinearSVC".lower():
        features = data.drop(target)
        data = VectorAssembler(inputCols=features.columns, outputCol="feature").transform(data)
        # FIX: labelCol was hard-coded to "type"; use the `target` parameter
        # like the other branches.
        model = LinearSVC(maxIter=10, regParam=0.1, labelCol=target, featuresCol="feature")
        model = model.fit(data)
        fi = pd.DataFrame(model.coefficients.values, columns=["values"])
        features_col = pd.Series(features.columns)
        fi["features"] = features_col
        fi = fi.sort_values(by=['values'], ascending=False).round(2)
        fi = fi.iloc[:depth]
        # FIX: `trace` was referenced without being defined in this branch
        # (NameError); build it as in the other branches.
        trace = go.Bar(x=fi["features"], y=fi["values"], marker=dict(color='#32E0C4'))
        data = [trace]
        layout = go.Layout()
        fig = go.Figure(data=data, layout=layout)
        fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
        fig.update_layout(title_text="<b>LinearSVC BASED FEATURE IMPORTANCE<b> ", title_x=0.5)
        fig.update_xaxes(title_text='Features')
        fig.update_yaxes(title_text='Feature Importance %')
        fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
        fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
        return fi, fig


# +
data = spark.read.csv("./Data/glass.csv",header=True,inferSchema=True)
# Drop the row-index column; remaining columns are features + label.
data = data.drop("Index")
target="type"
depth =8
# -

# LinearSVC,RandomForestClassifier,LinearSVC
fi,fig = feat_imp("RandomForestClassifier", data, target, depth)

pyo.plot(fig)

# +
from pyspark.mllib.evaluation import BinaryClassificationMetrics


class CurveMetrics(BinaryClassificationMetrics):
    """BinaryClassificationMetrics extension exposing the JVM-side curves."""

    def __init__(self, *args):
        super(CurveMetrics, self).__init__(*args)

    def _to_list(self, rdd):
        points = []
        # Note this collect could be inefficient for large datasets
        # considering there may be one probability per datapoint (at most)
        # The Scala version takes a numBins parameter,
        # but it doesn't seem possible to pass this from Python to Java
        for row in rdd.collect():
            # Results are returned as type scala.Tuple2,
            # which doesn't appear to have a py4j mapping
            points += [(float(row._1()), float(row._2()))]
        return points

    def get_curve(self, method):
        rdd = getattr(self._java_model, method)().toJavaRDD()
        return self._to_list(rdd)


# +
import matplotlib.pyplot as plt

# Create a Pipeline estimator and fit on train DF, predict on test DF
predictions = lr_model.transform(train)

# Returns as a list (false positive rate, true positive rate)
preds = predictions.select('type','probability').rdd.map(lambda row: (float(row['probability'][1]), float(row['type'])))
points = CurveMetrics(preds).get_curve('roc')

plt.figure()
x_val = [x[0] for x in points]
y_val = [x[1] for x in points]
plt.title("ROC")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.plot(x_val, y_val)
# -

predictions = rf_model.transform(train)

from handyspark import BinaryClassificationMetrics

# FIX: the next line was a SyntaxError (two names juxtaposed) copied from the
# handyspark Titanic example; its columns do not exist in the glass dataset.
# train stratify(['Pclass']).cols[['Age', 'logFare']].scatterplot(figsize=(12, 6))

bcm = BinaryClassificationMetrics(preds)

bcm.plot_pr_curve()

preds = predictions.select('type','probability').rdd.map(lambda row: (float(row['probability'][1]), float(row['type'])))
preds

# BCM = predictions.select(col("prediction").cast('float'),col("type").cast('float')).rdd.map(tuple)

# +
# type(preds)
# +
# FIX: these two assignments were commented out, but the figure below uses
# model_fpr/model_tpr - re-enable them so linear execution works.
model_fpr = np.array([x[0] for x in points])
model_tpr = np.array([x[1] for x in points])
# +
from plotly.subplots import make_subplots

go.Figure()
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(go.Scatter(x=model_fpr, y=1 - model_tpr, mode='lines', name='Specificity', line=dict(color='rgba(0,0,0,255)')))
fig.add_trace(go.Scatter(x=model_fpr, y=model_tpr, mode='lines', name='Sensitivity', line=dict(color='rgba(0,0,255,255)')),secondary_y=True)
fig.update_layout(title_text=f"<b>SPECIFICITY VS SENSITIVITY<b>", title_x=0.5)
fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
fig.update_yaxes(title_text='Specificity ')
fig.update_yaxes(title_text='Sensitivity')
fig.update_xaxes(title_text='Cutoff')
fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))

# +
from plotly.subplots import make_subplots

fig = go.Figure()
fig.add_trace(go.Scatter(x=model_fpr, y=model_tpr, mode='lines', name="ROC", line=dict(color='rgba(0,0,0,255)')))
fig.update_layout(title_text=f"<b>ROC AUC Curve<b>", title_x=0.5)
fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
fig.update_xaxes(title_text='False Positive Rate')
fig.update_yaxes(title_text='True Positive Rate')
fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))
# -

pyo.plot(fig)

predictions

model_fpr, model_tpr = lr_model.summary.roc.select(["FPR"]).toPandas(), lr_model.summary.roc.select(["TPR"]).toPandas()

plt.figure()
plt.title("ROC")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.plot(model_fpr, model_tpr)

from sklearn.metrics import roc_curve

y = predictions.select(target_column).toPandas()
prob = udf(lambda v: float(v[1]), FloatType())
proba = predictions.select(prob('probability')).toPandas()
# model_fpr, model_tpr, _ = roc_curve(y, proba)

# FIX: `metrics` was constructed *after* its first use below (out-of-order
# notebook cells); build it first so linear execution does not raise NameError.
metrics = CurveMetrics(preds)

points = metrics.get_curve('roc')
model_fpr = np.array([x[0] for x in points])
model_tpr = np.array([x[1] for x in points])

plt.figure()
plt.title("ROC")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.plot(model_fpr, model_tpr)

# # metrics.get_curve('roc')
# # metrics.get_curve('fMeasureByThreshold')
# metrics.get_curve('precisionByThreshold')
# # metrics.get_curve('recallByThreshold')
# # metrics.get_curve('recallByThreshold')
# # metrics.areaUnderROC
# # metrics.areaUnderPR

# metrics.get_curve('roc')
# metrics.get_curve('fMeasureByThreshold')
p_point=metrics.get_curve('precisionByThreshold')
r_point=metrics.get_curve('recallByThreshold')
# metrics.get_curve('recallByThreshold')
# metrics.areaUnderROC
# metrics.areaUnderPR

pre = np.array([x[0] for x in p_point])
rec = np.array([x[0] for x in r_point])

# FIX: this bare inspection of `rec` originally appeared *before* the
# assignment above and raised NameError; inspect it after assignment.
rec

import matplotlib.pyplot as plt

plt.figure()
plt.title("PR-Curve")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.plot(r_point, p_point)

from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import DataType,StringType,FloatType
from scikitplot.helpers import cumulative_gain_curve


@pandas_udf('double', PandasUDFType.SCALAR)
def plot_lift_curve(y_true, y_probas):
    """Compute per-class lift (cumulative-gain / percentage) curves.

    NOTE(review): the ``'double'`` scalar pandas_udf declaration does not
    match the 4-tuple returned here - confirm how callers consume this.
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)
    # FIX: `set` objects are not subscriptable, so classes[0]/classes[1]
    # below raised TypeError; a sorted list also gives deterministic order.
    classes = sorted(set(y_true))
    print(classes)
    if len(classes) != 2:
        raise ValueError('Cannot calculate Lift Curve for data with '
                         '{} category/ies'.format(len(classes)))

    # Compute Cumulative Gain Curves
    percentages, gains1 = cumulative_gain_curve(y_true, y_probas[:, 0], classes[0])
    percentages, gains2 = cumulative_gain_curve(y_true, y_probas[:, 1], classes[1])

    # Drop the leading (0, 0) point, then convert gain to lift.
    percentages = percentages[1:]
    gains1 = gains1[1:]
    gains2 = gains2[1:]

    gains1 = gains1 / percentages
    gains2 = gains2 / percentages

    # fig = go.Figure()
    # fig.add_trace(go.Scatter(x=percentages, y=gains1,
    #                          mode='lines+markers',
    #                          name='Class 0',
    #                          line=dict(color='blue', width=4)))
    # fig.add_trace(go.Scatter(x=percentages, y=gains2,
    #                          mode='lines+markers',
    #                          name='Class 1',
    #                          line=dict(color='orange', width=4)))
    # fig.add_trace(go.Scatter(x=[0,1], y=[1,1],
    #                          mode='lines',
    #                          name='Baseline',
    #                          line=dict(color='black', width=4,dash = 'dash')))
    # fig.update_layout(
    #     title="Lift Curve",
    #     yaxis_title="Lift",
    #     xaxis_title="Percentage of sample",
    #     font=dict(
    #         family="Courier New, monospace",
    #         size=18,
    #         color="#7f7f7f"))
    return percentages, gains1,percentages, gains2


prob=udf(lambda v : float(v[1]), FloatType())
m = m.withColumn("y_prob", prob(col("probability")))

m.show()

y = m.select("type").toPandas()
y_prob = m.select("y_prob").toPandas()

type(y_prob)

# NOTE(review): DataFrame.select returns one DataFrame, not four values, and
# plot_lift_curve is a pandas_udf expecting Columns - this call needs rework.
f1,f2,f3,f4 = m.select(plot_lift_curve(y,y_prob))

type(m)


def feature_importance_graph_ModelComparison(target_column, data):
    """Fit a 10-tree random forest and plot its feature importances.

    :param target_column: name of the label column in ``data``.
    :param data: raw Spark DataFrame of feature columns plus the label.
    :return: tuple ``(fi, fig)`` of the importance frame and the plotly chart.
    """
    features = data.drop(target_column)
    data = VectorAssembler(inputCols=features.columns, outputCol="feature").transform(data)
    rf_model = RandomForestClassifier(labelCol=target_column, featuresCol="feature", numTrees=10)
    rf_model = rf_model.fit(data)
    fi = pd.DataFrame(rf_model.featureImportances.values, columns=["values"])
    features_col = pd.Series(features.columns)
    fi["features"] = features_col
    fi = fi.sort_values(by=['values'], ascending=False).round(2)
    trace = go.Bar(x=fi["features"], y=fi["values"], marker=dict(color='#32E0C4'))
    data = [trace]
    layout = go.Layout()
    fig = go.Figure(data=data, layout=layout)
    fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig.update_layout(title_text="<b>RANDOM FOREST BASED FEATURE IMPORTANCE<b> ", title_x=0.5)
    fig.update_xaxes(title_text='Features')
    fig.update_yaxes(title_text='Feature Importance')
    fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    return fi, fig


# +
def lift_OngoingComparison(ann_fpr, ann_tpr, ns_fpr, ns_tpr, ann_auc, ann_fpr2, ann_tpr2, ns_fpr2, ns_tpr2, ann_auc2, name, user):
    """Build monitoring and development ROC AUC figures.

    :param ann_fpr: model FPR values (monitoring set)
    :param ann_tpr: model TPR values (monitoring set)
    :param ns_fpr: no-skill baseline FPR values (monitoring set)
    :param ns_tpr: no-skill baseline TPR values (monitoring set)
    :param ann_auc: AUC annotation for the monitoring figure
    :param ann_fpr2: model FPR values (development set)
    :param ann_tpr2: model TPR values (development set)
    :param ns_fpr2: no-skill baseline FPR values (development set)
    :param ns_tpr2: no-skill baseline TPR values (development set)
    :param ann_auc2: AUC annotation for the development figure
    :param name: trace label for the model curve
    :param user: username forwarded to Model_helper for colour preferences
    :return: tuple ``(fig, fig2)`` of the two plotly figures
    """
    # NOTE(review): Model_helper is not defined or imported in this file -
    # presumably provided elsewhere in the project; verify before running.
    helper = Model_helper(username=user)
    colors = helper.preference_maker(2)
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=ann_fpr, y=ann_tpr, mode='lines', name=f'{name}', line=dict(color = colors[0])))
    fig.add_trace(go.Scatter(x=ns_fpr, y=ns_tpr, mode='lines', name='No Skill', line=dict(color = colors[1])))
    fig.add_annotation(x=max(ns_fpr), y=0.05, showarrow=False, text="AUC : " + str(ann_auc))
    fig.update_layout(title_text=f"Monitoring ROC AUC Curve", title_x=0.5)
    fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig.update_xaxes(title_text='False Positive Rate')
    fig.update_yaxes(title_text='True Positive Rate')
    fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))

    fig2 = go.Figure()
    fig2.add_trace(go.Scatter(x=ann_fpr2, y=ann_tpr2, mode='lines', name=f'{name}', line=dict(color=colors[0])))
    fig2.add_trace(go.Scatter(x=ns_fpr2, y=ns_tpr2, mode='lines', name='No Skill', line=dict(color=colors[1])))
    fig2.add_annotation(x=max(ns_fpr2), y=0.05, showarrow=False, text="AUC : " + str(ann_auc2))
    fig2.update_layout(title_text=f"Development ROC AUC Curve", title_x=0.5)
    fig2.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig2.update_xaxes(title_text='False Positive Rate')
    fig2.update_yaxes(title_text='True Positive Rate')
    fig2.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig2.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig2.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))
    return fig, fig2


# ROC AUC function for ModelComparison
def lift_ModelComparison(ann_fpr, ann_tpr, ns_fpr, ns_tpr, ann_auc, name, user ):
    """Build a single ROC AUC figure with a no-skill baseline.

    :param ann_fpr: model FPR values
    :param ann_tpr: model TPR values
    :param ns_fpr: no-skill baseline FPR values
    :param ns_tpr: no-skill baseline TPR values
    :param ann_auc: AUC value shown as an annotation
    :param name: trace label for the model curve
    :param user: username forwarded to Model_helper for colour preferences
    :return: the plotly figure
    """
    # NOTE(review): Model_helper is not defined or imported in this file.
    helper = Model_helper(username=user)
    colors = helper.preference_maker(2)
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=ann_fpr, y=ann_tpr, mode='lines', name=f'{name}', line=dict(color = colors[0])))
    fig.add_trace(go.Scatter(x=ns_fpr, y=ns_tpr, mode='lines', name='No Skill', line=dict(color = colors[1])))
    fig.add_annotation(x=max(ns_fpr), y=0.05, showarrow=False, text="AUC : " + str(ann_auc))
    fig.update_layout(title_text=f"<b>ROC AUC Curve<b>", title_x=0.5)
    fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig.update_xaxes(title_text='False Positive Rate')
    fig.update_yaxes(title_text='True Positive Rate')
    fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))
    return fig


def draw_AUC_ModelComparison(model, target_column, name, user):
    """Compute ROC points for a scored DataFrame and plot them.

    :param model: scored DataFrame with `probability` and the label column.
    :param target_column: name of the label column.
    :param name: trace label for the model curve.
    :param user: username forwarded to Model_helper for colour preferences.
    :return: the plotly figure from :func:`lift_ModelComparison`.
    """
    # Constant zero probability serves as the no-skill baseline.
    model = model.withColumn("ns_prob",lit(0.0))
    # fpr and tpr only possible in spark lr model
    # FIX: the row lambdas hard-coded row['type'] and ignored target_column;
    # use the parameter so the function works for any label column name.
    BCM = model.select(target_column, 'probability').rdd.map(lambda row: (float(row['probability'][1]), float(row[target_column])))
    metrics = CurveMetrics(BCM)
    model_auc = metrics.areaUnderROC
    points = metrics.get_curve('roc')
    model_fpr = np.array([x[0] for x in points])
    model_tpr = np.array([x[1] for x in points])
    BCM = model.select(target_column, 'ns_prob').rdd.map(lambda row: (float(row['ns_prob']), float(row[target_column])))
    metrics = CurveMetrics(BCM)
    points = metrics.get_curve('roc')
    ns_fpr = np.array([x[0] for x in points])
    ns_tpr = np.array([x[1] for x in points])
    return lift_ModelComparison(model_fpr, model_tpr, ns_fpr, ns_tpr, model_auc, name, user)


# ROC AUC Curve
def draw_AUC_OngoingComparison(train_model, test_model, target_column, name, user):
    """Compute ROC points for train and test scorings and plot both.

    :param train_model: scored training DataFrame.
    :param test_model: scored test DataFrame.
    :param target_column: name of the label column.
    :param name: trace label for the model curves.
    :param user: username forwarded to Model_helper for colour preferences.
    :return: tuple of figures from :func:`lift_OngoingComparison`.
    """
    # fpr and tpr only possible in spark lr model
    # ROC for Train Data
    train_model = train_model.withColumn("ns_prob",lit(0.0))
    test_model = test_model.withColumn("ns_prob",lit(0.0))
    # FIX: row['type'] replaced with row[target_column] throughout (the
    # parameter was previously ignored).
    BCM = train_model.select(target_column, 'probability').rdd.map(lambda row: (float(row['probability'][1]), float(row[target_column])))
    metrics = CurveMetrics(BCM)
    train_model_auc = metrics.areaUnderROC
    points = metrics.get_curve('roc')
    train_model_fpr = np.array([x[0] for x in points])
    train_model_tpr = np.array([x[1] for x in points])
    BCM = train_model.select(target_column, 'ns_prob').rdd.map(lambda row: (float(row['ns_prob']), float(row[target_column])))
    metrics = CurveMetrics(BCM)
    points = metrics.get_curve('roc')
    ns_fpr = np.array([x[0] for x in points])
    ns_tpr = np.array([x[1] for x in points])
    # ROC for Test Data
    BCM = test_model.select(target_column, 'probability').rdd.map(lambda row: (float(row['probability'][1]), float(row[target_column])))
    metrics = CurveMetrics(BCM)
    test_model_auc = metrics.areaUnderROC
    points = metrics.get_curve('roc')
    test_model_fpr = np.array([x[0] for x in points])
    test_model_tpr = np.array([x[1] for x in points])
    BCM = test_model.select(target_column, 'ns_prob').rdd.map(lambda row: (float(row['ns_prob']), float(row[target_column])))
    metrics = CurveMetrics(BCM)
    points = metrics.get_curve('roc')
    ns_fpr2 = np.array([x[0] for x in points])
    ns_tpr2 = np.array([x[1] for x in points])
    return lift_OngoingComparison(train_model_fpr, train_model_tpr, ns_fpr, ns_tpr, train_model_auc, test_model_fpr, test_model_tpr, ns_fpr2, ns_tpr2, test_model_auc, name, user)
# -

target_column = "type"
rf_model,lr_model,sv_model, col = train_ml()
train,test = get_data()

tlr_model = lr_model.transform(train)
trf_model = rf_model.transform(train)
tsv_model = sv_model.transform(train)

# +
tlr_model = tlr_model.withColumn("ns_prob",lit(0.0))
# -

tlr_model.printSchema

# +
pyo.plot(draw_AUC_ModelComparison(tlr_model,"type","AJAY",3))
# -

train_model = lr_model.transform(train)
test_model = lr_model.transform(test)

f1, f2 = draw_AUC_OngoingComparison(train_model, test_model, "type", "AJAY", 4)

pyo.plot(f1)
pyo.plot(f2)

# NOTE(review): lr_model is a fitted LogisticRegressionModel, which has no
# .select method - this inspection line looks broken; confirm intent.
type(lr_model.select("prediction").count())


# +
def graph_sen_OngoingComparison(train_model, test_model, target_column):
    """Build specificity-vs-sensitivity figures for train and test scorings.

    :param train_model: scored training DataFrame with a `probability` column.
    :param test_model: scored test DataFrame with a `probability` column.
    :param target_column: name of the label column.
    :return: tuple (fig, fig2) of plotly figures for the two datasets.
    """
    # helper = Model_helper(username=user)
    # colors = helper.preference_maker(2)
    # ROC for train dataset
    # NOTE(review): row['type'] is hard-coded here despite the target_column
    # parameter - works only when the label column is literally 'type'.
    preds = train_model.select(target_column, 'probability').rdd.map(
        lambda row: (float(row['probability'][1]), float(row['type'])))
    points = CurveMetrics(preds).get_curve('roc')
    model_fpr = np.array([x[0] for x in points])
    model_tpr = np.array([x[1] for x in points])
    go.Figure()
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    # Specificity = 1 - TPR plotted against FPR; sensitivity on secondary axis.
    fig.add_trace(go.Scatter(x=model_fpr, y=1 - model_tpr, mode='lines', name='Specificity', line=dict(color='rgba(255,0,0,255)'), ))
    fig.add_trace(go.Scatter(x=model_fpr, y=model_tpr, mode='lines', name='Sensitivity', line=dict(color='rgba(0,0,255,255)')), secondary_y=True, )
    fig.update_layout(title_text=f"Development<br>Specificity vs Sensitivity", title_x=0.5)
    fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig.update_yaxes(title_text='Specificity')
    fig.update_yaxes(title_text='Sensitivity')
    fig.update_xaxes(title_text='Cutoff')
    fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))
    # ROC for test dataset
    preds = test_model.select(target_column, 'probability').rdd.map(
        lambda row: (float(row['probability'][1]), float(row['type'])))
    points = CurveMetrics(preds).get_curve('roc')
    model_fpr = np.array([x[0] for x in points])
    model_tpr = np.array([x[1] for x in points])
    go.Figure()
    fig2 = make_subplots(specs=[[{"secondary_y": True}]])
    fig2.add_trace(go.Scatter(x=model_fpr, y=1 - model_tpr, mode='lines', name='Specificity', line=dict(color='rgba(255,0,0,255)'), ))
    fig2.add_trace(go.Scatter(x=model_fpr, y=model_tpr, mode='lines', name='Sensitivity', line=dict(color='rgba(0,0,255,255)')), secondary_y=True, )
    fig2.update_layout(title_text=f"Monitoring<br>Specificity vs Sensitivity", title_x=0.5)
    fig2.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig2.update_yaxes(title_text='Specificity')
    fig2.update_yaxes(title_text='Sensitivity')
    fig2.update_xaxes(title_text='Cutoff')
    fig2.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig2.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative')
    fig2.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6))
    return fig, fig2


def graph_sen_ModelComparison(test_model,target_column):
    """Build a specificity-vs-sensitivity figure for one scored DataFrame.

    :param test_model: scored DataFrame with a `probability` column.
    :param target_column: name of the label column.
    :return: the plotly figure.
    """
    # helper = Model_helper(username=user)
    # colors = helper.preference_maker(2)
    # ROC for test dataset
    # NOTE(review): row['type'] hard-coded despite target_column parameter.
    preds = test_model.select(target_column, 'probability').rdd.map(
        lambda row: (float(row['probability'][1]), float(row['type'])))
    points = CurveMetrics(preds).get_curve('roc')
    model_fpr = np.array([x[0] for x in points])
    model_tpr = np.array([x[1] for x in points])
    go.Figure()
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(go.Scatter(x=model_fpr, y=1 - model_tpr, mode='lines', name='Specificity', line=dict(color='rgba(255,0,0,255)')))
    fig.add_trace(go.Scatter(x=model_fpr, y=model_tpr, mode='lines', name='Sensitivity', line=dict(color='rgba(0,0,255,255)')),secondary_y=True)
    fig.update_layout(title_text=f"<b>SPECIFICITY VS SENSITIVITY<b>", title_x=0.5)
    fig.update_layout({'plot_bgcolor': 'rgba(0,0,0,0)', 'paper_bgcolor': 'rgba(0,0,0,0)'})
    fig.update_yaxes(title_text='Specificity ')
fig.update_yaxes(title_text='Sensitivity') fig.update_xaxes(title_text='Cutoff') fig.update_xaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative') fig.update_yaxes(showline=True, linewidth=1, linecolor='black', rangemode='nonnegative') fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=-0.23, xanchor="right", x=0.6)) return fig # - t_lr_model = lr_model.transform(test) t_rf_model = rf_model.transform(test) t_sv_model = sv_model.transform(test) f1, f2 = graph_sen_OngoingComparison(tlr_model,t_lr_model,"type") pyo.plot(f1) pyo.plot(f2) from pyspark.ml.feature import VectorAssembler from pyspark.mllib.evaluation import MulticlassMetrics, BinaryClassificationMetrics # MM for cm and BM for roc,auc from pyspark.ml.evaluation import MulticlassClassificationEvaluator # P,R,F-1,TPR,FPR # + def cm_fig(cm): """ :param cm: :return: """ x = ['Positive', 'Negative'] y = ['Positive', 'Negative'] fig = go.Figure() for step in range(9): fig.add_trace(go.Heatmap(visible=False, x=x, y=y, z=cm, colorscale="teal")) fig.data[4].visible = True anno2 = [] for i, row in enumerate(cm): for j, value2 in enumerate(row): anno2.append( { "x": x[j], "y": y[i], "font": {"color": "white", "size": 16}, "text": str(value2), "xref": "x1", "yref": "y1", "showarrow": False } ) # Create and add slider steps = [] for i in range(len(fig.data)): step = dict( method="update", args=[{"visible": [False] * len(fig.data)}, {"title": "Slider switched to Threshold: " + str((i + 1) / 10)}], label="Threshold : " + str((i + 1) / 10), # layout attribute ) step["args"][0]["visible"][i] = True # Toggle trace to "visible" steps.append(step) fig.update_layout(title_text=f"<b>CONFUSION MATRIX<b>", title_x=0.5, annotations=anno2) return fig def perf_metrics (model, data, target_column): features = data.drop(target_column) data = VectorAssembler(inputCols=features.columns, outputCol="feature").transform(data) model = model.transform(data) pred_actual = 
model.select(col("prediction").cast('float'), col(target_column).cast('float')) metrics = MulticlassMetrics(pred_actual.rdd.map(tuple)) fig = cm_fig(metrics.confusionMatrix().toArray()) metrics_data = {} metrics = ["Accuracy", "Precision","Recall","F1-Recall",'ROC-AUC'] for i in metrics: if i == "Accuracy": evaluator = MulticlassClassificationEvaluator(labelCol=target_column, predictionCol="prediction", metricName="accuracy") metrics_data['Accuracy'] = evaluator.evaluate(model) elif i == 'Precision': evaluator = MulticlassClassificationEvaluator(labelCol=target_column, predictionCol="prediction", metricName="precisionByLabel", ) metrics_data['Precision'] = evaluator.evaluate(model) elif i == 'Recall': evaluator = MulticlassClassificationEvaluator(labelCol=target_column, predictionCol="prediction", metricName="recallByLabel", ) metrics_data['Recall'] = evaluator.evaluate(model) elif i == 'F1-Recall': evaluator = MulticlassClassificationEvaluator(labelCol=target_column, predictionCol="prediction", metricName="f1", ) metrics_data['F1-Score'] = evaluator.evaluate(model) elif i == 'ROC-AUC': metrics = BinaryClassificationMetrics(pred_actual.rdd.map(tuple)) metrics_data['areaUnderROC'], metrics_data['areaUnderPR'] = metrics.areaUnderROC, metrics.areaUnderPR return fig,metrics_data # - fig , m =perf_metrics(rf_model, data, "type") from pyspark.sql.functions import col, udf from pyspark.sql.types import FloatType pyo.plot(fig) m "metric name in evaluation " "(f1|accuracy|weightedPrecision|weightedRecall|weightedTruePositiveRate| " "weightedFalsePositiveRate|weightedFMeasure|truePositiveRateByLabel| " "falsePositiveRateByLabel|precisionByLabel|recallByLabel|fMeasureByLabel| " "logLoss|hammingLoss) evaluator = MulticlassClassificationEvaluator(labelCol=target_column, predictionCol="prediction",metricName="logLoss") evaluator.evaluate(t_lr_model)
FM AUC Curve, TPR vs FPR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # https://spacy.io/usage/linguistic-features # + import spacy nlp = spacy.load('de') # - # ## Tokenizing text = 'Apple erwägt ein Startup in UK für zehn Millarden Dollar zu kaufen.' doc = nlp(text) type(doc) # + # https://spacy.io/usage/linguistic-features # all attributes: https://spacy.io/api/token#attributes for token in doc: print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_, token.shape_, token.is_alpha, token.is_stop) # - spacy.explain('ADP') # ### Similarities based on word embeddings # + # https://spacy.io/models/de#de_core_news_sm # https://spacy.io/usage/spacy-101#vectors-similarity for token in doc: for token2 in doc: if token2.pos_ == 'NOUN': print(token.text, token2.text, token.similarity(token2)) # - # ## Part of Speech Tagging (POS) # + from spacy import displacy # https://spacy.io/usage/visualizers # - displacy.render(doc, style='dep', jupyter=True) # displacy.serve(doc, style='dep') spacy.explain('sb') spacy.explain('oc') spacy.explain('oa') spacy.explain('nk') spacy.explain('mnr') spacy.explain('pm') spacy.explain('mo') spacy.explain('PROPN') spacy.explain('ADP') spacy.explain('PART') # ## Dependencies / Tokens actually form a tree for chunk in doc.noun_chunks: print(chunk.text, chunk.root.text, chunk.root.dep_, chunk.root.head.text) for token in doc: print(token.text, token.dep_, token.head.text, token.head.pos_, [child for child in token.children]) # ## Entities displacy.render(doc, style='ent', jupyter=True)
notebooks/nlp/spacy-sandbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import urllib.request
import json
from tqdm import tnrange, tqdm_notebook
import time


# +
def youtubeLinkFromPvList(pv_list):
    """Return the first youtu.be URL in a VocaDB 'pvs' list, or '' if none.

    :param pv_list: list of PV dicts, each with a 'url' key
    (parameter renamed from `list`, which shadowed the builtin)
    """
    for pv in pv_list:
        if 'youtu.be' in pv['url']:
            return pv['url']
    return ''


def genLinkForGet(ind):
    """Build the VocaDB songs-API URL for the page starting at offset *ind*."""
    return ('https://vocadb.net/api/songs?start='
            + str(ind)
            + '&getTotalCount=true&maxResults=50&fields=pvs&lang=Default'
              '&nameMatchMode=Auto&sort=RatingScore&childTags=false'
              '&artistParticipationStatus=Everything&onlyWithPvs=false')


# +
finalPvList = []
totalSongs = 1000
maxPerReq = 50
# -

# Page through the API, collecting every YouTube PV link.
for i in tqdm_notebook(range(int(totalSongs / maxPerReq)), desc='outer loop'):
    link = genLinkForGet(i * maxPerReq)
    contents = json.loads(urllib.request.urlopen(link).read())
    for item in tqdm_notebook(contents['items'], desc='inner loop', leave=False):
        ytLink = youtubeLinkFromPvList(item['pvs'])
        if ytLink:
            finalPvList.append(ytLink)

# Persist the links.  Context managers guarantee the handles are closed
# (the original left both files open).
with open('pvlist.txt', 'w') as outfile:
    for item in finalPvList:
        outfile.write("%s\n" % item)

with open('pvlist.txt', 'r') as infile:
    # strip() drops the trailing newline that readlines() keeps, so a clean
    # URL (not 'URL\n') is handed to the downloader.
    finalPvList = [line.strip() for line in infile]

# +
import subprocess
from threading import Thread
from queue import Queue

limit = 3  # number of concurrent downloader threads


def worker(queue):
    """Consume links from *queue* until a None sentinel, saving each as mp3.

    Security fix: the command is passed as an argument list with the default
    shell=False, so the URL can no longer be interpreted by a shell (the
    original interpolated it into a shell string).  Failed downloads are
    skipped -- but only on the specific download failure, not a bare except.
    """
    for link in iter(queue.get, None):
        try:
            subprocess.check_call(
                ['youtube-dl',
                 '-o', 'vocaloid/%(title)s.%(ext)s',
                 '--download-archive', 'downloaded.txt',
                 '--no-post-overwrites', '-ciwx',
                 '--audio-format', 'mp3',
                 link],
                stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            continue


# +
downloadQueue = Queue()
threads = [Thread(target=worker, args=(downloadQueue,)) for _ in range(limit)]

for link in finalPvList:  # feed commands to threads
    downloadQueue.put_nowait(link)

for t in threads:  # start workers
    t.daemon = True
    t.start()

for _ in threads:
    downloadQueue.put(None)  # signal no more commands

for t in threads:
    t.join()  # wait for completion
# -
vocaloid_dl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Django Shell-Plus # language: python # name: django_extensions # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Supercuts" data-toc-modified-id="Supercuts-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Supercuts</a></span><ul class="toc-item"><li><span><a href="#Get-all-intervals-of-person-P" data-toc-modified-id="Get-all-intervals-of-person-P-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Get all intervals of person P</a></span></li><li><span><a href="#For-each-word-W-in-sentence,-create-list-of-intervals-for-W" data-toc-modified-id="For-each-word-W-in-sentence,-create-list-of-intervals-for-W-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>For each word W in sentence, create list of intervals for W</a></span></li><li><span><a href="#For-each-word-W,-intersect-its-interval-list-with-person-P-intervals-to-get-P-+-W-intervals" data-toc-modified-id="For-each-word-W,-intersect-its-interval-list-with-person-P-intervals-to-get-P-+-W-intervals-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>For each word W, intersect its interval list with person P intervals to get P + W intervals</a></span></li><li><span><a href="#Get-all-intervals-where-there-is-exactly-one-face-on-screen" data-toc-modified-id="Get-all-intervals-where-there-is-exactly-one-face-on-screen-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Get all intervals where there is exactly one face on screen</a></span></li><li><span><a href="#For-each-word-W-in-sentence,-intersect-P-with-word-intervals-with-one-face-intervals" data-toc-modified-id="For-each-word-W-in-sentence,-intersect-P-with-word-intervals-with-one-face-intervals-1.5"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>For each word W 
in sentence, intersect P with word intervals with one face intervals</a></span></li><li><span><a href="#Random-sample-one-element-from-each-P-+-W-alone-interval-list" data-toc-modified-id="Random-sample-one-element-from-each-P-+-W-alone-interval-list-1.6"><span class="toc-item-num">1.6&nbsp;&nbsp;</span>Random sample one element from each P + W alone interval list</a></span></li></ul></li></ul></div> # - # # Supercuts # # Our goal is to get intervals for a short supercut video of a certain person (e.g, <NAME>) saying a funny sentence, like: # # ``` # P = a person in the dataset # sentence = "Intel is great because they fund Stanford." # ``` # # We'll use `rekall` to get the candidate intervals and the caption index to get caption intervals. Make sure the caption index and `rekall` are installed in your Esper instance before running this notebook. If they aren't, the imports will fail. # # Strategy: # 1. Get all intervals where person P is on screen # 2. For each word W in sentence, create list of intervals for W # 3. For each word W, intersect its interval list with person P intervals to get P + W intervals # 4. Get all intervals where there is exactly one face on screen # 5. For each word W, intersect P + W intervals with one face intervals to get P + W alone intervals # 6. Random sample one element from each P + W alone interval list # + # import rekall from esper.rekall import * from rekall.video_interval_collection import VideoIntervalCollection from rekall.interval_list import Interval, IntervalList from rekall.temporal_predicates import * from rekall.spatial_predicates import * from esper.utility import * # import caption search from esper.captions import * # import face identities for person search from query.models import Video, Face, FaceIdentity # import esper widget for debugging from esper.prelude import esper_widget import random import os import pickle import tempfile from multiprocessing import Pool # - # Set these parameters for the notebook. 
person_name = "<NAME>" sentence = "Make america great again" # video_list = pickle.load(open('/app/data/tvnews_std_sample.pkl', 'rb'))['sample_100'] videos = [Video.objects.filter(path__contains=video_name)[0] for video_name in video_list] video_ids = [video.id for video in videos if video.threeyears_dataset] print(len(video_ids)) # ## Get all intervals of person P person_intrvllists = qs_to_intrvllists( FaceIdentity.objects # .filter(face__shot__video_id__in=video_ids) .filter(identity__name=person_name.lower()) .filter(probability__gt=0.99) .annotate(video_id=F("face__shot__video_id")) .annotate(shot_id=F("face__shot_id")) .annotate(min_frame=F("face__shot__min_frame")) .annotate(max_frame=F("face__shot__max_frame")), schema={ 'start': 'min_frame', 'end': 'max_frame', 'payload': 'shot_id' }) person_intrvlcol = VideoIntervalCollection(person_intrvllists) print("Got all occurrences of {}".format(person_name)) # ## For each word W in sentence, create list of intervals for W # + # helper function for 2. 
to convert caption search to dict mapping from video ID to IntervalList def caption_to_intrvllists(search_term, dilation=0, video_ids=None): results = topic_search([search_term], dilation) if video_ids == None: videos = {v.id: v for v in Video.objects.all()} else: videos = {v.id: v for v in Video.objects.filter(id__in=video_ids).all()} def convert_time(k, t): return int(t * videos[k].fps) segments_by_video = {} flattened = [ (v.id, convert_time(v.id, l.start), convert_time(v.id, l.end)) for v in results.documents if v.id in videos for l in v.locations ] for video_id, t1, t2 in flattened: if video_id in segments_by_video: segments_by_video[video_id].append((t1, t2, 0)) else: segments_by_video[video_id] = [(t1, t2, 0)] for video in segments_by_video: segments_by_video[video] = IntervalList(segments_by_video[video]) print("Got all occurrences of the word {} by searching".format(search_term)) return segments_by_video # scans for search terms across videos in parallel def scan_for_search_terms_intrvllist(search_terms, video_ids, dilation=0): results = scan_for_ngrams_in_parallel(search_terms, video_ids) search_terms_intrvllists = [{} for term in search_terms] videos = {v.id: v for v in Video.objects.filter(id__in=video_ids).all()} def convert_time(k, t): return int(t * videos[k].fps) for video_id, result in results: if result == []: continue for i, term in enumerate(search_terms): term_result = result[i] interval_list = IntervalList([ (convert_time(video_id, start - dilation), convert_time(video_id, end + dilation), 0) for start, end in term_result ]) if interval_list.size() > 0: search_terms_intrvllists[i][video_id] = interval_list print("Got all occurrences of the words {} by scanning".format(search_terms)) return search_terms_intrvllists import pysrt def scan_aligned_transcript_intrvllist(search_terms, video_ids): word_intrvllists = {term: {} for term in search_terms} for video_id in video_ids: video = Video.objects.filter(id=video_id)[0] video_name = 
os.path.basename(video.path)[:-4] print(video_name) word_lists = {term: [] for term in search_terms} transcript_path = os.path.join('/app/result/aligned_transcript_100/', video_name+'.word.srt') if not os.path.exists(transcript_path): continue subs = pysrt.open(transcript_path) for sub in subs: for term in search_terms: if term in sub.text: word_lists[term].append((time2second(tuple(sub.start)[:4])*video.fps, time2second(tuple(sub.end)[:4])*video.fps, 0)) # print(word_lists) for term, value in word_lists.items(): if len(value) > 0: word_intrvllists[term][video_id] = IntervalList(value) return [ VideoIntervalCollection(intrvllist) for intrvllist in word_intrvllists.values()] # + # search words from caption index # Get extremely frequent words EXTREMELY_FREQUENT_WORDS = { w.token for w in caption_util.frequent_words(LEXICON, 99.997) } # Split words into words to search by index and words to scan through documents for words = [word.upper() for word in sentence.split()] words_to_scan = set() words_to_search_by_index = set() for word in words: if word in EXTREMELY_FREQUENT_WORDS: words_to_scan.add(word) else: words_to_search_by_index.add(word) words_to_scan = list(words_to_scan) words_to_search_by_index = list(words_to_search_by_index) video_ids = list(person_intrvllists.keys()) scanned_words = caption_scan_to_intrvllists( scan_for_ngrams_in_parallel(words_to_scan, video_ids), words_to_scan, video_ids) searched_words = [ topic_search_to_intrvllists(topic_search([word], 0), video_ids) for word in words_to_search_by_index ] sentence_intrvllists = [ scanned_words[words_to_scan.index(word)] if word in words_to_scan else searched_words[words_to_search_by_index.index(word)] for word in words ] sentence_intrvlcol = [VideoIntervalCollection(intrvllist) for intrvllist in sentence_intrvllists] # + # search words from aligned transcript words = [word.upper() for word in sentence.split()] sentence_intrvlcol = scan_aligned_transcript_intrvllist(words, video_ids) # - # ## For each 
word W, intersect its interval list with person P intervals to get P + W intervals # + # person_with_sentence_intrvlcol = [] # for i, word_intrvlcol in enumerate(sentence_intrvlcol): # person_with_word_intrvlcol = person_intrvlcol.overlaps(word_intrvlcol) # print(len(person_with_word_intrvlcol.get_allintervals())) # if len(person_with_word_intrvlcol.get_allintervals()) == 0: # print("Could not find instance of person {} with word {}".format(person_name, words[i])) # else: # person_with_sentence_intrvlcol.append(person_with_word_intrvlcol) person_with_sentence_intrvlcol = sentence_intrvlcol # - # ## Get all intervals where there is exactly one face on screen # + from rekall.parsers import in_array, bbox_payload_parser from rekall.merge_ops import payload_plus from rekall.payload_predicates import payload_satisfies from rekall.list_predicates import length_exactly relevant_shots = set() for person_with_word_intrvlcol in person_with_sentence_intrvlcol: for intrvllist in list(person_with_word_intrvlcol.get_allintervals().values()): for interval in intrvllist.get_intervals(): relevant_shots.add(interval.get_payload()) print(len(relevant_shots)) faces = Face.objects.filter(shot__in=list(relevant_shots)) \ .annotate(video_id=F('shot__video_id')) \ .annotate(min_frame=F('shot__min_frame')) \ .annotate(max_frame=F('shot__max_frame')) # Materialize all the faces and load them into rekall with bounding box payloads # Then coalesce them so that all faces in the same frame are in the same interval # NOTE that this is slow right now since we're loading all faces! 
oneface_intrvlcol = VideoIntervalCollection.from_django_qs( faces, with_payload=in_array( bbox_payload_parser(VideoIntervalCollection.django_accessor)) ).coalesce(payload_merge_op=payload_plus).filter(payload_satisfies(length_exactly(1))) # - len(oneface_intrvlcol.get_allintervals()) # ## For each word W in sentence, intersect P with word intervals with one face intervals person_with_sentence_alone_intrvlcol = [] for i, person_with_word_intrvlcol in enumerate(person_with_sentence_intrvlcol): person_alone_intrvlcol = person_with_word_intrvlcol.overlaps(oneface_intrvlcol) print(len(person_alone_intrvlcol.get_allintervals())) if len(person_alone_intrvlcol.get_allintervals()) == 0: print("Could not find instance of person {} along with word {}".format(person_name, words[i])) else: person_with_sentence_alone_intrvlcol.append(person_alone_intrvlcol) # + supercut_intervals_all = [] for i, person_with_word_alone_intrvlcol in enumerate(sentence_intrvlcol): supercut_intervals = [] for video, intrvllist in person_with_word_alone_intrvlcol.intervals.items(): for interval in intrvllist.get_intervals(): supercut_intervals.append((video, interval.get_start(), interval.get_end())) supercut_intervals_all.append(supercut_intervals) # - # ## Random sample one element from each P + W alone interval list supercut_intervals = [random.choice(intervals) for intervals in supercut_intervals_all] print("Supercut intervals: ", supercut_intervals) # Display the supercut intervals in Esper widget for debugging supercut_intrvllists = {} for video, start, end in supercut_intervals: supercut_intrvllists[video] = IntervalList([(start, end, 0)]) esper_widget(intrvllists_to_result(supercut_intrvllists, video_order = [video for video, start, end in supercut_intervals])) # + # make supercut video supercut_path = '/app/result/supercut.mp4' local_cut_list = [] local_cut_list_path = tempfile.NamedTemporaryFile(suffix='.txt').name.replace('tmp/', 'app/result/') flist = open(local_cut_list_path, 'w') for 
video_id, sfid, efid in supercut_intervals: video = Video.objects.filter(id=video_id)[0] filename = tempfile.NamedTemporaryFile(suffix='.mp4').name.replace('tmp/', 'app/result/') cmd = 'ffmpeg -y -i ' + '\"' + video.url() + '\"' + ' -async 1 ' cmd += '-ss {:s} -t {:s} '.format(second2time(sfid/video.fps, '.'), second2time((efid-sfid)/video.fps, '.')) cmd += filename print(cmd) os.system(cmd) local_cut_list.append(filename) flist.write('file ' + filename + '\n') flist.close() os.system('ffmpeg -y -f concat -safe 0 -i ' + local_cut_list_path + ' -c copy ' + supercut_path)
app/notebooks/supercuts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import tensorflow as tf import math import sys sys.path.append("..") import d2lzh_tensorflow2 as d2l # + def gd(eta): x = 10 results = [x] for i in range(10): x -= eta * 2 * x # f(x) = x * x的导数为f'(x) = 2 * x results.append(x) print('epoch 10, x:', x) return results res = gd(0.2) # + def show_trace(res): n = max(abs(min(res)), abs(max(res)), 10) f_line = np.arange(-n, n, 0.1) d2l.set_figsize() d2l.plt.plot(f_line, [x * x for x in f_line]) d2l.plt.plot(res, [x * x for x in res], '-o') d2l.plt.xlabel('x') d2l.plt.ylabel('f(x)') show_trace(res) # - show_trace(gd(0.05)) show_trace(gd(1.1)) # + def train_2d(trainer): # 本函数将保存在d2lzh_tensorflow2包中方便以后使用 x1, x2, s1, s2 = -5, -2, 0, 0 # s1和s2是自变量状态,本章后续几节会使用 results = [(x1, x2)] for i in range(20): x1, x2, s1, s2 = trainer(x1, x2, s1, s2) results.append((x1, x2)) print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2)) return results def show_trace_2d(f, results): # 本函数将保存在d2lzh_tensorflow2包中方便以后使用 d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e') x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1)) d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4') d2l.plt.xlabel('x1') d2l.plt.ylabel('x2') # + eta = 0.1 def f_2d(x1, x2): # 目标函数 return x1 ** 2 + 2 * x2 ** 2 def gd_2d(x1, x2, s1, s2): return (x1 - eta * 2 * x1, x2 - eta * 4 * x2, 0, 0) show_trace_2d(f_2d, train_2d(gd_2d)) # + def sgd_2d(x1, x2, s1, s2): return (x1 - eta * (2 * x1 + np.random.normal(0.1)), x2 - eta * (4 * x2 + np.random.normal(0.1)), 0, 0) show_trace_2d(f_2d, train_2d(sgd_2d))
code/chapter07_optimization/7.2_gd-sgd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Primer on CYTOXNET ToxModels # Initializing, training, evaluating, and visualizing the results of package machine learning models. # *** # *** # The package includes a number of model types already, but natively supports any model class from [deepchem](https://deepchem.io/) or [sklearn](https://scikit-learn.org/stable/). These are all accesed through the `ToxModel` class. This class is effectively a wrapper for both deepchem and sklearn models in one place, along with some additional functionality including and quicker API for calling for desired metrics, and visualization methods. from cytoxnet.models.models import ToxModel # + jupyter={"outputs_hidden": true} tags=[] help(ToxModel) # - # We pass to the model the name of the wrapped model to use, a list of `deepchem` transformers that were used to prepare the data (if any), a list of tasks corresponding to the model's targets, and finally __any keyword arguments to pass to the wrapped model initialization__. 
# *** # ### Minimally prepare a dataset to use for demonstration # See the dataprep example notebook for functionality and options in preparing data # ### <span style='color:red'>NEED TO UPDATE WITH DATABASE CALL</span> import cytoxnet.dataprep.io import cytoxnet.dataprep.dataprep import cytoxnet.dataprep.featurize import pandas as pd fish = cytoxnet.dataprep.io.load_data('lunghini_fish_LC50') algea = cytoxnet.dataprep.io.load_data('lunghini_algea_EC50') df = pd.concat([fish, algea]).reset_index() df = cytoxnet.dataprep.featurize.add_features(df, method='ConvMolFeaturizer') data = cytoxnet.dataprep.dataprep.convert_to_dataset( df, X_col='ConvMolFeaturizer', y_col=['algea_EC50', 'fish_LC50'] ) data = cytoxnet.dataprep.dataprep.handle_sparsity(data) data, transformers = cytoxnet.dataprep.dataprep.data_transformation( data, ['MinMaxTransformer'], to_transform='y' ) # *** # ### Initializing a model # The models currently wrapped in the class can be listed with the `help` method. ToxModel.help() # We can additionally ask for the wrapped model's documentation to help with initialization; in this example, let's use a graph CNN. # + jupyter={"outputs_hidden": true} tags=[] ToxModel.help('GraphCNN') # - # If we do not pass a list of tasks to the model, it will by default assume that there will only be a single target, and if we do not pass any transformers, some options within the class such as untransforming output data for evaluation metrics will not be available. We can pass keywords to the wrapped model class here. Let's specify a dense regressor layer with 12 neurons. We also ask for a regression task, otherwise classification is chosen by default. 
my_model = ToxModel('GraphCNN', dense_layer_size=12, tasks=['algea_EC50', 'fish_LC50'], transformers=transformers, mode='regression' ) # *** # ### Fitting the model # Fitting whatever model being used is simply a matter off passing the `deepchem` dataset to train on, and any keyword arguments for the wrapped model's fit method. Here we specify 25 epochs. # + jupyter={"outputs_hidden": true} tags=[] my_model.fit(data, nb_epoch=25) # - # *** # ### Predicting with the model # Call the model's predict method with a dataset object (containing at least X data of the type/shape expected by the model) to return a prediction vector of our targets, in this case 2 columns for two targets. We also pass `untransform=True` causing the predictions to be untransformed by the transformers stored in the model, otherwise predictions would not be interpritable to the desired output space. # + jupyter={"outputs_hidden": true} tags=[] my_model.predict(data, untransform=True) # - # *** # ### Evaluating the model # We can ask for metrics to be used for evaluation of a test set with both X and y data. Again we can pass `untransform=True` because we initialized the model with the transformers initially used on the data. Because this task had multiple targets and a sparse target matrices with weight masks, we can specify `use_sample_weights=True` to indicate that predictions made for data we do not have should not be compared to the masked 0.0 value my_model.evaluate(data, metrics=['mean_absolute_error'], untransform=True, use_sample_weights=True) # We can also plot predictions for one of the tasks directly from the class. my_model.visualize('pair_predict', data, untransform=True, task='algea_EC50') # We note the sharp line of data with true value=0 are the sparse points in the data that were replaced by zeros and deweighted for training and evaluation.
examples/using_ToxModels.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Visualizing simple geospatial data # In this first exercise we want to see the simplictiy of geoplotlib in its fullest. # Loading data and displaying it with pre-defined plots is really simple but already enables us to get incredible insights into our datasets. # # We'll be looking at a dataset containing all poaching points in RUNAPA. It can be downloaded here: https://opendata.socrata.com/Education/All-Poaching-Points/96zb-vb4w # Please note that it's a rather small dataset. We'll be looking at bigger ones in further activites. # #### Loading our dataset # importing the necessary dependencies import geoplotlib from geoplotlib.utils import read_csv # loading the Dataset with geoplotlib dataset = read_csv('./data/poaching_points_cleaned.csv') # looking at the dataset structure dataset # **Note:** # Geoplotlib has a built-in way to load datasets which is really convenient if you don't want to import additional libraries like Pandas. # Note that geoplotlib is compatible with Pandas dataframes, too. So if you need to do some pre-processing with the data it might make sense to use Pandas right away. # + import pandas as pd pd_dataset = pd.read_csv('./data/poaching_points_cleaned.csv') pd_dataset.head() # - # --- # #### Simple plots with geoplotlib # For now we want to only focus on columns `lat` and `lon` since those are the column names geoplotlib will look out for when calling its plotting methods. # Right now, when only looking at the stub of data displayed with `dataset.head()` we see some numbers displayed. However we are not able to visually map them to some specific area on the globe. # # We want to get some insights into our data, rather than looking at the values themselves. 
# plotting our dataset with points geoplotlib.dot(dataset) geoplotlib.show() # **Note:** # Here we can see the automatic selection of the bounding box for what will be shown. # We'll take a look at how to change the default bounding box with the `BoundingBox` class of geoplotlib later on. # plotting our dataset as a histogram geoplotlib.hist(dataset, binsize=20) geoplotlib.show() # Histogram plots will gives us a better understanding about the distribution of density of our dataset. # Looking at the above plot, we can see that there are some "hotspot". # plotting a voronoi map geoplotlib.voronoi(dataset, cmap='Blues_r', max_area=1e5, alpha=255) geoplotlib.show() # Looking at either the histogram or voronoi visualizations gives us a better understanding of not only the areas in which poaching is present but also shows us that there are areas where it's more likely to be poached in. # # Insights, also, are about finding out the right questions to ask to understand your subject better. # One of the questions that come to mind when looking at the given dataset is, what this rather big area without recorded poaches tells us about the kind of animals present there.
Lesson05/Exercise06/exercise06_solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# SPY GAME: Write a function that takes in a list of integers and returns True if it contains 007 in order
def spy_game(nums):
    """Return True if the digits 0, 0, 7 appear in `nums` in that order."""
    # The trailing 'x' sentinel keeps the list non-empty once 0-0-7 has been
    # matched, so "fully matched" is exactly len(remaining) == 1.
    remaining = [0, 0, 7, 'x']
    for value in nums:
        if value == remaining[0]:
            del remaining[0]
    return len(remaining) == 1
# -

spy_game([1,2,4,0,0,7,5])

spy_game([1,0,2,4,0,5,7])

spy_game([1,7,2,0,4,5,0])

# +

# +
# COUNT PRIMES: Write a function that returns the number of prime numbers
# that exist up to and including a given number
def count_primes(num):
    """Count the primes <= num by trial division over every odd factor."""
    if num < 2:  # for the case of num = 0 or 1
        return 0
    primes = [2]
    candidate = 3
    while candidate <= num:
        # candidate is prime when no odd factor in 3..candidate-1 divides it
        if all(candidate % factor for factor in range(3, candidate, 2)):
            primes.append(candidate)
        candidate += 2
    print(primes)
    return len(primes)
# -

count_primes(100)

def count_primes2(num):
    """Count the primes <= num, dividing only by previously found primes."""
    if num < 2:
        return 0
    primes = [2]
    candidate = 3
    while candidate <= num:
        if all(candidate % prime for prime in primes):  # use the primes list!
            primes.append(candidate)
        candidate += 2
    print(primes)
    return len(primes)

count_primes(100)

# PRINT BIG: Write a function that takes in a single letter, and returns a 5x5 representation of that letter
def print_big(letter):
    """Print a five-row block rendering of one of the letters A-E."""
    patterns = {1:' * ',2:' * * ',3:'* *',4:'*****',5:'**** ',6:' * ',7:' * ',8:'* * ',9:'* '}
    alphabet = {'A':[1,2,4,3,3],'B':[5,3,5,3,5],'C':[4,9,9,9,4],'D':[5,3,3,3,5],'E':[4,9,4,9,4]}
    print('\n'.join(patterns[row] for row in alphabet[letter.upper()]))

print_big('B')
Jupyter/spy_game.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# # Libraries needed

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from keras.models import Sequential
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from keras.models import model_from_json
from sklearn.metrics import confusion_matrix
from urad_Preprocessing import get_data_preprocessing

# number of days in one window: 6 lagged inputs predict the 7th day
time_prediction = 7

# Prepare the dataset
np.random.seed(5)  # set pseudo checkpoint (reproducible runs)
dataset = pd.read_csv("fb_ads_preprocess.csv", header=None)
dataset = dataset.iloc[:, -1]          # keep only the target (CPC) column
len_dataset = len(dataset)
dataset = dataset.values.reshape(len_dataset, 1)
print(dataset.shape)


# # Function List ("Data splitting (training and testing)", "ANN", "LSTM", "dataset converter")

def convert_cpc(dataset, time_prediction=7):
    """Convert a (n, 1) series into sliding windows.

    Each sample is time_prediction-1 consecutive values; the label is the
    value that follows the window. Returns (dataX, dataY) as numpy arrays.
    """
    dataX, dataY = [], []
    for i in range(len(dataset) - time_prediction + 1):
        dataX.append(dataset[i:i + time_prediction - 1])
        dataY.append(dataset[i + time_prediction - 1])
    return np.array(dataX), np.array(dataY)


def divide_data(dataset, time_prediction):
    """Window the series and split it 80/20 into train and test sets.

    X arrays are reshaped to (samples, 1 timestep, features) as expected by
    the LSTM input layer.
    """
    datasetX, datasetY = convert_cpc(dataset, time_prediction)
    train_size = int(len_dataset * 0.8)
    test_size = len_dataset - train_size
    trainX, trainY, testX, testY = (datasetX[:train_size], datasetY[:train_size],
                                    datasetX[train_size:], datasetY[train_size:])
    trainX = trainX.reshape(-1, 1, trainX.shape[1])
    testX = testX.reshape(-1, 1, testX.shape[1])
    print(datasetX.shape, trainX.shape, testX.shape, len_dataset)
    return trainX, trainY, testX, testY


# +
# Make the data into a classification problem (up and down)
def substract(x1, x0):
    """True when the series moved up from x0 to x1."""
    return (x1 - x0) > 0


def regression_to_class(inp):
    """Map a (n, 1) value array to a (n, 1) up/down array (first entry stays 0)."""
    len_input = len(inp)
    output = np.zeros_like(inp)
    for i in range(1, len_input):
        output[i, 0] = substract(inp[i, 0], inp[i - 1, 0])
    return output
# -

# +
# Build the LSTM model
batch_size = 128

# reshape input to be [samples, time steps, features]
trainX, trainY, testX, testY = divide_data(dataset, time_prediction)


def build_model_LSTM(optimizer_value, time_prediction):
    """Compile a single-layer LSTM regressor for one window of lagged CPC values."""
    print("LSTM", trainX.shape, trainY.shape, testX.shape, testY.shape)
    input_shape = np.array([1, time_prediction - 1])
    lstm_model = Sequential()
    lstm_model.add(LSTM(time_prediction * 4, input_shape=input_shape))
    lstm_model.add(Dropout(0.5))
    lstm_model.add(Dense(1))
    adam = Adam(decay=optimizer_value * 0.1, lr=optimizer_value)
    lstm_model.compile(loss='mean_squared_error', optimizer=adam)
    return lstm_model
# -

# # Validation

# Do validation and see its performance: the training data is divided again
# into 80% train and 20% validation.
lstm_model = build_model_LSTM(0.001, time_prediction)
history = lstm_model.fit(trainX, trainY, epochs=1000, batch_size=batch_size,
                         validation_split=0.2, shuffle=True)

# plot training and validation loss using MSE
print(history.history.keys())
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('ANN: model missing accuracy')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.ylim((.0, 0.05))
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# # Prediction

# Do the prediction and re-evaluate the model performance (re-fit on the full
# training set, without the validation split this time).
lstm_model = build_model_LSTM(0.001, time_prediction)
lstm_model.fit(trainX, trainY, epochs=1000, batch_size=batch_size)
train_predict = lstm_model.predict(trainX)
test_predict = lstm_model.predict(testX)

# Find the accuracy of our model as an up/down classifier
y_label = np.append(trainY, testY, axis=0).reshape(-1, 1)
train_predict_class, test_predict_class = regression_to_class(train_predict), regression_to_class(test_predict)
real_class = regression_to_class(y_label)
print(real_class.shape, train_predict_class.shape, test_predict_class.shape)
print(train_predict.shape, real_class.shape)
print(train_predict_class[:5, 0], real_class[:5, 0])
# tn, fp, fn, tp = confusion_matrix(real_class[len(train_predict):-1,0], test_predict_class[1:,0]).ravel()
tn, fp, fn, tp = confusion_matrix(real_class[:len(train_predict) - 1], train_predict_class[1:]).ravel()
print(tn, fp, fn, tp)
print("Performance's Accuracy: ", (tn + tp) * 100 / (tn + fp + fn + tp))
print("Sensitivity or True Positive Rate: ", tp * 100 / (tp + fn))
print("Specificity or True Negative Rate: ", tn * 100 / (tn + fp))

# +
# ---------- plot the prediction result
# shift train predictions for plotting (windows consume the first
# time_prediction-1 points, so predictions start there)
time_prediction = 7
train_predict_plot = np.empty_like(dataset)
train_predict_plot[:, :] = np.nan
train_predict_plot[time_prediction - 1:len(train_predict) + time_prediction - 1, :] = train_predict

# shift test predictions for plotting
test_predict_plot = np.empty_like(dataset)
test_predict_plot[:, :] = np.nan
test_predict_plot[len(train_predict) + time_prediction - 1:, :] = test_predict

# plot baseline and predictions
plot_dataset = np.zeros((len_dataset, 1))
plot_dataset[:, :] = np.nan
plot_dataset[time_prediction - 1:] = y_label
plt.plot(plot_dataset)
plt.plot(train_predict_plot)
plt.plot(test_predict_plot)
plt.title('ANN: Prediction Results')
plt.ylabel('CPC')
plt.xlabel('Dataset')
plt.legend(['dataset', 'training accuracy', 'testing accuracy'], loc='upper left')
plt.show()
# -

# # Forecasting

# Forecasting the CPC for n-days: feed each prediction back into the input
# window to roll the forecast forward.
forecast, inp, predict = np.array([]), .0, .0
adding = 7
for i in range(time_prediction + adding):
    if i > 0:
        # slide the window: drop the oldest lag, append the latest prediction
        inp = np.concatenate([inp[-1, 0, 1:], predict[0]]).reshape((1, 1, time_prediction - 1))
    else:
        # seed the window from the last training sample and its prediction
        inp = np.concatenate([trainX[-1, 0, 1:], train_predict[-1]]).reshape((1, 1, time_prediction - 1))
    predict = lstm_model.predict(inp)
    forecast = np.append(forecast, predict)

# +
# ------------ plot the forecasting result
plot_dataset = np.zeros((len_dataset, 1))
plot_dataset[:, :] = np.nan
plot_dataset[time_prediction - 1:] = y_label
plt.plot(plot_dataset)

# shift test forecasting for plotting
forecast = forecast.reshape((time_prediction + adding, 1))
forecast_plot = np.empty_like(dataset)
forecast_plot[:, :] = np.nan
forecast_plot[len(train_predict) + time_prediction:len(train_predict) + (2 * time_prediction) + adding, :] = forecast
plt.plot(train_predict_plot)
plt.plot(forecast_plot)
plt.title('LSTM: Prediction Results')
plt.ylabel('CPC')
plt.xlabel('Dataset')
plt.legend(['dataset', 'training', 'forecasting'], loc='upper left')
plt.show()
# -

forecast_result = regression_to_class(forecast)
test_data = regression_to_class(testY)
print(np.append(forecast_result[:14], test_data[:14], axis=1))

# # Save the trained NN-weights, and Load it to Predict the Input

# Save the model's weights
# +
model_json = lstm_model.to_json()  # for LSTM
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
# FIX: was `model.save_weights(...)` — `model` is undefined at this point in
# execution order; the trained network is `lstm_model`.
lstm_model.save_weights("model.h5")
print("Saved model to disk")
# -

# Load and compile the model's weights
# +
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")

# compile loaded model
optimizer_value = 0.0001
adam = Adam(decay=optimizer_value * 0.05, lr=optimizer_value)
loaded_model.compile(loss="mean_squared_error", optimizer=adam)
# -

# FIX: was `model.predict(...)` — the reloaded network is `loaded_model`.
train_predict_loaded_model = loaded_model.predict(trainX)
test_predict_loaded_model = loaded_model.predict(testX)
print(train_predict_loaded_model.shape, test_predict_loaded_model.shape)

# # Fine-Tuning process
# ------------ LSTM: fine tuning using GridSearch
# FIX: build_model_LSTM requires a `time_prediction` argument; pass it as a
# fixed sk_param so GridSearchCV only varies the grid below (without it,
# fit() fails with a missing-argument error).
model = KerasRegressor(build_fn=build_model_LSTM, time_prediction=time_prediction)

# NOTE(review): 'nb_epoch' is the legacy Keras 1 spelling; newer wrappers
# expect 'epochs' — confirm against the installed keras version.
parameters = {'batch_size': [128, 256, 512],
              'nb_epoch': [100, 1000, 10000],
              'optimizer_value': [0.01, 0.001, 0.0001]}
              # 'hidden_layer' : [1, 3, 5]}
fine_tune = GridSearchCV(estimator=model, param_grid=parameters, cv=10)
fine_tune_result = fine_tune.fit(trainX, trainY)
best_params = fine_tune_result.best_params_
best_accuracy = fine_tune_result.best_score_

# LSTM: print the loss and the best parameters
means = fine_tune_result.cv_results_['mean_test_score']
stds = fine_tune_result.cv_results_['std_test_score']
params = fine_tune_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
lstm_univariate_time_series.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # https://buzzsumo.com/blog/most-shared-headlines-study/ # https://medium.com/the-mission/this-new-data-will-make-you-rethink-how-you-write-headlines-751358f6639a # https://www.analyticsvidhya.com/blog/2017/10/art-story-telling-data-science/ # https://medium.com/@josh_2774/how-do-you-become-a-developer-5ef1c1c68711 # http://www.storytellingwithdata.com/
Storytelling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf-gpu-cuda115
#     language: python
#     name: tf-gpu-cuda115
# ---

# NOTE(review): the bare triple-quoted strings below are the author's study notes,
# kept verbatim (they are no-op expression statements, not docstrings).
# English summary: @tf.function and the Keras API build static graphs (fast);
# using tf.GradientTape directly runs a dynamic graph (slower). tf.GradientTape
# does automatic differentiation; tf.gradients builds symbolic derivatives.
'''
使用裝飾器@tf.function、keras api,皆是使用靜態圖,運算效能高
純粹使用 tf.GradientTape() 則是使用動態圖,效能較低
tf.GradientTape 是自動微分 (Record operations for automatic differentiation.)
tf.gradients(ys, xs, ...) 是符號微分 (Constructs symbolic derivatives of sum of ys w.r.t. x in xs.)
'''
'''靜態/動態圖、符號/自動微分'''

# English summary: glossary of translations — dimension reduction, Hadamard
# (element-wise) product, and the related tf.matmul / tf.multiply /
# tf.reduce_sum / numpy.ufunc.reduce semantics.
'''
名詞翻譯:
dimension reduction: 維度縮減 https://terms.naer.edu.tw/detail/3648035/?index=1
Hadamard product: 哈德瑪得乘積 (定義兩同維度矩陣,相應元素計算乘積),
又稱 element-wise product(逐元乘積)、entrywise product(逐項乘積)
--- reduce(縮減) entrywise product(逐項乘積) ---
tf.matmul() 兩張量(矩陣)相乘
tf.multiply() 兩張量(矩陣)逐項乘積
tf.reduce_sum() 對張量(矩陣)指定維度的元素進行相加(omputes the sum of elements across dimensions of a tensor.)
numpy.ufunc.reduce() 對同一維度的元素套用相同的操作,將陣列的維度縮減(較少翻譯為:歸約)至1維 (Reduces array’s dimension by one, by applying ufunc along one axis.)
tf.gradients(ys, xs, ...) Constructs symbolic derivatives (符號導數) of sum of ys w.r.t. x in xs.
symbolic differentiation 符號微分法
'''
'名詞翻譯'

# English summary: the three ways to compute derivatives — numerical,
# symbolic, and automatic differentiation — and how they differ on control flow.
'''
https://stackoverflow.com/questions/43455320/difference-between-symbolic-differentiation-and-automatic-differentiation
There are 3 popular methods to calculate the derivative:
1. Numerical differentiation: 數值方法,定義合理的方程式,透過多次迭代來減少誤差項,逼近理論解析解
2. Symbolic differentiation: 透過連鎖律獲得導函數表達式,計算微分值
3. Automatic differentiation:
Automatic differentiation is the same as Symbolic differentiation
(in one place they operate on math expression, in another on computer programs).
And yes, they are sometimes very similar. But for control flow statements (`if, while, loops)
the results can be very different:
symbolic differentiation leads to inefficient code (unless carefully done)
and faces the difficulty of converting a computer program into a single expression
'''
'''Derivative'''

# English summary: TensorFlow started with static computation graphs; tensors
# (unlike numpy ndarrays) can live on GPU. PyTorch uses dynamic graphs with
# runtime autodiff; TF added Eager Execution around version 1.5.
'''
https://ithelp.ithome.com.tw/articles/10217112
https://ithelp.ithome.com.tw/articles/10216085
https://pytorch.org/tutorials/beginner/examples_autograd/tf_two_layer_net.html
最初 Tensorflow 是以靜態計算圖(static computational graph)的方式進行 gradient 計算
Tensorflow 制定的 tensor 結構可以放置 CPU 或 GPU,而 numpy ndarray 指能以 CPU 計算
故 Tensorflow 可將 ndarray 轉換至 tensor、tf.graph 也可放置於 GPU
相較於 Pytorch 的動態計算圖(dynamic computational graph)會在執行期間
自動微分(Runtime Automatic Differentation); TF 也是自動微分
大約從 TF 1.5 版開始推出 Eager Execution,Eager_tensor 就是可動態圖的方法
'''
'''TF Eager Execution'''

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds  # appears unused in this chunk — kept for the notebook
import numpy as np
import time

# current wall-clock time, e.g. "14:03:59"
current_time_str = time.strftime("%H:%M:%S", time.localtime())
print(current_time_str)
#print(type(current_time_str))

# +
# Prepare the training dataset.
batch_size = 64

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))  # flatten 28x28 images to 784-vectors
x_test = np.reshape(x_test, (-1, 784))

# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
# -

# +
# Get model: a small 2-hidden-layer MLP over the flattened MNIST digits.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)

# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function (logits in, so from_logits=True).
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
# -

# # Eager mode

# *** Step 1: set the number of epochs ***
epochs = 2
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))
    start_time = time.time()

    # *** Step 2: for each batch — forward pass under the tape to record the
    # computation and get predictions, compute the (mean) loss (loss
    # regularization could be added here), then differentiate the loss w.r.t.
    # the weights (automatic differentiation: the recorded operations are
    # replayed backwards), and finally apply the gradients with the optimizer. ***
    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            # Forwarding
            pred_logits = model(x_batch_train, training=True)
            # loss_object = loss_fn(y_true, y_pred)
            # `loss_object` can get loss value and regards as a function can compute gradient
            loss_object = loss_fn(y_batch_train, pred_logits)
        grads = tape.gradient(loss_object, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

        # Update training metric.
        train_acc_metric.update_state(y_batch_train, pred_logits)

        # Log every 200 batches.
        if step % 200 == 0:
            print(
                "Training loss (for one batch) at step %d: %.4f"
                % (step, float(loss_object))
            )
            print("Seen so far: %d samples" % ((step + 1) * batch_size))

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print("Training acc over epoch: %.4f" % (float(train_acc),))

    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_states()

    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val, training=False)
        # Update val metrics
        val_acc_metric.update_state(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print("Validation acc: %.4f" % (float(val_acc),))
    print("Time taken: %.2fs" % (time.time() - start_time))

# # Static mode?

# ## Appetizer

# Wrapping a call in @tf.function traces it into a static graph; note the
# Python-side print() inside the traced function only fires during tracing.
@tf.function
def MyStaticGradientor(func, var):
    return func(var)

A = tf.constant([[2,2]])

def square_fn(x):
    res = x**3
    print(res)
    return res

square_fn(A)
MyStaticGradientor(square_fn, A)

# ## Main course

# NOTE(review): this reuses the SAME layer objects as `model` above, so
# `model_efficient` shares its weights — presumably intentional for comparing
# eager vs. graph execution on the same network; confirm.
model_efficient = keras.Model(inputs=inputs, outputs=outputs)


@tf.function
def train_step(x, y):
    """One graph-compiled training step: forward, backprop, metric update."""
    with tf.GradientTape() as tape:
        pred_logits = model_efficient(x, training=True)
        loss_value = loss_fn(y, pred_logits)
    grads = tape.gradient(loss_value, model_efficient.trainable_weights)
    optimizer.apply_gradients(zip(grads, model_efficient.trainable_weights))
    train_acc_metric.update_state(y, pred_logits)
    return loss_value


@tf.function
def test_step(x, y):
    """One graph-compiled evaluation step (metric update only)."""
    pred_logits = model_efficient(x, training=False)
    val_acc_metric.update_state(y, pred_logits)


epochs = 2
for epoch in range(epochs):
    print(f"Start of epoch {epoch}")
    start_time = time.time()

    # ---- Training ----
    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        loss_value = train_step(x_batch_train, y_batch_train)

        # Log every 200 batches.
        if step % 200 == 0:
            print(
                "Training loss (for one batch) at step %d: %.4f"
                % (step, float(loss_value))
            )
            print("Seen so far: %d samples" % ((step + 1) * batch_size))

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print("Training acc over epoch: %.4f" % (float(train_acc),))

    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_states()

    # ---- Testing (Validating) ----
    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        test_step(x_batch_val, y_batch_val)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print("Validation acc: %.4f" % (float(val_acc),))
    print("Time taken: %.2fs" % (time.time() - start_time))
TestCode/Study_Tensorflow/TF_Training_Loop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import csv
from pprint import pprint
from datetime import datetime
from collections import Counter
import calendar
from dateutil import parser
import time
import pandas as pd

# data file for each supported city
chicago = 'chicago.csv'
nyu = 'new_york_city.csv'
washington = 'washington.csv'


def get_city():
    '''Asks the user for a city and returns the filename for that city's bike share data.

    Args: none.
    Returns: (str) Filename for a city's bikeshare data.
    '''
    city = input('\nHello! Let\'s explore some US bikeshare data!\n'
                 'Would you like to see data for Chicago, New York, or Washington?\n')
    if city.lower() == "chicago":
        print("City selected : {} ".format(city.title()))
        return chicago
    elif city.lower() == "new york":
        print("City selected : {} ".format(city.title()))
        return nyu
    elif city.lower() == "washington":  # FIX: was case-sensitive `city == "Washington"`
        print("City selected : {} ".format(city.title()))
        return washington
    print("Invalid Entry..... Kindly re-enter")
    # FIX: the recursive retry's result was discarded, so invalid input
    # silently returned an empty string
    return get_city()


def get_time_period():
    '''Asks the user for a time period and returns the specified filter.

    Args: none.
    Returns: (str) 'month', 'day' or 'none'.
    '''
    time_period = input('\nWould you like to filter the data by month, day, or none at'
                        ' all? Type "none" for no time filter.\n')
    if time_period.lower() in ("month", "day", "none"):
        print("Time Period selected :{}".format(time_period.title()))
        return time_period
    print("{} is Invalied entry, Kindly re-enter".format(time_period.title()))
    return get_time_period()  # FIX: retry result was previously discarded


def get_month():
    '''Asks the user for a month and returns it lower-cased (january..june).'''
    month = input('\nWhich month? January, February, March, April, May, or June?\n').lower()
    if month in ("january", "february", "march", "april", "may", "june"):
        print("Month selected :{}".format(month.title()))
        return month
    print("{} is Invalied entry, Kindly re-enter".format(month.title()))
    return get_month()  # FIX: retry result was previously discarded


def get_day():
    '''Asks the user for a day of the month and returns it as an int.

    NOTE(review): relies on a global `month` (as returned by get_month()) to
    work out how many days the month has — confirm callers set it first.
    '''
    # FIX: was `datetime.datetime.strptime`, an AttributeError under
    # `from datetime import datetime`; also dropped a redundant first prompt
    # whose answer was immediately overwritten.
    d = int(datetime.strptime(month, "%B").strftime('%m'))  # month number
    day_range = calendar.monthrange(2017, d)[1]             # last day of that month
    while True:
        day = int(input('\nWhich day? Please type your response as an integer from 1 to {}.\n'.format(day_range)))
        if 1 <= day <= day_range:
            print("Entered Day is : {}".format(day))
            return day


def open_city_file(city_name):
    '''Open the given CSV file and return its rows as a list of dicts.'''
    # FIX: was open('city_name') — the literal string instead of the variable
    with open(city_name, newline='') as csv_file:
        city_file = [{key: value for key, value in row.items()}
                     for row in csv.DictReader(csv_file, skipinitialspace=True)]
    return city_file


def dict_of_startime(city_file):
    '''Return a list of {'Start Time': ...} dicts, one per trip.'''
    return [{'Start Time': x['Start Time']} for x in city_file]


def month_list(file_st):
    '''Return the month name of each trip's start time (fixed-width timestamp slice).'''
    return [calendar.month_name[int(x['Start Time'][5:7])] for x in file_st]


def weekday_list(file_st):
    '''Return the abbreviated weekday name (e.g. 'Mon') of each trip's start time.'''
    return [parser.parse(x['Start Time']).strftime("%a") for x in file_st]

# I do not see the need to filter using month, day, none to find the popular
# month. As the popular month is the maximum of the per-month trip counts,
# the filter is rendered obsolete.
def popular_month(month):
    '''Return the month name with the highest number of trips.

    Args: month - list of month names (one per trip).
    Question: What is the most popular month for start time?
    '''
    counts = Counter(month)
    return max(counts, key=counts.get)


def popular_day(time_period, file_st):
    '''Return the most popular weekday, optionally keyed by month.

    Question: What is the most popular day of week for start time?
    '''
    # FIX: `time_period == 'none' or 'day'` was always truthy, and `week_day`
    # was an undefined global — derive it from file_st instead.
    if time_period == 'none' or time_period == 'day':
        counts = Counter(weekday_list(file_st))
    else:
        # filter w.r.t. month: count (month, weekday) pairs, e.g. June:Friday
        counts = Counter((calendar.month_name[int(x['Start Time'][5:7])],
                          parser.parse(x['Start Time']).strftime("%a"))
                         for x in file_st)
    return max(counts, key=counts.get)


def popular_hour(file_st, time_period):
    '''Return the most popular start hour (optionally keyed by month/weekday).

    Question: What is the most popular hour of day for start time?
    '''
    # hour parsed from the fixed-width timestamp and shifted by one, as in
    # the original notebook
    hours = [int(x['Start Time'][11:13]) + 1 for x in file_st]
    if time_period == 'none':
        counts = Counter(hours)
    elif time_period == 'month':
        # FIX: original indexed the hour list with a dict (TypeError); pair
        # each row's month with its hour instead
        counts = Counter((calendar.month_name[int(x['Start Time'][5:7])], h)
                         for x, h in zip(file_st, hours))
    else:
        # FIX: original called `hour(x)` as a function; same pairing by weekday
        counts = Counter((parser.parse(x['Start Time']).strftime("%a"), h)
                         for x, h in zip(file_st, hours))
    return max(counts, key=counts.get)


def trip_duration(city_file, time_period):
    '''Return (total, average) trip duration in seconds.

    All three original time_period branches were identical, so the filter is
    ignored; the parameter is kept for interface compatibility.
    '''
    trip_sum = sum(int(x['Trip Duration']) for x in city_file)
    avg_time = trip_sum / len(city_file) if city_file else 0  # guard empty input
    return (trip_sum, avg_time)


def popular_stations(city_file, time_period):
    '''Return (most popular start station, most popular end station).

    Question: What is the most popular start station and most popular end station?
    '''
    if time_period == 'none':
        starts = Counter(x['Start Station'] for x in city_file)
        ends = Counter(x['End Station'] for x in city_file)
    else:
        # FIX: original had a `Station_end` typo (NameError) and used the
        # wrong Counter's .get as the max() key
        if time_period == 'month':
            keys = [calendar.month_name[int(x['Start Time'][5:7])] for x in city_file]
        else:
            keys = [parser.parse(x['Start Time']).strftime("%a") for x in city_file]
        starts = Counter(zip(keys, (x['Start Station'] for x in city_file)))
        ends = Counter(zip(keys, (x['End Station'] for x in city_file)))
    return (max(starts, key=starts.get), max(ends, key=ends.get))


def popular_trip(city_file, time_period):
    '''Return the most common (start station, end station) pair.

    Question: What is the most popular trip? All three original branches were
    identical, so the time_period filter is ignored.
    '''
    counts = Counter((x['Start Station'], x['End Station']) for x in city_file)
    # FIX: the original computed the answer but never returned it
    return max(counts, key=counts.get)


def users(city_file, time_period):
    '''Return the most common user type (optionally keyed by month/weekday).

    Question: What are the counts of each user type?
    '''
    if time_period == 'none':
        counts = Counter(x['User Type'] for x in city_file)
    elif time_period == 'month':
        # FIX: original iterated an undefined `file_st`/`popular_user`
        counts = Counter((calendar.month_name[int(x['Start Time'][5:7])], x['User Type'])
                         for x in city_file)
    else:
        counts = Counter((parser.parse(x['Start Time']).strftime("%a"), x['User Type'])
                         for x in city_file)
    return max(counts, key=counts.get)


def gender(city_file, city_name, time_period):
    '''Return the most common gender; Washington has no gender data.

    Question: What are the counts of gender?
    '''
    # FIX: `break` outside a loop was a SyntaxError; compare against the
    # washington filename constant rather than the bare string 'washington'
    if city_name == washington:
        print('No data on gender')
        return None
    if time_period == 'none':
        counts = Counter(x['Gender'] for x in city_file)
    elif time_period == 'month':
        counts = Counter((calendar.month_name[int(x['Start Time'][5:7])], x['Gender'])
                         for x in city_file)
    else:
        counts = Counter((parser.parse(x['Start Time']).strftime("%a"), x['Gender'])
                         for x in city_file)
    return max(counts, key=counts.get)


def birth_years(city_name, city_file, time_period):
    '''Return (latest, earliest, most popular) rider birth years.

    Question: What are the earliest (oldest user), most recent (youngest
    user), and most popular birth years?
    NOTE(review): years are compared as strings, as in the original; all
    three time_period branches were identical, so the filter is ignored.
    '''
    if city_name == washington:  # FIX: `break` outside a loop was a SyntaxError
        print('No data on birth year')
        return None
    # FIX: original iterated an undefined `testing`; blank entries filtered out
    birth_year = [x['Birth Year'] for x in city_file if x['Birth Year']]
    counts = Counter(birth_year)
    pop_year = max(counts, key=counts.get)
    return (max(birth_year), min(birth_year), pop_year)


import pandas as pd


def display_data(city_name):
    '''Display five lines of data at a time while the user keeps answering 'yes'.

    Args: city_name - CSV filename as returned by get_city().
    '''
    # FIX: the original body was unfinished pseudo-code
    # ("if display import use while true here") and did not parse
    df = pd.read_csv(city_name)
    start = 0
    while True:
        display = input('\nWould you like to view individual trip data?'
                        'Type \'yes\' or \'no\'.\n')
        if display.lower() != 'yes':
            break
        print(df.iloc[start:start + 5])
        start += 5


# +
# Scratch cell kept from the notebook: preview chicago data five rows at a time.
# FIX: `.lower` was never called and the loop had no exit; guarded so that
# importing this module does not block on input().
if __name__ == "__main__":
    df = pd.read_csv('chicago.csv')
    while True:
        display = input('\nWould you like to view individual trip data?'
                        'Type \'yes\' or \'no\'.\n').lower()
        if display == 'no':
            break
        print(df.head())
# -


# +
def statistics():
    '''Calculates and prints the descriptive statistics about a city and time
    period specified by the user via raw input.

    Args: none.
    Returns: none.
    '''
    # Filter by city (Chicago, New York, Washington)
    city = get_city()
    # Filter by time period (month, day, none)
    time_period = get_time_period()

    # load the data once and derive the start-time view used by the helpers
    city_file = open_city_file(city)
    file_st = dict_of_startime(city_file)

    print('Calculating the first statistic...')
    # What is the most popular month for start time?
    if time_period == 'none':
        start_time = time.time()
        print("Most popular month:", popular_month(month_list(file_st)))
        print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    # What is the most popular day of week for start time?
    if time_period == 'none' or time_period == 'month':
        start_time = time.time()
        print("Most popular day:", popular_day(time_period, file_st))
        print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the most popular hour of day for start time?
    print("Most popular hour:", popular_hour(file_st, time_period))
    print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the total trip duration and average trip duration?
    total, avg = trip_duration(city_file, time_period)
    print("Total trip duration: %s, average: %s" % (total, avg))
    print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the most popular start station and most popular end station?
    print("Most popular stations:", popular_stations(city_file, time_period))
    print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the most popular trip?
    print("Most popular trip:", popular_trip(city_file, time_period))
    print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What are the counts of each user type?
    print("Most common user type:", users(city_file, time_period))
    print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What are the counts of gender?
    print("Most common gender:", gender(city_file, city, time_period))
    print("That took %s seconds." % (time.time() - start_time))

    print("Calculating the next statistic...")
    start_time = time.time()
    # What are the earliest, most recent, and most popular birth years?
    print("Birth years (max, min, most popular):", birth_years(city, city_file, time_period))
    print("That took %s seconds." % (time.time() - start_time))

    # Display five lines of data at a time if user specifies that they would like to
    display_data(city)  # FIX: was called without its required argument

    # Restart?
    restart = input('\nWould you like to restart? Type \'yes\' or \'no\'.\n')
    if restart.lower() == 'yes':
        statistics()


if __name__ == "__main__":
    statistics()
# -
other/final_code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import json

# Metadata for the test samples: a list of dicts, each with an "id",
# a "format" ("FASTQ" or "BAM") and a list of "files".
# Use a context manager so the file handle is closed deterministically
# (the original `json.load(open(...))` leaked the handle).
with open('../data/ref/test_samples.json') as fh:
    SAMPLES_META = json.load(fh)

# +
# File name of the first file of every BAM-format sample.
bam_files = []
for value in SAMPLES_META:  # bug fix: was `data`, an undefined name (NameError)
    if value["format"] == "BAM":
        bam_files.append(value["files"][0]["file_name"])
bam_files

# +
# Map sample id -> all FASTQ file names (all R1 reads followed by all R2 reads).
FQ_FILES = {}
for value in SAMPLES_META:
    if value["format"] == "FASTQ":
        FQ_FILES[value["id"]] = ([item["R1"] for item in value["files"]]
                                 + [item["R2"] for item in value["files"]])
FQ_FILES

# +
# Map sample id -> all BAM file names.
BAM_FILES = {}
for value in SAMPLES_META:
    if value["format"] == "BAM":
        BAM_FILES[value["id"]] = [item["file_name"] for item in value["files"]]
BAM_FILES
# -

# Flat list of every sample id, in file order.
SAMPLES = [value["id"] for value in SAMPLES_META]
SAMPLES
python/.ipynb_checkpoints/LearningJSON-2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# index_col=0: use the first CSV column (company name) as the row labels.
f500 = pd.read_csv('../Dataset/f500.csv', index_col=0)
f500.index.name = None  # remove the name of the index axis
f500.head(5)

industry = f500['industry']  # selecting a single column -> Series
industry

country = f500['country']
country

# A list of column labels selects multiple columns -> DataFrame.
revenue_years = f500[['revenues', 'years_on_global_500_list']]
revenue_years

# Label slices with .loc include BOTH endpoints.
ceo_to_sector = f500.loc[:, 'ceo':'sector']
ceo_to_sector

toyota = f500.loc['Toyota Motor']  # single row by label
toyota

drink_companies = f500.loc[['Anheuser-Busch InBev', 'Coca-Cola', 'Heineken Holding']]  # list of rows
drink_companies

middle_companies = f500['Tata Motors':'Nationwide']  # slice of rows by label
middle_companies

country_count = country.value_counts()  # how many times each country appears
country_count

india = country_count['India']  # single item from a Series
india

north_america = country_count[['USA', 'Canada', 'Mexico']]  # list of items
north_america

spain_to_russia = country_count['Spain':'Russia']  # slice of items
spain_to_russia

# Multiple rows and multiple columns in one .loc call.
big_movers = f500.loc[['Aviva', 'HP', 'JD.com', 'BHP Billiton'], ['rank', 'previous_rank']]
big_movers

bottom_companies = f500.loc['National Grid':'AutoNation', ['rank', 'sector', 'country']]
bottom_companies

rank_change = f500['previous_rank'] - f500['rank']  # change in each company's rank
rank_change

rank_change_max = rank_change.max()
rank_change_max

rank_change_min = rank_change.min()
rank_change_min

# ### A brief explanation of an error in the data
# Companies are ranked 1 to 500, so even the most extreme move (1st -> 500th)
# gives a rank change of -499.  Values outside [-499, 499] therefore indicate
# bad data in either `rank` or `previous_rank`.

rank_change.describe()

# Descriptive statistics for the numeric columns.  Very large values are shown
# in scientific (e) notation: 2.436323e+05 == 2.436323 * 10 ** 5.
# Use f500.describe(include=['O']) for the object (string) columns instead.
f500.describe()

country.describe()  # non-numeric columns yield count/unique/top/freq

rank = f500['rank']
rank.describe()

previous_rank = f500['previous_rank']
previous_rank.describe()

# The minimum of `previous_rank` is 0, which is impossible for a 1-500 ranking.
# Using method chaining, count how many rows carry that bogus 0.
previous_rank.value_counts().loc[0]

# 33 companies have previous_rank == 0, i.e. they had no rank at all last
# year; those values should be missing (null) rather than 0.

max_f500 = f500.max(numeric_only=True)  # maxima of the numeric columns only
max_f500

min_f500 = f500.min(numeric_only=True)
min_f500

# Apart from `previous_rank`, every column looks reasonable.  Replace the
# bogus zeros with np.nan, pandas' marker for missing numeric values.
import numpy as np

f500.loc[f500['previous_rank'] == 0, 'previous_rank'] = np.nan
f500.tail()

# Recreate rank_change with the cleaned data and store it on the frame.
# Assigning to a non-existent column label creates that column.
f500['rank_change'] = f500['previous_rank'] - f500['rank']
f500['rank_change']

f500['rank_change'].describe()

# Top two industries among USA-based companies.  One .loc lookup instead of
# chained indexing (f500['industry'][mask]), which pandas warns against and
# which may silently operate on a copy.
f500.loc[f500['country'] == 'USA', 'industry'].value_counts().head(2)

# ### Selecting by integer position with DataFrame.iloc[] / Series.iloc[]
#
# |Select by integer position| Explicit Syntax| Shorthand Convention|
# |:---------------------------- | :----------------:| -------------------: |
# |Single column from dataframe| df.iloc[:,3]|
# |List of columns from dataframe| df.iloc[:,[3,5,6]]|
# |Slice of columns from dataframe| df.iloc[:,3:7]|
# |Single row from dataframe| df.iloc[20]|
# |List of rows from dataframe| df.iloc[[0,3,8]]|
# |Slice of rows from dataframe| df.iloc[3:5]| **df** [3:5]|
# |Single items from series| s.iloc[8] | **s** [8]|
# |List of item from series| s.iloc[[2,8,1]]| **s** [[2,8,1]]|
# |Slice of items from series | s.iloc[5:10] | **s** [5:10]|

null_revenues = f500['revenue_change'].isnull()  # True where revenue_change is missing
null_revenues.head()

# Rows with a missing revenue change, restricted to a few columns.
company_with_null_revenue_change = f500.loc[null_revenues,
                                            ['sector', 'country', 'revenues', 'revenue_change']]
company_with_null_revenue_change

# Only two companies (Uniper and Hewlett Packard Enterprise) have a null
# revenue change; they belong to the energy and technology sectors and are
# headquartered in Germany and the USA.

null_previous = f500['previous_rank'].isnull()
null_previous_rank = f500.loc[null_previous, ['rank', 'previous_rank']]
null_previous_rank

null_previous_rank.count()  # 33 companies have a null previous_rank

null_previous_rank.iloc[0:5]  # first five such rows, by position

# Keep only the companies that had a rank last year.
previously_ranked = f500[f500['previous_rank'].notnull()]
previously_ranked

# previously_ranked has 467 rows: 500 minus the 33 unranked companies.
rank_change = previously_ranked['previous_rank'] - previously_ranked['rank']
rank_change

no_change = (rank_change == 0)
q = previously_ranked.loc[no_change, ['industry', 'country']]
q  # companies that kept exactly the same ranking

previously_ranked

# Attach the 467-row series to the 500-row frame.  When merging, pandas:
# - aligns on index labels regardless of order,
# - discards series labels absent from the frame,
# - fills frame rows missing from the series with NaN,
# so f500 keeps its 500 rows.
f500['rank_change'] = rank_change
f500

# #### Combining boolean arrays
#
# |Pandas Operator | Normal Python Operator | Meaning|
# |:---------------| -----------------------:|---------:|
# |a & b| a and b| True if both a and b are True, else False|
# |a \| b| a or b| True if either a or b is True|
# |~a| not a| True if a is False, else False|
#
# When combining comparisons inline, each one MUST be parenthesised, e.g.
# f500[(f500["revenues"] > 100000) & (f500["profits"] < 0)].

large_revenue = f500['revenues'] > 100000
negative_profits = f500['profits'] < 0
combined = large_revenue & negative_profits
big_rev_neg_profit = f500.loc[combined, ['revenues', 'profits']]
big_rev_neg_profit

# Only two companies (Japan Post Holdings and Chevron) with revenues above
# $100bn still posted a loss.

brazil_venezuela = f500[(f500['country'] == 'Brazil') | (f500['country'] == 'Venezuela')]
brazil_venezuela  # 8 companies headquartered in Brazil or Venezuela

tech_outside_usa = f500[(f500['sector'] == 'Technology') & ~(f500['country'] == 'USA')]
tech_outside_usa

tech_outside_usa['sector'].count()  # 30 technology firms outside the USA

# +
# Top five Japanese companies by headcount.  sort_values is ascending by
# default, so request descending order explicitly.
japanese_companies = f500[f500['country'] == 'Japan']
japanese_companies = japanese_companies.sort_values('employees', ascending=False)
japanese_companies[['country', 'employees']].head()
# -

# Single cell lookup in one .loc call with row AND column labels, instead of
# the chained japanese_companies.loc['Toyota Motor']['employees'].
top_japanese_employees = japanese_companies.loc['Toyota Motor', 'employees']
top_japanese_employees

# ### Aggregate calculations
# Average revenue per country: for each unique country, select its rows and
# take the mean of their `revenues`.
avg_rev_by_country = {}
countries = country.unique()
for c in countries:
    selected_rows = f500[f500['country'] == c]
    # store the mean directly; the original bound it to `mean`, shadowing the builtin
    avg_rev_by_country[c] = selected_rows['revenues'].mean()
avg_rev_by_country

# ### CEO with the largest workforce in each country
top_employer_by_country = {}
countries = f500['country'].unique()
for c in countries:
    unique_rows = f500[f500['country'] == c]          # rows for this country
    sorted_rows = unique_rows.sort_values('employees', ascending=False)
    top_employer_by_country[c] = sorted_rows.iloc[0]['ceo']  # CEO of the top employer
top_employer_by_country

# ### Interpretation
# The mapping above gives, per country, the CEO whose company employs the
# most workers in that country.

# +
# ROA (return on assets = profits / assets) averaged across each country's
# companies.  ROA measures a company's ability to turn its assets into profit.
roa = f500['profits'] / f500['assets']
f500['roa'] = roa

top_roa_by_country = {}
unique_country = f500['country'].unique()
for s in unique_country:
    selected_country = f500[f500['country'] == s]
    top_roa_by_country[s] = selected_country['roa'].mean()

# Order countries from highest to lowest average ROA.
import operator
sorted_dict_to_list = sorted(top_roa_by_country.items(), key=operator.itemgetter(1), reverse=True)
# operator.itemgetter(1) keys the sort on the dict value; the result is a list
# of (country, mean_roa) pairs, converted back to a dict below.
sorted_dict = dict(sorted_dict_to_list)
sorted_dict
# -

# On AVERAGE, Switzerland has the highest ROA, followed by Indonesia and
# Ireland; Norway, Denmark and Mexico have the lowest averages.

# +
# Highest single-company ROA per country (no averaging).
roa = f500['profits'] / f500['assets']
f500['roa'] = roa

top_roa_by_country = {}
unique_country = f500['country'].unique()
for s in unique_country:
    selected_country = f500[f500['country'] == s]
    srt = selected_country.sort_values('roa', ascending=False)
    # Select the 'roa' value by label; the original srt.iloc[0][-1] relied on
    # 'roa' being the last column, which breaks if columns are ever reordered.
    top_roa_by_country[s] = srt.iloc[0]['roa']

import operator
sorted_dict_to_list = sorted(top_roa_by_country.items(), key=operator.itemgetter(1), reverse=True)
sorted_dict = dict(sorted_dict_to_list)
sorted_dict
# -

# ### Average ROA vs single highest ROA
# The average version divides each country's total ROA by its number of
# companies, so countries with few, efficient companies (Switzerland,
# Indonesia, Ireland) rank highest.  The per-country-maximum version keeps
# only the single best company, which favours countries with many companies:
# the United States comes first, followed by Sweden and Ireland, with
# Finland, Norway and Denmark at the bottom.
Analytics_Projects/Fortune_500_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/zierenberg/lecture_MC_disease/blob/main/excercise_MonteCarlo_SIR.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="zoRwWgSqHA66"
# # General

# + id="_l6LsxcqD8Y0"
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from tqdm import tqdm

import os
try:
    os.mkdir("figures")
except OSError:
    pass


# + [markdown] id="8ZeRlMNo6e5F"
# Deterministic solution

# + id="Ow3VSlhP5_B4"
def SIR_derivative(u, t, rate_infection, rate_recovery):
    """Right-hand side of the deterministic SIR ODE system.

    u is the current state (S, I, R); t is unused but required by odeint.
    Returns the tuple of time derivatives (dS/dt, dI/dt, dR/dt).
    """
    susceptible, infected, recovered = u
    population = susceptible + infected + recovered
    # Mass-action infection term and linear recovery term.
    infections = rate_infection * susceptible * infected / population
    recoveries = rate_recovery * infected
    return -infections, infections - recoveries, recoveries


def SIR_deterministic(rate_infection, rate_recovery, S0, I0, R0, times):
    """Integrate the SIR equations from (S0, I0, R0) over the grid `times`.

    Returns three numpy arrays with S(t), I(t) and R(t).
    """
    trajectory = odeint(SIR_derivative, (S0, I0, R0), times,
                        args=(rate_infection, rate_recovery))
    S, I, R = trajectory.T
    return np.array(S), np.array(I), np.array(R)


# + [markdown] id="milnfCe9G4Qw"
# # Kinetic Monte Carlo simulation of the SIR dynamics
# Implement the stochastic SIR model with the Gillespie algorithm and plot the
# results (e.g. mu = 1/7, lambda = 2/7, $N=10^5$, $R_0$ = 0, $I_0$ = 10, one
# sample point per day).

# + id="2y0XLn1Rak9s"
#implement simulation here

# + [markdown] id="YONfwAFn4bdq"
# Example

# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="aWvz23Qf4gRU" outputId="3c40f5fe-e366-43ea-8998-72899f2372ab"
rate_recovery = 1 / 7
rate_infection = 2 / 7
I0 = 10
N = int(1e5)
S0 = N - I0
R0 = 0
time_total = 21
times = np.arange(0, time_total, 1)

# simulation

# deterministic solution
S_det, I_det, R_det = SIR_deterministic(rate_infection, rate_recovery,
                                        S0, I0, R0, times)

plt.plot(times, I_det, color='black', label='deterministic', linewidth=2)
plt.xlabel('time')
plt.ylabel('number active cases')
plt.legend()
plt.savefig('figures/kineticMC_example.png', dpi=200)

# + id="AH0lIwq8PPea"
excercise_MonteCarlo_SIR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: AutoCNet PLIO (workshop) # language: python # name: autocnet_workshop # --- # We don't technically need this but it avoids a warning when importing pysis import os os.environ['ISISROOT'] = '/usgs/cpkgs/anaconda3_linux/envs/isis3.9.0' # <a id='toc'></a> # # AutoCNet Intro # As mentioned earlier AutoCNet is a method for storing control networks and has outlier detection functionality. AutoCNet also contains a suite of functions that parallelize network generation that leverages and compliments ISIS processing. The advantage of AutoCNet network generation is it takes advantage of elementwise cluster processing (these elements can be images, points, measures, etc.) and postgresql for data storage and quick relational querying. # # In this notebook we are going to step through the network generation process in AutoCNet! # # For Quick Access: # - [Load and apply configuration file](#configuration) # - [Ingest images and calculate overlaps](#ingest) # - [Distribute points in overlaps](#distribute) # - [Subpixel register points](#registration) # ### Grab the Image Data # We are going to process Kaguya Terrain Camera (TC) images surrounding the Reiner Gamma Lunar Swirl (4.9° - 9.9° N Planetocentric Latitude and 61.3° - 56.3° W Longitude). The data is located in '/scratch/ladoramkershner/moon/kaguya/workshop/original/', please use the cell below to copy the data into a directory of your choosing. 
# + import getpass uid = getpass.getuser() output_directory = f'/scratch/ladoramkershner/FY21_autocnet_workshop/workshop_scratch/{uid}' # put output directory path as string here print(output_directory) # - # # copy over the data to the 'lvl1' subdirectory # !mkdir -p $output_directory/lvl1/ # !cp -p /scratch/ladoramkershner/moon/kaguya/workshop/original/*cub $output_directory/lvl1/ # We need to create a list of the cubes, to feed into AutoCNet. It is important that the cube list handed to AutoCNet contain **absolute** paths, as they will serve as an accessor for loading information from the cubes later. # !ls $output_directory/lvl1/*cub > $output_directory/cubes.lis # !head $output_directory/cubes.lis # <a id='configuration'></a> # # Parse the Configuration File # [Return To Top](#toc) # # # The configuration parameters are typically held in a configuration yaml file. A configuration file has been compiled for use internal to the USGS ASC facilities leveraging a shared cluster and database. Use AutoCNet's function 'parse_config' to read in the yaml file and output a dictionary variable. # + from autocnet.config_parser import parse_config config_path = '/scratch/ladoramkershner/FY21_autocnet_workshop/config_moon.yml' config = parse_config(config_path) # - # The config is a nested dictionary, meaning it has a larger dictionary structure defining sections for the services above and then each service section is a dictionary defining the particular configuration parameters. # + import numpy as np print('configuration dictionary keys: ') print(np.vstack(list(config.keys())), '\n') print('cluster configuration dictionary keys: ') print(np.vstack(list(config['cluster'].keys()))) # - # Although the configuration file is set up for internal use, some fields need to be altered to point to user specific areas or unique strings. 
config['cluster']['cluster_log_dir'] = f'/scratch/ladoramkershner/FY21_autocnet_workshop/workshop_scratch/{uid}/logs' config['database']['name'] = f'workshop_{uid}_kaguyatc_reinergamma' config['redis']['basename'] = f'{uid}_queue' config['redis']['completed_queue'] = f'{uid}_queue:comp' config['redis']['processing_queue'] = f'{uid}_queue:proc' config['redis']['working_queue'] = f'{uid}_queue:work' # + default_log = config['cluster']['cluster_log_dir'] print(f'your log directory: {default_log}') print('your database name:', config['database']['name']) # - # ### Create the NetworkCandidateGraph # The NetworkCandidateGraph (NCG) class can be instantiated to an object without any arguments. However, this NCG object requires configuration before it can be used for any meaningful work, so we have to run 'config_from_dict'. # + from autocnet.graph.network import NetworkCandidateGraph ncg = NetworkCandidateGraph() ncg.config_from_dict(config) ncg.from_database() # - # <a id="ingest"></a> # # Ingest Image Data and Calculate Overlaps # [Return To Top](#toc) # # At this point our ncg variable is empty, so if we try to plot the contents we will get an empty plot. ncg.plot() # We need to load the images into the ncg using 'add_from_filelist', which loads the images from a passed in list and then calculates the overlaps. filelist = f'{output_directory}/cubes.lis' # this should contain absolute paths ncg.add_from_filelist(filelist) # Now when we plot the ncg, we see the undirected graph, where the circles are the nodes/images and the lines are the edges/overlaps. The Kaguya TC data has a very regular overlap pattern in this area, seen by the large number of edges shared between nodes. ncg.plot() # We have access to the image data through the ncg, but the ncg does not persist after the notebook is shut down. To persist the network, AutoCNet leverages a database for the storage of the networks images, points, and measures. 
The ncg has access to this database through the ncg's 'session_scope'. Through the ncg.session_scope() you can interact and execute queries on your database in pure SQL. with ncg.session_scope() as session: img_count = session.execute("SELECT COUNT(*) FROM images").fetchall() overlap_count = session.execute("SELECT COUNT(*) FROM overlay").fetchall() print(' Number of images in database: ', img_count[0][0]) print('Number of overlaps in database: ', overlap_count) # session.execute() is equivalent to running the input string in the database directly. It is a convenient if you are already familiar with pure sql commands, however, the return values are messy. The session.query() leverages a python module called sqlalchemy which allow pythonic calls to your database with clean output. # + from autocnet.io.db.model import Images, Overlay with ncg.session_scope() as session: img_count = session.query(Images).count() overlap_count = session.query(Overlay).count() print(' Number of images in database: ', img_count) print('Number of overlaps in database: ', overlap_count) # - # Additionally, session.execute() can be inconvenient if working with the actual data contained within the tables. For example, to access certain information you need to know the index where that information exists. with ncg.session_scope() as session: img = session.execute("SELECT * FROM images LIMIT 1").fetchall() print('image index: ', img[0][0]) print('product id: ', img[0][1]) print('image path: ', img[0][2]) print('image serial number: ', img[0][3]) print('image ignore flag: ', img[0][4]) # print('image geom: ', img[0][5]) # only uncomment after looking at other output print('image camera type: ', img[0][7]) # However, if the structure of the database changes (order of the columns or a column is added/removed) or you cannot remember the order of the columns, working with the database data in this way is be very inconvenient. 
So AutoCNet built models for each table of the database tables to help interface with them. # + from autocnet.io.db.model import Measures, Points with ncg.session_scope() as session: img = session.query(Images).first() print('image index: ', img.id) print('product id: ', img.name) print('image path: ', img.path) print('image serial number: ', img.serial) print('image ignore flag: ', img.ignore) # print('image geometry: ', img.geom) # only uncomment after looking at other output print('image camera type: ', img.cam_type) # - # Accessing the information off of the img object is more intuitive as it is property based instead of index based. # # img[0][0] --> img.id <br> # img[0][1] --> img.name <br> # img[0][2] --> img.path <br> # and so on.. # # Additionally, if you cannot remember the exact names of the properties you want to access, you can dir() the model. print(dir(Images)) # Finally, if you uncomment the prints of the geometry in the two previous cells you see that the raw database geometry (given by session.execute()) is stored as a hexadecimal string while the Images.geom property is a shapely Multipolygon with more intuitive longitude and latitude values. The MultiPolygon also has helpful functions which allows direct access to the latitude, longitude information. To plot the geometry all we have to do is... # + import matplotlib.pyplot as plt n = 25 with ncg.session_scope() as session: imgs = session.query(Images).limit(n) fig, axs = plt.subplots(1, 1, figsize=(5,10)) axs.set_title(f'Footprints of First {n} Images in Database') for img in imgs: x,y = img.geom.envelope.boundary.xy # this call! axs.plot(x,y) # - # <a id="distribute"></a> # # Place Points in Overlap # [Return To Top](#toc) # # The next step in the network generation process is to lay down points in the image overlaps. Before dispatching the function to the cluster, we need to make the log directory from our configuration file. 
If a SLURM job is submitted with a log directory argument that does not exist, the job will fail. # + ppio_log_dir = default_log.replace('logs', 'ppio_logs') print('creating directory: ', ppio_log_dir) if not os.path.exists(ppio_log_dir): os.mkdir(ppio_log_dir) # - # We are going to use the 'place_points_in_overlap' function to lay the points down. For now, we will use the default size and distribution arguments, which is easily accomplished by not handing in values for these arguments. However, we need to change our camera type from the default 'csm' to 'isis'. # + from autocnet.spatial.overlap import place_points_in_overlap njobs = ncg.apply('spatial.overlap.place_points_in_overlap', on='overlaps', # start of function kwargs cam_type='isis', walltime='00:30:00', # start of apply kwargs log_dir=ppio_log_dir, arraychunk=50) print(njobs) # - # !squeue -u $uid | head # helpful to grab job array id # The 'place_points_in_overlaps' function first evenly distributes points spatially into a given overlap, then it back-projects the points into the 'top' image. Once in image space, the function searches the area surrounding the measures to find interesting features to shift the measures to (this increases the chance of subpixel registration passing). The shifted measures are projected back to the ground and these updated longitudes and latitudes are used to propagate the points into all images associated with the overlap. So, this function requires: # - An overlap (to evenly distribute points into) # - Distribution kwargs (to decide how points are distributed into the overlap) # - Size of the area around the measure (to search for the interesting feature) # - Camera type (so it knows what to expect as inputs/output for the camera model) # # Since this function operates independently on each overlap, it is ideal for parallelization with the cluster. Notice that we are not passing in a single overlap to the apply call, instead we pass "on = 'overlaps'". 
The 'on' argument indicates which element (image, overlap, point, measure) to apply the function. with ncg.session_scope() as session: noverlay = session.query(Overlay).count() print(noverlay) # ### Multiple Ways to Check Job Array Process # #### Log Files # As jobs are put on the cluster, their corresponding log files are created. You can check how many jobs have been/ are being processed on the cluster by looking in the log directory. # !ls $ppio_log_dir | head -5 # As more logs are placed in the log directory, you will have to specify which array job's logs you are checking on. The naming convention of the log files generated by AutoCNet are 'path.to.function.function_name-jobid.arrayid_taskid.out' jobid = '' # put jobid int here # !ls $ppio_log_dir/*${jobid}_*.out | wc -l # #### Slurm Account # Using 'sacct' allows you to check the exit status of the tasks from your job array. # !sacct -j $jobid -s 'completed' | wc -l # !sacct -j $jobid -s 'failed' | wc -l # !sacct -j $jobid -s 'timeout' | wc -l # !sacct -j $jobid -s 'cancelled' | wc -l # The return of '2' from the word count on the 'failed', 'timeout', and 'cancelled' job accounts are the header lines. # !sacct -j $jobid -s 'failed' | head # #### NCG Queue Length # The queue holds the job packages in json files called 'queue messages' until the cluster is ready for the job. You can view how many messages are left on the queue with the 'queue_length' NCG property. print("jobs left on the queue: ", ncg.queue_length) # ### Reapply to the Cluster? # Sometimes jobs fail to submit to the cluster, it is prudent to check the ncg.queue_length AFTER your squeue is empty. # !squeue -u $uid print("jobs left on the queue: ", ncg.queue_length) # When reapplying a function to the cluster, you do not need to resubmit the function arguments, because those were already serialized into the queue message. However, the cluster submission arguments can be reformatted and the 'reapply' argument should be set to 'True'. 
# + # njobs = ncg.apply('spatial.overlap.place_points_in_overlap', # chunksize=redis_orphans, # arraychunk=None, # walltime='00:20:00', # log_dir=ppio_log_dir, # reapply=True) # print(njobs) # - # One advantage of using of a postgresql database for data storage is that it allows for storage of geometries. You can then use relational queries to view how different elements' geometries relate with one another. # + from geoalchemy2 import functions from geoalchemy2.shape import to_shape with ncg.session_scope() as session: results = ( session.query( Overlay.id, Overlay.geom.label('ogeom'), Points.geom.label('pgeom') ) .join(Points, functions.ST_Contains(Overlay.geom, Points.geom)=='True') .filter(Overlay.id < 10) # Just view first 10 overlaps .all() ) print('number of points: ', len(results)) fig, axs = plt.subplots(1, 1, figsize=(10,10)) axs.grid() oid = [] for res in results: if res.id not in oid: oid.append(res.id) ogeom = to_shape(res.ogeom) ox, oy = ogeom.envelope.boundary.xy axs.plot(ox, oy, c='k') pgeom = to_shape(res.pgeom) px, py = pgeom.xy axs.scatter(px, py, c='grey') # - # Notice that the points are not in straight lines, this is because of the shifting place_points_in_overlap does to find interesting measure locations. # # However, the default distribution of points in the overlaps looks sparse, so let’s rerun place_points_in_overlap with new distribution kwargs. Before rerunning place_points_in_overlap, the points and measures tables need to be cleared using ncg's 'clear_db' method. 
with ncg.session_scope() as session: npoints = session.query(Points).count() print('number of points: ', npoints) nmeas = session.query(Measures).count() print('number of measures: ', nmeas) ncg.clear_db(tables=['points', 'measures']) # clear the 'points' and 'measures' database tables with ncg.session_scope() as session: npoints = session.query(Points).count() print('number of points: ', npoints) nmeas = session.query(Measures).count() print('number of measures: ', nmeas) # The distribution argument for place_points_in_overlap requires two **function** inputs. Since overlaps are variable shapes and sizes, one integer is not sufficient to determine effective gridding along every overlaps sides. Instead, the distribution of points along the N to S edge of the overlap and the E to W edge of the overlap are determined based on a function. # # The default distribution functions are: <br /> # nspts_func=lambda x: ceil(round(x,1)\*10) <br /> # ewpts_func=lambda x: ceil(round(x,1)\*5) <br /> # # Where x in nspts_func is the length of the overlap's longer edge (in km) and x in ewpts_func is the length of the overlap's shorter edge (in km). This way a shorter edge will receive less points and a longer side will receive more points. Change the multipliers in the 'ns' and 'ew' functions below to find a satisfying distribution. 
# + from autocnet.cg.cg import distribute_points_in_geom def ns(x): from math import ceil return ceil(round(x,1)*15) def ew(x): from math import ceil return ceil(round(x,1)*10) total=0 with ncg.session_scope() as session: srid = config['spatial']['latitudinal_srid'] overlaps = session.query(Overlay).filter(Overlay.geom.intersects(functions.ST_GeomFromText('LINESTRING(301.2 7.4, 303.7 7.4, 303.7 9.9, 301.2 9.9, 301.2 7.4)', srid))).all() print('overlaps in selected area: ', len(overlaps)) for overlap in overlaps: ox, oy = overlap.geom.exterior.xy plt.plot(ox,oy) valid = distribute_points_in_geom(overlap.geom, method='classic', nspts_func=ns, ewpts_func=ew, Session=session) if valid: total += len(valid) px, py = list(zip(*valid)) plt.scatter(px, py, s=1) print(' points in selected area: ', total) # - # Then rerun the apply function, setting the 'distribute_points_kwargs' arguments. # + distribute_points_kwargs = {'nspts_func':ns, 'ewpts_func':ew, 'method':'classic'} njobs = ncg.apply('spatial.overlap.place_points_in_overlap', on='overlaps', # start of function kwargs distribute_points_kwargs=distribute_points_kwargs, # NEW LINE cam_type='isis', walltime='00:30:00', # start of apply kwargs log_dir=ppio_log_dir, arraychunk=100) print(njobs) # - # Check the progress of your jobs. # !squeue -u $uid | wc -l # !squeue -u $uid | head # Count the number of jobs started by looking for generated logs. jobid = '' # put jobid int here # ! ls $ppio_log_dir/*$jobid* | wc -l # Monitor how many jobs have completed or failed. # !sacct -j $jobid -s 'completed' | wc -l # !sacct -j $jobid -s 'failed' | wc -l # Check to see if the ncg redis queue is clear. 
redis_orphans = ncg.queue_length print("jobs left on the queue: ", redis_orphans) # Reapply cluster job if there are still jobs left on the queue # + # njobs = ncg.apply('spatial.overlap.place_points_in_overlap', # chunksize=redis_orphans, # arraychunk=None, # walltime='00:20:00', # log_dir=log_dir, # reapply=True) # print(njobs) # - # Visualize the new distribution with ncg.session_scope() as session: results = ( session.query( Overlay.id, Overlay.geom.label('ogeom'), Points.geom.label('pgeom') ) .join(Points, functions.ST_Contains(Overlay.geom, Points.geom)=='True') .filter(Overlay.id < 10) .all() ) print('number of points: ', len(results)) fig, axs = plt.subplots(1, 1, figsize=(10,10)) axs.grid() oid = [] for res in results: if res.id not in oid: oid.append(res.id) ogeom = to_shape(res.ogeom) ox, oy = ogeom.envelope.boundary.xy axs.plot(ox, oy, c='k') pgeom = to_shape(res.pgeom) px, py = pgeom.xy axs.scatter(px, py, c='grey') # <a id="registration"></a> # # Subpixel Registration # [Return To Top](#toc) # # The next step is to subpixel register the measures on the newly laid points, to do this we are going to use the 'subpixel_register_point' function. As the name suggests, 'subpixel_register_point' registers the measures on a single point, which makes it parallelizable on a network's points. Before we fire off the cluster jobs, let's create a new subpixel registration log directory. # + subpix_log_dir = default_log.replace('logs', 'subpix_logs') print('creating directory: ', subpix_log_dir) if not os.path.exists(subpix_log_dir): os.mkdir(subpix_log_dir) # - # ## First Run # + from autocnet.matcher.subpixel import subpixel_register_point # ?subpixel_register_point # # ncg.apply? 
# + subpixel_template_kwargs = {'image_size':(81,81), 'template_size':(51,51)} njobs = ncg.apply('matcher.subpixel.subpixel_register_point', on='points', # start of function kwargs match_kwargs=subpixel_template_kwargs, geom_func='simple', match_func='classic', cost_func=lambda x,y:y, threshold=0.6, verbose=False, walltime="00:30:00", # start of apply kwargs log_dir=subpix_log_dir, arraychunk=200, chunksize=20000) # maximum chunksize = 20,000 print(njobs) # - # Check the progress of your jobs. # !squeue -u $uid | head # This function chooses a reference measure, affinely transforms the other images to the reference image, and clips an 'image' chip out of the reference image and a 'template' chip out of the transformed images. The template chips are marched across the image chip and the maximum correlation value and location is saved. # # The solution is then evaluated to see if the maximum correlation solution is acceptable. The evaluation is done using the 'cost_func' and 'threshold' arguments. The cost_func is dependent two independent variables, the first is the distance that a point has shifted from the starting location and the second is the correlation coefficient coming out of the template matcher. The __order__ that these variables are passed in __matters__. We are not going to consider the distance the measures were moved and just look at the maximum correlation value returned by the matcher. So, our function is simply: $y$. # # If the cost_func solution is greater than the threshold value, the registration is successful and the point is updated. If not, the registration is considered unsuccessful, the point is not updated, and is set to ignore. # # So, 'subpixel_register_point' requires the following arguments: # - pointid # - match_kwargs (image size, template size) # - cost_func # - threshold # Count the number of jobs started by looking for generated logs. jobid = '' # put jobid int here # ! 
ls $subpix_log_dir/*$jobid* | wc -l # Monitor how many jobs have completed or failed. # !sacct -j $jobid -s 'completed' | wc -l # !sacct -j $jobid -s 'failed' | wc -l # Check to see if the ncg redis queue is clear once squeue is empty. redis_orphans = ncg.queue_length print("jobs left on the queue: ", redis_orphans) # Reapply cluster job if there are still jobs left on the queue. # + # job_array = ncg.apply('matcher.subpixel.subpixel_register_point', # reapply=True, # chunksize=redis_orphans, # arraychunk=None, # walltime="00:30:00", # log_dir=subpix1_log_dir) # print(job_array) # - # ### Visualize Point Registration # + from plio.io.io_gdal import GeoDataset from autocnet.transformation import roi from autocnet.utils.utils import bytescale roi_size = 25 with ncg.session_scope() as session: measures = session.query(Measures).filter(Measures.template_metric < 0.8, Measures.template_metric!=1).limit(15) for meas in measures: pid = meas.pointid source = session.query(Measures, Images).join(Images, Measures.imageid==Images.id).filter(Measures.pointid==pid, Measures.template_metric==1).all() sx = source[0][0].sample sy = source[0][0].line s_roi = roi.Roi(GeoDataset(source[0][1].path), sx, sy, size_x=roi_size, size_y=roi_size) s_image = bytescale(s_roi.clip()) destination = session.query(Measures, Images).join(Images, Measures.imageid==Images.id).filter(Measures.pointid==pid, Measures.template_metric!=1).limit(1).all() dx = destination[0][0].sample dy = destination[0][0].line d_roi = roi.Roi(GeoDataset(destination[0][1].path), dx, dy, size_x=roi_size, size_y=roi_size) d_template = bytescale(d_roi.clip()) fig, axs = plt.subplots(1, 2, figsize=(10,10)); axs[0].imshow(s_image, cmap='Greys'); axs[0].scatter(image_size[0], image_size[1], c='r') axs[0].set_title('Reference'); axs[1].imshow(d_template, cmap='Greys'); axs[1].scatter(image_size[0], image_size[1], c='r') axs[1].set_title('Template'); # - # ## Second run # We are going to rerun the subpixel registration with larger 
chips to attempt to register the measures that failed first run. subpixel_template_kwargs = {'image_size':(221,221), 'template_size':(81,81)} # Additionally, 'subpixel_register_point' can be run on a subset of points, using either the 'filters' or the 'query_string' arguments. # # The 'filters' argument does a equals comparison on point properties and **filters out** points with a certain property value (e.g.: points where ignore=true). While the 'query_string' argument can perform inequalities and **applies on** the selected values. Some examples of possible filters and query_string values are filters = {'ignore': 'true'} # filters out points where ignore=true query_string = """ SELECT DISTINCT pointid FROM measures WHERE "templateMetric" < 0.65 """ # only grabs points with template metrics less than 0.65 # filters and query_string cannot be applied at the same time. So choose one, comment out the other argument's line and rerun the subpixel registration apply. # + njobs = ncg.apply('matcher.subpixel.subpixel_register_point', on='points', # start of function kwargs # filters=filters, ##### NEW LINE query_string=query_string, match_kwargs=subpixel_template_kwargs, geom_func='simple', match_func='classic', cost_func=lambda x,y:y, threshold=0.6, verbose=False, walltime="00:30:00", # start of apply kwargs log_dir=subpix_log_dir, arraychunk=50, chunksize=20000) # maximum chunksize = 20,000 print(njobs) # - # Subsequent runs of 'subpixel_register_point' can be run on all points, if this is done AutoCNet checks for a previous subpixel registration result in the database and only updates the geometry if the new result is better. Additionally, the apriori geometry (original camera pointing) is stored in the database and subpixel registration is always done on the apriori geometry to avoid 'measure walking'. 'Measure walking' refers to a measure's geometry moving further and further away from its original location due to multiple runs of subpixel registration. 
In traditional software the apriori geometry may be stored, but subpixel registration is typically run off of the current geometry. # Count the number of jobs started by looking for generated logs. # ! squeue -u $uid | wc -l # ! squeue -u $uid | head # Count the number of jobs started by looking for generated logs. # + jobid = '' # put jobid int here # ! ls $log_dir/*$jobid* | wc -l # - # Check to see if the ncg redis queue is clear once squeue is empty. redis_orphans = ncg.queue_length print("jobs left on the queue: ", redis_orphans) # Reapply cluster job if there are still jobs left on the queue. # + # njobs = ncg.apply('matcher.subpixel.subpixel_register_point', # reapply = True, # walltime="00:30:00", # log_dir='/scratch/ladoramkershner/mars_quads/oxia_palus/subpix2_logs/', # arraychunk=50, # chunksize=20000) # maximum chunksize = 20,000 # print(njobs) # - # ### subpix2: Write out Network # Once you are finished leverage AutoCNet tools and want to move onto ISIS based analysis (qnet, jigsaw, etc.), you can use the ncg.to_isis() function to write the information in your database to an ISIS control network file. cnet = 'reiner_gamma_morning_ns7_ew5_t121x61_t221x81.net' ncg.to_isis(os.path.join(output_directory,cnet))
docs/users/workshops/FY21_workshop/4_network_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tf2 # language: python # name: tf2 # --- # %load_ext autoreload # %autoreload 2 import tictactoe import cProfile import numpy as np import math S = [0]*9 action_list = [0,1,2,3,4,5] N_A = 9 sqrt_n_a = int(math.sqrt(N_A)) # + def foo_numba_1(): for i in range(1000): # action_list = list(range(i%9+1)) S_array_2d = np.array(S).reshape(sqrt_n_a, sqrt_n_a) X_in_stack = tictactoe.get_X_in_stack_numba(N_A, sqrt_n_a, np.array(action_list), S_array_2d) def foo_1(): for i in range(1000): # action_list = list(range(i%9+1)) S_array_2d = np.array(S).reshape(sqrt_n_a, sqrt_n_a) X_in_stack = tictactoe.get_X_in_stack(N_A, sqrt_n_a, np.array(action_list), S_array_2d) # + def foo_numba(): for i in range(1000): action_list = list(range(i%9+1)) S_array_2d = np.array(S).reshape(sqrt_n_a, sqrt_n_a) X_in_stack = tictactoe.get_X_in_stack_numba(N_A, sqrt_n_a, np.array(action_list), S_array_2d) def foo(): for i in range(1000): action_list = list(range(i%9+1)) S_array_2d = np.array(S).reshape(sqrt_n_a, sqrt_n_a) X_in_stack = tictactoe.get_X_in_stack(N_A, sqrt_n_a, np.array(action_list), S_array_2d) # - # %time foo_1() # %time foo_numba_1() # %time foo() # %time foo_numba()
numba_X_stack.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SLU03 | Visualization with Pandas & Matplotlib: Learning notebook # *** # # In this notebook we will cover the following: # # - Scatter plots # - Line charts # - Bar/Column charts # - Histograms # - Box plots # - Chartjunk # - Matplotlib basics # - How to choose the right chart # ## Some theory first! # # Data Visualization techniques serve 3 main purposes: # - **Purpose 1** - Communicate an idea # - **Purpose 2** - Data Understanding # - **Purpose 3** - Monitorization # # Let's check them... # ### Purpose 1 - Communicate an idea # # Data Visualization (or *DataViz*) techniques can be used very effectively to communicate an idea to an external party. # # Here is an example: # # <img src="./data/map.jpeg" width="900"/> # # **This map is one of the oldest dataviz examples that exist.** # # In 1854, London was being ravaged by a deadly cholera outbreak. At that time, doctors believed that cholera vector of contagion was by the air, via what they called **“bad humours”**, that is, bad air floating around. **Doctor <NAME>** had a different idea, as he believed *water was the main way of spreading the disease*. # # So what he did is he went to the area with the most cholera cases detected, and did a **survey**, counting how many people had been infected in each neighbourhood block. And then he **displayed it on a map**. Here `each tiny black rectangle represents an infected person`. # # At that point **it became obvious that the biggest focus of disease emanated from a well on Broad Street** (you can see it labelled **`pump`** right at the epicenter of the infected cases). After using this map to convince both his colleagues and the local authorities, <NAME> managed to close the infected well, and soon after the cholera outbreak stopped. 
#
# [Here](https://www.rcseng.ac.uk/library-and-publications/library/blog/mapping-disease-john-snow-and-cholera/) is an in-depth article about **<NAME>'s map**

# ### Purpose 2 - Data Understanding
#
# For any scientist or analyst, data visualization really helps **to gain a better understanding and discover patterns in the data** we are analyzing.
#
# A classic example of this is the **`Anscombe Quartet`**:
#
# <img src="./data/anscombe.jpeg" width="700"/>
#
# This is a dataset, created by **<NAME>**, that consists of **4 different series of x and y** (shown on the top left of the image above). If a statistician were to analyze this dataset just by looking at some summary statistics (shown on the top right of the image above), he would see that for each one of these series, the statistics are the same. Same mean of x, mean of y, variance, etc. So he could come to the conclusion that *he had the same series of x and y repeated 4 times!*
#
# Well, if instead of just looking at summary statistics he had *plotted each series independently* (shown on the bottom of the image above), he would have realized that each one of the **4 series is completely different**; they just happen to have the same statistics.

# ### Purpose 3 - Monitorization
#
# Another use of data visualization is to **help monitor the processes or performance** of a company. This monitoring is usually performed via a **`dashboard`**.
#
# A dashboard is basically **a visual interface where key metrics are displayed**. This way the person or department who owns such a dashboard can very easily check the current status of whatever they are in charge of.
#
# Look at this example:
#
# <img src="./data/dashboard.png" width="800"/>
#
# The image on the left shows a *`report`* consisting of tens of columns and rows that was sent to a company's CFO; the image on the right is a dashboard representing the same data.
We can see that by representing the data visually, *we are doing the job of processing the data, so the person in charge (the CFO) can focus on their expertise, that is finding insights and making decisions*. # # *** # We are going to proceed now to learn the basics of visualization with python, but just before that, a word of caution about one of the pitfalls of data visualization: **`Chartjunk`**. # # What is **`chartjunk`**? Well, it is easily described with an example: # # <img src="./data/chartjunk1.jpg" width="700"/> # # Not clear? How about this one: # # <img src="./data/chartjunk2.png" width="700"/> # # [Chartjunk](https://www.edwardtufte.com/bboard/q-and-a-fetch-msg?msg_id=00040Z) consist of **any visual information that doesn't help the visualization, and that in facts makes it harder to understand.** Since Data visualization is a very creative part of Data Science, we are sometimes tempted to make compelling, innovative visualizations. This is great, but *we need to always be aware of the elements we are adding to our visualization*. Are they absolutely necessary? # # **As a `rule of thumb`, if you have to explain your visualization, it's probably too complex.** # # --- # ## Now, let's move to the practice! # We start by importing **`pandas`** and **`matplotlib`**. import pandas as pd import matplotlib.pyplot as plt # **`Matplotlib`** is a library initially designed to save charts to files, we usually use the submodule `pyplot` to plot things or customize plots. The convention is **importing it as `plt`**, *same way as we shorten pandas as pd*. (saves a few keystrokes!). # # When plotting charts on a jupyter notebook we have tell jupyter that we want to render the plots inside the notebook. # # The most common way to specify this is by using one jupyter notebook ✨[magic commands](http://ipython.readthedocs.io/en/stable/interactive/magics.html)✨, which are ways to modify the default behavior of the notebook. 
# # `%matplotlib inline` will set up the notebook to be used with matplotlib. # # <div class="alert alert-info"> # ⚠️ <b>NOTE: </b>This is not required when using <b>Jupyter Lab</b>. # </div> # %matplotlib inline # The dataset we are going to use is a variation of the Online Retail Dataset ([source](http://archive.ics.uci.edu/ml/datasets/online+retail)) # # This is a transnational data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retail.The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers. data = pd.read_csv("data/online_orders.csv") data.head() # The dataset consists of the following fields: # # - **`country`**: Country name # - **`date`**: Date the row is showing a summary of # - **`sales`**: Total sales revenue (in UK pounds) for that country and date # - **`n_items`**: Number of items sold # - **`n_orders`**: Number of different online orders # - **`day_of_week`**: Day of week # # *** # ### Scatter plots # # We can use a scatter plot **`to see the relationship between variables`**, for example, the number of items sold and the total sales revenue data.plot.scatter(x='n_items', y='sales'); # We have made our very first chart, yaaaay!! 🎉🎉🎉 # # <div class="alert alert-info"> # ⚠️ <b>NOTE: </b>Did you see that we add a semicolon <code>;</code> at the end of the cell?<br/><br/> # That is because <b>jupyter notebooks</b> by default return the last value of a cell, so if we run the cell without semicolon the notebook will display the <b>plot object</b>. <i>(some random thing like <code>&lt; AxesSubplot:... &gt;</code>)</i> # </div> data.plot.scatter(x='n_items', y='sales') # Depending on your browser and your screen resolution, you might think that the previous chart is small. We can fix this by changing the matplotlib options to render bigger charts. # # We can set the chart size globally by changing the matplotlib default settings. 
These are included in the dictionary `plt.rcParams`. # The **plot size** is defined as a list with 2 elements, `[width, height]` in **inches** # So the default chart size is 6 inches wide by 4 inches tall, which is fairly small. # # For example, if we want to set the chart size as 10 inches wide by 10 inches high, we would do so as: plt.rcParams["figure.figsize"] = [14, 14] # Now if we plot the same chart again, we see it's bigger: data.plot.scatter(x='n_items', y='sales'); # Alternatively, we can use the **`%matplotlib notebook`** ✨magic✨ command, which adds a small widget to visualize charts. # # # <div class="alert alert-info"> # ⚠️ <b>NOTE: </b>the <code>%matplotlib notebook</code> ✨magic✨ adds some code to your browser, and <b>might not work as expected</b>. When using this ✨magic✨, after finishing interacting with one plot, <b>make sure you turn that plot off</b> (by clicking the off button on top of the visualization). # </div> # # # <div class="alert alert-warning"> <b>In case it's not working properly</b>, you can click <b>Kernel > Restart &amp; Clear Output</b> to clean your notebook and start over. <br/>(with <code>%matplotlib inline</code> this time)</div> # %matplotlib notebook # Now we can plot the chart again *(if it doesn't work, try running the ✨magic✨ cell again)* data.plot.scatter(x='n_items', y='sales'); # When you use the notebook ✨magic✨, you get to resize, zoom, and export the image. One important point to note is that the notebook ✨magic✨ embedds a widget in the notebook, and **we need to shut it down every time we do a chart** (by clicking the on/off button on top of the widget). # # Generally, when we are plotting for our own use (and we do not need to make very big plots), using `%matplotlib inline` and setting a decent plot size is simpler and works. 
# %matplotlib inline
plt.rcParams["figure.figsize"] = [10, 10]

data.plot.scatter(x='n_items', y='sales');

# No surprises here, we see that there is a linear relationship between the number of items purchased and the sales revenue.
#
# We can see, for example, how sales relate to the day of week:

data.plot.scatter(x='day_of_week', y="sales");

# We can see that sales are generally equally distributed, except for a little uptick on Thursdays and no sales at all on Saturday!
#
# Here we can see how data visualization can help us with data QA (Data Quality Assurance), and can help us detect inconsistencies or errors in the data. In this case we would need to make sure everything is correct, since online sales don't usually stop on Saturdays.
#
# For the types of charts that pandas supports we can use a different function to plot them using the **`kind`** argument.
#
# For example **we can plot the same scatter plot as above** by doing:

data.plot(x='day_of_week', y="sales", kind="scatter");

# ***
# ### Line Chart
#
# We use line charts generally when we want **`to see the trend of one (or many variables) over time (or evolution)`**. For example, let's say we want to see the sales in Germany compared to France over time. **Pandas `plot` method** tries a line chart by default.
#
# We can set the index of the dataframe to the date and **pandas `plot` method** will automatically pick it up as the x axis

data_indexed = data.set_index('date')
data_indexed.head()

# The **pandas plotting** library is just a thin wrapper around **`matplotlib`**. Matplotlib has a somewhat convoluted API, and pandas makes plotting common charts much easier.
#
# For example, here we call matplotlib's `legend` directly to display the legend on top of pandas plots

data_indexed[data_indexed.country=='Germany'].sales.plot(label="Germany")
data_indexed[data_indexed.country=='France'].sales.plot(label="France")
plt.legend();

# We see that sales numbers in France and Germany are generally similar.
And that each country has a spike of sales at a certain day in 2011. # # *** # #### Using matplotlib api # # Sometimes, we want to display some information in a way that **pandas `plot` method** doesn't allow us to. In those situations we can use **matplotlib `plt.plot` method** directly as well. This gives us much more flexibility regarding how we can display the data # + for country in data.country.unique(): plt.plot(data[data.country == country].n_items, # plot this series as the x data[data.country == country].sales, # plot this series as the y marker='o', # make the markers circle shaped linestyle='', # don't connect the dots with lines ms=3, # size of the markers (in pixels) label=country # use the country as the label of this plot ) plt.legend(); # - # *** # ### Bar/Column chart # # We use bar/column charts **`to perform comparisons of discrete quantities`**. Normally you use the **horizontal bars** to compare across elements without a natural order (**categorical variable**) and **Column charts** (vertical bars) to compare among an **ordinal variable**. # + # same thing as: # data.groupby('country')['sales'].sum().plot(kind="barh"); data.groupby('country')['sales'].sum().reset_index().plot.barh(x="country", y="sales"); # - # Here we can see that the top countries in terms of sales on the dataset are Netherlands, Germany and EIRE. This does not mean that they are the top countries in terms of sales, because remember, the dataset consist of country + date. # If we need **`to show the distribution between the data, detailing the comparison with other categorical elements`**. For example, looking at the relative decomposition of each main element comparison based on the levels of another categorical element. # # We must use the **`stacked=True`** argument. Let's see: # <div class="alert alert-info"> # In order to give you this example, we need to make some data transformations using some concepts and methods that we haven't tell you <strong>yet</strong>. 
So, just ignore it for now. # # </div> # + data['weekday'] = pd.to_datetime(data['date']).dt.day_name() # Creating a new DataFrame with the sum of n_orders by country and weekday dpt = data.pivot_table(values=['n_orders'], index='country', columns='weekday', aggfunc='sum') # converting to percentage dpt = dpt.div(dpt.sum(1), axis=0) dpt # + # same thing as: #dpt.plot(kind='barh', stacked=True); dpt.plot.barh(stacked=True); # - # Here, we could check the percentage from **number of orders** by each **country** relative to the **day of week**. Allowing us to see that Switzerland and Belgium have more than 20% of their orders happening on Fridays. # *** # ### Histograms # # We can use histograms **`to make sure that nothing is fishy with the data`**, as well as to gain an understanding of its distribution. For example, we can use it to see how are sales distributed in EIRE # + # same thing as: # data[data.country=='EIRE'].sales.plot(kind="hist"); data[data.country=='EIRE'].sales.hist(); # - # Here we can see that, even though the big majority of sales days in EIRE are less than 2500 GBP, some days there are significantly higher sales. # # We can limit the extend of the xaxis of a histogram by passing the parameter `xlim` to the plot data[data.country=='EIRE'].sales.plot.hist(xlim=(0,7000)); # We can also specify the number of *groups* of the histogram by using the paramenter `bins` data[data.country=='EIRE'].sales.plot.hist(xlim=(0,7000), bins=50); # So we see that the most common daily sales revenue in EIRE is 1000 GBP per day # *** # ### Box Plot # # The box plot (also called **`whisker diagram`**) is a good chart when you want **`to compare how a variable is distributed among different groups`**. 
For example, on the chart of the bellow, we are analyzing the weight of all different car models manufactured in 2004 and comparing between the region of origin of those cars # # **`Outliers`** on the box plot (those points beyond the whiskers) are usually considered as being `1.5 * IQ` from **Quartile 1** (low outliers) and `1.5 * IQ` from **Quartile 3** (high outliers) # kind="boxplot" doesn't work since it's not part of the pandas.plot api for legacy reasons data.boxplot(column="sales", by="country"); # Here we see that the top countries in terms of points (France, Germany and Eire) have a significant number of outliers in terms of sales. Netherlands on the other hand, has sales that are more stable (probably a smaller number of bigger orders) # Sometimes we want to show a boxplot horizontally, we can do so by using the argument `vert=False` data.boxplot(column="sales", by="country", vert=False); # *** # ## Customizing plots # # Using **pandas `plot` method** as it is, is good enough for when doing data analysis (remember, data understanding is one of the main goals of data visualization). # # However, **when we want to share a chart with someone else** (whether that person is another data scientist or someone without a technical background), **we need to take more steps in order to provide the most effective visualizations**. # # <img src="https://imgs.xkcd.com/comics/convincing.png" width="600"/> # # **When producing charts for external use, always remember:** # # - Add a title describing the chart # - Add labels for all the axes. 
# - Check the axes limits to make sure they are appropriate and help convey the right information # - Add legends if necessary (when dealing with multiple groups) # - Make sure the color palette you choose will display properly on the medium where it is going to be consumed (for example, which color is the background where the chart will be inserted affects how the chart is visualized) # - It is good practice, specially if the chart is to be displayed publicly (and thus probably isolated from its original document) to add a footnote to the chart adding the source of the data. # #### Styles # # Pandas uses matplotlib as a plotting backend. Thus, we can use matplotlib's styles and api to modify our charts/plots # # We can change the style, making use of all of matplotlib's styles available, checking through this line bellow: plt.style.available # For example, we can change the style of the plots to the style for the package `dark_background` plt.style.use('dark_background') data[data.country=='EIRE'].sales.plot.hist(xlim=(0,7000), bins=70); # We can see that now the plot has a completely different set of fonts, color and sizes. # # Of course we can _bypass_ the applied style by specifying parameters in the plot. # # Now, for example, if we want to plot the same histograme in a nice pink color we would do it like this: data[data.country=='EIRE'].sales.plot.hist(xlim=(0,7000), bins=70, color="pink"); # #### Labels and titles # # Finally, if we wanted to share our plot with someone else (to display on a paper or to share with a client) we can make use of matplotlib customization to make our plot more explicit and nicer looking. # # For example, we can choose the style of another library *(ggplot, the most famous R plotting library)* plt.style.use('ggplot') plt.rcParams["figure.figsize"] = [14, 14] # bigger plots # Here we customize the chart to make it ready to be shared. 
We apply the following modifications: # # - We use `label` inside the `plot` function to assign a label to each plot. This will draw each line a different color, and will display their name on the legend. # - We activate the plot legend with `plt.legend` # - We add a title with `plt.legend` (size 25) # - We set the range for the y-axis to (0, 12350) # - We add a legend for the x axis with `plt.xlabel` (size 20 inches) # - We add a legend for the y axis with `plt.ylabel` (size 20 inches) # - We add text on a specific position (Starting from the left and from the bottom, need to try multiple times to see what looks good) data_indexed = data.set_index('date') data_indexed[data_indexed.country=='Germany'].sales.plot(label="Germany") data_indexed[data_indexed.country=='France'].sales.plot(label="France") plt.legend() plt.ylim(0, 12350) plt.title("Daily sales in France and Germany", size=25) plt.xlabel("Date", size=20) plt.ylabel("Sales (GPB)", size=20) plt.figtext(0.55, 0.08, "Source: Online Retail Dataset Transactions between 01/12/2010 and 09/12/2011"); # <div class="alert alert-info"> # ⚠️ <b>NOTE: </b>If you couldn't see the <b>title</b> and the <b>source</b> in the chart above, you can click <b>Kernel > Restart &amp; Clear Output</b> to clean your notebook and start over. Run the cells on <b><i>"Now, let's move to the practice!" Section</i></b> and run the cell on <b><i>"Labels and titles" section</i></b>. # </div>
S01 - Bootcamp and Binary Classification/SLU03 - Visualization with Pandas & Matplotlib/Learning notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true) # # <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=TechnologyStudies/IntroductionToPython/introduction-to-python-classes.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a> # # Python - Classes # # In this lesson, we are going to introduce a powerful programming language technique called Object-Oriented Programming, or OOP for short, using Python Classes. First, we will introduce Python classes, and then, we will provide the motivation for OOP and how Python Classes fit into OOP. # # #### Table of Contents: # [Python Classes](#Python_Classes) <br /> # [Getters and Setters](#Getters_and_Setters) <br /> # [Practice Project](#Practice_Project) <br /> # [Class Inheritance](#Class_Inheritance) <br /> # [Introduction to Object Oriented Programming](#Intro_to_OOP) <br /> # [Conclusion](#Conclusion) <br /> # # To run a cell, select the cell you wish to run, then either: # 1. Find the run button at the top of the page, and click it or, # 2. Hit shift+enter. # <a id='Python_Classes'></a> # ## Python Classes # # A **Python Class** is a structure that can group together relevant variables and functions. The variables and functions in this structure are designed to be used only in the context of that class. # # To give more accurate terminology, the functions defined inside a class are called **methods** and the variables are called **attributes**. # # Sound familiar? 
We have already seen Python structures with methods, such as lists, dictionaries and strings. (See introduction to data structures in Python). If we wanted to access these methods, we used the dot operator. This is also true for Python classes. To use the actual functions, or methods, inside a Python class, we use the dot operator. # # As a matter of fact, classes make up the Python standard library, such as the string class, but are also a central programming technique used in Python, as well as C and C++. # # Now, we cannot actually do anything with our class just by defining it and adding attributes and methods. If we want to do some work, we have to define an **instance** of our class. To see what we mean, suppose we define a class called Person. This class has attributes that we would associate with a person, such as height, age, weight, gender, etc. The methods in the person class would be actions a person would do, such as Work(), DoHomework(), Eat(), etc. # # We cannot say, Person.Work(), as Person is a definition and not an instance. We would have to declare an instance of that class. Say Person Sally. Now, Sally is an instance of the Person class, and Sally can have **unique values**. If we were to create another instance of Person, such as Person Frank, the values of Frank and Sally's attributes would be different. # # Notice that we declared Sally and Frank just as normal variables of type Person. Python classes allow you to create your own data types, with specified attributes and methods. This allows for a larger range of problem solving abilities, as well as more robust data handling and organization. # ### Remark: # # There are many languages that support classes such as C, C++ and Java. A major difference between Python classes and classes in C++ and Java is in the way data inside a class is handled and accessed. # # In C++ and Java, we are allowed to create what are know as private and public variables. 
# Private variables are variables which cannot be accessed from outside the
# class. In order to access them, we need to create methods inside the class
# which allow a user to modify the variables. These methods are explored later
# on in this notebook. We use private variables when there is sensitive data
# inside a class. We do not want a user to directly access the data, as it
# would threaten the integrity of the program or the privacy of another user.
#
# In contrast, a public attribute is an attribute that can be accessed from
# outside the class. In general, all attributes in a Python class or instance
# are public -- but it can be a good practice to treat them like they're not.

# +
## An example ##
## The syntax for declaring a class is: class Class_Name ##

class Employee:
    """A small example class that stores and displays basic employee data."""

    # This first method is called the initializer (the default constructor, in
    # C++ terms). It is required in every class you make, and it is called
    # every time a new instance is created.
    def __init__(self, Name, Salary, Age):
        self.Employee_Name = Name      # the employee's name
        self.Employee_Salary = Salary  # the employee's salary
        self.Employee_Age = Age        # the employee's age

    # Methods in a Python class are defined like ordinary functions, except
    # that self is always a required first parameter. When we call these
    # methods, however, we do not need to pass self explicitly.
    # The three methods below print the variables of a given instance.
    def Display_Name(self):
        print("Employee Name:", self.Employee_Name)

    def Display_Salary(self):
        print("Employee Salary:", self.Employee_Salary)

    def Display_Employee_Age(self):
        print("Employee Age:", self.Employee_Age)


# +
# Creating a new instance #

# To create a new instance of a class, simply declare it as a normal variable
# and pass your desired information to the initializer. Note that self is not
# passed.
Emp1 = Employee("Frank", 20000, 25)
Emp2 = Employee("Sarah", 40000, 34)
# -

# Accessing class methods #

# We can access the methods of the Employee class using the dot operator.
# Because these are two different instances, they will print two different
# values.
Emp1.Display_Name()
Emp2.Display_Name()

# +
# Accessing data inside the class #

# In Python it is possible to read the actual values stored inside an
# instance with the dot operator. This is not the case in other languages
# such as Java and C++.
print(Emp1.Employee_Name)
print(Emp2.Employee_Name)
# -

# <a id='Getters_and_Setters'></a>
# ### Getters and Setters:
#
# In many cases, we will want to access or change the variables inside a
# class. This is where functions called **Getters** and **Setters** come into
# play. A getter is a class method which returns the value of a variable
# inside a class. A setter is a class method which allows a variable to be
# updated.
#
# We will illustrate with an example.
# +
## Example with Getters and Setters ##

class Better_Employee:
    """Employee variant whose data is read via getters and updated via setters."""

    def __init__(self, Name, Salary, Age):
        self.Employee_Name = Name
        self.Employee_Salary = Salary
        self.Employee_Age = Age

    # These are the get functions: they allow values that are kept inside the
    # class to be used outside of it.
    def Get_Name(self):
        return self.Employee_Name

    def Get_Salary(self):
        return self.Employee_Salary

    def Get_Age(self):
        return self.Employee_Age

    # These are the set functions: they allow the user to update values
    # inside an instance.
    def Set_Name(self, new_name):
        self.Employee_Name = new_name

    def Set_Salary(self, new_salary):
        self.Employee_Salary = new_salary

    def Set_Age(self, new_age):
        self.Employee_Age = new_age

    # Display helpers, built on top of the getters.
    def Display_Name(self):
        print("Employee Name:",self.Get_Name())

    def Display_Salary(self):
        print("Employee Salary:",self.Get_Salary())

    def Display_Age(self):
        print("Employee Age:",self.Get_Age())


# +
# Now we create new instances #
Emp1 = Better_Employee("Frank", 20000, 25)
Emp2 = Better_Employee("Sarah", 40000, 34)

Emp1.Display_Name()
Emp1.Display_Salary()
Emp1.Display_Age()
print(Emp1.Employee_Name)

Emp2.Display_Name()
Emp2.Display_Salary()
Emp2.Display_Age()

# +
# Suppose that everyone turns one year older and, because of existing, gets a
# 40000 dollar raise. Now the age and salary information held by each
# instance is outdated, so we need to update it using the setters.

# These will update the ages and salaries #
Emp1.Set_Age(26)
Emp1.Set_Salary(60000)
Emp2.Set_Age(35)
Emp2.Set_Salary(80000)

Emp1.Display_Salary()
Emp1.Display_Age()
Emp2.Display_Salary()
Emp2.Display_Age()

# +
# Now suppose we want to use the values in the class elsewhere, such as in
# another file. We can access those values by using the get functions. Note
# that these are not the original variables; just the values associated with
# them are returned. The original variables remain inaccessible.
#

Salary_To_Be_Used_Elsewhere = Emp2.Get_Salary()
Age_To_Be_Used_Elsewhere = Emp1.Get_Age()

# Now we have access to the values we extracted from the instances.
print(Salary_To_Be_Used_Elsewhere)
print(Age_To_Be_Used_Elsewhere)
# -

# <a id='Practice_Project'></a>
# ### Practice Project
#
# Make a Bank Account class with the following attributes:
#
# > Name of owner <br />
# > ID (Integer of your choice, greater than 0) <br />
# > Total balance <br />
# > Savings account balance <br />
# > Checking account balance <br />
#
# And create the following methods:
#
# > Deposit money into savings <br />
# > Deposit money into checking account <br />
# > Withdraw money from savings or checking <br />
# > Get Total Balance <br />
# > Get Owner Name <br />
# > Set ID <br />

# +
## Code Goes Here ##

# -

# ### Extra Resources:
#
# [Very detailed introduction to classes](https://docs.python.org/3/tutorial/classes.html) <br />
# [Another introduction to classes](https://www.learnpython.org/en/Classes_and_Objects)<br />
# [First part in a series on classes](https://www.geeksforgeeks.org/object-oriented-programming-in-python-set-1-class-and-its-members/)<br />
# [Second Part](https://www.geeksforgeeks.org/object-oriented-programming-in-python-set-2-data-hiding-and-object-printing/)

# <a id='Class_Inheritance'></a>
# ## Class Inheritance
#
# Suppose we create a class called Car, and a class called Truck, each with
# its own attributes and methods.

# +
# Car Class #
class Car:
    # Attributes: #
    Car_Name = " "
    Model_Year = 0
    Cost = 0
    Max_Speed = 0
    Horse_Power = 0
    Torque = 0
    isStandard = False
    Has_AC = False
    Other = " "

    # Has Methods: #
    def __init__(self, Name, Year, Other_Stuff):
        self.Car_Name = Name
        self.Model_Year = Year
        self.Other = Other_Stuff
        # ... #

    def Get_Model_Year(self):
        return self.Model_Year
    # etc #


# +
# Truck Class #
class Truck:
    # Attributes: #
    Truck_Name = " "
    Model_Year = 0
    Cost = 0
    Max_Speed = 0
    Horse_Power = 0
    Torque = 0
    Towing_Capacity = 0
    isStandard = False
    Has_AC = False
    Other = " "
    # etc #

    # Has Methods: #
    def __init__(self, Name, Year, Other_Stuff):
        self.Truck_Name = Name
        self.Model_Year = Year
        self.Other = Other_Stuff
        # ... #

    def Get_Model_Year(self):
        return self.Model_Year
    # etc #


my_car = Car("Herbie", 1969, "A movie star that's also a car!")
print(my_car.Get_Model_Year())

my_truck = Truck("Optimius Prime", 1986, "The animated one was DEFINITELY better.")
print(my_truck.Get_Model_Year())
# -

# For the most part, these classes are identical, save for a few unique
# attributes. This is redundant! We have just done more work creating the
# same thing under a different name.
#
# So what can we do? Well, we can create what's known as a **generic class**:
# a class that can describe both a car and a truck, as well as other
# vehicles. Call this class Vehicle, with all the attributes a vehicle
# possesses.
#
# Then, when we go to create a class of something more specific, we can
# **inherit** attributes and methods from the Vehicle class.

# This technique is called **Class Inheritance**. Class inheritance allows us
# to make classes with the same implementation as another class. This removes
# redundant work and allows for more general classes.
#
# The class which we inherit from is called a **base class**. Classes which
# inherit from the base class are called **derived classes**. Base classes
# and derived classes can also be called **parent classes** and **child
# classes** respectively.

# +
# Vehicle Class: carries all attributes associated with any vehicle #
class Vehicle:
    # Attributes #
    Vehicle_Name = " "
    Model_Year = 0
    Cost = 0
    Max_Speed = 0
    Other = " "
    # etc #

    # Has Methods #
    def __init__(self, Name, Year, Other_Stuff):
        self.Vehicle_Name = Name
        self.Model_Year = Year
        self.Other = Other_Stuff
        # ... #

    def Display_info(self):
        print("'%s' was built in %d. Other information: '%s'"
              % (self.Vehicle_Name, self.Model_Year, self.Other))
    # etc #


# Now we can derive a more specific car class without having to redefine most
# of our attributes. To inherit, we add an extra parameter to the class
# declaration.
class Car(Vehicle):
    # Now we have access to the attributes and methods of the Vehicle class.
    # In the constructor, all attributes shared with Vehicle are initialized
    # by the Vehicle class, and all attributes unique to Car are initialized
    # here.
    def __init__(self, Name, Year, Can_Talk, Other_Stuff):
        super().__init__(Name, Year, Other_Stuff)  # super() passes things on to the parent class
        self.Can_Talk = Can_Talk

    def Display_info(self):
        super().Display_info()  # Again, super passes things up to the parent class
        print("It can talk!" if self.Can_Talk else "It can't talk.")


class Truck(Vehicle):
    def __init__(self, Name, Year, Carries_Cargo, Other_Stuff):
        super().__init__(Name, Year, Other_Stuff)
        self.Carries_Cargo = Carries_Cargo

    def Display_info(self):
        super().Display_info()  # Again, super passes things up to the parent class
        print("It can carry cargo!" if self.Carries_Cargo else "Don't ask it to carry cargo.")


my_car = Car("Herbie", 1969, False, "A movie star that's also a car!")
my_car.Display_info()
print()

another_car = Car("Kitt", 1986, True, "From the TV Smash hit 'Knight Rider'")
another_car.Display_info()
print()

my_truck = Truck("Optimius Prime", 1986, False, "The animated one was DEFINITELY better.")
my_truck.Display_info()
# -

# ### Extra Resources:
#
# [Introduction to class inheritance](https://www.digitalocean.com/community/tutorials/understanding-class-inheritance-in-python-3) <br />
# [Another introduction](https://www.programiz.com/python-programming/inheritance)<br />
# [Introduction to Python Classes and
Inheritance](http://www.jesshamrick.com/2011/05/18/an-introduction-to-classes-and-inheritance-in-python/)<br /> # [Third Part in the series](https://www.geeksforgeeks.org/oop-in-python-set-3-inheritance-examples-of-object-issubclass-and-super) <br /> # <a id='Intro_to_OOP'></a> # ## A word on Object Oriented Programming and Data # # So what exactly is Object Oriented Programming (Or OOP for short)? OOP is a programming method in which the primary building blocks used to create the program are objects, and the objects interact with each other through methods. Inside these objects are variables and functions that are related to each other in some way. # # OOP allows for better code organization, as we can group related functions and variables in one spot under one name. # # OOP also allows for what is called **data abstraction**. Data abstraction is a way of separating the way we have written the object, and the way a user interacts with said object. # # OOP allows for whats known as **data encapsulation**. Data encapsulation allows us to hide sensitive information from the user. If the user wants to change that information, they must go through the objects methods to do so, instead of modifying the value directly. Doing this will ensure that our data does not get unwanted values which cause the object to break in some way. A more extreme version of data encapsulation is **data hiding**. Data hiding occurs when the user can neither see nor access certain information through the objects methods. You would do something like this if the information is very sensitive or important for the program to function. # # For example, when we are working with strings, we have access to string functions and methods. However, as a user of these methods and functions, we don't really need to know how these methods or functions work to use them. This is an example of data abstraction. 
# # If you wanted to access the constants in a string class, such as the ascii string, which is defined to be "abcdefghi...xyz", we would have to explicitly state that we want to use it. We would have to say: string.ascii_lowercase. This is an example of data encapsulation. If we want to use a piece of information, we have to use the object to obtain it. # # # ### Extra Resources: # # [Introduction to OOP and Inheritance](https://realpython.com/python3-object-oriented-programming/) <br /> # [Another Introduction](https://www.tutorialspoint.com/python/python_classes_objects.htm) <br /> # [Wikipedia entry on OOP](https://en.wikipedia.org/wiki/Object-oriented_programming) <br /> # [Disadvantages of OOP](https://www.quora.com/What-are-the-disadvantages-of-object-oriented-programming-languages) # <a id='Conclusion'></a> # ## Conclusion # # We have seen in this notebook how to construct and use classes in Python, as well as class inheritance. We expect that reader has a basic understanding of the following concepts: # # > 1. Basic classes and class syntax. <br /> # > 2. Accessing class attributes and class methods using the dot operator. <br / > # > 3. Using getters and setters to modify attributes. <br /> # > 4. Class inheritance and the motivation for inheritance. <br /> # > 5. Syntax for creating an inherited class <br/ > # > 6. Data abstraction and data hiding. <br /> # [![Callysto.ca License](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-bottom.jpg?raw=true)](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
_build/html/_sources/curriculum-notebooks/TechnologyStudies/IntroductionToPython/introduction-to-python-classes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="../../img/ods_stickers.jpg"> # ## Открытый курс по машинному обучению. Сессия № 3 # # ### <center> Автор материала: <NAME>, <EMAIL> # ## <center> Индивидуальный проект по анализу данных </center> # ## <center> Классификация спама в SMS </center> # ### Описание набора данных и признаков # #### Цель работы. # Задача состоит в том, чтобы построить модель классификации спам сообщений в SMS, на основе имеющихся данных. # #### Входные данные. # Решаться задача будет на датасете взятом тут: https://www.kaggle.com/uciml/sms-spam-collection-dataset # # * v1 метка spam/ham # * v2 текст sms # # Целевой признак является метка spam/ham является ли SMS спамом или нет # ### Первичный анализ import pandas as pd import numpy as np import seaborn as sns import scipy import matplotlib.pyplot as plt from nltk.stem.wordnet import WordNetLemmatizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split, validation_curve from sklearn.metrics import roc_auc_score, precision_score from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression, LogisticRegressionCV from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import StratifiedKFold # %matplotlib inline import warnings warnings.filterwarnings(module='sklearn*', action='ignore', category=DeprecationWarning) df = pd.read_csv('../../data/spam.csv', encoding='latin-1') df.head() df.info() # #### В заявленных признаках v1 и v2 пропущенных значений нет. Видим что помимо признаков v1 и v2 имеем еще 3 признака. Скорее всего это какой-то мусор df['Unnamed: 2'].unique()[: 5] df[df['Unnamed: 2'] == ' PO Box 5249'] # #### Добавим данные из трех "левых" столбцов к тесту SMS и удалим их. 
Переименуем признаки. Для удобства переобозначим метки # * spam - 1 # * ham - 0 df['v2'] = df['v2'] + df['Unnamed: 2'].fillna('') + df['Unnamed: 3'].fillna('') + df['Unnamed: 4'].fillna('') df.drop(columns = ['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace = True) df.rename(columns = {'v1' : 'label', 'v2' : 'sms'}, inplace = True) df['label'] = df['label'].map({'spam' : 1, 'ham' : 0}) df.head() # #### Датасет содержит 5572 объекта. Теперь пропущенных значений в нем нет. df.info() # #### Посмотрим как выглядит обычное SMS и спам SMS df[df['label'] == 0].sample(3) df[df['label'] == 1].sample(3) # #### В спам-сообщениях часто много заглавных букв, восклицательных знаков, и чисел, типа поздравляем вы выиграли миллион # #### Посмотрим на распределение классов. _, ax = plt.subplots() plt.bar(np.arange(2), df['label'].value_counts(), color = ['green', 'red']) ax.set_xticks(np.arange(2)) ax.set_xticklabels(['ham', 'spam']); df['label'].value_counts()[1] / df.shape[0], df['label'].value_counts()[0] / df.shape[0] # #### Видим что классы несбалансированы # ### Инсайты # #### Здравый смысл подсказывает, что обычно в спам сообщениях вам пишут какие-то левые люди, которые представляются вашими друзьями и зовут куда-то зарегистрироваться или вас поздравляют с выигрышами в лотерею. Значит признаки большого количества заглавных букв, обилия знаков препинания и чисел в текстах сообщений, должны что-то дать # ### Генерация признаков # #### Пока что будем генерировать признаки для объединенной выборки. Удалим знаки препинания, удалим опечатки, приведем тексты к нижнему регистру, сгенерируем признаки длина текста, число знаков препинания, наличие символа, не являющегося цифрой или буквой алфавита. # #### Первым признаком который мы создадим будет длина SMS. 
Обычно SMS имеют ограничения на количество слов, поэтому спамеры чтобы не платить много денежек стараются не превосходить эту длину df['len'] = df['sms'].apply(lambda x : len(x.strip().split())) # #### Создадим счетчик знаков препинания в тексте SMS, а затем удалим знаки препинания. В идеале нужен счетчик восклицательных знаков, так как в спаме вас обычно поздравляют с выигрышами в лотереях и прочем и используют много восклицаний import regex as re df['punctuation'] = df['sms'].apply(lambda x : len(re.findall("[^\P{P}-]+", x))) df['punctuation'] = df['sms'].apply(lambda x : len(re.findall("[^\P{P}-]+", x))) df['sms'] = df['sms'].apply(lambda x : re.sub("[^\P{P}-]+", "", x)) # #### Создадим счетчик заглавных букв в тексте SMS, а затем приведем тексты к нижнему регистру.. Зачастую в спам сообщениях пишут капсом. df['capital'] = df['sms'].apply(lambda x : sum(1 for c in x if c.isupper())) df['sms'] = df['sms'].apply(lambda x : str.lower(x)) # #### Посмотрим какие символы встречаются в текстах. Видим что помимо букв и цифр еще встречается много мусора. Создадим бинарный признак: содержит ли текст SMS символ кроме буквы и цифры. 
symbols = {} for x in [item for sublist in list(map(list, df['sms'].tolist())) for item in sublist] : if x in symbols : symbols[x] += 1 else : symbols[x] = 1 symbols volwes = 'aeiou' consonant = 'bcdfghjklmnpqrstvwxyz' digits = '0123456789' alphabet = set(volwes) | set(consonant) | set(digits) len(alphabet) bad_symbols = [x for x in symbols if x not in alphabet] bad_symbols = ''.join(set(bad_symbols) - set(' ')) bad_symbols df['badsymbol'] = df['sms'].apply(lambda x :1 if len([s for s in x if s in bad_symbols]) > 0 else 0) # #### Попробуем исправить опечатки df['sms'] = df['sms'].str.replace('å', 'a').str.replace('ä', 'a').str.replace('â', 'a').str.replace('á', 'a') df['sms'] = df['sms'].str.replace('õ', 'o').str.replace('ò', 'o').str.replace('ð', 'o').str.replace('ö', '0') \ .str.replace('ó', 'o').str.replace('ô', 'o') df['sms'] = df['sms'].str.replace('û', 'u') df['sms'] = df['sms'].str.replace('è', 'e') df['sms'] = df['sms'].str.replace('ì', '1').str.replace('ï', 'l') # #### В спам сообщениях часто упоминаются крупные денежные выигрыши. Нужно создать признаки : наличие числа в тексте и наличие символа валюты # #### Замечаем что среди символов в текстах имеются '$' и '£'. Создадим признак для них. df['moneysign'] = df['sms'].apply(lambda x : 1 if ('$' in list(x)) or ('£' in list(x)) else 0 ) # #### Остальные символы поудаляем. Вообще, возможно что при удалении знаков препинания мы поудаляли смайлы и возможно наличие/отсутствие смайла будет хорошим признаком. Если будет время надо подумать над этим. 
Признак исправлял ли я или нет symbols = {} for x in [item for sublist in list(map(list, df['sms'].tolist())) for item in sublist] : if x in symbols : symbols[x] += 1 else : symbols[x] = 1 bad_symbols = [x for x in symbols if x not in alphabet] bad_symbols = ''.join(set(bad_symbols) - set(' ')) bad_symbols for symb in bad_symbols : df['sms'] = df['sms'].str.replace(symb, '') symbols = {} for x in [item for sublist in list(map(list, df['sms'].tolist())) for item in sublist] : if x in symbols : symbols[x] += 1 else : symbols[x] = 1 symbols df.head() # #### Создадим признак: наличие в тексте SMS числа(возможно надо проверять не просто число, а число с множествой нулей). df['num'] = df['sms'].apply(lambda x : 1 if len([s for s in x if s in digits]) > 0 else 0) df.columns # #### Полезность признаков будем исследовать в дальнейшем с помощью модели # #### Разобьем данные на трейн и тест с одинаковым распределением целевой переменной target = df['label'].values X_train, X_test, y_train, y_test = train_test_split(df, target, test_size = 0.2, stratify = target, random_state = 10) y_train.sum() / len(y_train), y_test.sum() / len(y_test) X_train.shape, X_test.shape # #### В трейне 4457 объектов, в тесте 1115 # ### Визуальный анализ # #### Построим гистограммы созданных признаков слева и гистограммы созданных признаков в зависимости от целевой переменной справа for col in X_train.columns[2 :] : fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize = (20, 10)) # ax.set_ylabel('% фрагментов', fontsize=12) # ax.set_xlabel('Имя автора', fontsize=12) axes[0].set_title(col) axes[0].hist(X_train[col], bins = 200); axes[1].set_title(col) axes[1].hist(X_train[col][X_train['label'] == 0], bins = 200, label = 'ham') axes[1].hist(X_train[col][X_train['label'] == 1], bins = 200, label = 'spam') plt.show() # #### Судя по гистограммам признаов, почти все спам сообщения содержат символ валюты. Также половина спам сообщений содержит число в своем тексте и опечатку. 
При генерации этих признаков подобный эффект и ожидался. fig, ax = plt.subplots(figsize = (20, 10)) sns.heatmap(X_train[['label', 'len', 'punctuation', 'capital', 'badsymbol', 'moneysign', 'num']].corr()) # #### - Во-первых, длина SMS коррелирует с числом гласных/согласных, числом знаков препинания, тут ничего удивительного. # #### - Во-вторых, видим корреляцию между наличием символа, не являющегося цифрой или буквой алфавита, и наличием символов "$" и "£", так как второе является подмножество первого. # #### - В-третьих, видим корреляцию между целевой переменной и наличием числа в тексте SMS и наличием символа денежки. # ### Выбор метрики # #### Решается задача классификации на два класса. Классы несбалансированы, FP - нормальное SMS помечено как спам, это недопустимо. FN - спам помечен как нормальное SMS, допустимо, но не сильно хочется. Поэтому в качестве метрики будем использовать rocauc. # ### Выбор модели # #### На заре развития спам-фильтров их строили используя наивный байесовский классификатор, поэтому будем рассматривать эту модель. Также у нас ожидается много признаков после использования преобразования tfidf к тексту SMS, поэтому будем рассматривать логистическую регрессию. # ### Предобработка данных # #### Будем использовать преобразование tfidf для текста SMS, так же отмасштабируем признаки. scaler = StandardScaler() cols = ['len', 'punctuation', 'capital', 'badsymbol', 'moneysign', 'num'] X_train_scaled = pd.DataFrame(scaler.fit_transform(X_train[cols]), columns = cols) X_test_scaled = pd.DataFrame(scaler.transform(X_test[cols]), columns = cols) # #### Данных у нас не так много, поэтому выбираем кросс-валидацию на 10 фолдов. Для начала посмотрим на наши модели из коробки, ничего не настраивая. 
def valid(model, n, bayes = False) :
    """Mean ROC AUC of `model` over `n` stratified CV folds.

    The TF-IDF vectorizer is re-fitted inside every fold on the training
    part only, so no validation text leaks into the features.

    Parameters
    ----------
    model : estimator with fit / predict_proba
    n : int
        Number of folds.
    bayes : bool
        If True, feed the model the raw TF-IDF matrix only (MultinomialNB
        cannot take the negative values produced by StandardScaler);
        otherwise the scaled hand-crafted features are stacked on as well.
    """
    # BUG FIX: the original passed random_state without shuffle=True.
    # Old scikit-learn silently ignored the seed and versions >= 0.24
    # raise ValueError; shuffle=True makes the seed meaningful.
    skf = StratifiedKFold(n_splits = n, shuffle = True, random_state = 17)
    auc_scores = []
    for train_index, valid_index in skf.split(X_train_scaled, y_train):
        # Scaled numeric features and targets for this fold.
        X_train_part, X_valid = X_train_scaled.iloc[train_index], X_train_scaled.iloc[valid_index]
        y_train_part, y_valid = y_train[train_index], y_train[valid_index]
        # Raw SMS texts for this fold.
        X_train_sms, X_valid_sms = X_train.iloc[train_index]['sms'], X_train.iloc[valid_index]['sms']
        # Fit TF-IDF on the training texts of the fold only.
        cv = TfidfVectorizer(ngram_range = (1, 3))
        X_train_bow = cv.fit_transform(X_train_sms)
        X_valid_bow = cv.transform(X_valid_sms)
        if bayes :
            X_train_new = X_train_bow
            X_valid_new = X_valid_bow
        else :
            # Sparse stack: TF-IDF block first, hand-crafted features after.
            X_train_new = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_train_bow, X_train_part]))
            X_valid_new = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_valid_bow, X_valid]))
        model.fit(X_train_new, y_train_part)
        model_pred_for_auc = model.predict_proba(X_valid_new)
        auc_scores.append(roc_auc_score(y_valid, model_pred_for_auc[:, 1]))
    return np.mean(auc_scores)


# Baseline comparison of the two candidate models with default settings.
logit = LogisticRegression(random_state = 17)
bayes = MultinomialNB()
scores_logit = valid(logit, 10)
print('Logistic regression - rocauc : {}'.format(scores_logit))  # message typo fixed
scores_bayes = valid(bayes, 10, True)
print('Bayesian classifier - rocauc : {}'.format(scores_bayes))  # message typo fixed

# #### Logistic regression does a bit better; from here on we work only with it.

# ### Hyperparameter tuning; validation and learning curves.
def valid_for_valid_plots(model, n, bayes = False) :
    """Like `valid`, but returns CV *errors* (1 - ROC AUC) on both the
    validation folds and the training folds, for plotting validation curves.

    Returns
    -------
    (holdout_error, train_error) : tuple of floats
    """
    # Same fix as in `valid`: random_state requires shuffle=True.
    skf = StratifiedKFold(n_splits = n, shuffle = True, random_state = 17)
    auc_scores_cv = []
    auc_scores_valid = []
    for train_index, valid_index in skf.split(X_train_scaled, y_train):
        X_train_part, X_valid = X_train_scaled.iloc[train_index], X_train_scaled.iloc[valid_index]
        y_train_part, y_valid = y_train[train_index], y_train[valid_index]
        X_train_sms, X_valid_sms = X_train.iloc[train_index]['sms'], X_train.iloc[valid_index]['sms']
        # TF-IDF fitted on the fold's training texts only.
        cv = TfidfVectorizer(ngram_range = (1, 3))
        X_train_bow = cv.fit_transform(X_train_sms)
        X_valid_bow = cv.transform(X_valid_sms)
        if bayes :
            X_train_new = X_train_bow
            X_valid_new = X_valid_bow
        else :
            X_train_new = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_train_bow, X_train_part]))
            X_valid_new = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_valid_bow, X_valid]))
        model.fit(X_train_new, y_train_part)
        # Train-fold score, used to diagnose over/underfitting.
        auc_scores_cv.append(roc_auc_score(y_train_part, model.predict_proba(X_train_new)[:, 1]))
        model_pred_for_auc = model.predict_proba(X_valid_new)
        auc_scores_valid.append(roc_auc_score(y_valid, model_pred_for_auc[:, 1]))
    # Return errors, not scores.
    return 1 - np.mean(auc_scores_valid), 1 - np.mean(auc_scores_cv)


# #### Plot the validation curves

Cs = [0.1 * i for i in range(1, 21)]
scores = []
for c in Cs :
    logit = LogisticRegression(C = c, random_state = 17)
    scores.append(valid_for_valid_plots(logit, 10))

fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 10))
plt.plot(Cs, [i[0] for i in scores], color = 'blue', label='holdout')
plt.plot(Cs, [i[1] for i in scores], color = 'red', label='CV')
plt.ylabel("1 - ROCAUC")  # FIX: the plotted values are errors, not AUC
plt.xlabel("C")
plt.legend()              # FIX: labels were defined but the legend was never rendered
plt.title('Validation curve for C in (0.1, 2)');

# #### We will search C over [0.5, 1.5]. Below C = 0.5 the model underfits;
# #### above C = 1.5 the training error hits zero while the validation error
# #### stops improving — overfitting.
Cs = np.linspace(0.5, 1.5, 10)
for c in Cs :
    logit = LogisticRegression(C = c, random_state = 17)
    print(c, valid(logit, 10))

# ### C_opt = 1.5

C_opt = 1.5


# #### Plot the learning curves

def valid_for_train_plots(model, n, alpha, bayes = False) :
    """CV and train ROC AUC computed on the first `alpha` fraction of the
    training set; used to draw learning curves.

    Parameters
    ----------
    alpha : float in (0, 1]
        Fraction of the training set to use.

    Returns
    -------
    (holdout_auc, train_auc) : tuple of floats
    """
    # Same fix as in `valid`: random_state requires shuffle=True.
    skf = StratifiedKFold(n_splits = n, shuffle = True, random_state = 17)
    auc_scores_cv = []
    auc_scores_valid = []
    # Only the first alpha-fraction of the training data takes part.
    for train_index, valid_index in skf.split(X_train_scaled[: int(X_train_scaled.shape[0] * alpha)], y_train[: int(X_train_scaled.shape[0] * alpha)]):
        X_train_part, X_valid = X_train_scaled.iloc[train_index], X_train_scaled.iloc[valid_index]
        y_train_part, y_valid = y_train[train_index], y_train[valid_index]
        X_train_sms, X_valid_sms = X_train.iloc[train_index]['sms'], X_train.iloc[valid_index]['sms']
        cv = TfidfVectorizer(ngram_range = (1, 3))
        X_train_bow = cv.fit_transform(X_train_sms)
        X_valid_bow = cv.transform(X_valid_sms)
        if bayes :
            X_train_new = X_train_bow
            X_valid_new = X_valid_bow
        else :
            X_train_new = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_train_bow, X_train_part]))
            X_valid_new = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_valid_bow, X_valid]))
        model.fit(X_train_new, y_train_part)
        auc_scores_cv.append(roc_auc_score(y_train_part, model.predict_proba(X_train_new)[:, 1]))
        model_pred_for_auc = model.predict_proba(X_valid_new)
        auc_scores_valid.append(roc_auc_score(y_valid, model_pred_for_auc[:, 1]))
    return np.mean(auc_scores_valid), np.mean(auc_scores_cv)


alphas = [0.1 * i for i in range(1, 11)]
scores = []
for alpha in alphas :
    logit = LogisticRegression(C = C_opt, random_state = 17)
    scores.append(valid_for_train_plots(logit, 10, alpha = alpha))

fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 10))
plt.plot(alphas, [i[0] for i in scores], color = 'blue', label='holdout')
plt.plot(alphas, [i[1] for i in scores], color = 'red', label='CV')
plt.ylabel("ROCAUC")
plt.xlabel("Training set fraction")  # FIX: the x axis is alpha, not C
plt.legend()                         # FIX: labels were defined but no legend was drawn
plt.title('Learning curves with optimal C');

# #### Judging by the learning curves the model underfits; to improve the
# #### result we need a more complex model.

# ### Predictions for the test set

cv = TfidfVectorizer(ngram_range = (1, 3))
X_train_sms = cv.fit_transform(X_train['sms'])
X_test_sms = cv.transform(X_test['sms'])
train = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_train_sms, X_train_scaled]))
test = scipy.sparse.csr_matrix(scipy.sparse.hstack([X_test_sms, X_test_scaled]))
logit = LogisticRegression(C = C_opt, random_state = 17)
logit.fit(train, y_train)

# Coefficients of the hand-crafted features — they come after the TF-IDF block.
# FIX: X_train_sms.shape[1] replaces cv.get_feature_names(), which was removed
# in scikit-learn 1.2; the matrix width is the vocabulary size on any version.
for x, y in zip(cols, logit.coef_[0][X_train_sms.shape[1] :]) :
    print(x, y)

# #### For our model the number-presence and currency-symbol features are
# #### important, as are the word count and the number of capital letters,
# #### while the typo and punctuation features matter much less.

logit_pred = logit.predict_proba(test)
roc_auc_score(y_test, logit_pred[:, 1])

# #### Test quality matches the expectations formed during cross-validation.

# ### Conclusions

# #### We proposed a spam-filtering solution based on logistic regression.
# #### Similar filters can be used for SMS and e-mail.
#
# #### Further development could involve lemmatization/stemming of the SMS
# #### texts and stacking/blending of several models.
jupyter_russian/projects_individual/project_spam_classifier_eldar_kochshegulov.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# EM for a mixture model on a tiny Bernoulli-likelihood toy data set.
# FIX: the __future__ import must be the first statement of the module
# (it used to sit mid-file, a SyntaxError in the .py representation);
# it keeps '/' as float division under Python 2 as well.
from __future__ import division

from math import sqrt  # unused here; kept as in the original notebook


def e_step(theta, y):
    """E-step: posterior responsibility of every component for every record.

    Parameters
    ----------
    theta : dict component -> current mixing weight
    y : dict component -> per-record likelihoods (same keys as theta,
        all lists of equal length)

    Returns
    -------
    dict component -> list of responsibilities, one per record
    """
    # FIX: uses .items()-free iteration that works on Python 2 and 3
    # (the original called dict.iteritems(), which Python 3 removed).
    ez = {key: [] for key in theta}
    # Number of records — generalized from the hard-coded len(y['t1']).
    n_records = len(next(iter(y.values())))
    for r in range(n_records):
        # Normalizing constant: total weighted likelihood of record r.
        den = 0
        for i in theta:
            den += theta[i] * y[i][r]
        for i in theta:
            ez[i].append(theta[i] * y[i][r] / den)
    return ez


def m_step(ez):
    """M-step: new mixing weight of a component = its mean responsibility.

    Generalized from the hard-coded division by 6 to the actual number of
    records, so the function works for any data set size.
    """
    return {i: sum(ez[i]) / len(ez[i]) for i in ez}


def main():
    """Run 15 EM iterations on the toy data set, plotting the weights."""
    # Imported here so the module can be imported without matplotlib.
    from matplotlib import pyplot as plt

    y = {'t1': [0, 1, 1, 0, 1, 1], 't2': [1, 0, 0, 0, 0, 1], 't3': [0, 0, 1, 1, 0, 0]}
    cth = {'t1': 1 / 6, 't2': 3 / 6, 't3': 1 / 3}
    for step in range(15):  # renamed from `round`, which shadowed a builtin
        cth_nr = [cth['t1'], cth['t2'], cth['t3']]
        colors = ['green', 'pink', 'red']
        plt.pie(cth_nr, colors=colors)
        plt.axis('equal')
        plt.show()
        expz = e_step(cth, y)
        print(expz)
        cth = m_step(expz)
        print(cth)
        for i in ['t1', 't2', 't3']:
            # Expected number of records attributed to each component.
            print(cth[i] * 6)


if __name__ == '__main__':
    main()
EM_exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # GPE examples for a $^{87}Rb$ spinor in F=1,2
#
#
# ### Introduction
# This Notebook gives brief examples on numerical GPE simulations of a mixture of spin-1 and spin-2 BECs under the Single Mode Approximation (SMA).
#
# Publication available under https://arxiv.org/abs/1904.07617
#
# ### Constants

# +
from __future__ import division
import numpy as np
from scipy.constants import physical_constants
import matplotlib.pyplot as plt
import GPE_SMA_F1F2_Lib as GPE

h = physical_constants["Planck constant"][0]
# FIX: scipy >= 1.8 removed the table key "Planck constant over 2 pi",
# so the old lookup raises KeyError on current scipy. Since the 2019 SI
# redefinition hbar is *exactly* h / (2 pi), so compute it directly —
# this works on every scipy version.
hbar = h / (2 * np.pi)
aB = physical_constants["Bohr radius"][0]
m = 1.443e-25  # mass of a Rb87 atom (see Rubidium 87 D Line Data, D. Steck)
q = 0.89 * h

##################################
# Spin matrices for f=1 and f=2
##################################
F1_x = 1 / np.sqrt(2) * np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
F1_y = 1 / np.sqrt(2) * np.array([[0, -1j, 0], [1j, 0, -1j], [0, 1j, 0]])
F1_z = np.array([[1, 0, 0], [0, 0, 0], [0, 0, -1]])

F2_x = 1 / 2 * np.array([[0, 2, 0, 0, 0], [2, 0, np.sqrt(6), 0, 0], [0, np.sqrt(6), 0, np.sqrt(6), 0], [0, 0, np.sqrt(6), 0, 2], [0, 0, 0, 2, 0]])
F2_y = 1 / 2 * np.array([[0, -1j * 2, 0, 0, 0], [1j * 2, 0, -1j * np.sqrt(6), 0, 0], [0, 1j * np.sqrt(6), 0, -1j * np.sqrt(6), 0], [0, 0, 1j * np.sqrt(6), 0, -1j * 2], [0, 0, 0, 1j * 2, 0]])
F2_z = np.array([[2, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, -1, 0], [0, 0, 0, 0, -2]])
# -

# ### Scattering parameters

# +
# F=1
a1_0 = 101.8 * aB
a1_20 = -1.07 * aB
g1_0 = (4 * np.pi * hbar ** 2 / m) * (3 * a1_0 + 2 * a1_20) / 3
g1_1 = (4 * np.pi * hbar ** 2 / m) * (a1_20) / 3

# F=2
a2_20 = 3.51 * aB
a2_42 = 6.95 * aB
g2_1 = (4 * np.pi * hbar ** 2 / m) * a2_42 / 7
g2_2 = (4 * np.pi * hbar ** 2 / m) * (3 * a2_42 - 7 * a2_20) / 7

# F=1 <-> F=2
a12_12 = -1.31 * a1_20
a12_32 = -1.27 * a1_20
g12_1 = (4 * np.pi * hbar ** 2 / m) * a12_32 / 3
g12_2 = (4 * np.pi * hbar ** 2 / m) * (3 * a12_12 + 2 * a12_32) / 3
# -

# ### Atom number and effective volume

atomNumber = 100e3
omega = 91 * 2 * np.pi  # mean trapping frequency
RTF = (g1_0 * atomNumber / (m * (omega ** 2)) * 15 / (4 * np.pi)) ** (1 / 5.)  # Thomas-Fermi radius
Veff = 14. / 15. * np.pi * RTF ** 3  # effective volume
print("RTF: %.2f um"%(RTF*1e6))

# ### EXAMPLE (I): f=1
#
# Alignment to orientation conversion for the initial state:
# (typo fixed: the middle component is $i/\sqrt{2}$, matching the code below)
# \begin{equation}
# \frac{\Psi_0^{(1)}}{\sqrt{N}}=
# \begin{pmatrix}
# 1/2\\
# i/\sqrt{2}\\
# 1/2
# \end{pmatrix}
# \end{equation}

# +
wavefunction1 = np.sqrt(atomNumber) * np.array([1 / 2, 1j / np.sqrt(2), 1 / 2])
wavefunction2 = np.sqrt(atomNumber) * np.array([0, 0, 0, 0, 0])

t = np.linspace(0, 300e-3, 1000)

[w1Array, F1_xArray, F1_yArray, F1_zArray, angle1Array, w2Array, F2_xArray, F2_yArray, F2_zArray, angle2Array] = GPE.SMA_GPE_F12_simulation(t, wavefunction1, wavefunction2, Veff, q, g1_1, g2_1, g2_2, g12_1, g12_2)

# Transverse magnetization and f=1 populations.
F1_TArray = np.sqrt(F1_xArray ** 2 + F1_yArray ** 2)
n1Array = np.abs(w1Array) ** 2

plt.figure()
plt.plot(t*1e3,F1_TArray/atomNumber)
plt.xlabel("t (ms)")
plt.ylabel(r"$F_T^{(1)}/N$")
plt.title("Transverse magnetization")
plt.show()
# -

# ### Example (II): f=1 & f=2 interaction, effective QZS
#
# Effective quadratic Zeeman shift (QZS) due to the inter-hyperfine interaction between f=1 and f=2.
# \begin{equation}
# \frac{\Psi_0}{\sqrt{N}}=\frac{\Psi_0^{(1)}\oplus\Psi_0^{(2)}}{\sqrt{N}}=\frac{1}{\sqrt{2}}
# \begin{pmatrix}
# 1/2\\
# i/\sqrt{2}\\
# 1/2
# \end{pmatrix}
# \oplus
# \begin{pmatrix}
# 0\\
# 0\\
# 0\\
# 0\\
# 1
# \end{pmatrix}
# \end{equation}
#
#

# +
# Split the atoms evenly between f=1 (aligned state) and f=2 (m=-2).
wavefunction1 = np.sqrt(atomNumber / 2) * np.array([1 / 2., 1j / np.sqrt(2), 1 / 2.])
wavefunction2 = np.sqrt(atomNumber / 2) * np.array([0, 0, 0, 0, 1])

t = np.linspace(0, 300e-3, 1000)

[w1Array, F1_xArray, F1_yArray, F1_zArray, angle1Array, w2Array, F2_xArray, F2_yArray, F2_zArray, angle2Array] = GPE.SMA_GPE_F12_simulation(t, wavefunction1, wavefunction2, Veff, q, g1_1, g2_1, g2_2, g12_1, g12_2)

# Transverse magnetization in f=1, compared with |F_z| of f=2 (scaled by f).
F1_TArray = np.sqrt(F1_xArray ** 2 + F1_yArray ** 2)

plt.figure()
plt.plot(t*1e3,F1_TArray,label=r"$f=1$")
plt.plot(t*1e3,np.abs(F2_zArray/2),label=r"$f=2$")
plt.xlabel("t (ms)")
plt.ylabel(r"$F_T^{(f)}/f$")
plt.title("Inter-hyperfine dynamics")
plt.legend()
plt.show()
# -

# ### Example (III): f=1 & f=2 interaction, effective LZS
# Effective linear Zeeman splitting (LZS) due to the inter-hyperfine interaction between f=1 and f=2.
#
# \begin{equation}
# \frac{\Psi_{0,A}}{\sqrt{N}}=\hat{R}_{x,\frac{\pi}{6}}
# \begin{pmatrix}
# \frac{1}{\sqrt{2}}\\
# 0\\
# 0
# \end{pmatrix}
# \oplus
# \begin{pmatrix}
# \frac{1}{\sqrt{2}}\\
# 0\\
# 0\\
# 0\\
# 0
# \end{pmatrix}
# \;\;\;\;\;
# \frac{\Psi_{0,B}}{\sqrt{N}}=\hat{R}_{x,\frac{\pi}{6}}
# \begin{pmatrix}
# 0\\
# 0\\
# \frac{1}{\sqrt{2}}
# \end{pmatrix}
# \oplus
# \begin{pmatrix}
# 0\\
# 0\\
# 0\\
# 0\\
# \frac{1}{\sqrt{2}}
# \end{pmatrix}
# \end{equation}
#

# +
from scipy.linalg import expm

# Rotation operators about x by pi/6 in the f=1 and f=2 manifolds.
rotAngle = np.pi / 6.
R1 = expm(-1j * rotAngle * F1_x)
R2 = expm(-1j * rotAngle * F2_x)

t = np.linspace(0, 200e-3, 1000)

# EXPERIMENT A: stretched states m=+1 / m=+2, then rotated.
wavefunction1A = np.sqrt(atomNumber / 2) * np.array([1, 0, 0])
wavefunction2A = np.sqrt(atomNumber / 2) * np.array([1, 0, 0, 0, 0])
wavefunction1A = np.dot(R1, wavefunction1A)
wavefunction2A = np.dot(R2, wavefunction2A)

# EXPERIMENT B: stretched states m=-1 / m=-2, then rotated.
wavefunction1B = np.sqrt(atomNumber / 2) * np.array([0, 0, 1])
wavefunction2B = np.sqrt(atomNumber / 2) * np.array([0, 0, 0, 0, 1])
wavefunction1B = np.dot(R1, wavefunction1B)
wavefunction2B = np.dot(R2, wavefunction2B)

# Run both simulations; only the spinor angles are needed here.
[_, _, _, _, angle1ArrayA, _, _, _, _, angle2ArrayA] = GPE.SMA_GPE_F12_simulation(t, wavefunction1A, wavefunction2A, Veff, q, g1_1, g2_1, g2_2, g12_1, g12_2)
[_, _, _, _, angle1ArrayB, _, _, _, _, angle2ArrayB] = GPE.SMA_GPE_F12_simulation(t, wavefunction1B, wavefunction2B, Veff, q, g1_1, g2_1, g2_2, g12_1, g12_2)

# Relative phase between the two experiments, unwrapped and zeroed at t=0.
phi_A = angle1ArrayA + angle2ArrayA
phi_B = angle1ArrayB + angle2ArrayB
deltaPhi = np.unwrap(phi_A - phi_B)
deltaPhi -= deltaPhi[0]

plt.figure()
plt.plot(t*1e3,deltaPhi)
plt.xlabel("t (ms)")
plt.ylabel(r"$\phi_A^{(12)}-\phi_B^{(12)}$ (rad)")
plt.show()
# -
GPE_SMA_F1F2_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="-c0vWATuQ_Dn" colab_type="text" # # Lambda School Data Science - Loading, Cleaning and Visualizing Data # # Objectives for today: # - Load data from multiple sources into a Python notebook # - !curl method # - CSV upload method # - Create basic plots appropriate for different data types # - Scatter Plot # - Histogram # - Density Plot # - Pairplot # - "Clean" a dataset using common Python libraries # - Removing NaN values "Interpolation" # + [markdown] id="grUNOP8RwWWt" colab_type="text" # # Part 1 - Loading Data # # Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format. # # Data set sources: # # - https://archive.ics.uci.edu/ml/datasets.html # - https://github.com/awesomedata/awesome-public-datasets # - https://registry.opendata.aws/ (beyond scope for now, but good to be aware of) # # Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). # + [markdown] id="wxxBTeHUYs5a" colab_type="text" # ## Lecture example - flag data # + id="nc-iamjyRWwe" colab_type="code" outputId="9b4cfab2-910a-4a67-eecf-9b1521f7fb92" colab={"base_uri": "https://localhost:8080/", "height": 3400} # Step 1 - find the actual file to download # From navigating the page, clicking "Data Folder" flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data' # You can "shell out" in a notebook for more powerful tools # https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html # Funny extension, but on inspection looks like a csv # !curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data # Extensions are just a norm! 
You have to inspect to be sure what something is # + id="UKfOq1tlUvbZ" colab_type="code" colab={} # Step 2 - load the data # How to deal with a csv? 🐼 import pandas as pd flag_data = pd.read_csv(flag_data_url) # + id="exKPtcJyUyCX" colab_type="code" outputId="ab8bbd2b-4236-4e9b-c043-21636c0d340f" colab={"base_uri": "https://localhost:8080/", "height": 269} # Step 3 - verify we've got *something* flag_data.head() # + id="rNmkv2g8VfAm" colab_type="code" outputId="055bcf09-ed6b-4dea-957b-3a911481536b" colab={"base_uri": "https://localhost:8080/", "height": 555} # Step 4 - Looks a bit odd - verify that it is what we want flag_data.count() # + id="iqPEwx3aWBDR" colab_type="code" outputId="6acf3192-dac1-4007-debc-7c5e820b6aa5" colab={"base_uri": "https://localhost:8080/", "height": 87} # !curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc # + id="5R1d1Ka2WHAY" colab_type="code" outputId="0b4539d9-52ba-4d4e-edd1-6c540d93b55f" colab={"base_uri": "https://localhost:8080/", "height": 5081} # So we have 193 observations with funny names, file has 194 rows # Looks like the file has no header row, but read_csv assumes it does help(pd.read_csv) # + id="EiNiR6vExQUt" colab_type="code" colab={} # ?pd.read_csv # + id="oQP_BuKExQWE" colab_type="code" colab={} ??pd.read_csv # + id="o-thnccIWTvc" colab_type="code" outputId="57799072-a851-4d4f-fd37-e24f6997492b" colab={"base_uri": "https://localhost:8080/", "height": 273} # Alright, we can pass header=None to fix this flag_data = pd.read_csv(flag_data_url, header=None) flag_data.head() # + id="iG9ZOkSMWZ6D" colab_type="code" outputId="d8b6f16a-6964-41f4-b438-9ddbff9eb08a" colab={"base_uri": "https://localhost:8080/", "height": 555} flag_data.count() # + id="gMcxnWbkWla1" colab_type="code" outputId="52fc01aa-00bd-4420-8539-0fe7dba9e263" colab={"base_uri": "https://localhost:8080/", "height": 555} flag_data.isna().sum() # + [markdown] id="AihdUkaDT8We" colab_type="text" # ### Yes, but what does it *mean*? 
# # This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).
#
# ```
# 1. name: Name of the country concerned
# 2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania
# 3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
# 4. area: in thousands of square km
# 5. population: in round millions
# 6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
# 7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
# 8. bars: Number of vertical bars in the flag
# 9. stripes: Number of horizontal stripes in the flag
# 10. colours: Number of different colours in the flag
# 11. red: 0 if red absent, 1 if red present in the flag
# 12. green: same for green
# 13. blue: same for blue
# 14. gold: same for gold (also yellow)
# 15. white: same for white
# 16. black: same for black
# 17. orange: same for orange (also brown)
# 18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
# 19. circles: Number of circles in the flag
# 20. crosses: Number of (upright) crosses
# 21. saltires: Number of diagonal crosses
# 22. quarters: Number of quartered sections
# 23. sunstars: Number of sun or star symbols
# 24. crescent: 1 if a crescent moon symbol present, else 0
# 25. triangle: 1 if any triangles present, 0 otherwise
# 26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
# 27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
# 28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
# 29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
# 30. 
botright: Colour in the bottom-left corner (moving left to decide tie-breaks) # ``` # # Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... # + id="okEjAUHwEZtE" colab_type="code" colab={} # + [markdown] id="XUgOnmc_0kCL" colab_type="text" # ## Loading from a local CSV to Google Colab # + id="-4LA4cNO0ofq" colab_type="code" colab={} # + [markdown] id="aI2oN4kj1uVQ" colab_type="text" # # Part 2 - Basic Visualizations # + [markdown] id="INqBGKRl88YD" colab_type="text" # ## Basic Data Visualizations Using Matplotlib # + id="6FsdkKuh8_Rz" colab_type="code" colab={} import matplotlib.pyplot as plt # Scatter Plot # + id="huwUQ7zE9gkD" colab_type="code" colab={} # Histogram # + id="CSmpwXQN9o8o" colab_type="code" colab={} # Seaborn Density Plot # + id="TMMJG5rQ-g_8" colab_type="code" colab={} # Seaborn Pairplot # + [markdown] id="ipBQKbrl76gE" colab_type="text" # ## Create the same basic Visualizations using Pandas # + id="qWIO8zuhArEr" colab_type="code" colab={} # Pandas Histogram - Look familiar? # + id="zxEajNvjAvfB" colab_type="code" colab={} # Pandas Scatterplot # + id="XjR5i6A5A-kp" colab_type="code" colab={} # Pandas Scatter Matrix - Usually doesn't look too great. # + [markdown] id="tmJSfyXJ1x6f" colab_type="text" # # Part 3 - Deal with Missing Values # + [markdown] id="bH46YMHEDzpD" colab_type="text" # ## Diagnose Missing Values # # Lets use the Adult Dataset from UCI. <https://github.com/ryanleeallred/datasets> # + id="NyeZPpxRD1BA" colab_type="code" colab={} # + [markdown] id="SYK5vXqt7zp1" colab_type="text" # ## Fill Missing Values # + id="32ltklnQ71A6" colab_type="code" colab={} # + [markdown] id="nPbUK_cLY15U" colab_type="text" # ## Your assignment - pick a dataset and do something like the above # # This is purposely open-ended - you can pick any data set you wish. 
It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc. # # After you have chosen your dataset, do the following: # # - Import the dataset using the method that you are least comfortable with (!curl or CSV upload). # - Make sure that your dataset has the number of rows and columns that you expect. # - Make sure that your dataset has appropriate column names, rename them if necessary. # - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import. # - Identify and fill missing values in your dataset (if any) # - Don't worry about using methods more advanced than the `.fillna()` function for today. # - Create one of each of the following plots using your dataset # - Scatterplot # - Histogram # - Density Plot # - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns) # # If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!). # # If you have loaded a few traditional datasets, see the following section for suggested stretch goals. # + id="NJdISe69ZT7E" colab_type="code" colab={} # TODO your work here! # And note you should write comments, descriptions, and add new # code and text blocks as needed # + [markdown] id="MZCxTwKuReV9" colab_type="text" # ## Stretch Goals - Other types and sources of data # # Not all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers. 
# # If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion. # # Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit. # # How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice. # # One last major source of data is APIs: https://github.com/toddmotto/public-apis # # API stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access. # # *Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup. # + id="f4QP6--JBXNK" colab_type="code" colab={}
module2-loadingdata/LS_DS_112_Loading_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# conda install numpy

# +
import numpy as np
# -

# A 1-D array built from a plain Python list.
my_list = [1, 2, 3, 4, 6]
arr = np.array(my_list)

my_list
arr
type(arr)
arr.shape

# +
# A 2-D (3x4) array built from three lists.
lst1 = [1, 2, 3, 4]
lst2 = [5, 6, 7, 8]
lst3 = [9, 10, 11, 12]
arr = np.array([lst1, lst2, lst3])
# -

arr
arr.shape

# reshape returns a new view; the original shape is unchanged.
arr.reshape(6, 2)
arr.shape

# ## Indexing

arr = np.array([1, 2, 3, 4, 5, 6])
arr
arr[3]

# +
# A 3x5 array for slicing demos.
lst1 = [1, 2, 3, 4, 5]
lst2 = [6, 7, 8, 9, 10]
lst3 = [11, 12, 13, 14, 15]
arr = np.array([lst1, lst2, lst3])
# -

arr
arr[1:, 3:]
arr[:, :]
arr[0:, 0:]
arr[0:2, 0:2]
arr[1:, 3:]
arr[1:, 2:4]
arr[1, 1:4]

# arange and linspace for generating ranges.
arr = np.arange(0, 10)
arr
arr = np.arange(0, 10, step=2)
arr
np.linspace(1, 10, 50)

# ## copy function and broadcasting

arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
arr

# Broadcasting a scalar into a slice mutates the array in place.
arr[5:] = 49
arr

# Plain assignment aliases the same array — both names see the change...
arr1 = arr
arr1[5:] = 194
arr1
arr

# ...while copy() gives an independent array.
arr2 = arr1.copy()
arr2
arr2[5:] = 97
arr2

print(arr)
print(arr1)
print(arr2)

# Boolean conditions — very useful in exploratory data analysis.
val = 3
arr < 4
arr * val
arr % 2
arr[arr < 3]
arr[arr < 300]

np.arange(1, 21).reshape(2, 10)

# Element-wise multiplication of two equally shaped arrays.
arr1 = np.arange(0, 10).reshape(2, 5)
arr1
arr2 = np.arange(0, 10).reshape(2, 5)
arr2
arr1 * arr2

np.ones(5)
np.ones(3, dtype=int)
np.ones((3, 4), dtype=int)

# Random distributions.
np.random.rand(4, 4)
arr_ex = np.random.randn(3, 5)
arr_ex

import seaborn as sns
import pandas as pd

sns.distplot(pd.DataFrame(arr_ex.reshape(15, 1)))

np.random.randint(0, 100, 12).reshape(3, 4)
np.random.random_sample((2, 5))
Prerequisite/Numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/onobruno/Moodle_log/blob/master/Log_file.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="C3iRaIGHBG7E" colab_type="text"
# Goal: analyse the Moodle log table quickly and simply for every course, regardless of role

# + id="ojlGuR2PBAqN" colab_type="code" colab={}
### Libraries
import pandas as pd
import matplotlib as matplot
import matplotlib.ticker as plticker
import matplotlib.pyplot as plt
import numpy as np
import pytz
from datetime import datetime
from datetime import date
from google.colab import drive
# NOTE(review): `NaN` is unused and the alias was removed in scipy >= 1.12;
# consider np.nan instead. Kept for compatibility with the original notebook.
from scipy import NaN
from pathlib import Path as path
# (a duplicate `from datetime import datetime` was removed)

# + id="uc-V2aAwJBdS" colab_type="code" colab={}
### mount Google Drive
drive.mount('/content/drive', force_remount=True)


# + id="V7JgNRZiCtp0" colab_type="code" colab={}
class log():
    """Wrapper around a Moodle log export (xlsx/xls/csv).

    Loads the file, checks the expected 9-column layout, derives helper
    columns and offers simple per-course analyses (daily distinct visits,
    per-module hit counts, time spent online).
    """

    def __init__(self, file=None):
        print("...building the object...")
        self.path_file = file
        self.df = self.load_file()
        self.warning()
        self.ajustes()
        # Course name; used in plot titles and output file names.
        self.course = input('Qual é o nome do seu curso? ')

    def load_file(self):
        """Load the log file, parsing the first column as dd/mm/YYYY HH:MM."""
        # NOTE(review): date_parser was deprecated in pandas 2.0 (removed in
        # 3.0); switch to date_format='%d/%m/%Y %H:%M' when upgrading.
        dateparse = lambda x: datetime.strptime(x, '%d/%m/%Y %H:%M')
        # Pick the reader from the file extension.
        file_suffix = path(self.path_file).suffix
        if file_suffix == '.xlsx':
            print('The suffix file is ', file_suffix)
            df = pd.read_excel(self.path_file, parse_dates=[0], keep_date_col=True, date_parser=dateparse)
        elif file_suffix == '.xls':
            # BUG FIX: the original compared against the non-existent
            # extension '.xlx', so real .xls files fell through to the
            # error branch even though the message advertised .xls support.
            print('The suffix file is ', file_suffix)
            df = pd.read_excel(self.path_file, parse_dates=[0], keep_date_col=True, date_parser=dateparse)
        elif file_suffix == '.csv':
            print('The suffix file is ', file_suffix)
            df = pd.read_csv(self.path_file, parse_dates=[0], keep_date_col=True, date_parser=dateparse)
        else:
            # BUG FIX: the original only printed here and then crashed with
            # an unrelated UnboundLocalError on `return df`; fail explicitly.
            raise ValueError('Your file extension must be one of: .xlsx, .xls or .csv. Please rename the file appropriately')
        return df

    def warning(self):
        """Warn when the table does not have the expected 9 columns."""
        if (self.df.shape[1]) < 9:
            print('This log table has ', 9 - self.df.shape[1], 'columns less')
        elif (self.df.shape[1]) > 9:
            print('This log table has ', self.df.shape[1] - 9, 'columns greater')
        else:
            print('Load file successfully!')  # message typo fixed

    def ajustes(self):
        """Derive helper columns: user_id, description_split, description_lenth."""
        # The user id is the 5th token of the description column (position 6),
        # single-quoted in the Moodle export.
        users_id = [int(line.split()[4].replace("'", "")) for line in list(self.df.iloc[:, 6])]
        self.df['user_id'] = users_id
        # Tokenized description (becomes column 10 after the insert above).
        self.df['description_split'] = self.df.iloc[:, 6].str.split()
        # Token count of each description.
        length = [len(line) for line in list(self.df.iloc[:, 10])]
        self.df['description_lenth'] = length

    def visitas_por_dia(self):
        """Plot the number of distinct students that visited the course per day."""
        df_new = self.df
        # Keep only plain user rows and 'course viewed' events.
        df_new = df_new[df_new.iloc[:, 2] == '-']
        df_new = df_new[df_new.iloc[:, 5] == 'Curso visto']
        # Reverse to chronological order and index by timestamp.
        df_new = df_new.sort_index(ascending=False)
        df_new = df_new.reset_index(drop=True)
        df_new.index = df_new.iloc[:, 0]
        # Course period, asked interactively.
        inicial = input('Quando começa seu curso? (dd/mm/aaaa) \n')
        final = input('Quando termina seu curso? (dd/mm/aaaa) \n')
        inicial = datetime.strptime(inicial, "%d/%m/%Y")
        final = datetime.strptime(final, "%d/%m/%Y")
        # BUG FIX: the original referenced the undefined names
        # date_initial/date_final here, raising NameError on every call.
        data_range = pd.date_range(start=inicial, end=final).to_list()
        df_new = df_new.truncate(after=pd.Timestamp(str(final.date()) + ' 23:59:00'),
                                 before=pd.Timestamp(str(inicial.date()) + ' 01:00:00'))
        # Keep only the calendar date of each event.
        df_new.iloc[:, 0] = list(df_new.iloc[:, 0].dt.date)
        different_days = list(dict.fromkeys(list(df_new.iloc[:, 0])))
        # Distinct users that opened the course main page, per day.
        different_users_per_day = []
        for day in data_range:
            # BUG FIX: `day` is a pandas Timestamp while different_days holds
            # datetime.date objects, so the original membership test was
            # always False and every daily count came out 0. Compare dates.
            if day.date() in different_days:
                df_day = df_new[df_new.iloc[:, 0] == day.date()]
                different_users = len(list(dict.fromkeys(list(df_day.iloc[:, 9]))))
                different_users_per_day.append(different_users)
            else:
                different_users_per_day.append(0)
        # Plot one point per day of the course period.
        different_days = [day.strftime('%d/%m') for day in data_range]
        df_plot = pd.DataFrame({'Dias': different_days, 'Usuários': different_users_per_day})
        plt.title("Acessos distintos por dia \n" + self.course)
        plt.ylabel('# Usuários')
        plt.xlabel('Data')
        plt.plot('Dias', 'Usuários', data=df_plot, marker='o', alpha=0.4)
        # One tick per week keeps the axis readable.
        my_xticks = np.array(different_days)
        plt.xticks(my_xticks[::7])
        plt.show()

    def viewed_mod(self):
        """Export, per user, how many times each course module was viewed."""
        df_new = self.df
        # Keep only 'course module viewed' events.
        df_new = df_new[df_new.iloc[:, 5] == 'Módulo do curso visualizado']
        # Example log message (14 tokens):
        # "The user with id '102037' viewed the 'quiz' activity with course module id '99275'."
        df_new = df_new[df_new.iloc[:, 11] == 14]
        # Module id = last token, stripped of quotes and the final dot.
        mod_id_list = [mod_id[-1].replace("'", "").replace(".", "") for mod_id in list(df_new.iloc[:, 10])]
        df_new['with id'] = ' with id '
        # "<module name> with id <module id>" (the module name is column 3).
        mod_name_id_list = list(map(lambda x, y, z: x + y + z, list(df_new.iloc[:, 3]), list(df_new['with id']), mod_id_list))
        df_new['mod_name_id'] = mod_name_id_list
        # Distinct modules, sorted by name.
        mods = list(dict.fromkeys(list(df_new['mod_name_id'])))
        mods.sort()
        # (id, name) pairs distinguish users who share the same name.
        id_name_list = [item for item in zip(list(df_new['user_id']), list(df_new.iloc[:, 1]))]
        df_new['id_name'] = id_name_list
        users = list(dict.fromkeys(list(df_new['id_name'])))
        id_list, name_list = map(list, zip(*users))
        # One hit-count list per module, one entry per user.
        mods_df = {key: [] for key in mods}
        user_df = []
        for user in users:
            user_df.append(user)
            df_new2 = df_new[df_new['id_name'] == user]
            for mod in mods:
                if mod in list(df_new2['mod_name_id']):
                    view_mod = list(df_new2['mod_name_id']).count(mod)
                    mods_df[mod].append(view_mod)
                else:
                    mods_df[mod].append(0)
        # Assemble and save the per-user table.
        df_final = pd.DataFrame(mods_df)
        df_final.insert(0, 'id_name', user_df)
        df_final.insert(0, 'id', id_list)
        df_final.insert(0, 'name', name_list)
        df_final = df_final.sort_values(['name'])
        df_final.to_excel('Hits in ' + self.course + ' per module.xlsx', index=False)

    def time_spent(self):
        """Export an estimate of the time (in minutes) each user spent online.

        Consecutive events closer than `tempo` seconds are counted as part of
        the same session; larger gaps contribute nothing.
        """
        tempo = input('Quanto tempo dura a sessão do aluno em segundos? \n')
        tempo = int(tempo)
        df_new = self.df
        users = list(dict.fromkeys(list(df_new['user_id'])))
        all_tempos = []
        for user in users:
            df_new2 = df_new[df_new['user_id'] == user]
            if df_new2.shape[0] == 1:
                # A single event gives no interval to measure.
                all_tempos.append(0)
            else:
                times = list(pd.to_datetime(df_new2.iloc[:, 0]))
                # NOTE(review): assumes the log is ordered newest-first, so
                # times[i] - times[i+1] is a positive gap — confirm for your
                # export before trusting the totals.
                tempos = [(time_after - time_before).total_seconds() / 60
                          if (time_after - time_before).total_seconds() < tempo else 0
                          for time_after, time_before in zip(times[:-1], times[1:])]
                all_tempos.append(sum(tempos))
        d_times = {'Time spent (min)': all_tempos, 'User id': users}
        df_times = pd.DataFrame(d_times)
        df_times.to_excel('tempos' + self.course + 'users.xlsx')
Log_file.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# + active=""
# Convert categorical columns to one hot encoded columns
# -

import pandas as pd

# +
# Toy frame: one categorical column ('sex') plus numeric/string payload columns
df = pd.DataFrame({'sex': ['M', 'F', 'M', 'F'],
                   'col_2': [1.2, 3.1, 4.4, 5.5],
                   'col_3': [1, 2, 3, 4],
                   'col_4': ['a', 'b', 'c', 'd']})
# -

df

# +
categorical_variables = ['sex']

for cat_col in categorical_variables:
    # Missing categories become their own "Missing" level before encoding
    df[cat_col].fillna("Missing", inplace=True)

    # One-hot encode this column and graft the indicator columns onto the frame
    indicator_cols = pd.get_dummies(df[cat_col], prefix=cat_col)
    df = pd.concat([df, indicator_cols], axis=1)

    # The original categorical column is now redundant
    df.drop(columns=[cat_col], inplace=True)
# -

df
4_d_convert_categorical_columns_to_one_hot_encoded_columns.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Linear-SVM baseline for MNIST digit classification.
# Expects "train.csv" in the Kaggle digit-recognizer format: a 'label'
# column followed by 784 pixel-intensity columns (0-255).

import pandas as pd

# Load the raw training data and peek at the last rows.
mdata = pd.read_csv('train.csv')
mdata.tail()

# +
from sklearn.model_selection import train_test_split

# Column 0 is the digit label; everything after it is a pixel feature.
features = mdata.columns[1:]
X = mdata[features]
y = mdata['label']

# Scale pixel intensities into [0, 1] and hold out 10% for evaluation.
# CONSISTENCY FIX: the original mixed `Y`/`Y_train` with `y_test`; all
# target names now use lower-case snake_case consistently.
X_train, X_test, y_train, y_test = train_test_split(
    X / 255., y, test_size=0.1, random_state=0)

# +
from sklearn.svm import LinearSVC

# Baseline linear SVM; dual=False suits n_samples >> n_features.
clf_svm = LinearSVC(penalty='l2', dual=False, tol=1e-5)
clf_svm.fit(X_train, y_train)

# +
from sklearn.metrics import accuracy_score

y_pred_svm = clf_svm.predict(X_test)
acc_svm = accuracy_score(y_test, y_pred_svm)
print('SVm accuracy:', acc_svm)

# +
from sklearn.model_selection import GridSearchCV

# Small 3-fold grid over regularisation penalty and stopping tolerance.
penalties = ['l1', 'l2']
tolerances = [1e-3, 1e-4, 1e-5]
param_grid = {'penalty': penalties, 'tol': tolerances}
grid_search = GridSearchCV(LinearSVC(dual=False), param_grid, cv=3)
grid_search.fit(X_train, y_train)
grid_search.best_params_

# Refit with the best parameters found by the grid search (l1, tol=1e-3).
clf_svm = LinearSVC(penalty='l1', dual=False, tol=0.001)
clf_svm.fit(X_train, y_train)

# +
y_pred_svm = clf_svm.predict(X_test)
acc_svm = accuracy_score(y_test, y_pred_svm)
print('SVM accuracy: ', acc_svm)
others/MNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MACHINE LEARNING FOR CROWD MODELLING AND SIMULATION from tkinter import * from utils import * # ## TASK 1 # # Setting up the model environment: by executing the snippet below it is possible to view the modelling environment, add pedestrians in cells as well as targets and obstacles. It is possible to move pedestrians: # - put a pedestrian (or more!) in the scenario # - click on Free Walk # - move the pedestrian with WASD or key arrows app = Tk() setup_task_1(app) app.mainloop() # ## TASK 2 # # First step of a single pedestrian: define a scenario of 50 by 50, start a single pedestrian at position (5,25) and a target at (25,25). Simulate the scenario. app = Tk() setup_task_2(app) app.mainloop() # ## TASK 3 # # Interaction of pedestrians: define a scenario where 5 pedestrians move to a target starting from roughly the same distance, proving the correctness of the implementation by showing how they take the same time to arrive app = Tk() setup_task_3(app) app.mainloop() # + [markdown] pycharm={"name": "#%% md\n"} # ## TASK 4 # # Obstacle avoidance: # 1. Rudimentary obstacle avoidance: pedestrians simply don't step on other pedestrians and obstacles # 2. Dijkstra's algorithm: each pedestrian finds the actual best path to reach a destination # # Try to execute the next cell either checking or not the Dijkstra checkbox. # If Dijkstra is enabled, the pedestrian will be able to overcome the chicken test, # otherwise it will be stuck in the trap. 
# + pycharm={"name": "#%%\n"} app = Tk() setup_task_4(app) app.mainloop() # + [markdown] pycharm={"name": "#%% md\n"} # ## TASK 5 # # RiMEA scenario 1: # + pycharm={"name": "#%%\n"} rimea_test_1() # + [markdown] pycharm={"name": "#%% md\n"} # RiMEA scenario 4: # + pycharm={"name": "#%%\n"} rimea_test_4() # + [markdown] pycharm={"name": "#%% md\n"} # RiMEA scenario 6: # + pycharm={"name": "#%%\n"} rimea_test_6() # + [markdown] pycharm={"name": "#%% md\n"} # RiMEA scenario 7: # + pycharm={"name": "#%%\n"} rimea_test_7()
EX1/tasks_notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Produce training set for speculator

import os
import tqdm
import numpy as np
import astropy.units as u
from astropy.cosmology import Planck13 as cosmo
# -- gqp_mc --
from gqp_mc import util as UT
from gqp_mc import fitters as Fitters
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
# %matplotlib inline

# Fixed seed so the sampled training set is reproducible; note that the
# statement order below fixes the RNG stream, so cells must not be reordered.
np.random.seed(0)

speculate = Fitters.iSpeculator(model_name='fsps')

# First we need to sample the prior space of our parameters:
#
# $$\beta_1, \beta_2, \beta_3, \beta_4, \gamma_1, \gamma_2, \tau_{\rm dust}, t_{\rm age}$$
#
# The actual prior we want to sample later is a uniform prior with the range
#
# $$\beta_1, \beta_2, \beta_3, \beta_4 \in [0, 1]$$
#
# $$\gamma_1, \gamma_2 \in [6.9e-5, 7.3e-3]$$
#
# $$\tau_{\rm dust} \in [0, 3]$$
#
# $$t_{\rm age} \in [8.6, 13.8]$$

# Redshift within 0, 0.5
def sample_prior(n_sample):
    ''' sample a padded uniform prior

    The ranges are deliberately padded slightly beyond the target prior above
    (e.g. beta up to 1.1, gamma down to 6.5e-5) so the emulator is trained on
    a region a little wider than where it will later be evaluated.

    Parameters
    ----------
    n_sample : int
        number of parameter vectors to draw

    Returns
    -------
    (n_sample, 8) array of draws ordered as
    [b1, b2, b3, b4, g1, g2, tau_dust, t_age]
    '''
    prior_min = np.array([0.0, 0.0, 0.0, 0.0, 6.5e-5, 6.5e-5, 0.0, 8.6])
    prior_max = np.array([1.1, 1.1, 1.1, 1.1, 7.5e-3, 7.5e-3, 3.5, 13.8])
    return prior_min + (prior_max - prior_min) * np.random.uniform(size=(n_sample, len(prior_min)))

theta_train = sample_prior(600000)

# Overwrite the first 590k beta draws with draws mapped through the SFH basis.
# NOTE(review): `_transform_to_SFH_basis` is a gqp_mc internal; presumably it
# maps the unit hypercube onto the normalised SFH basis coefficients — confirm.
theta_train[:590000,:4] = speculate._transform_to_SFH_basis(np.random.uniform(size=(590000,4)))

# Visual sanity check: beta_1 against each of beta_2..beta_4
fig = plt.figure(figsize=(16,4))
for i in range(3):
    sub = fig.add_subplot(1,3,i+1)
    sub.scatter(theta_train[:,i+1], theta_train[:,0])
    sub.set_xlabel(r'$\beta_{%i}$' % (i+2), fontsize=20)
    sub.set_xlim(0, 1)
    sub.set_ylabel(r'$\beta_1$', fontsize=20)
    sub.set_ylim(0, 1)
fig.subplots_adjust(wspace=0.4)

# Now that we have the parameters for our training sample. Lets generate FSPS spectra for each of them

# Wavelength window (file names suggest the DESI spectral range — confirm)
wmin, wmax = 2300., 11030.

# +
# Evaluate the model once to get the wavelength grid, then save the masked grid
w_fsps, _ = speculate._fsps_model(theta_train[0])
wlim = (w_fsps >= wmin) & (w_fsps <= wmax)

fwave = os.path.join(UT.dat_dir(), 'speculator', 'wave_fsps.npy')
np.save(fwave, w_fsps[wlim])
# -

# Generate and save log-spectra in batches of 6000 parameter vectors.
# NOTE(review): the loop starts at batch 2 — presumably batches 0 and 1 were
# generated in an earlier run; confirm before regenerating.
for i in range(2,100):
    print('--- batch %i ---' % i)
    i_batch = range(6000*i,6000*(i+1))
    logspectra_train = []
    for _theta in tqdm.tqdm(theta_train[i_batch]):
        _, _spectrum = speculate._fsps_model(_theta)
        logspectra_train.append(np.log(_spectrum[wlim]))
    ftheta = os.path.join(UT.dat_dir(), 'speculator', 'DESI_simpledust.theta_train.%i.npy' % i)
    fspectrum = os.path.join(UT.dat_dir(), 'speculator', 'DESI_simpledust.logspectrum_fsps_train.%i.npy' % i)
    np.save(ftheta, theta_train[i_batch])
    np.save(fspectrum, np.array(logspectra_train))
nb/speculator_trainingset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="Ic4_occAAiAT" # ##### Copyright 2019 The TensorFlow Hub Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + cellView="both" colab={} colab_type="code" id="ioaprt5q5US7" # Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # + cellView="form" colab={} colab_type="code" id="yCl0eTNH5RS3" #@title MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # + [markdown] colab_type="text" id="ItXfxkxvosLH" # # Text Classification with Movie Reviews # + [markdown] colab_type="text" id="MfBg1C5NB3X0" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/tf2_text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/tf2_text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="Eg62Pmz3o83v" # This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of *binary*—or two-class—classification, an important and widely applicable kind of machine learning problem. 
# # We'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. # # This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow, and [TensorFlow Hub](https://www.tensorflow.org/hub), a library and platform for transfer learning. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/). # + [markdown] colab_type="text" id="Q4DN769E2O_R" # ## Setup # + colab={} colab_type="code" id="2ew7HTbPpCJH" import numpy as np import tensorflow as tf import tensorflow_hub as hub import tensorflow_datasets as tfds import matplotlib.pyplot as plt print("Version: ", tf.__version__) print("Eager mode: ", tf.executing_eagerly()) print("Hub version: ", hub.__version__) print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE") # + [markdown] colab_type="text" id="iAsKG535pHep" # ## Download the IMDB dataset # # The IMDB dataset is available on [TensorFlow datasets](https://github.com/tensorflow/datasets). The following code downloads the IMDB dataset to your machine (or the colab runtime): # + colab={} colab_type="code" id="zXXx5Oc3pOmN" train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"], batch_size=-1, as_supervised=True) train_examples, train_labels = tfds.as_numpy(train_data) test_examples, test_labels = tfds.as_numpy(test_data) # + [markdown] colab_type="text" id="l50X3GfjpU4r" # ## Explore the data # # Let's take a moment to understand the format of the data. 
Each example is a sentence representing the movie review and a corresponding label. The sentence is not preprocessed in any way. The label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review. # + colab={} colab_type="code" id="y8qCnve_-lkO" print("Training entries: {}, test entries: {}".format(len(train_examples), len(test_examples))) # + [markdown] colab_type="text" id="RnKvHWW4-lkW" # Let's print first 10 examples. # + colab={} colab_type="code" id="QtTS4kpEpjbi" train_examples[:10] # + [markdown] colab_type="text" id="IFtaCHTdc-GY" # Let's also print the first 10 labels. # + colab={} colab_type="code" id="tvAjVXOWc6Mj" train_labels[:10] # + [markdown] colab_type="text" id="LLC02j2g-llC" # ## Build the model # # The neural network is created by stacking layers—this requires three main architectural decisions: # # * How to represent the text? # * How many layers to use in the model? # * How many *hidden units* to use for each layer? # # In this example, the input data consists of sentences. The labels to predict are either 0 or 1. # # One way to represent the text is to convert sentences into embeddings vectors. We can use a pre-trained text embedding as the first layer, which will have two advantages: # * we don't have to worry anout text preprocessing, # * we can benefit from transfer learning. # # For this example we will use a model from [TensorFlow Hub](https://www.tensorflow.org/hub) called [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). # # There are three other models to test for the sake of this tutorial: # * [google/tf2-preview/gnews-swivel-20dim-with-oov/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1) - same as [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1), but with 2.5% vocabulary converted to OOV buckets. 
This can help if vocabulary of the task and vocabulary of the model don't fully overlap. # * [google/tf2-preview/nnlm-en-dim50/1](https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1) - A much larger model with ~1M vocabulary size and 50 dimensions. # * [google/tf2-preview/nnlm-en-dim128/1](https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1) - Even larger model with ~1M vocabulary size and 128 dimensions. # + [markdown] colab_type="text" id="In2nDpTLkgKa" # Let's first create a Keras layer that uses a TensorFlow Hub model to embed the sentences, and try it out on a couple of input examples. Note that the output shape of the produced embeddings is a expected: `(num_examples, embedding_dimension)`. # + colab={} colab_type="code" id="_NUbzVeYkgcO" model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1" hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[], dtype=tf.string, trainable=True) hub_layer(train_examples[:3]) # + [markdown] colab_type="text" id="dfSbV6igl1EH" # Let's now build the full model: # + colab={} colab_type="code" id="xpKOoWgu-llD" model = tf.keras.Sequential() model.add(hub_layer) model.add(tf.keras.layers.Dense(16, activation='relu')) model.add(tf.keras.layers.Dense(1)) model.summary() # + [markdown] colab_type="text" id="6PbKQ6mucuKL" # The layers are stacked sequentially to build the classifier: # # 1. The first layer is a TensorFlow Hub layer. This layer uses a pre-trained Saved Model to map a sentence into its embedding vector. The model that we are using ([google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1)) splits the sentence into tokens, embeds each token and then combines the embedding. The resulting dimensions are: `(num_examples, embedding_dimension)`. # 2. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units. # 3. The last layer is densely connected with a single output node. 
This outputs logits: the log-odds of the true class, according to the model. # + [markdown] colab_type="text" id="0XMwnDOp-llH" # ### Hidden units # # The above model has two intermediate or "hidden" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation. # # If a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patterns—patterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later. # + [markdown] colab_type="text" id="L4EqVWg4-llM" # ### Loss function and optimizer # # A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function. # # This isn't the only choice for a loss function, you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions. # # Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error. 
# # Now, configure the model to use an optimizer and a loss function:

# + colab={} colab_type="code" id="Mr0GP-cQ-llN"
model.compile(optimizer='adam',
              loss=tf.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

# + [markdown] colab_type="text" id="hCWYwkug-llQ"
# ## Create a validation set
#
# When training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate our accuracy).

# + colab={} colab_type="code" id="-NpcXY9--llS"
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]

y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]

# + [markdown] colab_type="text" id="35jv_fzP-llU"
# ## Train the model
#
# Train the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:

# + colab={} colab_type="code" id="tXSGrjWZ-llW"
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)

# + [markdown] colab_type="text" id="9EEGuDVuzb5r"
# ## Evaluate the model
#
# And let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.

# + colab={} colab_type="code" id="zOMKywn4zReN"
# BUG FIX: `test_data` is the raw (examples, labels) pair returned by
# tfds.load(batch_size=-1); Model.evaluate needs the numpy example array that
# was extracted into `test_examples` above, paired with `test_labels`.
results = model.evaluate(test_examples, test_labels)

print(results)

# + [markdown] colab_type="text" id="z1iEXVTR0Z2t"
# This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%.
# + [markdown] colab_type="text" id="5KggXVeL-llZ"
# ## Create a graph of accuracy and loss over time
#
# `model.fit()` returns a `History` object whose `.history` dict records every monitored metric for every training epoch:

# + colab={} colab_type="code" id="VcvSXvhp-llb"
history_dict = history.history
history_dict.keys()

# + [markdown] colab_type="text" id="nRKsqL40-lle"
# There are four entries: one for each monitored metric during training and validation. Use them to plot training vs. validation loss, then training vs. validation accuracy:

# + colab={} colab_type="code" id="nGoYf2Js-lle"
train_acc = history_dict['accuracy']
valid_acc = history_dict['val_accuracy']
train_loss = history_dict['loss']
valid_loss = history_dict['val_loss']

epochs = range(1, len(train_acc) + 1)

# 'bo' draws blue dots; 'b' draws a solid blue line
plt.plot(epochs, train_loss, 'bo', label='Training loss')
plt.plot(epochs, valid_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()

# + colab={} colab_type="code" id="6hXx-xOv-llh"
plt.clf()   # clear figure before drawing the accuracy curves

plt.plot(epochs, train_acc, 'bo', label='Training acc')
plt.plot(epochs, valid_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()

# + [markdown] colab_type="text" id="oFEmZ5zq-llk"
# In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy.
#
# Notice the training loss *decreases* with each epoch and the training accuracy *increases* with each epoch. This is expected when using a gradient descent optimization—it should minimize the desired quantity on every iteration.
#
# This isn't the case for the validation loss and accuracy—they seem to peak after about twenty epochs. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to test data.
#
# For this particular case, we could prevent overfitting by simply stopping the training after twenty or so epochs. Later, you'll see how to do this automatically with a callback.
examples/colab/tf2_text_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
sys.path.insert(0, '../')
import src

# +
import scipy.stats as scs
import matplotlib.pyplot as plt
import numpy as np

# %matplotlib inline

plt.style.use('ggplot')
# -

z = scs.norm(0, 1)

z.ppf(0.8)

z.ppf(.975)

z = scs.norm()


def zplot(cdf=0.95, align='center'):
    """Plots a z distribution with common annotations

    Example:
        zplot(0.95)

        zplot(0.95, align='left')

    Parameters:
        cdf (float): The area under the standard normal distribution curve.
        align (str): The area under the curve can be aligned to the center
            (default) or to the left. Supported: 'left' or 'center'

    Returns:
        None: A plot of the normal distribution with annotations showing the
            area under the curve and the boundaries of the area.

    Raises:
        ValueError: If ``align`` is neither 'center' nor 'left'.
    """
    fig = plt.figure(figsize=(12, 6))
    ax = fig.subplots()

    norm = scs.norm()
    x = np.linspace(-5, 5, 1000)
    y = norm.pdf(x)

    ax.plot(x, y)

    if align == 'center':
        # Central interval: split the leftover tail mass evenly on both sides
        CIa = norm.ppf(0.5 - cdf / 2)
        CIb = norm.ppf(0.5 + cdf / 2)

        ax.vlines(CIb, 0, norm.pdf(CIb), color='grey', linestyle='--', label=CIb)
        ax.vlines(CIa, 0, norm.pdf(CIa), color='grey', linestyle='--', label=CIa)

        # BUG FIX: alpha was the string '0.25'; Matplotlib requires a numeric
        # alpha (a string raises TypeError in current releases).
        ax.fill_between(x, 0, y, color='grey', alpha=0.25,
                        where=(x > CIa) & (x < CIb))

        plt.text(CIa, norm.pdf(CIa), "z = {0:.3f}".format(CIa), fontsize=12,
                 rotation=90, va="bottom", ha="right")
    elif align == 'left':
        # One-sided interval: all of the mass sits left of a single boundary
        CIb = norm.ppf(cdf)
        ax.vlines(CIb, 0, norm.pdf(CIb), color='grey', linestyle='--', label=CIb)
        # BUG FIX: numeric alpha here as well (was the string '0.25')
        ax.fill_between(x, 0, y, color='grey', alpha=0.25, where=x < CIb)
    else:
        raise ValueError('align must be set to "center"(default) or "left"')

    # CIb is defined on both successful branches, so the upper-boundary label
    # is drawn for either alignment.
    plt.text(CIb, norm.pdf(CIb), "z = {0:.3f}".format(CIb), fontsize=12,
             rotation=90, va="bottom", ha="left")
    plt.text(0, 0.1, "area = {0:.3f}".format(cdf), fontsize=12, ha='center')
    plt.xlabel('z')
    plt.ylabel('PDF')

    plt.show()


zplot(0.95)

zplot(0.80, 'left')
code/z-dist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 读取腔标定 # # *版权所有 (c) 2021 百度量子计算研究所,保留所有权利。* # ## 内容概要 # # 本教程将介绍如何使用读取模拟功能进行标定读取腔参数的实验,本教程的大纲如下: # - 背景介绍 # - 准备工作 # - 标定读取腔的跃迁频率 # - 标定色散频移与耦合强度 # - 测量衰减速率 # - 总结 # ## 背景介绍 # # 在超导电路中,如果想获得某个量子比特的状态,我们可以通过测量与之耦合的读取腔来间接实现对量子比特状态的读取,其具体操作是,我们首先向该读取腔施加一个脉冲信号,然后探测并分析从读取腔反射的脉冲信号。由于反射脉冲的振幅和相位的变化是由与之耦合的量子比特的状态决定的,因此我们可以通过这个变化间接读出量子比特在某次测量中的结果是 “0” 还是 “1”。 # # 在实验中,我们首先需要对读取腔的相关参数进行标定,本教程介绍如何使用量脉完成对于读取腔标定的模拟。 # # 一个由读取腔和量子比特相互耦合的系统可以用色散区域(Dispersive regime)的 Jaynes-Cumming 模型来描述 \[1\]: # # $$ # \hat{H}_{\rm JC} = \omega_r \hat{a}^\dagger \hat{a} + \frac{1}{2}\omega_q \hat{\sigma}_z + \chi \hat{a}^\dagger \hat{a} \hat{\sigma}_z. # $$ # # # 其中 $\hat{a}$、$\hat{a}^\dagger$ 分别是读取腔的湮灭和产生算符,而 $\hat{\sigma}_z$ 是量子比特的泡利 Z 算符。系数 $\omega_r$ 和 $\omega_q$ 分别是读取腔和量子比特的裸态频率(裸态频率是没有耦合的系统的本征频率)。$\chi$ 是色散频移,可表示为: # # $$ # \chi = \frac{g^2 \alpha}{\Delta_{qr}(\Delta_{qr} + \alpha)}. # $$ # # 其中,$\alpha$ 是量子比特的非谐性,$\Delta_{qr} = \omega_q - \omega_r$ 是量子比特和读取腔的失调, $g$ 是量子比特与读取腔的耦合强度。由哈密顿量 $\hat{H}_{\rm JC}$ 中的相互作用项 $\chi \hat{a}^\dagger \hat{a} \hat{\sigma}_z$ 可知,量子比特分别处于 $|0\rangle$ 和 $|1\rangle$ 的时候,对应的读取腔的跃迁频率相差了 $2\chi$。因此在实验中分别将量子比特制备到 $|0\rangle$ 态和 $|1\rangle$ 态,然后分别对读取腔进行频率扫描的操作,得到两个跃迁频率 $f_0$ 以及 $f_1$,并计算频率差 $2\chi$,就可以通过上式间接计算量子比特和读取腔的耦合强度 $g$。 # # 我们还可以通过测量读取腔频率的谱线宽度 $\kappa$ 来确定其光子衰减速率。为了模拟读取腔与量子比特组成的系统与环境的相互作用,该系统的密度矩阵 $\hat{\rho}(t)$ 的时间演化由 Lindblad 主方程给出 \[3, 4\]: # # # $$ # \frac{d \hat{\rho}(t)}{dt} = -i[\hat{H}(t), \hat{\rho}(t)] + \frac{\kappa}{2}[2 \hat{a} \hat{\rho}(t) \hat{a}^\dagger - \hat{\rho}(t) \hat{a}^\dagger \hat{a} - \hat{a}^\dagger \hat{a} \hat{\rho}(t)]. 
# $$ # # # 实验中,我们可以用洛伦兹函数拟合读取腔的频谱图并得到线宽,就能得到衰减速率 $\kappa$。 # # 这里,我们最后提取的可观测量是读取腔场模的两个正交分量 $\hat{X} = \frac{1}{2}(\hat{a}^\dagger + \hat{a})$ 和 $\hat{Y} = \frac{i}{2}(\hat{a}^\dagger - \hat{a})$。实验中通过对从读取腔反射的脉冲经过一系列信号处理,我们就可以得到与这两个正交分量相关的电压值 $V_I$ 和 $V_Q$。 # # 本教程中,我们使用量脉的工具求解读取腔的动力学演化,以模拟实验中标定读取腔的操作。我们标定的目标参数有:量子比特分别在 $|0\rangle$ 和 $|1\rangle$ 的读取腔跃迁频率 $\omega_{r0}$ 和 $\omega_{r1}$,谱线宽度 $\kappa$ 和色散频移 $\chi$。 # ## 准备工作 # # 为了运行该教程,我们首先需要从量脉(Quanlse)以及其它 python 库导入所需要的包。 # + # Import tools from Quanlse from Quanlse.Simulator.ReadoutSim3Q import readoutSim3Q from Quanlse.Calibration.Readout import resonatorSpec, fitLorentzian, lorentzian # Import tools from other python libraries from scipy.signal import find_peaks import numpy as np import matplotlib.pyplot as plt from math import pi # - # ## 标定读取腔的跃迁频率 # # 在本节中,我们分别标定量子比特处于基态和第一激发态时的读取腔的跃迁频率。首先我们创建一个含有读取腔信息的 `ReadoutModel` 类的一个实例 `readoutModel`,这里我们直接用预设好的 `readoutSim3Q()` 实例作为演示。 readoutModel = readoutSim3Q() # Initialize a readoutModel object # 为了得到读取腔的跃迁频率,我们设定读取脉冲在某一频率范围内对读取腔进行频率扫描。设置扫描频率的范围 `freqRange`,驱动振幅 `amp` 以及读取脉冲的持续时间 `duration`。 freqRange = np.linspace(7.105, 7.125, 60) * 2 * pi # the range of frequency to probe the resonator, in 2 pi GHz amp = 0.0005 * (2 * pi) # drive amplitude, in 2 pi GHz duration = 1000 # duration of the readout pulse, in nanoseconds # 调用函数 `resonatorSpec` 来完成一次对与处于基态的量子比特耦合的读取腔的扫描,输入想模拟的读取腔序号 `onRes`、频率范围 `freqRange`、读取脉冲振幅 `amp` 以及持续时间 `duration`,并且设置 `qubitState` 为基态。 vi0, vq0 = resonatorSpec(readoutModel=readoutModel, onRes=[0], freqRange=freqRange, amplitude=amp, duration=duration, qubitState='ground') # 得到信号 $V_I$ 和 $V_Q$ 后,绘制 $V_I$ 或者 $V_Q$ 随驱动频率变化的曲线。 idx0 = find_peaks(vq0[0], height=max(vq0[0]))[0] # find the index of the transition frequency w0 = freqRange[idx0][0] # transition frequency print(f'The resonator transition frequency with qubit in ground state is {(w0 / (2 * pi)).round(3)} GHz') plt.plot(freqRange / (2 * pi), np.array(vq0[0])) plt.plot() 
plt.xlabel('$\omega_d$ (GHz)') plt.ylabel('signal (a.u.)') plt.title('Readout resonator spectrum') plt.vlines((freqRange / (2 * pi))[idx0], 0, max(vq0[0]), linestyles='dashed') plt.show() # 从上图的模拟结果可知,当量子比特处于基态时,读取腔的跃迁频率大约在 7.118 GHz 左右。接下来我们用同样的步骤确定当量子比特处于激发态时的读取腔跃迁频率: vi1, vq1 = resonatorSpec(readoutModel=readoutModel, onRes=[0], freqRange=freqRange, amplitude=amp, duration=duration, qubitState='excited') idx1 = find_peaks(vq1[0], height=max(vq1[0]))[0] w1 = freqRange[idx1][0] print(f'The resonator transition frequency with qubit in excited state is {(w1 / (2 * pi)).round(3)} GHz') plt.plot(freqRange / (2 * pi), np.array(vq1[0])) plt.plot() plt.xlabel('$\omega_d$ (GHz)') plt.ylabel('signal (a.u.)') plt.title('Readout resonator spectrum') plt.vlines((freqRange / (2 * pi))[idx1], 0, max(vq1[0]), linestyles='dashed') plt.show() # 由频谱图可知,当量子比特处于第一激发态时,读取腔的跃迁频率大约在 7.112 GHz 左右。 # ## 标定色散频移与耦合强度 # # 在上节中,我们通过标定得到跃迁频率$f_0$ 和 $f_1$,因此可以直接计算色散频移 $\chi$: # # $$ # \chi = \frac{|f_0 - f_1|}{2}. # $$ chi = abs(w0 - w1) / 2 print(f'The dispersive shift is {(chi * 1e3 / (2 * pi)).round(3)} MHz') # 结合"背景介绍"章节中给出的 $\chi$ 关于其它已知参数的表达式,我们可以计算读取腔与量子比特的耦合强度: # # $$ # g = \sqrt{\frac{\chi\Delta_{qr}(\Delta_{qr}+\alpha)}{\alpha}}. 
# $$ # 从 `readoutModel` 中提取出模型中的理论参数,并且使用上式计算耦合强度 $g$。 # + # Extract parameters from the model wq = readoutModel.pulseModel.qubitFreq[0] # qubit bare frequency alpha = readoutModel.pulseModel.qubitAnharm[0] # qubit anharmonicity wr = (w0 + w1) / 2 # estimated resonator frequency detuning = wq - wr # qubit-resonator detuning # + # coupling strength calculation def qrCoupling(chi, detuning, alpha): g = np.sqrt(abs(chi * detuning * (detuning + alpha) / alpha)) return g # - gEst = qrCoupling(chi, detuning, alpha) # Estimated qubit-resonator coupling strength # 然后我们比较通过间接计算得到耦合强度与理论耦合强度 $g$ 的值。 g = readoutModel.coupling[0] # therotical qubit-resonator coupling strength print(f'Theoretical coupling strength is {g * 1e3 / (2 * pi)} MHz') print(f'Estimated coupling strength is {(gEst * 1e3 / (2 * pi)).round(1)} MHz') # 通过标定色散频移以及间接计算得到的读取腔与量子比特的耦合强度大概是 132.4 MHz,与理论值 134.0 MHz 基本吻合。 # ## 测量衰减速率 # # 得到读取腔的频谱后,我们可以使用洛伦兹函数拟合该频谱,从而得到线宽来估计衰减速率 $\kappa$。这里,我们通过调用 `fitLorentzian` 函数,并且输入频率扫描范围和信号强度,得到拟合的频谱曲线,从而得到出谱线宽度 $\kappa$: param, cov = fitLorentzian(freqRange, vq0[0]) # Fit the curve using lorentzian function kappaEst = abs(param[2]) # Estimated linewidth plt.plot(freqRange / (2 * pi), lorentzian(freqRange, param[0], param[1], param[2], param[3]), '.') plt.plot(freqRange / (2 * pi), vq0[0]) plt.xlabel('$\omega_d$ (GHz)') plt.ylabel('signal (a.u.)') plt.title('Readout resonator spectrum') plt.show() # 比较衰减速率(或谱线宽度)的理论值和通过标定计算的值。 # + kappa = readoutModel.dissipation print(f'Theoretical decay rate is {kappa * 1e3 / (2 * pi)} MHz') print(f'Estimated linewidth is {(kappaEst * 1e3 / (2 * pi)).round(3)} MHz') # - # 从模拟结果可知,我们在主方程设定的衰减速率 $\kappa$ 是 2.0 MHz,而通过频谱得到的线宽是 1.987 MHz。这说明了在实验中通过对读取腔进行频率的扫描以及计算线宽,可以间接标定读取腔与环境相互作用的强度。 # ## 总结 # 用户可以通过点击这个链接 [tutorial-readout-cavity-calibration-cn.ipynb](https://github.com/baidu/Quanlse/blob/main/Tutorial/CN/tutorial-readout-cavity-calibration-cn.ipynb) 跳转到此 Jupyter Notebook 文档相应的 GitHub 
页面并获取相关代码以运行该程序。用户可以尝试不同的读取腔参数,运行该教程的代码以模拟超导量子计算实验中读取腔的校准。 # ## 参考文献 # # \[1\] [<NAME>, et al. "Cavity quantum electrodynamics for superconducting electrical circuits: An architecture for quantum computation." *Physical Review A* 69.6 (2004): 062320.](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.69.062320) # # \[2\] [<NAME>, et al. "Charge-insensitive qubit design derived from the Cooper pair box." *Physical Review A* 76.4 (2007): 042319.](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.76.042319) # # \[3\] [<NAME>. "On the generators of quantum dynamical semigroups." *Communications in Mathematical Physics* 48.2 (1976): 119-130.](https://link.springer.com/article/10.1007/bf01608499) # # \[4\] [<NAME>., et al. "Dynamics of dispersive single-qubit readout in circuit quantum electrodynamics." *Physical Review A* 80.4 (2009): 043840.](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.80.043840)
Tutorial/CN/tutorial-readout-cavity-calibration-cn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Import Dependencies import numpy as np np.random.seed(123) # for reproducibility from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras import backend as K K.set_image_dim_ordering('th') # remember to use thenos as backend from matplotlib import pyplot as plt # # Load Data from keras.datasets import mnist (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train.shape plt.imshow(X_train[0]) # # Preprocess X_train = X_train.reshape(X_train.shape[0], 1, 28, 28) X_test = X_test.reshape(X_test.shape[0], 1, 28, 28) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 X_train.shape Y_train = np_utils.to_categorical(y_train, 10) Y_test = np_utils.to_categorical(y_test, 10) Y_train.shape # # Model Architecture # + model = Sequential() model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28))) model.add(Convolution2D(32, 3, 3, activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # - # # Trainning model.fit(X_train, Y_train, batch_size=32, nb_epoch=10, verbose=1) # # Testing model.evaluate(X_test, Y_test, verbose=0)
Intro to Keras - MNIST Edition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.8 64-bit # name: python36864bit105ca9937d9642daa3958d93a183440c # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/diengiau/py18plus/blob/master/04_functionConditions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="8-J6lnI-sj_M" colab_type="text" # [TOC] # + [markdown] id="Hb-d7jCFHGPi" colab_type="text" # # 1. Function in Python # # ## 1.1 Forward price function # # We'll use functions to wrap repeated processes to clean the codes, re-use it in multiple projects, and make our codes more efficiently. # # Let's assume we need to recalculate the futures price for a contract multiple times, based on current prices (e.g., $S$) and the time (e.g., $T$), so how should we do? # # Let $S$ runs from 20, 25, 30, ..., 100. # $T$ is from 1 to 5 years. # # Then how much is the forward price, let assume: $F=S\times e^{rT}$ where the risk-free rate is 5% per annum. # # + id="7l3uelJxHGPl" colab_type="code" outputId="ccfa5d8e-34c7-469b-cc0c-3441e06a3fb9" colab={} # if S = 20, T = 1 from numpy import exp as exp 20*exp(0.05*1) # + id="E66Z71K6HGPp" colab_type="code" outputId="6a5ad118-4a15-4f66-be5b-cd6c8b947ebe" colab={} # if S = 20, T = 2 20*exp(0.05*2) # if S = 20, T = 3 20*exp(0.05*3) # if S = 20, T = 4 20*exp(0.05*4) # if S = 20, T = 5 20*exp(0.05*5) # + [markdown] id="hvtHxzT1HGPt" colab_type="text" # It is too time-consuming. So we can think about a function $f=f(T)$. 
# # Step 1: Define the function: # + id="Sk96Xe4uHGPu" colab_type="code" colab={} def f(T): return 20*exp(0.05*T) # + id="uj7oPzcyHGPy" colab_type="code" outputId="ea0147ac-ecfb-48c6-b2b8-b39edb9acf45" colab={} f # + [markdown] id="qYcLIV3vHGP3" colab_type="text" # Step 2: Use/call/apply the function: # + id="Jvs_UIakHGP4" colab_type="code" outputId="45dd2b9c-08d6-48fb-dcd2-7a4186c45824" colab={} f(1) # + id="jtgm3vHUHGP7" colab_type="code" outputId="e52f3f9c-f466-4250-8ae4-f1e9e7056f14" colab={} f(5) # + [markdown] id="w2G0i0uGHGP9" colab_type="text" # We can add one more argument, say current price $S$, into the function: # + id="aIHlRl9WHGP-" colab_type="code" colab={} def f(S, T): return S*exp(0.05*T) # + id="xMcBR4ttHGQB" colab_type="code" outputId="359afc65-f5ff-4d43-ebfe-bfaa6a8e4895" colab={} f(20, 1) # + id="xIwUu1XuHGQD" colab_type="code" outputId="ea8235eb-778d-49be-c0e1-02a6ea89842c" colab={} f(20, 5) # + [markdown] id="0cUTp-NlHGQG" colab_type="text" # Or even a more complex function with both $S$, $T$, and $r$: # + id="_Ca0Gf0NHGQH" colab_type="code" colab={} def f(S,T,r): return S*exp(r*T) # + id="A9IuaFGcHGQJ" colab_type="code" outputId="398a5e9a-df96-4722-84c4-09d5b44fa1dd" colab={} f(20, 5, 0.05) # + [markdown] id="vAe7TuF0HGQL" colab_type="text" # We should explicitly call the arguments to make the code more understandable: # + id="TzddVNW2HGQM" colab_type="code" outputId="67cacfec-244b-45d0-d6d5-3f4eabe13bc8" colab={} f(S=20, T=5, r=0.05) # + [markdown] id="QMNJuRhWHGQO" colab_type="text" # If one argument change very little, we should add the default value for the argument too: # + id="g57pp5ajHGQP" colab_type="code" outputId="d11d4f44-3238-4cc8-d11c-010c8acc65f1" colab={} def f(S,T,r=0.05): return S*exp(r*T) f(S=20, T=5) # + [markdown] id="QGCKTMivsj_x" colab_type="text" # ## 1.2 A function to get bond yield # # As we discussed in class, we need more efficient way to get the bond yield. 
Now, we rely on a method, namely __Newton optimization__ to derive the bond yield from the bond pricing formula. So the inputs of the function include: # # - Bond (market) price # - Par/face value # - Coupon rate # - Time to maturity # - Frequency of compounding, e.g., semiannually or 2 times yearly # # Let's see the code: # + id="qIiV5_1xsj_x" colab_type="code" outputId="64d4ee09-7f1f-452b-9628-09af1732dbff" colab={} """ Get yield-to-maturity of a bond """ import scipy.optimize as optimize import numpy as np def bondYield(price, par, T, coup, freq=2, guess=0.05): #freq = float(freq) periods = T*freq # number times of paying counpon print(f"Number of period: {periods}") dt = [(i+1)/freq for i in range(int(periods))] print(dt) coupon = coup/100.*par/freq # coupon per time print(f"Coupon payment per time: {coupon}") def price_func(y): return sum([coupon*np.exp(-y*t) for t in dt]) + par*np.exp(-y*T) - price print("\nThe bond yield is:") return optimize.newton(price_func, guess) bondYield(price=95.0428, par=100, T=1.5, coup=5.75, freq=2) # + id="Gj0Rt93_sj_0" colab_type="code" outputId="9cab1543-1bfa-49e5-ec7b-6b1d98f827ab" colab={} # another example in our slide bondYield(price=98.39, par=100, T=2, coup=6, freq=2) # + [markdown] id="YMYESg-Wsj_3" colab_type="text" # You see that it works like a magic. But why? Please read this awesome explanation from `stackexchange`: # # [Why does Newton's method work?](https://math.stackexchange.com/questions/350740/why-does-newtons-method-work) # # ![](https://i.stack.imgur.com/arGHL.png) # # + [markdown] id="pPwhIF6bHGQS" colab_type="text" # # 2. Conditional operations # # The most common one is the `if else` operations to check condition. 
It works like we often make decisions in real life: # # ```{python} # if have_girl_friend: # stay_at_home_and_play_game # else: # go_out_watch_3d_movies_then_go_home_eat_instant_noodle # ``` # # Let write a simple `if` operation to check if a number is even number: # + id="lBd2WLMUHGQT" colab_type="code" outputId="c8499846-0ac0-4ee8-a133-dbd41d13ca19" colab={} n = 14 if n % 2 == 0: print("This is an even number") else: print("This is NOT an even number") # + [markdown] id="XdjsKxAVHGQW" colab_type="text" # We can wrap it in a function to make it more clean: # + id="FYQJjxveHGQX" colab_type="code" colab={} def checkEvenNumber(n): if n % 2 == 0: print("This is an even number") else: print("This is NOT an even number") # + id="pVgsglEjHGQZ" colab_type="code" outputId="0884913c-d6c9-4827-e8ed-6bfcbe429f94" colab={} checkEvenNumber(14) # + id="8MojqyopHGQc" colab_type="code" outputId="104a3195-606a-4e3a-b732-1dd58aa34d8b" colab={} checkEvenNumber(13) # + [markdown] id="hPJIQ_8CHGQe" colab_type="text" # # 3. Ternary Operator # # The `if else` may be too long, sometimes we need a more simple conditional operation: ternary operator. # See the document at [here](https://book.pythontips.com/en/latest/ternary_operators.html). 
# # The formula is: # # `action_if_true if condition else action_if_false` # # For example, we want to check a number if a number is positive or not: # # - If YES, then we take square root # - If NO, then we replace it with zero # # + id="BDKYEk9vHGQg" colab_type="code" outputId="d2ad0f2d-3252-4935-c784-376783c0e577" colab={"base_uri": "https://localhost:8080/", "height": 51} import numpy as np x = np.random.randn(10) x # + [markdown] id="c7xYErKxHGQi" colab_type="text" # We first do the ternary operator for the first number in the list `x`: # + id="ZgDapwTAHGQj" colab_type="code" outputId="9604256a-791d-4160-a861-0fbf1f56ea92" colab={} np.sqrt(x[0]) if x[0]>0 else 0 # + id="smPlvYg0HGQm" colab_type="code" outputId="4991ac41-9f2a-4464-b896-84c29cfad040" colab={} np.sqrt(x[1]) if x[1]>0 else 0 # + id="5lMpxR-0HGQp" colab_type="code" outputId="6d506e6c-e9d3-4880-feb4-05edeb2d1cec" colab={} np.sqrt(x[3]) if x[3]>0 else 0 # + [markdown] id="5BnpyEFIHGQr" colab_type="text" # It is too long to repeat this for a 10-element list `x`, or even a longer list in our future life. We need to save time for go-out-watch-3d-movies with our girl-friend/boy-friend (let assume you have one). So we will go next section to learn `loop` operator. # + [markdown] id="-4251etMHGQs" colab_type="text" # # 4. 
Loop # # The most common is `for` loop: # + id="3tbJ7f5fHGQt" colab_type="code" outputId="f7cba592-4386-4862-c3f3-1a820030ebd7" colab={"base_uri": "https://localhost:8080/", "height": 187} for i in [0,1,2,3,4,5,6,7,8,9]: print(np.sqrt(x[i]) if x[i]>0 else 0) # + id="Q9nFt1kIt1JY" colab_type="code" outputId="1cbb1ad8-e26d-45a9-fe00-50c3b7362c64" colab={"base_uri": "https://localhost:8080/", "height": 357} for i in range(10): print("Now i =" , i, " then output is: ") # logging print(np.sqrt(x[i]) if x[i]>0 else 0) # + [markdown] id="imDHstIrHGQv" colab_type="text" # It is cleaner if we replace `[0,1,2,3,4,5,6,7,8,9]` by `range(10)`: # + id="zHKoKyPAHGQw" colab_type="code" outputId="4576a6a9-5950-4c38-9aeb-bcf8ef5b3821" colab={} list(range(10)) # equivalent # + id="B9yvI_hHHGQy" colab_type="code" outputId="3b473f27-e179-4618-c06f-3436bd2841b3" colab={} for i in range(10): print(np.sqrt(x[i]) if x[i]>0 else 0) # + [markdown] id="M9aa_UkHHGQ1" colab_type="text" # Next, we are better to store the output data into a list of output: # + id="KoBz6HY-HGQ2" colab_type="code" outputId="9e4dd6d2-6962-4e33-83f9-f2eeccce6c9a" colab={} output = [] for i in range(10): output.append(np.sqrt(x[i]) if x[i]>0 else 0) output # + [markdown] id="0Byb6wwHHGQ6" colab_type="text" # # 5. `map` operator # # The next idea is to use `map` to map a function to a list, so it works very similar to `for loop` and gives the same results. # The idea is that we will create a function so that can transform the input to output. Then apply that function to every element of the input list. # # + id="1kF-gapXHGQ7" colab_type="code" outputId="a8de8393-3d47-4ea4-d63b-ccf9c06800f8" colab={} def transformNumber(n): return np.sqrt(n) if n>0 else 0 list(map(transformNumber, x)) # + [markdown] id="ns4wlASHHGQ9" colab_type="text" # It is too much for today. We will apply these operators in our forwards/futures calculation in the next tutorial.
04_functionConditions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Revision

# This week we won't add new content in order to let some of you breathe and catch up with the huge amount of stuff you have learned so far. Go back to the lectures you haven't understood well and re-visit the assignments and their solutions.
#
# If you haven't done so already, you can follow the [Scipy Lecture Notes](https://scipy-lectures.org/) tutorials, Chapter 1 in particular.
#
# We will also do some quizzes and Q&A in class to refresh some things!
book/week_06/01-Revision.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Day 3 Assignment 1 # # ## Conditions for pilot to land a plane ft=int(input("Enter a feet:")) if(ft==1000): print("Safe to Land") elif(ft>1000)and(ft<5000): print("Come down to 1000ft") else: print("Turn Around and Try later") # # Day 3 Assignment 2 # # ## Prime Numbers between 1-200 using for loop for num in range(0,200): if num > 1: for i in range(2,num): if (num % i) == 0: break else: print(num)
B7 Day-3 Assignments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # + import warnings import pandas as pd import numpy as np import os import operator # sorting from read_trace import * import matplotlib.pyplot as plt warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) # - # ### read two concurrent kernel trace # + # # ls all the trace files in the targeted folder: two streams target_folder = './profile_results' trace_list = [] for root, dirs, files in os.walk(target_folder): for file in files: if 'trace' in file: trace_list.append(file) # record the overlapping rate for different data size ovlp_dict = {} for item in trace_list: trace_file = target_folder + "/" + item current_ovlp = check_kernel_ovlprate(trace_file) # find out the data size N = item.replace("trace_", "").replace(".csv","") ovlp_dict[N] = current_ovlp # sort the dd : the results is a list of tuple sorted_ovlp_dict = sorted(ovlp_dict.items(), key=operator.itemgetter(1), reverse=True) # - sorted_ovlp_dict[0:3] sorted_ovlp_dict[-5:] max_ovlp = sorted_ovlp_dict[0][1] max_ovlp = round(max_ovlp, 3) print max_ovlp # + min_ovlp = 0 for i in range(1, len(sorted_ovlp_dict)): current_ovlp = sorted_ovlp_dict[-i][1] if current_ovlp > 0: min_ovlp = current_ovlp break min_ovlp = round(min_ovlp, 3) print min_ovlp # - # ### iterate through the ovlp dd, record the kernel slowdown ratio # + data_size_ls = [] ovlp_rate_ls = [] slowdown_ls = [] for item in sorted_ovlp_dict: data_size = str(item[0]) # string ovlp_rate = float(item[1]) if ovlp_rate > 0: # trace file for current data size s1_trace_file = 'profile_results_s1/trace_' + data_size + '.csv' s2_trace_file = 'profile_results/trace_' + data_size + '.csv' #print s1_trace_file #print s2_trace_file # find out single stream kernel runtime df_current = 
trace2dataframe(s1_trace_file) s1_kernel_dd = get_kernel_time_from_trace(df_current) # find out two stream kernel runtime df_current = trace2dataframe(s2_trace_file) s2_kernel_dd = get_kernel_time_from_trace(df_current) slow_down_ratio_list = kernel_slowdown(s1_kernel_dd, s2_kernel_dd) #print slow_down_ratio_list # update the list data_size_ls.append(data_size) ovlp_rate_ls.append(ovlp_rate) slowdown_ls.append(slow_down_ratio_list) # - data_size_ls[:3] ovlp_rate_ls[0:3] slowdown_ls[0:3] # ### plot figure : kernel slown vs overlapping_rate print len(ovlp_rate_ls) # + s1_kernel_slowndown = [] s2_kernel_slowndown = [] for item in slowdown_ls: s1_kernel_slowndown.append(item[0]) s2_kernel_slowndown.append(item[1]) # - print s1_kernel_slowndown[0] print s2_kernel_slowndown[0] # + n_groups = len(ovlp_rate_ls) #s1_kernel_slowndown #s2_kernel_slowndown # create plot fig, ax = plt.subplots() index = np.arange(n_groups) bar_width = 0.05 opacity = 0.8 # kernel 1 rects1 = plt.bar(index, s1_kernel_slowndown, bar_width, alpha=opacity, color='b', label='Kernel-0') # kernel 2 rects2 = plt.bar(index + bar_width, s2_kernel_slowndown, bar_width, alpha=opacity, color='g', label='Kernel-1') # annotate the overlapping range xlabel_note = 'Overlapping Rate (' + str(max_ovlp) + '-' + str(min_ovlp) + ')' plt.xlabel(xlabel_note) plt.ylabel('Slowdown Ratio') plt.title('Kernel Slowdown Ratio Affected by Concurrent Execution') #plt.xticks(index + bar_width, ovlp_rate_ls) # turn off the ticks plt.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom='off', # ticks along the bottom edge are off top='off', # ticks along the top edge are off labelbottom='off') # labels along the bottom edge are off plt.legend(prop={'size':9}) plt.tight_layout() plt.show() # - # ### pick 10 points among the list # + active="" # data_size_ls # ovlp_rate_ls # slowdown_ls # - ovlp_rate_ls[0:5] slowdown_ls[0:5] print max_ovlp print min_ovlp # + from math 
import * def find_10_pos(N): step_size = N / 10. pos_list = [] current_pos = 0 for i in xrange(10): current_pos = floor(current_pos + step_size) pos_list.append(current_pos) return pos_list def find_5_pos(N): step_size = N / 5. pos_list = [] current_pos = 0 for i in xrange(5): current_pos = floor(current_pos + step_size) pos_list.append(current_pos) return pos_list # + def find_closet_value_pos(ovlp_rate_ls, value): """ Iterate through the list to find the position of the value """ pos = 0 min_diff = fabs(value - ovlp_rate_ls[0]) for i in xrange(1, len(ovlp_rate_ls)): cur_diff = fabs(value - ovlp_rate_ls[i]) if cur_diff < min_diff: min_diff = cur_diff pos = i return pos def find_10_pos_by_range(max_val, min_val, ovlp_rate_ls): """ Go downwards from the ovlp_rate_ls, look for indices of 10 data points to plot. """ step_size = (max_val - min_val) * 0.1 pos_list = [0] # the 1st in the list current_value = ovlp_rate_ls[0] # there are 9 left for i in xrange(9): current_value = current_value - step_size # find the nearest value index current_value_ind = find_closet_value_pos(ovlp_rate_ls, current_value) # update to the pos list pos_list.append(current_value_ind) return pos_list def find_5_pos_by_range(max_val, min_val, ovlp_rate_ls): """ Go downwards from the ovlp_rate_ls, look for indices of 10 data points to plot. 
""" step_size = (max_val - min_val) * 0.2 pos_list = [0] # the 1st in the list current_value = ovlp_rate_ls[0] # there are 4 left for i in xrange(4): current_value = current_value - step_size # find the nearest value index current_value_ind = find_closet_value_pos(ovlp_rate_ls, current_value) # update to the pos list pos_list.append(current_value_ind) return pos_list # + total_samples = len(ovlp_rate_ls) target_pos = None if total_samples > 10: target_pos = find_10_pos_by_range(max_ovlp, min_ovlp, ovlp_rate_ls) elif total_samples > 5: target_pos = find_5_pos_by_range(max_ovlp, min_ovlp, ovlp_rate_ls) else: print("Not engough data!") # - # print target_pos target_pos # + ovlp_rate_ls_2plot = [] s1_kernel_slowndown_2plot = [] s2_kernel_slowndown_2plot = [] for item in target_pos: pos = int(item) current_ovlp = ovlp_rate_ls[pos] s1_sd = s1_kernel_slowndown[pos] s2_sd = s2_kernel_slowndown[pos] # add to plot list ovlp_rate_ls_2plot.append(current_ovlp) s1_kernel_slowndown_2plot.append(s1_sd) s2_kernel_slowndown_2plot.append(s2_sd) # print ovlp_rate_ls_2plot # round to 3 decimal ovlp_rate_ls_2plot_rnd = [round(x, 3) for x in ovlp_rate_ls_2plot] print ovlp_rate_ls_2plot_rnd # + n_groups = len(ovlp_rate_ls_2plot_rnd) # create plot fig, ax = plt.subplots() index = np.arange(n_groups) bar_width = 0.3 opacity = 0.8 # kernel 1 rects1 = plt.bar(index, s1_kernel_slowndown_2plot, bar_width, alpha=opacity, color='b', label='Kernel-0') # kernel 2 rects2 = plt.bar(index + bar_width, s2_kernel_slowndown_2plot, bar_width, alpha=opacity, color='g', label='Kernel-1') plt.xlabel('Overlapping Rate') plt.ylabel('Slowdown Ratio') plt.title('Kernel Slowdown Ratio Affected by Concurrent Execution') #plt.xticks(index + bar_width, ovlp_rate_ls_2plot) plt.xticks(index + bar_width, ovlp_rate_ls_2plot_rnd, rotation=30) plt.legend(prop={'size':9}) plt.tight_layout() plt.show()
cmp_cmp/kernel_slowdown_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # 2 - Modélisation

# ## Modèle très simple de classification

# ### Régression Logistique

# Entrainons une régression logistique simple avec le data set mit_train, puis testons le avec le dataset mit_test.

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pywt

# Load the MITBIH train/test splits; the last column is the class label.
mit_test = pd.read_csv("mitbih_test.csv", header=None)
mit_train = pd.read_csv("mitbih_train.csv", header=None)

X_train = mit_train.iloc[:,:-1]
y_train = mit_train.iloc[:,-1]
X_test = mit_test.iloc[:,:-1]
y_test = mit_test.iloc[:,-1]

X_train

len(X_train)

X_test

# Préprocessing DWT application DWT sur X_train et X_test et enregistrement sur X_train, X_test

def get_dwt_features(signal, waveletname, level):
    """Extract discrete-wavelet-transform features row by row.

    For every row of `signal` (one record per row), run a multi-level DWT
    with `pywt.wavedec` and flatten the coefficient arrays into a single
    feature vector via `pywt.coeffs_to_array`.

    Parameters
    ----------
    signal : pandas.DataFrame of shape (n_samples, n_points)
    waveletname : str
        Wavelet name passed to pywt.wavedec, e.g. 'db3'.
    level : int
        Decomposition level passed to pywt.wavedec.

    Returns
    -------
    (pandas.DataFrame, coeff_slices)
        The (n_samples, n_coeffs) feature matrix indexed 0..n-1, and the
        slice bookkeeping of the last processed row (None if `signal` is
        empty), usable with pywt.array_to_coeffs for reconstruction.
    """
    n=signal.shape[0]
    m=signal.shape[1]
    print(n)
    print(m)
    rows = []
    coeff_slices = None
    for i in range(n):
        # Decompose once per row; the original code called wavedec twice
        # (once for the coefficients, once for the slices), doubling the work.
        coeffs, coeff_slices = pywt.coeffs_to_array(
            pywt.wavedec(signal.iloc[i], waveletname, level=level))
       # print(i,"-",len(coeffs))
       # print(coeffs)
        rows.append(coeffs)
    # Build the frame in one shot; the previous row-by-row pd.concat was
    # quadratic in n, and crashed (UnboundLocalError) on an empty input.
    df_new = pd.DataFrame(rows)
    df_new.index = range(n)
    return df_new,coeff_slices

# The training set is transformed in 20k-row batches (same wavelet/level),
# then the batches are stacked back together below.
df_dwt1,coeff_slices=get_dwt_features(X_train.iloc[0:20000,], 'db3',5)

df_dwt1

coeff_slices

df_dwt2,coeff_slices=get_dwt_features(X_train.iloc[20000:40000,], 'db3',5)

df_dwt3,coeff_slices=get_dwt_features(X_train.iloc[40000:60000,], 'db3',5)

df_dwt4,coeff_slices=get_dwt_features(X_train.iloc[60000:len(X_train),], 'db3',5)

list_df_dwt = [df_dwt1,df_dwt2,df_dwt3,df_dwt4]
X_train = pd.concat(list_df_dwt)
X_train.index = range(len(X_train))

X_train

X_test,coeff_slices=get_dwt_features(X_test, 'db3',5)

X_test
# Regardons la distribution des classes : # + liste = [] for i in range(len(y_train.value_counts())): x = "Classe {} : {}%".format(round(y_train.value_counts(normalize = True).index[i]), round(100*y_train.value_counts(normalize = True).iloc[i])) liste.append(x) liste # - plt.figure(figsize = (16,9)) sns.set_theme() plt.title("Répartition des classes dans le data set MITBIH_train", fontsize = 20) plt.pie(y_train.value_counts(), labels = liste); y_test.value_counts(normalize = True) # On voit que les classes sont déséquilibrées, la classe 0 est fortement majoritaire. Regardons l'impact sur les résultats du modèle. # + lr = LogisticRegression() lr.fit(X_train, y_train) y_pred = lr.predict(X_test) # - pd.crosstab(y_test, y_pred, rownames = ["réel"], colnames = ["predict"]) # + from sklearn.metrics import classification_report print(classification_report(y_test, y_pred)) # - # Comme prévu, le modèle a tendance à largement prédire la classe 0. Ainsi les rappels des classes 1, 2 et 3 sont mauvaises. Nous voyons cependant que le rappel de la classe 4 est bonne. Rééquilibrons les classes à l'aide d'un undersampling et retestons notre modèle. # ### Rééquilibrage des données from imblearn.under_sampling import RandomUnderSampler from imblearn.over_sampling import SMOTE ru = RandomUnderSampler() X_ru, y_ru = ru.fit_resample(X_train, y_train) # + lr2 = LogisticRegression() lr2.fit(X_ru, y_ru) y_pred_ru = lr2.predict(X_test) # - pd.crosstab(y_test, y_pred_ru, rownames = ["réel"], colnames = ["predict"]) print(classification_report(y_test, y_pred_ru)) # On a un meilleur rappel sur les classes 1, 2, 3 et 4 sur les données rééquilibrées mais on a tendance cette fois à sous prédire la classe 0. Par ailleurs la précision a été fortement dégradée. 
# ## SVM from sklearn.svm import SVC # + svm = SVC() svm.fit(X_train, y_train) svm_pred = svm.predict(X_test) pd.crosstab(y_test, svm_pred, rownames = ["reel"], colnames = ["predict"]) # - print(classification_report(y_test, svm_pred)) # + svm = SVC() svm.fit(X_ru, y_ru) svm_pred_ru = svm.predict(X_test) pd.crosstab(y_test, svm_pred_ru, rownames = ["reel"], colnames = ["predict"]) # - print(classification_report(y_test, svm_pred_ru)) # Nous observons de meilleurs résultats sur le rappel des classes 1, 2 et 3 avec le SVM lorsqu’on compare les algos entraînés respectivement avec le dataset d'entraînement non ré-échantillonné et le même dataset ré-échantillonné # ## Modélisation Deep Learning # ### Une première itération simple de Deep Learning # Utilisons dans un premier temps un réseau de neurones simples avec 4 couches. from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv1D, MaxPooling1D from tensorflow.keras.models import Model from tensorflow.keras.utils import to_categorical smote = SMOTE() X_sm, y_sm = smote.fit_resample(X_train, y_train) # + inputs = Input(shape = X_train.shape[1], name = "Input") dense1 = Dense(units = 10, activation = "tanh", name = "dense1", kernel_initializer="normal") dense2 = Dense(units = 8, activation = "tanh", name = "dense2", kernel_initializer="normal") dense3 = Dense(units = 6, activation = "tanh", name = "dense3", kernel_initializer="normal") dense4 = Dense(units = 5, activation = "softmax", name = "dense4", kernel_initializer="normal") x = dense1(inputs) x = dense2(x) x = dense3(x) outputs = dense4(x) model = Model(inputs = inputs, outputs = outputs) model.summary() # - model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"]) # On entraine sur le data set global : model.fit(X_train, y_train, epochs = 10, batch_size = 32, validation_split = 0.1) test_pred_class = model.predict(X_test).argmax(axis = 1) pd.crosstab(y_test, test_pred_class, rownames = ["reel"], colnames = 
["predict"]) # On entraine sur le dataset ré-échantillonné : model.fit(X_sm, y_sm, epochs = 10, batch_size = 32, validation_split = 0.1) # + nn_pred_ru = model.predict(X_test).argmax(axis=1) pd.crosstab(y_test, nn_pred_ru) # - print(classification_report(y_test, nn_pred_ru)) # ### Itération CNN # + inputs = Input(shape = (X_train.shape[1],1), name = "Input") conv1_cnn = Conv1D(filters = 32, kernel_size = 5, padding = "valid", activation = "relu") pool1_cnn = MaxPooling1D(pool_size = 2) dropout_cnn = Dropout(rate=0.2) flatten_cnn = Flatten() dense1_cnn = Dense(units = 128, activation="relu") dense2_cnn = Dense(units = 5, activation="softmax") x_cnn = conv1_cnn(inputs) x_cnn = pool1_cnn(x_cnn) x_cnn = dropout_cnn(x_cnn) x_cnn = flatten_cnn(x_cnn) x_cnn = dense1_cnn(x_cnn) outputs = dense2_cnn(x_cnn) model_cnn = Model(inputs = inputs, outputs = outputs) model_cnn.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"]) y_train = to_categorical(y_train) y_test = to_categorical(y_test) model_cnn.fit(X_train, y_train, epochs = 10, batch_size = 200, validation_split = 0.2) # - pred_test_cnn = model_cnn.predict(X_test).argmax(1) pd.crosstab(y_test.argmax(1), pred_test_cnn, rownames = ["reel"], colnames = ["predict"]) print(classification_report(y_test.argmax(1), pred_test_cnn)) y_sm = to_categorical(y_sm) model_cnn.fit(X_sm, y_sm, epochs = 10, batch_size = 200, validation_split = 0.2) pred_test_cnn_sm = model_cnn.predict(X_test).argmax(1) pd.crosstab(y_test.argmax(1), pred_test_cnn_sm, rownames = ["reel"], colnames = ["predict"]) print(classification_report(y_test.argmax(1), pred_test_cnn_sm)) # ### Itération LeNet # + inputs = Input(shape = (X_train.shape[1],1), name = "Input") conv1_lenet = Conv1D(filters = 32, kernel_size = 5, padding = "valid", activation = "relu") pool1_lenet = MaxPooling1D(pool_size = 2) conv2_lenet = Conv1D(filters = 16, kernel_size = 3, padding = "valid", activation = "relu") pool2_lenet = MaxPooling1D(pool_size = 2) 
dropout_lenet = Dropout(rate=0.2) flatten_lenet = Flatten() dense1_lenet = Dense(units = 128, activation="relu") dense2_lenet = Dense(units = 5, activation="softmax") x = conv1_lenet(inputs) x = pool1_lenet(x) x = conv2_lenet(x) x = pool2_lenet(x) x = dropout_lenet(x) x = flatten_lenet(x) x = dense1_lenet(x) outputs = dense2_lenet(x) model_lenet = Model(inputs = inputs, outputs = outputs) model_lenet.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"]) model_lenet.fit(X_train, y_train, epochs = 10, batch_size = 200, validation_split = 0.2) # - pred_test_lenet = model_cnn.predict(X_test).argmax(1) pd.crosstab(y_test.argmax(1), pred_test_lenet, rownames = ["reel"], colnames = ["predict"]) print(classification_report(y_test.argmax(1), pred_test_lenet)) model_lenet.fit(X_sm, y_sm, epochs = 10, batch_size = 200, validation_split = 0.2) pred_test_lenet_sm = model_lenet.predict(X_test).argmax(1) pd.crosstab(y_test.argmax(1), pred_test_lenet_sm, rownames = ["reel"], colnames = ["predict"]) print(classification_report(y_test.argmax(1), pred_test_lenet_sm))
notebooks/2 - Modelisation_DWT_db3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.2.0 # language: julia # name: julia-1.2 # --- # # Introductory sheaves tutorial # # Here we will run through a simple sheaf example using <NAME>'s [pysheaf](https://github.com/kb1dds/pysheaf) python package. The code is still under active development so we will demonstrate a simple example from Figure 3 of Blevins and Bassett 2020. ## Import python pysheaf package. using PyCall ps = pyimport("pysheaf") # ### Cell signaling example # # In the cell signaling example from Figure 3a, our system is set up # # (Cell 1) --------> (Cell 2) --------> (Cell 3) # # and the effect across edges is # # (Cell 1) --- 2x ---> (Cell 2) --- 0.5x ---> (Cell 3) # # so that cell 2 signals at twice the rate of cell 1, and cell 3 signals at half the rate of cell 2. # # We start to represent this system the nodes of our system with `AddCell`. # # + shf=ps.Sheaf() shf.AddCell("Cell_1",ps.Cell("real")) # First argument is the name of the Cell, the second describes the stalk shf.AddCell("Cell_2",ps.Cell("real")) shf.AddCell("Cell_3",ps.Cell("real")) # - # At this point we have three nodes, one for each cell, and we link them together with restriction maps following the edges in our system. # + # Defining functions for restriction maps function mult_by_two(x) return 2.0*x end function mult_by_half(x) return 0.5*x end # Add these relations to the sheaf shf.AddCoface("Cell_1","Cell_2",ps.Coface("real","real",mult_by_two)) shf.AddCoface("Cell_2","Cell_3",ps.Coface("real","real",mult_by_half)) # - # Now in order to reproduce the figure, we can assign the rates for each cell: # # | Cell Number | Rate (number of molecules per time) | # | ------------- |-------------------------------------------:| # | Cell 1 | 4 | # | Cell 2 | 8 | # | Cell 3 | 4 | # # # and we know these also satisfy our maps. 
We will add the data assignments with `SetDataAssignment`. shf.GetCell("Cell_1").SetDataAssignment(ps.Assignment("real",4.0)) shf.GetCell("Cell_2").SetDataAssignment(ps.Assignment("real",8.0)) shf.GetCell("Cell_3").SetDataAssignment(ps.Assignment("real",4.0)) # The next few lines of code check the consistency radius of the sheaf. For details see slide 16 of this [presentation](http://www.appliedcategorytheory.org/wp-content/uploads/2018/03/Michael-Robinson-Sheaf-Methods-for-Inference.pdf) # # Briefly, if we pick one node and propagate that value along the sheaf following the restriction maps, we may get different values at each node than those assigned. The consistency radius is the maximum difference between the actual data assignment and an expected value via propagation, and sets a lower bound on the distance to the nearest global section [1](https://arxiv.org/abs/1603.01446). # + shf.mPreventRedundantExtendedAssignments = false shf.MaximallyExtendCell("Cell_1") # Use the data on Cell 1 to decide what values at other nodes *should* be shf.MaximallyExtendCell("Cell_2") shf.MaximallyExtendCell("Cell_3") consistency_radius = shf.ComputeConsistencyRadius() print("The consistency radius is $consistency_radius") # - # ### New observations # # If we observe new data from the system, will the new data fit our sheaf? 
# +
# Perturbation 1: after adding drug A we observe the following rates
# (every cell now signals at half its original rate):
shf.GetCell("Cell_1").SetDataAssignment(ps.Assignment("real",2.0))
shf.GetCell("Cell_2").SetDataAssignment(ps.Assignment("real",4.0))
shf.GetCell("Cell_3").SetDataAssignment(ps.Assignment("real",2.0))

# Check to see if the new data is consistent with our original sheaf
shf.mPreventRedundantExtendedAssignments = false
shf.MaximallyExtendCell("Cell_1")
shf.MaximallyExtendCell("Cell_2")
shf.MaximallyExtendCell("Cell_3")

consistency_radius_A = shf.ComputeConsistencyRadius()
print("The consistency radius is $consistency_radius_A")

# +
# Perturbation 2: after adding drug B we observe the following rates
# (Cell 2 no longer doubles Cell 1's rate):
shf.GetCell("Cell_1").SetDataAssignment(ps.Assignment("real",4.0))
shf.GetCell("Cell_2").SetDataAssignment(ps.Assignment("real",4.0))
shf.GetCell("Cell_3").SetDataAssignment(ps.Assignment("real",2.0))

# Check to see if the new data is consistent with our original sheaf
shf.mPreventRedundantExtendedAssignments = false
shf.MaximallyExtendCell("Cell_1")
shf.MaximallyExtendCell("Cell_2")
shf.MaximallyExtendCell("Cell_3")

consistency_radius_B = shf.ComputeConsistencyRadius()
print("The consistency radius is $consistency_radius_B")
# -

# Now we can see that only the drug B data fails to fit the original model:
# drug A (perturbation 1) scales every rate by the same factor, so the 2x and
# 0.5x restriction maps are still satisfied (consistency radius 0), while drug B
# (perturbation 2) breaks the Cell 1 -> Cell 2 doubling relation (consistency radius 2).
#
# | Cell Number | Rate original | Rate after perturbation 1 | Rate after perturbation 2 |
# | ------------- |----:|----:|----:|
# | Cell 1 | 4 | 2 | 4 |
# | Cell 2 | 8 | 4 | 4 |
# | Cell 3 | 4 | 2 | 2 |
# | | | | |
# | Consistency radius | 0 | 0 | 2 |
SheavesExample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Instructions
#
# 1. Download any sample from Shop and move it into a _separate_ folder locally.
# 2. Pass the path for that folder as the `path=` argument for `read_sg_shop_zipfile()`.
# 3. Specify whether you also want to read any supplement files using the `return_supplemental_files` parameter.

from safegraph_eval import ingest

# # Read multiple months of Patterns/Core/Geo

nested_dir = '/Users/eugene/Documents/Projects/safegraph_eval_dir/sample_data/multiple_months_zipped_patterns_core_geo/'

output = ingest.read_sg_shop_zipfile(nested_dir, return_supplemental_files = True)

output.keys()

output['data'].head()

output['brand_info'].head()

# # Read one month of Patterns/Core/Geo

unnested_dir = '/Users/eugene/Documents/Projects/safegraph_eval_dir/sample_data/one_month_zipped_patterns_core/'

# FIX: only the module is imported (`from safegraph_eval import ingest`), so the
# bare name `read_sg_shop_zipfile` used below was undefined and raised NameError.
output = ingest.read_sg_shop_zipfile(unnested_dir, return_supplemental_files = True)

output['data'].head()

# # Read Core

unnested_core_dir = '/Users/eugene/Documents/Projects/safegraph_eval_dir/sample_data/zipped_core/'

# FIX: same NameError as above — qualify with the imported `ingest` module.
output = ingest.read_sg_shop_zipfile(unnested_core_dir, return_supplemental_files = True)

output['data'].head()

output['brand_info'].head()
notebooks/ingest usage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.chdir('../app') import frontend.stock_analytics as salib import matplotlib.pyplot as plt import matplotlib.dates as mdates from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from datetime import datetime,timedelta from pprint import pprint import matplotlib.patches as patches import time import numpy as np import datetime import copy import preprocessing.lob.s03_fill_cache as l03 import re import preprocessing.preglobal as pg # %matplotlib inline import cv2 from mpl_toolkits.axes_grid1.inset_locator import inset_axes from pymongo import MongoClient, UpdateMany, UpdateOne, InsertOne import pandas as pd from matplotlib import rc import json url='mongodb://192.168.0.94:27017/' client = MongoClient(url) tbl = client['global']['keynotes'] tbl2 = client['global']['timekeys'] folder = "Thesis/generated_figures/" # + def graph_pre(size=1): rc('font',**{'family':'serif','serif':['Times']}) rc('text', usetex=True) h = [0,3,5][size] plt.rcParams["figure.figsize"] = (7.5,h) def graph_post(): plt.tight_layout() # - # Load all Graphs sa_array = [] for i in pg.get_kn_entries({'selected':1}): print('load',i['id']) sa_array.append({'kn':i,'sa':salib.stock_analytics(i['id'], gui_mode=False)}) def dobins(ts_array, N = 1000, x_bins=None, useinteger=False, stepsize=None): ts_array.sort() #x_bins = None if x_bins is None: if useinteger: minp = math.floor(ts_array[0]) maxp = math.ceil(ts_array[-1]) steps = np.ceil((maxp-minp)/N) x_bins = np.arange(minp, maxp+0.1, steps) elif stepsize is None: x_bins = np.linspace(ts_array[0], ts_array[-1], N+1) else: x_bins = np.arange(ts_array[0], ts_array[-1], stepsize) N = len(x_bins)-1 dt = x_bins[1]-x_bins[0] x_bins = x_bins[:-1] y_bins = np.zeros(len(x_bins)) unique, counts = 
np.unique(np.floor((ts_array-x_bins[0])/dt), return_counts=True) unique = unique[:-1] y_bins[unique.astype(int)] = counts[:-1] E = y_bins.mean() V = y_bins.var() #print('V =',V,'; E =',E,'; r(tau) = V/E =', V/E) return x_bins, y_bins, V/E salib.time_as_string = False filte = copy.deepcopy(sa_array[0]['sa'].filters['default']) g = sa_array[0]['sa'].graphlist['marketorder_num'] filte['numbins']=100 filte['range']['time'] = [9.6*3600*1000, 15.9*3600*1000] f = filte # from json with open("all_results_w_frac.json", "r") as out_file: all_results_w_frac_json = json.load(out_file) all_results_w_frac = {} for k,v in all_results_w_frac_json.items(): all_results_w_frac[k] = pd.DataFrame(v) # flatten it all_results_flat = pd.DataFrame() for k,v in all_results_w_frac.items(): v['keynote'] = k all_results_flat = all_results_flat.append(v) pd.set_option('display.max_rows', 500) def load_graph(sa): sstart = sa['kn']['time_mapping'][0]['stock_time_start'] sstop = sa['kn']['time_mapping'][-1]['stock_time_end'] return sstart, sstop def get_metadata(row,col): if pd.isna(row['metadata']): return "" return list(json.loads(row['metadata'])[col].items())[0][1] def draw_histogram(ax, dt, rfilter, drawannotations=False, fract='frac_of_total150',label='',axright=False,title=''): x_bins_orig = np.linspace(-dt/1000,dt/1000, 20) x_bins = None y_bins = [] for index, row in rfilter.iterrows(): sa = [sa for sa in sa_array if sa['kn']['id'] == row['keynote']][0] sstart, sstop = load_graph(sa) t0 = row['time']*1000 + sstart cs = sa['sa'].ticks.aggregate([{ "$match":{"timestamp":{'$gte':t0-dt, '$lte':t0+dt}, "type":{"$in":["fill","execute","trade"]} }} , {'$project': {"timestamp":1 } } ] ) timestamps = [(a['timestamp']-(t0))/1000 for a in cs] x_bins,b,_ = dobins(timestamps,x_bins=x_bins_orig) b = b[2:-1] y_bins.append(b/b.sum()) x_bins = x_bins[2:-1] y_bins = np.array(y_bins)*len(b) xmean = y_bins.mean(axis=0) xstd = y_bins.std(axis=0) if ax: ax.plot(x_bins,xmean,'.') ax.fill_between(x_bins, 
xmean-xstd, xmean+xstd, color='grey') ax.set_ylim([0,None]) ax.axvline(x=0, linewidth=1, color='k') ax.axhline(y=1, linewidth=1, color='k') rrt =r'Relative rate $N(t_i)/ \frac{1}{M} \sum N(t)$' ax.set_ylabel(rrt) if axright: ax.yaxis.tick_right() ax.yaxis.labelpad = 15 ax.yaxis.set_label_position("right") ax.set_ylabel(rrt,rotation=270) ax.set_xlabel('Time difference from event $t_i$ [in s]') ax.set_title(title) rfilter['smallrtmp'] = 1/((1/rfilter[fract])-1) print(label+'& {:.2f} $\pm$ {:.2f} & {:.2f} $\pm$ {:.2f} \\\\'.format( rfilter['smallrtmp'].mean(),rfilter['smallrtmp'].std(),rfilter[fract].mean(),rfilter[fract].std())) if not drawannotations: return def trans(x): return 1/(1/x - 1) drawannotations.get_yaxis().set_visible(False) drawannotations.set_ylim([0,1.4]) drawannotations.set_xlim([0.3,1.0]) ax2 = drawannotations.twiny() def tick_function(X): V = trans(X) return ["%.3f" % z for z in V] ax2.set_xlim(drawannotations.get_xlim()) new_tick_locations = drawannotations.get_xticks() ax2.set_xticks(new_tick_locations) ax2.set_xticklabels(tick_function(new_tick_locations)) ax2.set_xlabel(r"Ratio $r(t)$") drawannotations.set_xlabel(r"Ratio $R(t)$") i = 1.3 for _, row in rfilter.sort_values(fract).iterrows(): i -= 0.1 drawannotations.plot([row[fract],row[fract]],[0,i],lw=1,c='k') drawannotations.text(row[fract], i, tex, {'ha': 'left', 'va': 'bottom'}, rotation=0) # + rc('font',**{'family':'serif','serif':['Times']}) rc('text', usetex=True) plt.rcParams["figure.figsize"] = (7.5,5) filtered = all_results_flat[[not pd.isna(a) and ('iphone' in a.lower() or 'ipad' in a.lower()) and ('neu' in a.lower()) for a in all_results_flat['metadata'] ]] plt.rcParams["figure.figsize"] = (7.5,5) fig,axs = plt.subplots(2) draw_histogram(axs[0], 250*1000, filtered, drawannotations=axs[1], label='New iPhones and iPads (at sale price announcement)', title='New iPhones and iPads') axs[0].set_ylim([0,2.5]) plt.tight_layout() fig.savefig(folder+"res_lob41.pdf") # - # + 
rc('font',**{'family':'serif','serif':['Times']}) rc('text', usetex=True) plt.rcParams["figure.figsize"] = (7.5,5) filte['numbins']=100 filte['range']['time'] = [9.5*3600*1000, 16.*3600*1000] fig,axs = plt.subplots(2,2) draw_histogram(axs[0][0], 10*60*1000, all_results_flat[all_results_flat['src'] == 'kn_start'], fract='frac_of_total600', label='keynote start',title='Start of keynote') draw_histogram(axs[0][1], 250*1000, all_results_flat[all_results_flat['src'] == 'random_kn'], label='random $t$',axright=True,title='Random time') draw_histogram(axs[1][0], 250*1000, all_results_flat[(all_results_flat['src'] == 'keyword_ocr') | (all_results_flat['src'] == 'keyword_subs')], label='all keywords',title='All keywords') filtered = all_results_flat[[not pd.isna(a) and ('neu' in a.lower()) for a in all_results_flat['metadata'] ]] draw_histogram(axs[1][1], 250*1000, filtered, label='new product',axright=True,title='New products') for axa in axs: for ax in axa: ax.set_ylim([0,2.5]) filtered2 = all_results_flat[[not pd.isna(a) and ('iphone' in a.lower() or 'ipad' in a.lower()) and ('neu' in a.lower()) for a in all_results_flat['metadata'] ]] draw_histogram(None, 250*1000, filtered2, label='new \\textit{iPhone}/\\textit{iPad}') plt.tight_layout() fig.savefig(folder+"res_lob40.pdf") # -
figures/04_trigger_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import hdc_top hdc_top.HDCTop.dimension = 5000 # top_imrandom hdc_top_1 = hdc_top.HDCTop('database/fm.csv', 'database/imrandom/GSR_proj_pos_D_5000_imrandom.csv', 'database/imrandom/GSR_proj_neg_D_5000_imrandom.csv', 'database/imrandom/ECG_proj_pos_D_5000_imrandom.csv', 'database/imrandom/ECG_proj_neg_D_5000_imrandom.csv', 'database/imrandom/EEG_proj_pos_D_5000_imrandom.csv', 'database/imrandom/EEG_proj_neg_D_5000_imrandom.csv', 'database/imrandom/im_D_5000_imrandom.csv', is_early_fusion=True) hdc_top_1.train_am() hdc_top_1.test() hdc_top_1.prediction_success_rate hdc_top_1.prediction_v_success_rate hdc_top_1.prediction_a_success_rate hdc_top_1.reset() hdc_top_1.is_early_fusion = False hdc_top_1.train_am() hdc_top_1.test() hdc_top_1.prediction_success_rate hdc_top_1.prediction_v_success_rate hdc_top_1.prediction_a_success_rate # top_imoptim hdc_top_2 = hdc_top.HDCTop('database/fm.csv', 'database/imoptim/GSR_proj_pos_D_5000_imoptim.csv', 'database/imoptim/GSR_proj_neg_D_5000_imoptim.csv', 'database/imoptim/ECG_proj_pos_D_5000_imoptim.csv', 'database/imoptim/ECG_proj_neg_D_5000_imoptim.csv', 'database/imoptim/EEG_proj_pos_D_5000_imoptim.csv', 'database/imoptim/EEG_proj_neg_D_5000_imoptim.csv', 'database/imoptim/im_D_5000_imoptim.csv', is_early_fusion=True) hdc_top_2.train_am() hdc_top_2.test() hdc_top_2.prediction_success_rate hdc_top_2.prediction_v_success_rate hdc_top_2.prediction_a_success_rate hdc_top_2.reset() hdc_top_2.is_early_fusion = False hdc_top_2.train_am() hdc_top_2.test() hdc_top_2.prediction_success_rate hdc_top_2.prediction_v_success_rate hdc_top_2.prediction_a_success_rate # top_reduced hdc_top_3 = hdc_top.HDCTop('database/fm.csv', 'database/reduced/GSR_proj_pos_D_5000_reduced.csv', 'database/reduced/GSR_proj_neg_D_5000_reduced.csv', 
'database/reduced/ECG_proj_pos_D_5000_reduced.csv', 'database/reduced/ECG_proj_neg_D_5000_reduced.csv', 'database/reduced/EEG_proj_pos_D_5000_reduced.csv', 'database/reduced/EEG_proj_neg_D_5000_reduced.csv', 'database/reduced/im_D_5000_reduced.csv', is_early_fusion=True) hdc_top_3.train_am() hdc_top_3.test() hdc_top_3.prediction_success_rate hdc_top_3.prediction_v_success_rate hdc_top_3.prediction_a_success_rate hdc_top_3.reset() hdc_top_3.is_early_fusion = False hdc_top_3.train_am() hdc_top_3.test() hdc_top_3.prediction_success_rate hdc_top_3.prediction_v_success_rate hdc_top_3.prediction_a_success_rate
Software Model - Python/hdc_top_d5000.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

spark

# +
import pyspark.sql.types as st
from pyspark.sql import udf, Window, SparkSession, Row
from pyspark.sql import functions as f

spark = SparkSession.builder.appName('OPI_Exam').getOrCreate()
# -

# +
# #!unzip profeco.pdf.zip
# -

# +
# Add a custom Schema for the Profeco price file; all columns are free text
# except the price (decimal) and the registration timestamp.
precios_schema = st.StructType([
    st.StructField('producto', st.StringType(), True),
    st.StructField('presentacion', st.StringType(), True),
    st.StructField('marca', st.StringType(), True),
    st.StructField('categoria', st.StringType(), True),
    st.StructField('catalogo', st.StringType(), True),
    # FIX: DecimalType() defaults to precision 10, scale 0, which silently
    # discards the fractional part of every price. Keep two decimal places.
    st.StructField('precio', st.DecimalType(10, 2), True),
    st.StructField('fechaRegistro', st.TimestampType(), True),
    st.StructField('cadenaComercial', st.StringType(), True),
    st.StructField('giro', st.StringType(), True),
    st.StructField('nombreComercial', st.StringType(), True),
    st.StructField('direccion', st.StringType(), True),
    st.StructField('estado', st.StringType(), True),
    st.StructField('municipio', st.StringType(), True),
    st.StructField('latitud', st.StringType(), True),
    st.StructField('longitud', st.StringType(), True),
])
# -

# +
# Read raw data
df = spark.read.schema(precios_schema).options(header='true').csv('all_data.csv')

df.show(2)
# -

# +
# I want to know the registered years
# -

# # 1.- Data pre-processing

# ## a. How many entries are?

print(f'There are {df.count()} entries in this file')

# ## b. How many categories are?

categories = df.select(['categoria']).distinct()
print(f'There are {categories.count()} categories')

# ## c. How many commercial chains are being monitored?

commercial_chain = df.select(['cadenaComercial']).distinct()
print(f'There are {commercial_chain.count()} commercial chains')

# ## d. How could you ensure the data quality? Did you detect any inconsistence in the data or error in the source?
# # A: The easiest way is to ensure you are downloading it from a reliable source, in this case, the data had a gold seal that stated its quality.
#
# Then we can perform different things to ensure its quality, for example look if there are missing values (empty cells) or outliers in the data.
#
# A thing that I have seen in this exercise and the previous one is that accents from spanish can cause troubles when are casted into strings due to the different codification they use.
#
# Another thing I have encountered is the correct classification of data, for example in the estado column there are three entries that does not correspond to a valid mexican state.

# ## e. Which products are monitored the most per state?

# Distinct (producto, presentacion, marca) combinations per state, counted per product.
chains_products_df = df.select(['estado', 'producto','presentacion','marca']).distinct().groupby(
    ['estado','producto']).count().sort(f.col('estado').asc(), f.col('count').desc()).dropna()

# Entries appearing in the `estado` column that are not valid Mexican states.
not_a_state = ['COL. EDUARDO GUERRA', 'estado', 'ESQ. SUR 125"']

# Drop the catch-all product 'VARIOS' and the invalid state entries.
# FIX: the original chained three separate `estado != ...` filters and left the
# `not_a_state` list unused; `~Column.isin(not_a_state)` expresses the same
# conjunction in one filter and actually uses the list. (If bad rows still get
# through, check for leading/trailing whitespace in `estado` before comparing.)
chains_products_df = chains_products_df.where(chains_products_df['producto'] != 'VARIOS')\
                                       .where(~chains_products_df['estado'].isin(not_a_state))

# Sanity check: only real states should remain.
chains_products_df.select('estado').distinct().show(35)

# Keep, for each state, the product with the highest count (row_number == 1 per partition).
partition = Window.partitionBy("estado").orderBy(f.col('count').desc())

chains_products_df.withColumn('rn', f.row_number().over(partition)).where(f.col('rn') == 1).drop('rn').show()

# ## f. Which commercial chain has the most variety of monitored products?
# As the data is reported daily, most of the products will repeat many times, that's why we need to take # into account "producto", "presentacion", "marca" and then apply the distinct() method that creates a mathematical # set where there's only one product per entry chains_products_df = df.select(['cadenaComercial', 'producto','presentacion','marca']).distinct().groupby( ['cadenaComercial','producto','presentacion','marca']).count() chains_products_df.select('cadenaComercial', 'count').groupby('cadenaComercial').sum().orderBy('sum(count)', ascending=False).show() # Wal-Mart is the chain with the most monitored products # # 2.- Exploratory analysis # ## a) Generate a basic basket that allows you to compare prices geographically and temporarily. Justify your election and procedure. # first we need to choose the products that will put together our basic basket. # Then we will choose the prices based on the day and state and average throughout commercial chains # # 3.- Visualization # ## a) Create a map that allow us to identify the different offer in categories in Leon, Guanajuato and the price level in each one. Bonus points if the map is interactive. 
# Municipalities making up the Leon, Guanajuato metropolitan area.
zm_guanajuato=[ 'LEON', 'SILAO DE LA VICTORIA', '<NAME>', '<NAME>']

df.select(f.max('fechaRegistro')).show()

# The max registered date is:
#
# +-------------------+
# | max(fechaRegistro)|
# +-------------------+
# |2016-04-29 17:47:24|
# +-------------------+

# +
# First filter by state and municipios and take prices from 2015
zm_categories_df = df.select(['estado','municipio', 'cadenaComercial', 'categoria','precio','fechaRegistro','latitud', 'longitud'])

# FIX: the original chained `.where(municipio == zm_guanajuato[0])
# .where(municipio == zm_guanajuato[1]).where(municipio == zm_guanajuato[2])`,
# which AND-s three mutually exclusive equalities and therefore always returned
# an empty DataFrame. A row belongs to the metro area when its municipio is
# ANY of the listed ones, i.e. membership via Column.isin.
zm_categories_df = zm_categories_df.where(
    zm_categories_df['estado'] == 'GUANAJUATO').where(
    zm_categories_df['municipio'].isin(zm_guanajuato)).where(
    (zm_categories_df['fechaRegistro'] < '2016-01-01 00:00:00') &
    (zm_categories_df['fechaRegistro'] >= '2015-01-01 00:00:00'))
# -

# + jupyter={"outputs_hidden": true}
zm_categories_df.write.format('csv').option('header', True).mode('overwrite').option('sep', ',')\
    .save(f'/zm_Guanajuato_Categorias/')
# -

# + jupyter={"outputs_hidden": true}
# Average price per chain/category at each coordinate (for the map below).
zm_categories_df = zm_categories_df.groupby(
    'cadenaComercial', 'categoria','latitud','longitud').mean('precio')
# -

from ipyleaflet import Map, basemaps

Map(center = (60, -2.2), zoom = 2, min_zoom = 1, max_zoom = 20, basemap=basemaps.Stamen.Terrain)
SeccionB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import cs_vqe as c import ast import os import cs_vqe_with_LCU as c_LCU import quchem.Misc_functions.conversion_scripts as conv_scr # + # with open("hamiltonians.txt", 'r') as input_file: # hamiltonians = ast.literal_eval(input_file.read()) working_dir = os.getcwd() data_dir = os.path.join(working_dir, 'data') data_hamiltonians_file = os.path.join(data_dir, 'hamiltonians.txt') with open(data_hamiltonians_file, 'r') as input_file: hamiltonians = ast.literal_eval(input_file.read()) # - for key in hamiltonians.keys(): print(f"{key: <25} n_qubits: {hamiltonians[key][1]:<5.0f}") # + # mol_key = 'H2_6-31G_singlet' # mol_key ='H2-O1_STO-3G_singlet' mol_key = 'H1-F1_STO-3G_singlet' # mol_key='H3_STO-3G_singlet_1+' # currently index 2 is contextual part # ''''''''''''''''3 is NON contextual part # join together for full Hamiltonian: ham = hamiltonians[mol_key][2] ham.update(hamiltonians[mol_key][3]) # full H ham # - print(f"n_qubits: {hamiltonians[mol_key][1]}") # # Get non-contextual H # + nonH_guesses = c.greedy_dfs(ham, 10, criterion='weight') nonH = max(nonH_guesses, key=lambda x:len(x)) # largest nonCon part found by dfs alg # - # Split into: # # $$H = H_{c} + H_{nc}$$ # + code_folding=[4] nonCon_H = {} Con_H = {} for P in ham: if P in nonH: nonCon_H[P]=ham[P] else: Con_H[P]=ham[P] # - # ## Testing contextuality print('Is NONcontextual correct:', not c.contextualQ_ham(nonCon_H)) print('Is contextual correct:',c.contextualQ_ham(Con_H)) # # Classical part of problem! 
# Take $H_{nc}$ and split into: # - $Z$ = operators that completely comute with all operators in $S$ # - $T$ = remaining operators in $S$ # - where $S = Z \cup T$ and $S$ is set of Pauli operators in $H_{nc}$ # # # - We then split the set $T$ into cliques $C_{1}, C_{2}, ... , C_{|T|}$ # - all ops in a clique commute # - ops between cliques anti-commute! bool_flag, Z_list, T_list = c.contextualQ(list(nonCon_H.keys()), verbose=True) Z_list T_list # ## Get quasi model # # First we define # # - $C_{i1}$ = first Pauli in each $C_{i}$ set # - $A_{ij} = C_{ij}C_{1i}$ # # # - $G^{prime} = \{1 P_{i} \;| \; i=1,2,...,|Z| \}$ # - aka all the completely commuting terms with coefficients set to +1! # # - We define G to be an independent set of $G^{prime}$ # - where $G \subseteq G^{prime}$ # G_list, Ci1_list, all_mappings = c.quasi_model(nonCon_H) print('non-independent Z list:', Z_list) print('G (independent) Z list:', G_list) print('all Ci1 terms:', Ci1_list) # $$R = G \cup \{ C_{i1} \;| \; i=1,2,...,N \}$$ # Assemble all the mappings from terms in the Hamiltonian to their products in R: all_mappings # Overall $R$ is basically reduced non-contextual set # - where everything in original non-contextual set can be found by **inference!** # # Function form # # $$R = G \cup \{ C_{i1} \;| \; i=1,2,...,N \}$$ # # - note q to do with $G$ # - note r to do with $C_{i1}$ # + model = [G_list, Ci1_list, all_mappings] fn_form = c.energy_function_form(nonCon_H, model) # returns [ # denstion of q, # dimension of r, # [coeff, indices of q's, indices of r's, term in Hamiltonian] # ] # - model fn_form Energy_function = c.energy_function(fn_form) # + import random ### now for the q terms we only have +1 or -1 assignment! q_variables = [random.choice([1,-1]) for _ in range(fn_form[0])] ### r variables is anything that makes up unit vector! 
r_variables = c.angular(np.arange(0,2*np.pi, fn_form[1])) r_variables # - Energy_function(*q_variables,*r_variables) # find_gs_nonconfunction optimizes above steps by: # 1. brute forcing all choices of ```q_variables``` # - ```itertools.product([1,-1],repeat=fn_form[0])``` # 2. optimizing over ```r_variables``` (in code ```x```) # - using SciPy optimizer! # + model = [G_list, Ci1_list, all_mappings] lowest_eigenvalue, ground_state_params, model_copy, fn_form_copy, = c.find_gs_noncon(nonCon_H, method = 'differential_evolution', model=model, fn_form=fn_form) # returns: best + [model, fn_form] print(lowest_eigenvalue) print(ground_state_params) # - ## check Energy_function(*ground_state_params[0],*ground_state_params[1]) == lowest_eigenvalue # # Now need to rotate Hamiltonian! # We now have non contextual ground state: $(\vec{q}, \vec{r})$ ground_state_params # We can use this result - ground state of $H_{nc}$ - as a classical estiamte of our ground state of the full Hamiltonian ($H = H_{c} + H_{nc}$) # # However we can also obtain a quantum correction using $H_{c}$ # # By minimizing theenergy of the remaining terms in the Hamiltonian over the quantum states that are **consistent with the noncon-textual ground state**. # To do this we first rotate each $G_{j}$ and $\mathcal{A} = \sum_{i=1}^{N} r_{i}A_{i}$: # + model = [G_list, Ci1_list, all_mappings] print(G_list) # G_j terms! print(Ci1_list) # mathcal(A) # - # to SINGLE QUBIT pauli Z operators! # # - to map the operators in $G$ to single qubit Pauli operators, we use $\frac{\pi}{2}$ rotations! # # - note $\mathcal{A}$ is an anti-commuting set... therefore we can use $N-1$ rotations as in unitary partitioning's sequence of rotations to do this! # - $R^{\dagger}\mathcal{A} R = \text{single Pauli op}$ # # Rotate full Hamiltonian to basis with diagonal noncontextual generators! # function ```diagonalize_epistemic```: # 1. 
first if else statement: # - if cliques present: # - first maps A to single Pauli operator (if cliques present) # - then rotates to diagonlize G union with single Pauli opator of A (hence GuA name!) # - else if NO cliques present: # - gets rotations to diagonlize G # # - these rotations make up GuA term in code! # 2. NEXT code loops over terms in GuA (denoted as g in code) # - if g is not a single qubit $Z$: # - code generates code to rotate operator to make g diagonal (rotations) # - then constructs map of g to single Z (J rotation) # - Note R is applied to GuA # # # ######### # - Note rotations are given in Appendix A of https://arxiv.org/pdf/2011.10027.pdf # - First code checks if g op in GuA is diagonal # - if so then needs to apply "K" rotation (involving $Y$ and $I$ operators (see pg 11 top) to make it NOT diagononal # - now operator will be diagnoal! # - next generate "J" rotation # - turns non-diagonal operator into a single qubit $Z$ operator! # # NEW LCU method N_index=0 check_reduction=True N_Qubits= hamiltonians[mol_key][1] R_LCU, Rotations_list, diagonalized_generators_GuA, eigen_vals_nonC_ground_state_GuA_ops= c_LCU.diagonalize_epistemic_LCU( model, fn_form, ground_state_params, N_Qubits, N_index, check_reduction=check_reduction) R_LCU diagonalized_generators_GuA diagonalized_generators_GuA R_LCU_str = conv_scr.Openfermion_to_dict(R_LCU, N_Qubits) for op1 in diagonalized_generators_GuA[:-1]: for op2 in R_LCU_str: print(op1, op2, c.commute(op1, op2)) print('##') # should commute with everything BAR script A term (last check) (hence slice ending at [:-1] !!!) 
eigen_vals_nonC_ground_state_GuA_ops # + order = list(range(hamiltonians[mol_key][1])) # [4, 3, 1, 2, 0]# N_index=0 check_reduction=True N_Qubits= hamiltonians[mol_key][1] reduced_H_LCU_list = c_LCU.get_reduced_hamiltonians_LCU(ham, # Con_H, model, fn_form, ground_state_params, order, N_Qubits, N_index, check_reduction=check_reduction) # - reduced_H_LCU_list[-1] # + from openfermion.linalg import qubit_operator_sparse import scipy as sp H = conv_scr.Get_Openfermion_Hamiltonian(reduced_H_LCU_list[-1]) sparseH = qubit_operator_sparse(H, n_qubits=hamiltonians[mol_key][1]) if hamiltonians[mol_key][1]<6: Energy= min(np.linalg.eigvalsh(sparseH.toarray())) else: Energy= sp.sparse.linalg.eigsh(sparseH, which='SA', k=1)[0][0] Energy # - # # Compare to old way! ### old way order = list(range(hamiltonians[mol_key][1])) reduced_H_standard_list = c.get_reduced_hamiltonians(ham, # Con_H, model, fn_form, ground_state_params, order) len(reduced_H_standard_list[0]) print(len(reduced_H_LCU_list[-1]), len(reduced_H_standard_list[-1])) reduced_H_standard_list[2] # + from quchem.Misc_functions.Misc_functions import sparse_allclose H1=conv_scr.Get_Openfermion_Hamiltonian(reduced_H_LCU_list[-1]) H2=conv_scr.Get_Openfermion_Hamiltonian(reduced_H_standard_list[-1]) H1_mat = qubit_operator_sparse(H1, n_qubits=hamiltonians[mol_key][1]) H2_mat = qubit_operator_sparse(H2, n_qubits=hamiltonians[mol_key][1]) sparse_allclose(H1_mat, H2_mat) # - if hamiltonians[mol_key][1]<6: Energy= min(np.linalg.eigvalsh(H2_mat.toarray())) else: Energy= sp.sparse.linalg.eigsh(H2_mat, which='SA', k=1)[0][0] Energy # # Restricting the Hamiltonian to a contextualsubspace # (Section B of https://arxiv.org/pdf/2011.10027.pdf) # # In the rotated basis the Hamiltonian is restricted to the subspace stabilized by the noncontextual generators $G_{j}'$ print(diagonalized_generators_GuA) # G_j' terms! # The quantum correction is then obtained by minimizing the expectation value of this resticted Hamiltonian! 
# # (over +1 eigenvectors of the remaining non-contextual generators $\mathcal{A}'$) print(Ci1_list) # mathcal(A) # - $\mathcal{H}_{1}$ denotes Hilbert space of $n_{1}$ qubits acted on by by the single qubit $G_{j}'$ terms # - $\mathcal{H}_{2}$ denotes Hilbert space of remaining $n_{2}$ # # Overall full Hilbert space is: $\mathcal{H}=\mathcal{H}_{1} \otimes \mathcal{H}_{2}$ # # The **contextual Hamiltonian** in this rotated basis is: # # $$H_{c}'=\sum_{P \in \mathcal{S_{c}'}} h_{P}P$$ # # The set of Pauli terms in $H_{c}'$ is $\mathcal{S_{c}'}$, where terms in $\mathcal{S_{c}'}$ act on both $\mathcal{H}_{1}$ and $\mathcal{H}_{2}$ subspaces in general! # # We can write $P$ terms as: # # $$P=P_{1}^{\mathcal{H}_{1}} \otimes P_{2}^{\mathcal{H}_{2}}$$ # # $P$ commutes with an element of $G'$ if and only if $P_{1} \otimes \mathcal{I}^{\mathcal{H}_{2}}$ does # # As the generators $G'$ act only on $\mathcal{H}_{1}$ # If $P$ anticommutes with any element of $G'$ then its expection value in the noncontextual state is zero # # Thus any $P$ must commute with all elements of $G'$ and so $P_{1} \otimes \mathcal{I}^{\mathcal{H}_{2}}$ too # # As the elements of $G'$ are single-qubit Pauli $Z$ operators acting in $\mathcal{H}_{1}$: print(diagonalized_generators_GuA) # G_j' terms! # $P_{1}$ must be a product of such operators! # # **As the exepcation value of $P_{1}$ is some $p_{1}= \pm 1$ DETERMINED BY THE NONCONTEXTUAL GROUND STATE** eigen_vals_nonC_ground_state_GuA_ops # Let $|\psi_{(\vec{q}, \vec{r})} \rangle$ be any quantum state consistent with the nonconxtual ground state $(\vec{q}, \vec{r})$... 
aka gives correct expection values of: print(diagonalized_generators_GuA) print(eigen_vals_nonC_ground_state_GuA_ops) # Then the action of any $P$ which allows our contextual correction has the form: # # $$P |\psi_{(\vec{q}, \vec{r})} \rangle = \big( P_{1}^{\mathcal{H}_{1}} \otimes P_{2}^{\mathcal{H}_{2}} \big) |\psi_{(\vec{q}, \vec{r})} \rangle$$ # # $$ = p_{1}\big( \mathcal{I}^{\mathcal{H}_{1}} \otimes P_{2}^{\mathcal{H}_{2}} \big) |\psi_{(\vec{q}, \vec{r})} \rangle$$ # # - repeating above, but $p_{1}$ is the expectation value of $P_{1}$ determiend by the noncontextual ground state! # Thus we can denote $H_{c}' |_{(\vec{q}, \vec{r})}$ as the restriction of $H_{c}'$ on its action on the noncontextual ground state $(\vec{q}, \vec{r})$: # # $$H_{c}' |_{(\vec{q}, \vec{r})} =\sum_{\substack{P \in \mathcal{S_{c}'} \\ \text{s.t.} [P, G_{i}']=0 \\ \forall G'_{i} \in G'}} p_{1}h_{P}\big( \mathcal{I}^{\mathcal{H}_{1}} \otimes P_{2}^{\mathcal{H}_{2}} \big) $$ # # $$=\mathcal{I}_{\mathcal{H}_{1}} \otimes H_{c}'|_{\mathcal{H}_{2}} $$ # # # where we can write: # $$H_{c}'|_{\mathcal{H}_{2}} = \sum_{\substack{P \in \mathcal{S_{c}'} \\ \text{s.t.} [P, G_{i}']=0 \\ \forall G'_{i} \in G'}} p_{1}h_{P}P_{2}^{\mathcal{H}_{2}}$$ # # # # Cleary this Hamiltonian on $n_{2}$ qubits is given by: # # $$n_{2} = n - |G|$$ # # - $|G|=$ number of noncontextual generators $G_{j}$ from copy import deepcopy import pprint # ```quantum_correction``` function # + n_q = len(diagonalized_generators_GuA[0]) rotated_H = deepcopy(ham) ##<-- full Hamiltonian # iteratively perform R rotation over all terms in orginal Hamiltonian for R in Rotations_list: newly_rotated_H={} for P in rotated_H.keys(): lin_comb_Rot_P = c.apply_rotation(R,P) # linear combination of Paulis from R rotation on P for P_rot in lin_comb_Rot_P: if P_rot in newly_rotated_H.keys(): newly_rotated_H[P_rot]+=lin_comb_Rot_P[P_rot]*rotated_H[P] # already in it hence += else: newly_rotated_H[P_rot]=lin_comb_Rot_P[P_rot]*rotated_H[P] rotated_H = 
deepcopy(newly_rotated_H) ##<-- perform next R rotation on this H rotated_H # - # next find where Z indices in $G'$ # + z_indices = [] for d in diagonalized_generators_GuA: for i in range(n_q): if d[i] == 'Z': z_indices.append(i) print(diagonalized_generators_GuA) print(z_indices) # - # **The exepcation value of $P_{1}$ terms are $p_{1}= \pm 1$ DETERMINED BY THE NONCONTEXTUAL GROUND STATE** print(diagonalized_generators_GuA) print(eigen_vals_nonC_ground_state_GuA_ops) # We need to ENFORCE the diagnal geneators assigned values in the diagonal basis to these expectation values above^^^ # + ham_red = {} for P in rotated_H.keys(): sgn = 1 for j, z_index in enumerate(z_indices): # enforce diagonal generator's assigned values in diagonal basis if P[z_index] == 'Z': sgn = sgn*eigen_vals_nonC_ground_state_GuA_ops[j] #<- eigenvalue of nonC ground state! elif P[z_index] != 'I': sgn = 0 if sgn != 0: # construct term in reduced Hilbert space P_red = '' for i in range(n_q): if not i in z_indices: P_red = P_red + P[i] if P_red in ham_red.keys(): ham_red[P_red] = ham_red[P_red] + rotated_H[P]*sgn else: ham_red[P_red] = rotated_H[P]*sgn ham_red # - c.quantum_correction(ham, #<- full Ham model, fn_form, ground_state_params) c.quantum_correction(nonCon_H,model,fn_form,ground_state_params) c.get_reduced_hamiltonians(ham, model, fn_form, ground_state_params, list(range(hamiltonians[mol_key][1])))[-1] == rotated_H ### aka when considering all qubit problem it is equal to rotated H! # For some reason it seems that when considering full Hamiltonian there is no reduction in the number of terms! # # Q. Do you expect any term reduction when doing CS-VQE? # + ### find optimal LCU qubit removal order! 
data_csvqe_results_file = os.path.join(data_dir, 'csvqe_results.txt') with open(data_csvqe_results_file, 'r') as input_file: csvqe_results = ast.literal_eval(input_file.read()) N_index = 0 check_reduction= True n_qubits= hamiltonians[mol_key][1] true_gs= csvqe_results[mol_key][0] c_LCU.csvqe_approximations_heuristic_LCU(ham, nonCon_H, n_qubits, true_gs, N_index, check_reduction=check_reduction) # - ### SeqRot order! list(range(hamiltonians[mol_key][1])) ## memory intensive: c.csvqe_approximations_heuristic(ham, nonCon_H, n_qubits, true_gs)
Projects/CS_VQE/jupyter_notebooks/CS-VQE LCU method.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from pathlib import Path


def clean_data(df):
    """Drop all rows containing missing values and reset the index."""
    df = df.dropna().reset_index(drop=True)
    return df


def read_csv_to_df(address: str) -> pd.DataFrame:
    """Read the CSV file at *address* into a DataFrame.

    Prints a short status message; returns None when the file does not exist.
    """
    # BUG FIX: the original body referenced the undefined name `file_address`
    # everywhere (the parameter is called `address`), so every call raised
    # NameError.
    file_name = Path(address)
    df = None
    if file_name.exists():
        print("Reading file {}.".format(address))
        df = pd.read_csv(address)
        print(df.head())
    else:
        print("file {} does not exists.".format(address))
    return df


def clean_text_reg(text: str) -> str:
    """Lower-case *text*, strip mentions/URLs/HTML/punctuation/digits,
    tokenize it and drop stopwords.

    NOTE(review): despite the `-> str` annotation this returns a *list* of
    tokens. It also relies on module-level `re`, `string`, `nltk` and
    `stop_words` (presumably ``stop_words = make_stop_words()``) being
    defined in another notebook cell — confirm before reuse.
    """
    text = text.lower()
    text = re.sub('@', '', text)
    text = re.sub(r'\[.*?\]', '', text)                 # bracketed fragments
    text = re.sub(r'https?://\S+|www\.\S+', '', text)   # URLs
    text = re.sub(r'<.*?>+', '', text)                  # HTML tags
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\n', '', text)
    text = re.sub(r'\w*\d\w*', '', text)                # words containing digits
    text = re.sub(r"[^a-zA-Z ]+", "", text)
    # Tokenize the data
    text = nltk.word_tokenize(text)
    # Remove stopwords
    text = [w for w in text if w not in stop_words]
    return text


def make_stop_words():
    """Return the English stopword list.

    NOTE(review): requires ``from nltk.corpus import stopwords`` in another
    cell — confirm it is imported before this is called.
    """
    return stopwords.words('english')


def zscore(df, selected_col_names):
    """Min-max scale *df* and return the head of the scaled frame.

    NOTE(review): the name says z-score but MinMaxScaler is used, and only
    ``.head()`` (first 5 rows) is returned — confirm whether the full scaled
    frame is actually wanted by callers.
    """
    # Local import: sklearn is already used elsewhere in this file but was
    # never imported here, so `preprocessing` was an undefined name.
    from sklearn import preprocessing
    scaler = preprocessing.MinMaxScaler()
    # BUG FIX: the original scaled the undefined global
    # `df_corr_april_2021_adult` instead of the `df` argument.
    d = scaler.fit_transform(df)
    scaled_df = pd.DataFrame(d, columns=selected_col_names)
    return scaled_df.head()
Ali/.ipynb_checkpoints/util-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Alphabetical Sorting

# ### Counting Sort

# Counting sort is a sorting algorithm that sorts the elements of an array by counting the number of occurrences of each unique element in the array. The count is stored in an auxiliary array and the sorting is done by mapping the count as an index of the auxiliary array. Its time complexity is $O(n)$, where n is the size of the array. It can be applied only when the values in the array come from a predefined set.

# The algorithm used to implement the counting sort was adopted from [here](https://www.youtube.com/watch?v=7zuGmKfUt7s). It computes the frequencies of the values in the array, keeps them in an auxiliary array, and relocates the values in the array based on the positions defined by the auxiliary array, as described in detail on this [page](https://www.programiz.com/dsa/counting-sort).

# ### Word Sort

# Each word can be converted to a list of integers corresponding to the ordinals of its characters. Iterating from the first ordinal of the words to the last one (that is, the length of the longest word), applying counting sort gives the sorted list of words based on the first character; then, for those words that share the same first character, counting sort is applied on the second character of the words, and this procedure continues until the algorithm reaches the last character or finds no duplicated character at some point. This is coded as a recursive function.
#
#
# <img src="sorting algorithm.png">
#
#
# The numbers inside the boxes in each step of the recursive algorithm are those passed as input to counting sort.

# ### Example

# The following example consists of 7 names that were selected in a way that covers all the possible different cases.
# Demonstrate the library: sort a small list of names alphabetically.
import sorting_lib

words_list = ['Moses', 'Francesco','Sri','Francesca', 'Mosess', 'Alberto', 'Alessio']
sorted_words_list = sorting_lib.alpha_counting_sort(words_list)
print(sorted_words_list)

# ### Empirical and theoretical running time of counting sort

# The dominant part of the designed algorithm for counting sort includes two for-loops of size n (the size of the array) and one of size m (the maximum possible ordinal value) to calculate the frequencies, the array of cumulative values, and the sorted array. In theory the algorithm is supposed to have a time complexity of $O(max\{n,m\})$; since m is a fixed value, it is of the order $O(n)$.

# The following plot shows that the empirical running time increases linearly with respect to the array size, as expected.

sorting_lib.plot_time_complexity_count_sort()

# ### Empirical and theoretical running time of alphabetical sort

# In each iteration of the recursive function that sorts the words, if at least one character is duplicated a new subproblem is generated and the recursive function calls itself. In the first step a problem of size n (the number of words to be sorted) has to be solved by the recursive function, which checks whether there exists at least one duplicated character to generate subproblems. The following picture shows how the recursive function works. d is the maximum length of the words. In the worst case, where all words are the same, the summation of the sizes of the subproblems is n, so the theoretical running time is $O(nd)$.
#
# <img src="Tree.png">
#
# To analyze the empirical running time, we fixed the word length and the number of words respectively from left to right (for fixed $d$ and $n$), which yields two linear plots. To test the running time with respect to $n$ and $d$ at the same time ($n = d$), we set them to be equal; the result, shown in the rightmost plot, follows a quadratic curve that demonstrates the time complexity of $O(nd)$.

sorting_lib.plot_time_complexity_alpha_sort()
alphabetical_sorting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Carrega os arquivos mensais de operações de crédito (2012-06 a 2021-11),
# une-os em um dataset por ano, limpa cada um e grava os resultados.
# Refatoração: o mesmo bloco de limpeza aparecia copiado 10x (e o de 2018
# estava duplicado); agora há um único caminho de código por ano.
import pandas as pd

BASE_DIR = 'C://fontes_dados//operacoes_credito'

# Meses disponíveis por ano: 2012 começa em junho e 2021 termina em novembro;
# os demais anos têm os 12 meses.
MESES_POR_ANO = {ano: [f'{mes:02d}' for mes in range(1, 13)]
                 for ano in range(2012, 2022)}
MESES_POR_ANO[2012] = [f'{mes:02d}' for mes in range(6, 13)]
MESES_POR_ANO[2021] = [f'{mes:02d}' for mes in range(1, 12)]

# Colunas que se sabe que não serão utilizadas.
_COLUNAS_NAO_UTILIZADAS = [
    'Unnamed: 0', 'sr', 'cnae_secao', 'cnae_subclasse', 'origem', 'indexador',
    'porte', 'a_vencer_ate_90_dias', 'a_vencer_de_91_ate_360_dias',
    'a_vencer_de_361_ate_1080_dias', 'a_vencer_de_1081_ate_1800_dias',
    'a_vencer_de_1801_ate_5400_dias', 'a_vencer_acima_de_5400_dias',
    'vencido_acima_de_15_dias', 'carteira_inadimplida_arrastada',
    'ativo_problematico',
]


def _carregar_ano(ano):
    """Concatena as planilhas mensais brutas do ano em um único DataFrame."""
    return pd.concat(
        pd.read_csv(f'{BASE_DIR}//{ano}//planilha_{ano}{mes}.csv', sep=';')
        for mes in MESES_POR_ANO[ano]
    )


def _limpar_ano(df):
    """Remove colunas não utilizadas, filtra Cooperativas e corrige os tipos."""
    # errors='ignore': o dataset de 2012 limpo em memória não possui a coluna
    # de índice 'Unnamed: 0' que aparece nos CSVs relidos.
    df = df.drop(columns=_COLUNAS_NAO_UTILIZADAS, errors='ignore')
    # Tratando o número de operações ('<= 15' vira o limite 15)
    df['numero_de_operacoes'] = pd.to_numeric(
        df['numero_de_operacoes'].replace('<= 15', '15'))
    # Filtrando somente registros de Cooperativas
    df = df.query('tcb == "Cooperativas"')
    # Tratando o campo da carteira ativa (vírgula decimal -> ponto)
    df['carteira_ativa'] = pd.to_numeric(
        df['carteira_ativa'].replace(',', '.', regex=True))
    # Tratando o campo da data.
    # CORREÇÃO DE BUG: o formato original era "%Y/%M/%d" — %M significa
    # MINUTO em strftime; o mês é %m. Com o formato errado todos os meses
    # colapsavam para janeiro.
    df['data_base'] = pd.to_datetime(
        df['data_base'].replace('-', '/', regex=True), format='%Y/%m/%d')
    df['data_base'] = df['data_base'].dt.normalize()
    return df


dados_por_ano = {}
for ano in range(2012, 2022):
    bruto = _carregar_ano(ano)
    bruto.to_csv(f'{BASE_DIR}//{ano}//dados_credito_{ano}.csv', sep=';')
    if ano == 2012:
        # 2012 era limpo direto do DataFrame em memória.
        limpo = _limpar_ano(bruto)
    else:
        # Os demais anos eram relidos do CSV consolidado antes da limpeza;
        # manter a releitura preserva o comportamento original (dtypes
        # resultantes da ida e volta pelo CSV).
        limpo = _limpar_ano(
            pd.read_csv(f'{BASE_DIR}//{ano}//dados_credito_{ano}.csv',
                        sep=';'))
    limpo.info()
    limpo.to_csv(f'{BASE_DIR}//dados_{ano}')
    dados_por_ano[ano] = limpo

# Mantém os nomes por ano usados pelas células seguintes do notebook.
(dados_2012, dados_2013, dados_2014, dados_2015, dados_2016,
 dados_2017, dados_2018, dados_2019, dados_2020, dados_2021) = (
    dados_por_ano[ano] for ano in range(2012, 2022))
# -

# Unindo todos os datasets em um único.
# CORREÇÃO DE BUG: a célula original referenciava `dados_completos` sem
# nunca defini-lo (NameError); agora ele é de fato construído.
dados_completos = pd.concat(dados_por_ano.values())

# +
# Total de operações por UF e ano (2012-2015; os demais anos seguem adiante).
agrupado_qtde_2012 = pd.DataFrame(dados_2012.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2012)
agrupado_qtde_2013 = pd.DataFrame(dados_2013.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2013)
agrupado_qtde_2014 = pd.DataFrame(dados_2014.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2014)
agrupado_qtde_2015 = pd.DataFrame(dados_2015.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2015)
agrupado_qtde_2016 = pd.DataFrame(dados_2016.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2016) agrupado_qtde_2017 = pd.DataFrame(dados_2017.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2017) agrupado_qtde_2018 = pd.DataFrame(dados_2018.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2018) agrupado_qtde_2019 = pd.DataFrame(dados_2019.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2019) agrupado_qtde_2020 = pd.DataFrame(dados_2020.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2020) agrupado_qtde_2021 = pd.DataFrame(dados_2021.groupby('uf')['numero_de_operacoes'].sum()).assign(ano=2021) agrupados_geral = pd.concat([agrupado_qtde_2012, agrupado_qtde_2013, agrupado_qtde_2014, agrupado_qtde_2015, agrupado_qtde_2016, agrupado_qtde_2017, agrupado_qtde_2018, agrupado_qtde_2019, agrupado_qtde_2020, agrupado_qtde_2021]) agrupados_geral = agrupados_geral.reset_index() # + agrupados_geral = pd.concat([agrupado_qtde_2012, agrupado_qtde_2013, agrupado_qtde_2014, agrupado_qtde_2015, agrupado_qtde_2016, agrupado_qtde_2017, agrupado_qtde_2018, agrupado_qtde_2019, agrupado_qtde_2020, agrupado_qtde_2021]) agrupados_geral = agrupados_geral.reset_index() # - agrupados_geral_ano = pd.DataFrame(agrupados_geral.groupby('ano')['numero_de_operacoes'].sum()).reset_index() agrupados_geral_ano agrupados_geral_ano['ano'] = agrupados_geral_ano['ano'].astype(str) agrupados_geral_ano.info() agrupados_geral_ano.hist(figsize=(15,12)); agrupados_geral_ano.plot(x = 'ano', y = ['numero_de_operacoes'], kind = 'bar', stacked=True, legend=True, figsize=(16, 6)) agrupados_geral_ano.plot(x = 'ano', y = ['numero_de_operacoes'], kind = 'line', stacked=True, legend=True, figsize=(16, 6)) # + agrupado_qtde_2012 = pd.DataFrame(dados_2012.groupby('uf')['carteira_ativa'].sum()).assign(ano=2012) agrupado_qtde_2013 = pd.DataFrame(dados_2013.groupby('uf')['carteira_ativa'].sum()).assign(ano=2013) agrupado_qtde_2014 = 
pd.DataFrame(dados_2014.groupby('uf')['carteira_ativa'].sum()).assign(ano=2014) agrupado_qtde_2015 = pd.DataFrame(dados_2015.groupby('uf')['carteira_ativa'].sum()).assign(ano=2015) agrupado_qtde_2016 = pd.DataFrame(dados_2016.groupby('uf')['carteira_ativa'].sum()).assign(ano=2016) agrupado_qtde_2017 = pd.DataFrame(dados_2017.groupby('uf')['carteira_ativa'].sum()).assign(ano=2017) agrupado_qtde_2018 = pd.DataFrame(dados_2018.groupby('uf')['carteira_ativa'].sum()).assign(ano=2018) agrupado_qtde_2019 = pd.DataFrame(dados_2019.groupby('uf')['carteira_ativa'].sum()).assign(ano=2019) agrupado_qtde_2020 = pd.DataFrame(dados_2020.groupby('uf')['carteira_ativa'].sum()).assign(ano=2020) agrupado_qtde_2021 = pd.DataFrame(dados_2021.groupby('uf')['carteira_ativa'].sum()).assign(ano=2021) agrupados_geral_valores = pd.concat([agrupado_qtde_2012, agrupado_qtde_2013, agrupado_qtde_2014, agrupado_qtde_2015, agrupado_qtde_2016, agrupado_qtde_2017, agrupado_qtde_2018, agrupado_qtde_2019, agrupado_qtde_2020, agrupado_qtde_2021]) agrupados_geral_valores = agrupados_geral_valores.reset_index() agrupados_geral_valores_ano = pd.DataFrame(agrupados_geral_valores.groupby('ano')['carteira_ativa'].sum()).reset_index() agrupados_geral_valores_ano['ano'] = agrupados_geral_valores_ano['ano'].astype(str) # - agrupados_geral_valores_ano.plot(x = 'ano', y = ['carteira_ativa'], kind = 'bar', stacked=True, legend=True, figsize=(16, 6)) agrupados_geral_valores_ano.plot(x = 'ano', y = ['carteira_ativa'], kind = 'line', stacked=True, legend=True, figsize=(16, 6)) # + quantidades_x_valores_ano = pd.merge(agrupados_geral_ano,agrupados_geral_valores_ano,how = 'inner', on = 'ano') import matplotlib.pyplot as pltquantidades_x_valores_ano # - cores = ['blue', 'green'] quantidades_x_valores_ano.plot(x = 'ano', y = ['numero_de_operacoes','carteira_ativa'], kind = 'line', stacked=True, legend=True, color = cores, figsize=(16, 6)) quantidades_x_valores_ano.plot.bar(stacked=True,figsize=(16, 6)) 
agrupados_geral.plot(x = 'ano',y = 'numero_de_operacoes',figsize=(16, 6)) # + todos_dados = pd.concat([dados_2012, dados_2013, dados_2014, dados_2015, dados_2016, dados_2017, dados_2018, dados_2019, dados_2020, dados_2021]) todos_dados.info() # - todos_dados agrupados_uf = pd.DataFrame(todos_dados.groupby('uf')['carteira_ativa'].sum()) agrupados_uf = agrupados_uf.reset_index() agrupados_uf = agrupados_ano.reset_index() agrupados_uf = pd.DataFrame(todos_dados.groupby('uf')['carteira_ativa'].sum()) agrupados_uf = agrupados_uf.reset_index() agrupados_uf = agrupados_uf.sort_values(by=['carteira_ativa']) agrupados_uf.plot(x='uf', y='carteira_ativa', kind='bar', stacked=True,figsize=(16, 6)) todos_dados['modalidade'].unique() todos_modalidade = pd.DataFrame(todos_dados.groupby('modalidade')['carteira_ativa'].sum()) todos_modalidade = todos_modalidade.reset_index() todos_modalidade = todos_modalidade.sort_values(by=['carteira_ativa']) todos_modalidade.plot(x='modalidade', y='carteira_ativa', kind='bar', color = 'green', stacked=True,figsize=(16, 6)) tipo_pessoa = pd.DataFrame(todos_dados.groupby('cliente')['carteira_ativa'].sum()) tipo_pessoa = tipo_pessoa.reset_index() tipo_pessoa = tipo_pessoa.sort_values(by=['carteira_ativa']) tipo_pessoa.plot(x='cliente', y='carteira_ativa', kind='bar', color = 'blue', stacked=True,figsize=(16, 6)) print('Valor máximo contratado...........: ', todos_dados['carteira_ativa'].max()) print('Valor mínimo contratado...........: ', todos_dados['carteira_ativa'].min()) print('Valor médio contratado............: ', todos_dados['carteira_ativa'].mean()) print('Amplitude.........................: ', todos_dados['carteira_ativa'].max() - todos_dados['carteira_ativa'].min()) print('Variância valor contratado........: ', todos_dados['carteira_ativa'].var()) print('Desvio padrão valor contratado....: ', todos_dados['carteira_ativa'].std()) print('Mediana valor contratado..........: ', todos_dados['carteira_ativa'].median()) print('Moda valor 
contratado.............: ', todos_dados['carteira_ativa'].mode()) todos_dados
notebook/Todos_datasets_juntos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # PROJECT GEGEVENSBANKEN 2018 - DEEL 3: QUERIES # + [markdown] heading_collapsed=true # ## 1. Inleiding # # Cf. het document `prerequisites.pdf` in de `docs` folder. # + [markdown] hidden=true # ### 1.1 Software # + [markdown] hidden=true # In eerste instantie moeten de correcte packages geladen worden. We raden **sterk** aan om je ook tot deze packages te beperken, gezien wij de ingevulde methodes zullen runnen op onze eigen machines. # # Zelfde opmerking voor de gebruikte python versie, wij veronderstellen 3.6. # + hidden=true # Benodigde packages import json # Package om .json files in te laden (bvb kolomnamen zijn zo opgeslagen) import getpass # Package om een paswoordveldje te genereren. import mysql.connector # MySQL package import numpy as np import os import pandas as pd # Populaire package voor data-verwerking import sys # + hidden=true sys.version_info # Check python versie, wij veronderstellen 3.6 # + [markdown] hidden=true # ### 1.2 Interageren met een gegevensbank # + [markdown] hidden=true # In deze sectie geven we al drie methodes mee om jullie leven al wat makkelijker te maken # # - `verbind met GB` # - Dit is functie die toelaat om gemakkelijk verbinding te maken met een database. De `username`, `hostname` en `gegevensbanknaam` worden als argumenten meegegeven met de functie. Deze functie geeft een `connection` object als output. Zo'n object is wat de `connect()` methode van het `mysql.connector` package teruggeeft. Dit is conform de python DB-API (cf. https://www.python.org/dev/peps/pep-0249/), een soort standaard waaraan python DB packages voldoen. # # Moest deze functie niet correct functioneren op je eigen besturingssysteem: voel je vrij om een variant te schrijven. 
Bij de correctie gebruiken wij onze eigen implementatie om een mysql `connection` object te maken. Deze functie is dus enkel voor jezelf van belang. # # # - `run_query` # - Deze functie runt een gegeven `query` op een gegeven `connectie`, en geeft het resultaat terug. # # # - `res_to_df` # - Deze functie giet het resultaat van een query (`query_result`) in een pandas dataframe, met vooraf gespecifieerde kolomnamen (`column_names`). # + code_folding=[0] hidden=true def verbind_met_GB(username, hostname, gegevensbanknaam): """ Maak verbinding met een externe gegevensbank :param username: username van de gebruiker, string :param hostname: naam van de host, string. Dit is in het geval van een lokale server gewoon 'localhost' :param gegevensbanknaam: naam van de gegevensbank, string. :return connection: connection object, dit is wat teruggeven wordt door connect() methods van packages die voldoen aan de DB-API """ password = <PASSWORD>() # Genereer vakje voor wachtwoord in te geven connection = mysql.connector.connect(host=hostname, user=username, passwd=password, db=gegevensbanknaam) return connection def run_query(connection, query): """ Voer een query uit op een reeds gemaakte connectie, geeft het resultaat van de query terug """ # Making a cursor and executing the query cursor = connection.cursor() cursor.execute(query) # Collecting the result and casting it in a pd.DataFrame res = cursor.fetchall() return res def res_to_df(query_result, column_names): """ Giet het resultaat van een uitgevoerde query in een 'pandas dataframe' met vooraf gespecifieerde kolomnamen. Let op: Het resultaat van de query moet dus exact evenveel kolommen bevatten als kolomnamen die je meegeeft. Als dit niet het geval is, is dit een indicatie dat je oplossing fout is. 
(Gezien wij de kolomnamen van de oplossing al cadeau doen) """ df = pd.DataFrame(query_result, columns=column_names) return df # + [markdown] hidden=true # ### 1.3 Kolomnamen en input parameters # + [markdown] hidden=true # We leggen op voorhand reeds de **kolomnamen van de oplossingen**, en de **naam en types van de inputparameters** vast. Hier moet je je dus aan houden en mag je dus niks aan wijzigen. # # - Het aantal kolommen (en hun volgorde) van jullie oplossing en de onze moeten exact overeen komen, vandaar dat de kolomnamen hieronder gegeven zijn. Dit komt trouwens van pas bij het opstellen van je queries! # # # - Dankzij de voorbeeldparameters, die al gegeven zijn in de functiedefinitie e.g., # query_42(connection, col_names, super_voorbeeldparam = 101) # weten jullie exact welke vorm en type (integer, lijst, etc) de inputparameters zullen hebben. Wijzig zeker niets aan de naam van die parameters (i.e.,*super_voorbeeldparam* blijft *super_voorbeeldparam*). De default waarden die we ingevuld hebben zijn ter illustratie, zorg ervoor dat je query ook met andere waarden dan de defaults werkt! # # **Oplossingen die deze vorm niet respecteren, zullen crashen op onze machines en resulteren in een score van 0 op die query.** # + hidden=true # Dictionary van kolomnamen inladen filename = os.path.join(os.getcwd(), 'solution', 'all_q_colnam.json') col_names = json.load(open(filename, 'r')) col_names # Inspecteer dictionary # + [markdown] heading_collapsed=true # ## 2. Hoe deze notebook in te vullen # + [markdown] hidden=true # Hieronder volgen 10 onvolledige methodes/functies (e.g., `query_42(c, col_names, super_voorbeeldparam = ['joske', 'jef'])`). Aan jullie om ze aan te vullen zodat: # 1. Een correcte query gegenereerd wordt # 2. De query uitgevoerd wordt # 3. Het resultaat wordt teruggegeven in een Pandas DataFrame # # Voor stap 2 en 3 zijn de nodige methodes al voorhanden, i.e.: `run_query(connection, query)` en `res_to_df(res, column_names)`. 
Jullie werk zal dus vooral bestaan uit stap 1, queries genereren. # # # # Elke functie heeft minstens 2 inputargumenten: # 1. `connection`: Een connection object # 2. `column_names`: De kolomnamen van het Pandas DataFrame # # Gevolgd door eventuele extra argumenten (e.g., `super_voorbeeldparam = ['joske','jef']`) van de inputparameters. # # **Nogmaals: verander niets aan de namen van de methodes, namen van de parameters en de kolomnamen van de resulterende DataFrames. Wijzigingen hieraan leiden onvermijdelijk tot een score van 0 op die query.** # # Je kan naar believen extra cellen toevoegen om je queries te testen, resultaten te inspecteren etc. We vragen jullie om de finale, ingevulde methodes te kopiëren naar een extern script dat enkel en alleen deze oplossingen bevat. Cf. de laatste sectie voor instructies omtrent het effectieve indienen. # - # ## 3. Voorbeeld # Om jullie al wat op weg te zetten volgt hier een voorbeeldje over hoe je te werk kan gaan. # ### 3.1 Query EXAMPLE # # **Beschrijving** # # Het resultaat van deze functie is een Pandas DataFrame met teamnaam, jaar en aantal homeruns van teams die meer dan een gegeven aantal *`homeruns`* hadden in dat jaar. def query_EX(connection, column_names, homeruns=20): # Bouw je query query=""" select t.name, t.yearID, t.HR from Teams as t where t.HR > {}; """.format(homeruns) # TIP: Zo krijg je parameters in de string (samen met `{}` in de string) # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # Eerst maken we een verbindingsobject met de databank # + username = 'root' # Vervang dit als je via een andere user queries stuurt hostname = 'localhost' # Als je een databank lokaal draait, is dit localhost. db = 'lahman2016' # Naam van de gegevensbank op je XAMPP Mysql server # We verbinden met de gegevensbank c = verbind_met_GB(username, hostname, db) # - # Vervolgens runnen we onze query, en inspecteren we het resultaat. 
# + # De voorbeeldquery heeft dezelfde kolomnamen als query 1, dus we gebruiken die kolommen = col_names['query_01'] # Functie uitvoeren, geeft resultaat van de query in een DataFrame df = query_EX(c, col_names['query_01'], homeruns=10) # We inspecteren de eerste paar resultaten (voor alles te zien: laat .head() weg) df.head() # - # En we runnen onze query nogmaals met een andere waarde voor de parameter `jaar` # + # Functie uitvoeren, geeft resultaat van de query in een DataFrame df = query_EX(c, col_names['query_01'], homeruns=40) # We inspecteren de eerste paar resultaten (voor alles te zien: laat .head() weg) df.head() # - # ## 4. Queries # ### 4.1 Query 01 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe met: de teamnaam, het jaar, en het aantal homeruns per team, en dit voor alle teams, gesorteerd op aantal homeruns van hoog naar laag. def query_01(connection, column_names): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.2 Query 02 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe met: de voornaam, achternaam, geboortejaar, geboortemaand, geboortedag van spelers die hun eerste major league appearance maakten na een gegeven *`datum`*. # # De tabel is oplopend alfabetisch gesorteerd op achternaam. def query_02(connection, column_names, datum = '1980-01-16'): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.3 Query 03 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe dat per club: de clubnaam en de voor- en achternaam van alle managers weergeeft, die ooit voor de club gewerkt hebben als niet-playermanager. Per club mag een welbepaalde manager slechts 1 keer in het resultaat voorkomen. 
# # Sorteer oplopend alfabetisch op clubnaam. def query_03(connection, column_names): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.4 Query 04 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe met gegevens van teams (teamnaam, rang, aantal wins en losses) en van managers (voor- en achternaam) zodanig dat # 1. de desbetreffende manager is opgenomen in de hall of fame na *`datum x`* # 2. de manager in kwestie was ooit, na *`datum y`*, manager van het team in kwestie # # De tabel moet gesorteerd zijn op teamnaam en rang (alfabetisch oplopend). def query_04(connection, column_names, datum_x='1980-01-01', datum_y='1980-01-01'): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.5 Query 05 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe dat de naam van de teams bevat die na 1980 minstens 1 manager hebben gehad die speler-manager was. # # De tabel moet oplopend gesorteerd worden op teamnaam. De tabel mag geen dubbels bevatten. def query_05(connection, column_names): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.6 Query 06 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe dat bestaat uit de teamnaam, rang, jaar, aantal wins en losses van de teams waarvan dat jaar alle spelers die bij dat team speelden, meer verdienden bij dat team dan *`salaris`*. # # Sorteer de gegevens volgens oplopend aantal wins. 
def query_06(connection, column_names, salaris=20000): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.7 Query 07 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas dataframe dat de achternaam en voornaam bevat van alle managers die tijdens hun carriere alle awards hebben gewonnen. # # Sorteer oplopend alfabetisch op achternaam. def query_07(connection, column_names): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.8 Query 08 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas DataFrame dat voor een gegeven jaar een aantal statistieken van alle staten bevat waarbij de gemiddelde lengte van alle spelers geboren in die staat en opgenomen in de hall of fame na *`jaar`* groter is dan *`lengte`*. # # Voor die staten moet de tabel de volgende statistieken bevatten: het gemiddelde gewicht, de gemiddelde lengte, het gemiddeld aantal batting homeruns, en het gemiddeld aantal pitching saves van alle spelers (geboren in die staat) die in de hall of fame zijn opgenomen na *`jaar`*. # # Sorteer oplopend alfabetisch op staat. # # Nb. Lengte is uitgedrukt in inches. def query_08(connection, column_names, jaar=1990, lengte=75): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.9 Query 09 # # **Beschrijving** # # Het resultaat van deze functie geeft een Pandas dataframe terug dat bestaat uit het jaar, de naam en het aantal homeruns uit de Teams tabel van het team met het 2e meeste aantal homeruns in *`jaar`*. 
def query_09(connection, column_names, jaar=1975): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ### 4.10 Query 10 # # **Beschrijving** # # Het resultaat van deze functie is een Pandas DataFrame dat voor een gegeven *`jaar`*; teamnaam, rang, en aantal gespeelde games van de teams weergeeft die in dat *`jaar`* ten minste 1 speler hebben die datzelfde *`jaar`* exact 1 trofee won. # # Sorteer op teamnaam en rang. def query_10(connection, column_names, jaar=1990): # Bouw je query query=""" MAAK QUERY HIER """ # Stap 2 & 3 res = run_query(connection, query) # Query uitvoeren df = res_to_df(res, column_names) # Query in DataFrame brengen return df # ## 5. Hoe en wat in te dienen? # Nu je alle queries ingevuld en getest hebt, ben je klaar om je taak in te dienen. # # - Maak een leeg bestand aan, en geef het de bestandsnaam met formaat: `dd_X_groep_YY.py`. # - `dd` verwijst naar de dag van je oefenzitting, e.g. `wo` voor woensdag # - `X` is een integer die verwijst naar de volgnummer van je oefenzitting op die dag, e.g. `1` # - `YY` zijn twee integers die verwijzen naar de volgnummer van je groepje, e.g.: `03` # - Een goede bestandsnaam is dus bijvoorbeeld: `wo_1_groep_03.py` # # # - Kopieer alle **INGEVULDE FUNCTIES EN NIETS ANDERS** naar dit script. Het script bevat dus enkel en alleen de methodes: # - query_01(connection, column_names) # - query_02(connection, column_names, datum = '1980-01-16') # - etc, etc # # # - Voor de eerste 3 queries kan je dan je oplossing al eens testen via de verification notebook! # - Eerst wordt je script automatisch gerund met verschillende parameters # - De (eventuele) resultaten worden opgeslagen in csv files (in de `out` folder) # - Die csv files worden vergeleken met de csv files van de oplossing (te vinden in de `solution` folder). # - Elke query krijgt een score toegekend. Cf. 
https://en.wikipedia.org/wiki/F1_score. # - Een kort rapport wordt weergegeven die je pointers kan geven over wat er mis is met je query. # - TP: True Positives # - TN: True Negatives # - FP: False Positives # # # - Als je oplossing definitief is, submit je je `dd_X_groep_YY.py` via Toledo.
assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## The next cell will get a ~65 MB data file 'sequence.index', you only need to run the cell once # + tags=["outputPrepend"] # !wsl rm sequence.index 2>/dev/null # !wsl wget -nd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index -O sequence.index # - # # Interfacing with R # + import os from IPython.display import Image import rpy2.robjects as robjects import rpy2.robjects.lib.ggplot2 as ggplot2 from rpy2.robjects.functions import SignatureTranslatedFunction import pandas as pd from rpy2.robjects import pandas2ri from rpy2.robjects import default_converter from rpy2.robjects.conversion import localconverter # - read_delim = robjects.r('read.delim') seq_data = read_delim('sequence.index', header=True, stringsAsFactors=False) #In R: # seq.data <- read.delim('sequence.index', header=TRUE, stringsAsFactors=FALSE) # + print('This data frame has %d columns and %d rows' % (seq_data.ncol, seq_data.nrow)) print(seq_data.colnames) #In R: # print(colnames(seq.data)) # print(nrow(seq.data)) # print(ncol(seq.data)) print('Columns in Python %d ' % robjects.r.ncol(seq_data)[0]) #access some functions as_integer = robjects.r('as.integer') match = robjects.r.match my_col = match('READ_COUNT', seq_data.colnames)[0] # Vector returned print('Type of read count before as.integer: %s' % seq_data[my_col - 1].rclass[0]) seq_data[my_col - 1] = as_integer(seq_data[my_col - 1]) print('Type of read count after as.integer: %s' % seq_data[my_col - 1].rclass[0]) my_col = match('BASE_COUNT', seq_data.colnames)[0] # Vector returned seq_data[my_col - 1] = as_integer(seq_data[my_col - 1]) my_col = match('CENTER_NAME', seq_data.colnames)[0] seq_data[my_col - 1] = robjects.r.toupper(seq_data[my_col - 1]) robjects.r.assign('seq.data', seq_data) 
robjects.r('print(c("Column names in R: ",colnames(seq.data)))') robjects.r('seq.data <- seq.data[seq.data$WITHDRAWN==0, ]') #Lets remove all withdrawn sequences robjects.r("seq.data <- seq.data[, c('STUDY_ID', 'STUDY_NAME', 'CENTER_NAME', 'SAMPLE_ID', 'SAMPLE_NAME', 'POPULATION', 'INSTRUMENT_PLATFORM', 'LIBRARY_LAYOUT', 'PAIRED_FASTQ', 'READ_COUNT', 'BASE_COUNT', 'ANALYSIS_GROUP')]") #Lets shorten the dataframe #Population as factor robjects.r('seq.data$POPULATION <- as.factor(seq.data$POPULATION)') # - ggplot2.theme = SignatureTranslatedFunction(ggplot2.theme, init_prm_translate = {'axis_text_x': 'axis.text.x'}) bar = ggplot2.ggplot(seq_data) + ggplot2.geom_bar() + ggplot2.aes_string(x='CENTER_NAME') + ggplot2.theme(axis_text_x=ggplot2.element_text(angle=90, hjust=1)) robjects.r.png('out.png', type='cairo-png') bar.plot() dev_off = robjects.r('dev.off') dev_off() Image(filename='out.png') #Get Yoruba and CEU robjects.r('yri_ceu <- seq.data[seq.data$POPULATION %in% c("YRI", "CEU") & seq.data$BASE_COUNT < 2E9 & seq.data$READ_COUNT < 3E7, ]') yri_ceu = robjects.r('yri_ceu') yri_ceu scatter = ggplot2.ggplot(yri_ceu) + ggplot2.aes_string(x='BASE_COUNT', y='READ_COUNT', shape='factor(POPULATION)', col='factor(ANALYSIS_GROUP)') + ggplot2.geom_point() robjects.r.png('out.png') scatter.plot() dev_off = robjects.r('dev.off') dev_off() Image(filename='out.png') # + # pd_yri_ceu = pandas2ri.ri2py(yri_ceu) doesn't work, see https://rpy2.github.io/doc/latest/html/pandas.html with localconverter(robjects.default_converter + pandas2ri.converter): pd_yri_ceu = robjects.conversion.py2rpy(yri_ceu) print(type(pd_yri_ceu)) pd_yri_ceu # - pd_yri_ceu del pd_yri_ceu['PAIRED_FASTQ'] no_paired = pandas2ri.py2ri(pd_yri_ceu) robjects.r.assign('no.paired', no_paired) robjects.r("print(colnames(no.paired))")
Chapter01/Interfacing_R.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %cd D:\MastersFolder\2nd_year\Image_Processing_And_Analysis\Project # + # #!pip install einops import math import six from einops.layers.tensorflow import Rearrange import tensorflow as tf from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras import datasets import logging import numpy as np from fastprogress import master_bar, progress_bar train_acc = [] val_acc = [] train_loss = [] val_loss = [] train_pre = [] val_pre = [] train_R = [] val_R = [] def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def get_activation(identifier): """Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`. It checks string first and if it is one of customized activation not in TF, the corresponding activation will be returned. For non-customized activation names and callable identifiers, always fallback to tf.keras.activations.get. Args: identifier: String name of the activation function or callable. Returns: A Python function corresponding to the activation function. 
""" if isinstance(identifier, six.string_types): name_to_fn = {"gelu": gelu} identifier = str(identifier).lower() if identifier in name_to_fn: return tf.keras.activations.get(name_to_fn[identifier]) return tf.keras.activations.get(identifier) class Residual(tf.keras.Model): def __init__(self, fn): super().__init__() self.fn = fn def call(self, x): return self.fn(x) + x class PreNorm(tf.keras.Model): def __init__(self, dim, fn): super().__init__() self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-5) self.fn = fn def call(self, x): return self.fn(self.norm(x)) class FeedForward(tf.keras.Model): def __init__(self, dim, hidden_dim): super().__init__() self.net = tf.keras.Sequential([tf.keras.layers.Dense(hidden_dim, activation=get_activation('gelu')), tf.keras.layers.Dense(dim)]) def call(self, x): return self.net(x) class Attention(tf.keras.Model): def __init__(self, dim, heads = 8): super().__init__() self.heads = heads self.scale = dim ** -0.5 self.to_qkv = tf.keras.layers.Dense(dim * 3, use_bias=False) self.to_out = tf.keras.layers.Dense(dim) self.rearrange_qkv = Rearrange('b n (qkv h d) -> qkv b h n d', qkv = 3, h = self.heads) self.rearrange_out = Rearrange('b h n d -> b n (h d)') def call(self, x): qkv = self.to_qkv(x) qkv = self.rearrange_qkv(qkv) q = qkv[0] k = qkv[1] v = qkv[2] dots = tf.einsum('bhid,bhjd->bhij', q, k) * self.scale attn = tf.nn.softmax(dots,axis=-1) out = tf.einsum('bhij,bhjd->bhid', attn, v) out = self.rearrange_out(out) out = self.to_out(out) return out class Transformer(tf.keras.Model): def __init__(self, dim, depth, heads, mlp_dim): super().__init__() layers = [] for _ in range(depth): layers.extend([ Residual(PreNorm(dim, Attention(dim, heads = heads))), Residual(PreNorm(dim, FeedForward(dim, mlp_dim))) ]) self.net = tf.keras.Sequential(layers) def call(self, x): return self.net(x) class ViT(tf.keras.Model): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels=3): super().__init__() 
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.dim = dim self.pos_embedding = self.add_weight("position_embeddings", shape=[num_patches + 1, dim], initializer=tf.keras.initializers.RandomNormal(), dtype=tf.float32) self.patch_to_embedding = tf.keras.layers.Dense(dim) self.cls_token = self.add_weight("cls_token", shape=[1, 1, dim], initializer=tf.keras.initializers.RandomNormal(), dtype=tf.float32) self.rearrange = Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=self.patch_size, p2=self.patch_size) self.transformer = Transformer(dim, depth, heads, mlp_dim) self.to_cls_token = tf.identity self.mlp_head = tf.keras.Sequential([tf.keras.layers.Dense(mlp_dim, activation=get_activation('gelu')), tf.keras.layers.Dense(num_classes)]) @tf.function def call(self, img): shapes = tf.shape(img) x = self.rearrange(img) x = self.patch_to_embedding(x) cls_tokens = tf.broadcast_to(self.cls_token,(shapes[0],1,self.dim)) x = tf.concat((cls_tokens, x), axis=1) x += self.pos_embedding x = self.transformer(x) x = self.to_cls_token(x[:, 0]) return self.mlp_head(x) logger = logging.getLogger(__name__) class TrainerConfig: # optimization parameters max_epochs = 10 batch_size = 64 learning_rate = 1e-3 # checkpoint settings ckpt_path = None def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) class Trainer: def __init__(self, model, model_config, train_dataset, train_dataset_len, test_dataset, test_dataset_len, config): self.train_dataset = train_dataset.batch(config.batch_size) self.train_dataset_len = train_dataset_len self.test_dataset = test_dataset self.test_dataset_len = None self.test_dist_dataset = None if self.test_dataset: self.test_dataset = test_dataset.batch(config.batch_size) self.test_dataset_len = test_dataset_len self.config = config self.tokens = 0 self.strategy = 
tf.distribute.OneDeviceStrategy("GPU:0") if len(tf.config.list_physical_devices('GPU')) > 1: self.strategy = tf.distribute.MirroredStrategy() with self.strategy.scope(): self.model = model(**model_config) self.optimizer = tf.keras.optimizers.Adam(learning_rate=config.learning_rate) self.cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,reduction=tf.keras.losses.Reduction.NONE) self.train_dist_dataset = self.strategy.experimental_distribute_dataset(self.train_dataset) if self.test_dataset: self.test_dist_dataset = self.strategy.experimental_distribute_dataset(self.test_dataset) def save_checkpoints(self): if self.config.ckpt_path is not None: self.model.save_weights(self.config.ckpt_path) def train(self): train_loss_metric = tf.keras.metrics.Mean('training_loss', dtype=tf.float32) test_loss_metric = tf.keras.metrics.Mean('testing_loss', dtype=tf.float32) train_accuracy = tf.keras.metrics.Accuracy('training_accuracy', dtype=tf.float32) test_accuracy = tf.keras.metrics.Accuracy('testing_accuracy', dtype=tf.float32) train_Precision_metric= tf.keras.metrics.Precision(dtype=tf.float32) test_Precision_metric= tf.keras.metrics.Precision(dtype=tf.float32) train_recall = tf.keras.metrics.Recall() test_recall = tf.keras.metrics.Recall() @tf.function def train_step(dist_inputs): def step_fn(inputs): X, Y = inputs with tf.GradientTape() as tape: # training=True is only needed if there are layers with different # behavior during training versus inference (e.g. Dropout). 
                    logits = self.model(X,training=True)
                    num_labels = tf.shape(logits)[-1]
                    # Positions whose label is negative are excluded from the
                    # loss via boolean masking below — presumably an
                    # "ignore/padding" convention; TODO confirm with the data
                    # pipeline (image labels here should all be >= 0).
                    label_mask = tf.math.logical_not(Y < 0)
                    label_mask = tf.reshape(label_mask,(-1,))
                    logits = tf.reshape(logits,(-1,num_labels))
                    logits_masked = tf.boolean_mask(logits,label_mask)
                    label_ids = tf.reshape(Y,(-1,))
                    label_ids_masked = tf.boolean_mask(label_ids,label_mask)
                    cross_entropy = self.cce(label_ids_masked, logits_masked)
                    # Sum per-example losses and scale by the GLOBAL batch
                    # size, as required for gradient correctness under
                    # tf.distribute (the loss reduction is NONE).
                    loss = tf.reduce_sum(cross_entropy) * (1.0 / self.config.batch_size)
                    y_pred = tf.argmax(tf.nn.softmax(logits,axis=-1),axis=-1)
                    train_accuracy.update_state(tf.squeeze(Y),y_pred)
                    train_Precision_metric.update_state(tf.squeeze(Y),y_pred)
                    train_recall.update_state(tf.squeeze(Y),y_pred)
                grads = tape.gradient(loss, self.model.trainable_variables)
                self.optimizer.apply_gradients(list(zip(grads, self.model.trainable_variables)))
                return cross_entropy

            # Run step_fn on every replica, then sum the per-example losses
            # across replicas and average over the global batch size.
            per_example_losses = self.strategy.run(step_fn, args=(dist_inputs,))
            sum_loss = self.strategy.reduce(tf.distribute.ReduceOp.SUM, per_example_losses, axis=0)
            mean_loss = sum_loss / self.config.batch_size
            return mean_loss

        @tf.function
        def test_step(dist_inputs):
            def step_fn(inputs):
                X, Y = inputs
                # training=True is only needed if there are layers with different
                # behavior during training versus inference (e.g. Dropout).
logits = self.model(X,training=False) num_labels = tf.shape(logits)[-1] label_mask = tf.math.logical_not(Y < 0) label_mask = tf.reshape(label_mask,(-1,)) logits = tf.reshape(logits,(-1,num_labels)) logits_masked = tf.boolean_mask(logits,label_mask) label_ids = tf.reshape(Y,(-1,)) label_ids_masked = tf.boolean_mask(label_ids,label_mask) cross_entropy = self.cce(label_ids_masked, logits_masked) loss = tf.reduce_sum(cross_entropy) * (1.0 / self.config.batch_size) y_pred = tf.argmax(tf.nn.softmax(logits,axis=-1),axis=-1) test_accuracy.update_state(tf.squeeze(Y),y_pred) test_Precision_metric.update_state(tf.squeeze(Y),y_pred) test_recall.update_state(tf.squeeze(Y),y_pred) return cross_entropy per_example_losses = self.strategy.run(step_fn, args=(dist_inputs,)) sum_loss = self.strategy.reduce(tf.distribute.ReduceOp.SUM, per_example_losses, axis=0) mean_loss = sum_loss / self.config.batch_size return mean_loss train_pb_max_len = math.ceil(float(self.train_dataset_len)/float(self.config.batch_size)) test_pb_max_len = math.ceil(float(self.test_dataset_len)/float(self.config.batch_size)) if self.test_dataset else None epoch_bar = master_bar(range(self.config.max_epochs)) with self.strategy.scope(): for epoch in epoch_bar: for inputs in progress_bar(self.train_dist_dataset,total=train_pb_max_len,parent=epoch_bar): loss = train_step(inputs) self.tokens += tf.reduce_sum(tf.cast(inputs[1]>=0,tf.int32)).numpy() train_loss_metric(loss) train_loss.append(train_loss_metric.result().numpy()) train_acc.append(train_accuracy.result().numpy()) train_pre.append(train_Precision_metric.result().numpy()) train_R.append(train_recall.result().numpy()) epoch_bar.child.comment = f'training loss : {train_loss_metric.result()}' print(f"epoch {epoch+1}: train loss {train_loss_metric.result():.5f}. 
train accuracy {train_accuracy.result():.5f}") train_loss_metric.reset_states() train_accuracy.reset_states() if self.test_dist_dataset: for inputs in progress_bar(self.test_dist_dataset,total=test_pb_max_len,parent=epoch_bar): loss = test_step(inputs) test_loss_metric(loss) val_loss.append(test_loss_metric.result().numpy()) val_acc.append(test_accuracy.result().numpy()) val_pre.append(test_Precision_metric.result().numpy()) val_R.append(test_recall.result().numpy()) epoch_bar.child.comment = f'testing loss : {test_loss_metric.result()}' print(f"epoch {epoch+1}: test loss {test_loss_metric.result():.5f}. test accuracy {test_accuracy.result():.5f}") test_loss_metric.reset_states() test_accuracy.reset_states() self.save_checkpoints() # + IMG_WIDTH = 128 IMG_HEIGHT = 128 IMG_CHANNELS = 3 import os from tqdm import tqdm from skimage.io import imread, imshow from skimage.transform import resize X_ = "Database/Classification/Supervised/" train_ids = next(os.walk(X_))[2] X = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) for n,id_ in tqdm(enumerate(train_ids), total=len(train_ids)): path = X_ + id_ img = imread(path)[:,:,:IMG_CHANNELS] img = resize(img, (IMG_HEIGHT, IMG_WIDTH,IMG_CHANNELS), mode='constant', preserve_range=True) X[n] = img #Fill empty X_train with values from img import pandas as pd data = pd.read_csv('Database/Classification/Supervised.csv') Y = data.drop(['image'],axis=1) # + import sklearn.model_selection as model_selection X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, train_size=0.65,test_size=0.35, random_state=101) train_images = tf.cast(X_train.reshape((-1, 3, 128, 128)),dtype=tf.float32) test_images = tf.cast(X_test.reshape((-1, 3, 128, 128)),dtype=tf.float32) train_images, test_images = train_images / 255.0, test_images / 255.0 train_x = tf.data.Dataset.from_tensor_slices(train_images,) train_y = tf.data.Dataset.from_tensor_slices(y_train) train_dataset = 
tf.data.Dataset.zip((train_x,train_y)) test_x = tf.data.Dataset.from_tensor_slices(test_images) test_y = tf.data.Dataset.from_tensor_slices(y_test) test_dataset = tf.data.Dataset.zip((test_x,test_y)) # + tconf = TrainerConfig(max_epochs=10,batch_size=64, learning_rate=1e-3) # sample model config. model_config = {"image_size":128, "patch_size":4, "num_classes":2, "dim":64, "depth":3, "heads":4, "mlp_dim":128} trainer = Trainer(ViT, model_config, train_dataset, len(train_images), test_dataset, len(test_images), tconf) # + tf.autograph.experimental.do_not_convert(trainer.train()) # + train_f1=[] val_f1 = [] for i in range(len(train_pre)): train_f1.append(2* (train_pre[i]*train_R[i])/(train_pre[i]+train_R[i])) for i in range(len(val_pre)): val_f1.append(2* (train_pre[i]*train_R[i])/(train_pre[i]+train_R[i])) # - train_f1 # + non_seg_train_acc = train_acc non_seg_train_loss = train_loss non_seg_val_acc = val_acc non_seg_val_loss = val_loss non_seg_train_R = train_R non_seg_val_R = val_R non_seg_train_f1 = train_f1 non_seg_val_f1 = val_f1 print("Train Accuracy = ",train_acc[len(train_acc)-1]) print("Train Loss = ",train_loss[len(train_loss)-1]) print("Validation Accuracy = ",val_acc[len(val_acc)-1]) print("Validation Loss = ",val_loss[len(val_loss)-1]) print("Train Recall =",train_R[len(train_R)-1]) print("Validation Recall = ",val_R[len(val_R)-1]) print("Train F1Score =",train_f1[len(train_f1)-1]) print("Validation F1Score =",val_f1[len(val_f1)-1]) # - import matplotlib.pyplot as plt # summarize history for accuracy # summarize history for accuracy plt.plot(train_acc) plt.plot(val_acc) plt.title('ViT Model Accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(train_loss) plt.plot(val_loss) plt.title('ViT Model Model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() import matplotlib.pyplot as plt # summarize history for 
accuracy # summarize history for accuracy plt.plot(non_seg_train_acc) plt.plot(non_seg_val_acc) plt.title('ViT Model Accuracy without segmentation') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(non_seg_train_loss) plt.plot(non_seg_val_loss) plt.title('ViT Model Model loss without segmentation') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # ## With Segmentation # + IMG_WIDTH = 128 IMG_HEIGHT = 128 IMG_CHANNELS = 3 import os from tqdm import tqdm from skimage.io import imread, imshow from skimage.transform import resize X_ = "Database/Classification/Segmentation_Classification/ISIC2018/Segmented_Unet_transformer/" train_ids = next(os.walk(X_))[2] X = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) for n,id_ in tqdm(enumerate(train_ids), total=len(train_ids)): path = X_ + id_ img = imread(path)[:,:,:IMG_CHANNELS] img = resize(img, (IMG_HEIGHT, IMG_WIDTH,IMG_CHANNELS), mode='constant', preserve_range=True) X[n] = img #Fill empty X_train with values from img import pandas as pd data = pd.read_csv('Database/Classification/Supervised.csv') Y = data.drop(['image'],axis=1) # - os.walk(X_) # + import sklearn.model_selection as model_selection X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, train_size=0.65,test_size=0.35, random_state=101) train_images = tf.cast(X_train.reshape((-1, 3, 128, 128)),dtype=tf.float32) test_images = tf.cast(X_test.reshape((-1, 3, 128, 128)),dtype=tf.float32) train_images, test_images = train_images / 255.0, test_images / 255.0 train_x = tf.data.Dataset.from_tensor_slices(train_images,) train_y = tf.data.Dataset.from_tensor_slices(y_train) train_dataset = tf.data.Dataset.zip((train_x,train_y)) test_x = tf.data.Dataset.from_tensor_slices(test_images) test_y = tf.data.Dataset.from_tensor_slices(y_test) test_dataset = 
tf.data.Dataset.zip((test_x,test_y)) # + tconf = TrainerConfig(max_epochs=10,batch_size=64, learning_rate=1e-3) # sample model config. model_config = {"image_size":128, "patch_size":4, "num_classes":2, "dim":64, "depth":3, "heads":4, "mlp_dim":128} trainer = Trainer(ViT, model_config, train_dataset, len(train_images), test_dataset, len(test_images), tconf) # - tf.autograph.experimental.do_not_convert(trainer.train()) # + seg_train_acc = train_acc seg_train_loss = train_loss seg_val_acc = val_acc seg_val_loss = val_loss seg_train_R = train_R seg_val_R = val_R seg_train_f1 = train_f1 seg_val_f1 = val_f1 print("Train Accuracy = ",train_acc[len(train_acc)-1]) print("Train Loss = ",train_loss[len(train_loss)-1]) print("Validation Accuracy = ",val_acc[len(val_acc)-1]) print("Validation Loss = ",val_loss[len(val_loss)-1]) print("Train Recall =",train_R[len(train_R)-1]) print("Validation Recall = ",val_R[len(val_R)-1]) print("Train F1Score =",train_f1[len(train_f1)-1]) print("Validation F1Score =",val_f1[len(val_f1)-1]) # + import matplotlib.pyplot as plt # summarize history for accuracy # summarize history for accuracy plt.plot(train_acc) plt.plot(val_acc) plt.title('ViT Model Accuracy with segmentation') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(train_loss) plt.plot(val_loss) plt.title('ViT Model Model loss with segmentation') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show()
ViT_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # I came along this topic while searching for methods to gather business intelligence data in a GDPR conform way. General Data Protection Regulation will be implemented on 2018-05-25 in Europe and is quite difficult to transfer into real world processes. It comes also with [hefty penalties](https://en.wikipedia.org/wiki/General_Data_Protection_Regulation#Sanctions), so that every company that processes personal data should take the time to check their data processing workflows. # # Differential privacy is still not widely known, also there is quite a bit discourse if it hold up to its promises at all. Since by now also Google and Apple claim to use differential privacy, I started investigating it by reading "the" # book on differential privacy by <NAME>. You can download for free [here](https://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf). # # I am assuming in my example (simplified) that there are two systems: # # * one system that holds data bound to privacy constraints, but all access is restricted by passwords, encryption and similar, so that only the user it self can access his data. # * one system that collects overall data needed for business development. This may be a huge database where all other systems (incl. the one system mentioned above) just sent their stats to and that generates the reports for business management. # # The problem is the following: How can we make sure that no privacy concerns are violated when sending data to the central data pool system? Statistics can be simple ones as active users up to data that will be used for active learning in a machine learning based scenario. Even data that is not directly bound to persons can be used in combination with other (meta) data to deanonymize information. 
See this example: Deanonymization of the [Netflix dataset prize](https://www.cs.utexas.edu/~shmat/shmat_oak08netflix.pdf) # # Differential privacy makes this bold promise: # # > Learn nothing about the individual, learn something about the population. # # # Let's make a simple example: # We want to learn about a population how many people are <NAME> - without knowing this excatly for one user to not violate the privay of that user. So we need to make sure, that we can not draw any conclusion in that central system once the data is sent. Normal "attack" or "exploit" vectors would be to ask the central database before and after data sending. By knowing the Bieber fans before and after that one users statistics were sent an attacker could learn if that persons is Bieber fans or not. Not good! # # Let's improve. We need to add noise or fake data, so that we can not tell for sure what happens per user basis. Since our data in this simplified example is only binary, we add noise. # # ## Simple example # # Let us put this into code and play a bit with that scenario. When sending one person's data into the data set, we add noise: # # * 50% of the time we tell the truth and # * the other 50% of the time we do a coin flip and send that # # First we are generating a data set, that represents the our real distribution. We model Heads as boolean True, Tails is False. Roughly 10% of the population in our simplified example are Justin Bieber fans ( $p = 0.1 $) # # # + import numpy as np def generate_population(size, probability): return np.random.choice([True, False], size, p=[probability, 1-probability]) population = generate_population(1000, 0.1) # - # In a real world scenario we do not know the probabilities here. We will later calcuate those values with the information we learn. # # Let us create a function that represents adding the noise while sending out collected to the data pool. 
#
# +
from random import random


def fuzz(x):
    # Randomized response: with probability 1/2 report the true value x,
    # otherwise report the outcome of a fresh fair coin flip.
    return x if random() > 0.5 else random() > 0.5
# -

# Now we are evaluating our data after we send it through the fuzzer.
#

# +
after_noise_population = [fuzz(i) for i in population]

bieber_fans_count_after_noise = np.sum(after_noise_population)
bieber_fans_count_before_noise = np.sum(population)

print(f"In our population {bieber_fans_count_before_noise} are Justin Bieber fans.")
print(f"In the central system, after adding noise we have {bieber_fans_count_after_noise} Justin Bieber fans.")
# -

# Let's now see how we can derive the distribution of Justin Bieber fans in the real population from our central database. The number above, Bieber fans after adding noise, consists of the actual Bieber fans whose first coin flip was Heads, plus those whose first coin flip was Tails but whose second coin flip was Heads. So with the fraction of Bieber fans as $f_{JB}$:
#
#
# $$
# \begin{aligned}
# f_{JB} &= \frac{1}{2}p + \frac{1}{4} \\
# p &= 2(f_{JB} - \frac{1}{4})
# \end{aligned}
# $$
#
# Note that in our little example we know the real probability, since we set it initially to 0.1. Transferring this example to the real world, we would not know this, but would need to derive it as shown above.
# Let's see how much data we need to get to a stable value. For that we plot our findings over growing population sizes.

# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn')


def compute(size, p):
    # Simulate one population of `size` people with true fan fraction `p`,
    # push it through the fuzzer, and invert the randomized-response
    # relation: p = 2 * (observed_fraction - 1/4).
    # NOTE(review): the local `population` shadows the module-level one.
    population = generate_population(size, p)
    w_noise = [fuzz(i) for i in population]
    ratio = np.sum(w_noise) / size
    real_p = 2*(ratio-0.25)
    return real_p


sim_sizes = [compute(i, 0.1) for i in range(10, 10000)]

plt.plot(sim_sizes)
plt.ylabel('Fraction of <NAME>')
plt.xlabel('Size of population')
plt.title('Deriving the number of <NAME> fans after noise');
# -

# Here we see that our probability stabilizes around 0.1 with a growing population size.
#
# ## Conclusions
#
# We transferred binary data differentially privately. This is just a small step; using the same technique for vectors of information in a way that still yields usable data in the end is much harder. I hope to show some more complex examples in later articles.
#
# Also, differential privacy seems to gain traction very slowly. In my opinion the main problem here is proving how private the data actually is afterwards. That is very abstract, hence my attempt to put all this in a simple, understandable code example.
content/resources/notebooks/2018-02-11-differential-privacy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="_YeUvQJWAasZ" # ## Interacting with CerebralCortex Data # # # + [markdown] id="CvOdk3vovbFm" # Cerebral Cortex is MD2K's big data cloud tool designed to support population-scale data analysis, visualization, model development, and intervention design for mobile-sensor data. It provides the ability to do machine learning model development on population scale datasets and provides interoperable interfaces for aggregation of diverse data sources. # # This page provides an overview of the core Cerebral Cortex operations to familiarilze you with how to discover and interact with different sources of data that could be contained within the system. # # _Note:_ While some of these examples are showing open dataset, they are designed to function on real-world mCerebrum data and the signal generators were built to facilitate the testing and evaluation of the Cerebral Cortex platform by those individuals that are unable to see those original datasets or do not wish to collect data before evaluating the system. # + [markdown] id="wQykWeXzOyhZ" # ## Setting Up Environment # # + [markdown] id="aqHv61N2wAu0" # # Notebook does not contain the necessary runtime enviornments necessary to run Cerebral Cortex. The following commands will download and install these tools, framework, and datasets. 
# + colab={"base_uri": "https://localhost:8080/"} id="2iCah_VCO0DW" executionInfo={"status": "ok", "timestamp": 1627422471836, "user_tz": 300, "elapsed": 73884, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="dfb7519f-e436-44fd-a636-848445ef7410" import importlib, sys, os from os.path import expanduser sys.path.insert(0, os.path.abspath('..')) DOWNLOAD_USER_DATA=False ALL_USERS=False #this will only work if DOWNLOAD_USER_DATA=True IN_COLAB = 'google.colab' in sys.modules MD2K_JUPYTER_NOTEBOOK = "MD2K_JUPYTER_NOTEBOOK" in os.environ if (get_ipython().__class__.__name__=="ZMQInteractiveShell"): IN_JUPYTER_NOTEBOOK = True JAVA_HOME_DEFINED = "JAVA_HOME" in os.environ SPARK_HOME_DEFINED = "SPARK_HOME" in os.environ PYSPARK_PYTHON_DEFINED = "PYSPARK_PYTHON" in os.environ PYSPARK_DRIVER_PYTHON_DEFINED = "PYSPARK_DRIVER_PYTHON" in os.environ HAVE_CEREBRALCORTEX_KERNEL = importlib.util.find_spec("cerebralcortex") is not None SPARK_VERSION = "3.1.2" SPARK_URL = "https://archive.apache.org/dist/spark/spark-"+SPARK_VERSION+"/spark-"+SPARK_VERSION+"-bin-hadoop2.7.tgz" SPARK_FILE_NAME = "spark-"+SPARK_VERSION+"-bin-hadoop2.7.tgz" CEREBRALCORTEX_KERNEL_VERSION = "3.3.14" DATA_PATH = expanduser("~") if DATA_PATH[:-1]!="/": DATA_PATH+="/" USER_DATA_PATH = DATA_PATH+"cc_data/" if MD2K_JUPYTER_NOTEBOOK: print("Java, Spark, and CerebralCortex-Kernel are installed and paths are already setup.") else: SPARK_PATH = DATA_PATH+"spark-"+SPARK_VERSION+"-bin-hadoop2.7/" if(not HAVE_CEREBRALCORTEX_KERNEL): print("Installing CerebralCortex-Kernel") # !pip -q install cerebralcortex-kernel==$CEREBRALCORTEX_KERNEL_VERSION else: print("CerebralCortex-Kernel is already installed.") if not JAVA_HOME_DEFINED: if not os.path.exists("/usr/lib/jvm/java-8-openjdk-amd64/") and not os.path.exists("/usr/lib/jvm/java-11-openjdk-amd64/"): print("\nInstalling/Configuring Java") 
# !sudo apt update # !sudo apt-get install -y openjdk-8-jdk-headless os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/" elif os.path.exists("/usr/lib/jvm/java-8-openjdk-amd64/"): print("\nSetting up Java path") os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/" elif os.path.exists("/usr/lib/jvm/java-11-openjdk-amd64/"): print("\nSetting up Java path") os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-11-openjdk-amd64/" else: print("JAVA is already installed.") if (IN_COLAB or IN_JUPYTER_NOTEBOOK) and not MD2K_JUPYTER_NOTEBOOK: if SPARK_HOME_DEFINED: print("SPARK is already installed.") elif not os.path.exists(SPARK_PATH): print("\nSetting up Apache Spark ", SPARK_VERSION) # !pip -q install findspark import pyspark spark_installation_path = os.path.dirname(pyspark.__file__) import findspark findspark.init(spark_installation_path) if not os.getenv("PYSPARK_PYTHON"): os.environ["PYSPARK_PYTHON"] = os.popen('which python3').read().replace("\n","") if not os.getenv("PYSPARK_DRIVER_PYTHON"): os.environ["PYSPARK_DRIVER_PYTHON"] = os.popen('which python3').read().replace("\n","") else: print("SPARK is already installed.") else: raise SystemExit("Please check your environment configuration at: https://github.com/MD2Korg/CerebralCortex-Kernel/") if DOWNLOAD_USER_DATA: if not os.path.exists(USER_DATA_PATH): if ALL_USERS: print("\nDownloading all users' data.") # !rm -rf $USER_DATA_PATH # !wget -q http://mhealth.md2k.org/images/datasets/cc_data.tar.bz2 && tar -xf cc_data.tar.bz2 -C $DATA_PATH && rm cc_data.tar.bz2 else: print("\nDownloading a user's data.") # !rm -rf $USER_DATA_PATH # !wget -q http://mhealth.md2k.org/images/datasets/s2_data.tar.bz2 && tar -xf s2_data.tar.bz2 -C $DATA_PATH && rm s2_data.tar.bz2 else: print("Data already exist. 
Please remove folder", USER_DATA_PATH, "if you want to download the data again") # + [markdown] toc-hr-collapsed=false id="bqGtjexSAYNp" # # Cerebral Cortex Data Analysis Algorithms # Cerebral Cortex contains a library of algorithms that are useful for processing data and converting it into features or biomarkers. This page demonstrates a simple GPS clustering algorithm. For more details about the algorithms that are available, please see our [documentation](https://cerebralcortex-kernel.readthedocs.io/en/latest/). These algorithms are constantly being developed and improved through our own work and the work of other researchers. # + [markdown] id="p29mBTykAYNv" # ## Initalize the system # + id="9kJOZDmjCROS" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627422479553, "user_tz": 300, "elapsed": 7721, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="1d5d1408-8348-479e-db66-f8498866d167" from cerebralcortex.kernel import Kernel CC = Kernel(cc_configs="default", study_name="default", new_study=True) # + [markdown] id="28X2g4Q4AYNx" # ## Generate some sample location data # # This example utilizes a data generator to protect the privacy of real participants and allows for anyone utilizing this system to explore the data without required institutional review board approvals. This is disabled for this demonstration to not create too much data at once. 
# + id="kEXf3fNnCpHv" # !wget -q https://raw.githubusercontent.com/MD2Korg/CerebralCortex/master/jupyter_demo/util/data_helper.py # + id="_XGg6o6QAYNx" from data_helper import gen_location_datastream gps_stream = gen_location_datastream(user_id="00000000-afb8-476e-9872-6472b4e66b68", stream_name="gps--org.md2k.phonesensor--phone") # + [markdown] id="uyRUe_g3El5Z" # ### Print generated demo data # + colab={"base_uri": "https://localhost:8080/"} id="dougv7AJAYNx" executionInfo={"status": "ok", "timestamp": 1627422485688, "user_tz": 300, "elapsed": 3165, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="1d7a4278-8a35-4aa7-ac8b-3bade2a6045d" gps_stream.show(3) # + [markdown] id="q8qGhLImErnN" # ### Print schema of demo data # + colab={"base_uri": "https://localhost:8080/"} id="zzcyCSWzEvHw" executionInfo={"status": "ok", "timestamp": 1627422485689, "user_tz": 300, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="44209f87-2e13-4ac4-bedc-dce8b33143a9" gps_stream.printSchema() # + [markdown] id="4zWN_Y-aAYNy" # ## Cluster the location data # Cerebral Cortex makes it easy to apply built-in algorithms to data streams. In this case, `gps_clusters` is imported from the algorithm library, then `compute` is utilized to run this algorithm on the `gps_stream` to generate a set of centroids. This is the general format for applying algorithm to datastream and makes it easy for researchers to apply validated and tested algorithms to his/her own data without the need to become an expert in the particular set of transformations needed. 
# # _Note:_ the `compute` method engages the parallel computation capabilities of Cerebral Cortex, which causes all the data to be read from the data storage layer and processed on every computational core available to the system. This allows the computation to run as quickly as possible and to take advantage of powerful clusters from a relatively simple interface. This capability is critical to working with mobile sensor big data where data sizes can exceed 100s of gigabytes per datastream for larger studies. # + id="wI4ebB-TAYNy" from cerebralcortex.algorithms.gps.clustering import cluster_gps # + [markdown] id="QYR4LyyEEIj-" # ### Window GPS Data # + id="VAM2YrVeELYl" windowed_gps = gps_stream.window() # + [markdown] id="_8vSFZboEWeg" # ### Cluster windowed GPS data # + colab={"base_uri": "https://localhost:8080/"} id="5wtze8C0Ed4Z" executionInfo={"status": "ok", "timestamp": 1627422493379, "user_tz": 300, "elapsed": 6450, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="c7a293ce-edbf-42e6-9807-b556e2f67fb6" clusters = cluster_gps(windowed_gps) clusters.show(3, truncate=False) # + [markdown] id="x0A7cjbhAYNz" # ## Visualize GPS Data # + [markdown] id="c7sdjzYnAYNz" # ### GPS Stream Plot # GPS visualization requires dedicated plotting capabilities. Cerebral Cortex includes a library to allow for interactive exploration. In this plot, use your mouse to drag the map around along with zooming in to explore the specific data points. 
# + id="osWx8QVCAYNz" from cerebralcortex.plotting.gps.plots import plot_gps_clusters # + colab={"base_uri": "https://localhost:8080/", "height": 741} id="CoGe1Z_wIvJ7" executionInfo={"status": "ok", "timestamp": 1627422497691, "user_tz": 300, "elapsed": 3939, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="1a359741-5c2a-4847-81c6-10bd5ca69c37" plot_gps_clusters(clusters)
examples/cc_algorithms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="3TjPeW5AnR-B" # ## **Setup: Setting up the SQLite DB:** # + colab={} colab_type="code" id="nl5xFo_4koQr" import sqlite3 import pandas as pd # + colab={} colab_type="code" id="CPImnQEknZW-" # Open up a new connection: conn = sqlite3.connect('rpg_db.sqlite3') # - # Open new cursor: curs = conn.cursor() # + [markdown] colab_type="text" id="rUyAPH9alIV7" # ## **Assignment - Part 1, Querying a Database:** # + [markdown] colab_type="text" id="54vbZjFPlUfK" # This directory contains a file rpg_db.sqlite3, a database for a hypothetical webapp role-playing game. This test data has dozens-to-hundreds of randomly generated characters across the base classes (Fighter, Mage, Cleric, and Thief) as well as a few Necromancers. Also generated are Items, Weapons, and connections from characters to them. Note that, while the name field was randomized, the numeric and boolean fields were left as defaults. 
#
# Use sqlite3 to load and write queries to explore the data, and answer the following questions:

# + [markdown] colab_type="text" id="5-eO7NTFmKPE"
# **(1) How many total Characters are there?**
# -

# Count distinct character ids. fetchall() returns a list of row tuples,
# e.g. [(302,)], hence the [0][0] unwrap used throughout this notebook.
num_characters = curs.execute("""SELECT COUNT(DISTINCT character_id)
                                 FROM charactercreator_character"""
                              ).fetchall()[0][0]

print(f"(1) How many total Characters are there?: {num_characters} Characters")

# + [markdown] colab_type="text" id="YRptpBpUmN1C"
# **(2) How many of each specific subclass?**

# +
# SQLite queries to find how many unique characters there are of each character type:
# (each subclass table's character_ptr_id is the character's id; necromancers
# are a further subclass of mage keyed by mage_ptr_id)
num_clerics = curs.execute("""SELECT COUNT(DISTINCT character_ptr_id)
                              FROM charactercreator_cleric"""
                           ).fetchall()[0][0]
num_fighters = curs.execute("""SELECT COUNT(DISTINCT character_ptr_id)
                               FROM charactercreator_fighter"""
                            ).fetchall()[0][0]
num_thiefs = curs.execute("""SELECT COUNT(DISTINCT character_ptr_id)
                             FROM charactercreator_thief"""
                          ).fetchall()[0][0]
num_mages = curs.execute("""SELECT COUNT(DISTINCT character_ptr_id)
                            FROM charactercreator_mage"""
                         ).fetchall()[0][0]
num_mages_necromancers = curs.execute("""SELECT COUNT(DISTINCT mage_ptr_id)
                                         FROM charactercreator_necromancer"""
                                      ).fetchall()[0][0]

# Print answers:
print("(2) How many of each specific subclass (of Character)?")
print(f"# of clerics: {num_clerics}")
print(f"# of fighters: {num_fighters}")
print(f"# of thiefs: {num_thiefs}")
print(f"# of mages: {num_mages} (of which {num_mages_necromancers} are necromancers)")

# Check to make sure we covered all characters and type:
# (the four base subclasses should partition the character set)
assert num_characters == num_clerics + num_fighters + num_thiefs + num_mages

# + [markdown] colab_type="text" id="Tx2C6iv4mVxK"
# **(3) How many total Items?**

# + colab={} colab_type="code" id="e7GRPbBsmJIP"
# SQLite queries to find how many unique items, how many of those are weapons,
# and how many total items held by characters there are:
num_unique_items = curs.execute("""SELECT COUNT(DISTINCT item_id)
                                   FROM armory_item"""
                                ).fetchall()[0][0]
num_weapons = curs.execute("""SELECT COUNT(DISTINCT item_ptr_id)
                              FROM armory_weapon"""
                           ).fetchall()[0][0]
# Inventory rows are (character, item) pairs, so a plain COUNT gives the
# total number of held items (with multiplicity), not distinct item types.
total_items_held = curs.execute("""SELECT COUNT(item_id)
                                   FROM charactercreator_character_inventory"""
                                ).fetchall()[0][0]

# Print answers:
print(f"# of unique item types: {num_unique_items} items ({num_weapons} weapons, {num_unique_items - num_weapons} non-weapons)")
print(f"# of total items held by characters in the game: {total_items_held} items")

# + [markdown] colab_type="text" id="KFDM3NY0mXu8"
# **(4) How many of the Items are weapons? How many are not?**
# -

# See answer to Question #3 above.

# + [markdown] colab_type="text" id="-WxkualxmZbo"
# **(5) How many Items does each character have? (Return first 20 rows)**

# + colab={} colab_type="code" id="Ut8eEBcymJNT"
# SQLite query to find how many total items each character has:
query_05_results = curs.execute("""SELECT character_id, COUNT(item_id)
                                   FROM charactercreator_character_inventory
                                   GROUP BY character_id
                                   ORDER BY character_id
                                   LIMIT 20"""
                                ).fetchall()

# Display results as a table by making into a pandas dataframe:
pd.DataFrame(query_05_results, columns=['character_id', 'num_items_held'])

# + [markdown] colab_type="text" id="WvBogJhRmbPn"
# **(6) How many Weapons does each character have? (Return first 20 rows)**

# +
# SQLite query to find how many weapons each character has:
# (restrict inventory rows to items that appear in armory_weapon; characters
# holding zero weapons produce no row at all — see the to-do below)
query_06_results = curs.execute("""SELECT inventory.character_id, COUNT(inventory.item_id)
                                   FROM charactercreator_character_inventory AS inventory
                                   WHERE inventory.item_id IN (SELECT item_ptr_id
                                                               FROM armory_weapon)
                                   GROUP BY inventory.character_id"""
                                ).fetchall()

# Display results as a table by making into a pandas dataframe:
pd.DataFrame(query_06_results, columns=['character_id', 'num_weapons_held'])

# +
# [?? To do: How would I find the above, except including all characters, incl. those with 0 weapons,
# and showing that ??]
# - # CHECK answer (BUT don't use this syntax -- BETWEEN 138 AND 174 isn't as good as directly # checking if item_id is in weapons): curs.execute("""SELECT character_id, COUNT(item_id) FROM charactercreator_character_inventory WHERE item_id BETWEEN 138 AND 174 GROUP BY character_id ORDER BY character_id LIMIT 20""" ).fetchall() # + [markdown] colab_type="text" id="ukKccKSZmdLz" # **(7) On average, how many Items does each Character have?** # + # SQLite query to find avg. # of items held per character: avg_items_per_character = curs.execute("""SELECT CAST(COUNT(item_id) AS FLOAT) / COUNT(DISTINCT character_id) FROM charactercreator_character_inventory""" ).fetchall()[0][0] # Print answer: print(f"Average # of items held per character: {avg_items_per_character:.2f} items") # CHECK answer: # Avg. items per character: 898 total items held / 302 characters = 2.9735099337748343 assert avg_items_per_character == total_items_held / num_characters == 898 / 302 # + [markdown] colab_type="text" id="UzDAEBbvmejm" # **(8) On average, how many Weapons does each character have?** # + # Execute SQLite query to find avg. # of weapons held per character (all characters): avg_weap_per_char = curs.execute("""SELECT CAST(COUNT(item_id) AS FLOAT) / (SELECT COUNT(DISTINCT character_id) FROM charactercreator_character_inventory) FROM charactercreator_character_inventory as inventory WHERE item_id IN (SELECT item_ptr_id FROM armory_weapon)""" ).fetchall()[0][0] # Print answer: print(f"Avg. # of weapons per character: {avg_weap_per_char:.2f} weapons") # CHECK answer: # Avg. weapons per character (all characters): 203 total weapons held / 302 characters = 0.6721854304635762 assert avg_weap_per_char == 203 / 302 # + # Execute SQLite query to find avg. 
# of weapons held per weapon-holding character: avg_weap_per_weap_holder = curs.execute("""SELECT CAST(COUNT(item_id) AS FLOAT) / COUNT(DISTINCT character_id) FROM charactercreator_character_inventory WHERE item_id IN (SELECT item_ptr_id FROM armory_weapon)""" ).fetchall()[0][0] # Print answer: print(f"Avg. # of weapons per weapon-holding character: {avg_weap_per_weap_holder:.2f} weapons") # CHECK answer: # Avg. weapons per weapon-holding character: 203 weapons held / 155 weapon-holding characters = 1.3096774193548386 assert avg_weap_per_weap_holder == 203 / 155
module1-introduction-to-sql/rpg_queries_v_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: feml # language: python # name: feml # --- # ## Multivariate imputation of Chained Equations # # In this notebook we will implement MICE using various machine learning models to estimate the missing values. # # [IterativeImputer from Sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html#sklearn.impute.IterativeImputer) # # - Same model will be used to predict NA in all variables # - Can't use classification for binary variables and regression for continuous variables # # For a more sophisticated imputation, we would have to assemble the imputers / models manually. # + import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import BayesianRidge from sklearn.experimental import enable_iterative_imputer from sklearn.impute import IterativeImputer # - # ## Load data # + # load data with numerical variables variables = ['A2','A3','A8', 'A11', 'A14', 'A15', 'A16'] data = pd.read_csv('../creditApprovalUCI.csv', usecols=variables) data.head() # + # let's separate into training and testing set X_train, X_test, y_train, y_test = train_test_split( data.drop('A16', axis=1), data['A16'], test_size=0.3, random_state=0) X_train.shape, X_test.shape # + # find the percentage of missing data within those variables X_train.isnull().mean() # - X_train.hist(bins=50, figsize=(10,10)) plt.show() # In this dataset, as most variables are continuous, we can easily estimate missing values with regression models using MICE. 
# + # let's create a MICE imputer using Bayes as estimator imputer = IterativeImputer( estimator=BayesianRidge(), # the estimator to predict the NA initial_strategy='mean', # how will NA be imputed in step 1 max_iter=10, # number of cycles imputation_order='ascending', # the order in which to impute the variables n_nearest_features=None, # whether to limit the number of predictors skip_complete=True, # whether to ignore variables without NA random_state=0, ) # + # perform MICE imputer.fit(X_train) # + # transform the data - replace the missing values train_t = imputer.transform(X_train) test_t = imputer.transform(X_test) # + # after the imputation there is no more data missing pd.DataFrame(train_t, columns=X_train.columns).isnull().sum() # - # ## Lets compare imputation with different models # + import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import BayesianRidge from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor # + X_train, X_test, y_train, y_test = train_test_split( data.drop('A16', axis=1), data['A16'], test_size=0.3, random_state=0) X_train.shape, X_test.shape # + imputer_bayes = IterativeImputer( estimator=BayesianRidge(), max_iter=10, random_state=0) imputer_knn = IterativeImputer( estimator=KNeighborsRegressor(n_neighbors=5), max_iter=10, random_state=0) imputer_nonLin = IterativeImputer( estimator=DecisionTreeRegressor(max_features='sqrt', random_state=0), max_iter=10, random_state=0) imputer_missForest = IterativeImputer( estimator=ExtraTreesRegressor(n_estimators=10, random_state=0), max_iter=10, random_state=0) # - imputer_bayes.fit(X_train) imputer_knn.fit(X_train) imputer_nonLin.fit(X_train) imputer_missForest.fit(X_train) # + # replace NA X_train_bayes = imputer_bayes.transform(X_train) X_train_knn = imputer_knn.transform(X_train) X_train_nonLin = 
imputer_nonLin.transform(X_train) X_train_missForest = imputer_missForest.transform(X_train) # - predictors = [var for var in variables if var !='A16'] # transform numpy array to dataframe X_train_bayes = pd.DataFrame(X_train_bayes, columns = predictors) X_train_knn = pd.DataFrame(X_train_knn, columns = predictors) X_train_nonLin = pd.DataFrame(X_train_nonLin, columns = predictors) X_train_missForest = pd.DataFrame(X_train_missForest, columns = predictors) # + # plot the distribution of the imputed variable fig = plt.figure() ax = fig.add_subplot(111) X_train['A3'].plot(kind='kde', ax=ax, color='blue') X_train_bayes['A3'].plot(kind='kde', ax=ax, color='green') X_train_knn['A3'].plot(kind='kde', ax=ax, color='red') X_train_nonLin['A3'].plot(kind='kde', ax=ax, color='black') X_train_missForest['A3'].plot(kind='kde', ax=ax, color='orange') # add legends lines, labels = ax.get_legend_handles_labels() labels = ['A3 original', 'A3 bayes', 'A3 knn', 'A3 Trees', 'A3 missForest'] ax.legend(lines, labels, loc='best') plt.title('Variable A3 distribution after MICE') plt.show() # -
Section-05-Multivariate-Imputation/05.02-MICE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ML # language: python # name: ml # --- import sys sys.path.append('../') from tqdm import tqdm_notebook as tqdm import matplotlib.pyplot as plt import matplotlib.image as mpimg import pandas as pd import cv2 import random import numpy as np from utils import * from mask_functions import * # %matplotlib inline # #### save masks indices to png format # save indices where a mask is 1 as npy file df = pd.read_csv('../data/train.csv') mask_dir = '../data/npy_masks_256/' gb = df.groupby('ImageId') unique_ids = list(gb.groups.keys()) for image_id in tqdm(unique_ids): df = gb.get_group(image_id) mask_path = os.path.join(mask_dir, image_id + '.npy') annotations = df['EncodedPixels'].tolist() mask = np.zeros([1024, 1024]) if annotations[0] != '-1': for rle in annotations: mask += run_length_decode(rle) mask = cv2.resize(mask, (size, size)) # resize will generate a few value between 0 and 1 mask = (mask >= 0.5).astype('float32') # for overlap #idx = np.argwhere(mask>=0.5) np.save(mask_path, mask) np.unique(mask, return_counts=True) plt.imshow(mask) df = pd.read_csv('../data/train.csv') df = df.drop_duplicates('ImageId') df.head() mask_idx = np.load('../data/npy_masks/' + image_id + '.npy') mask = np.zeros([1024, 1024]) mask[mask_idx[:, 0], mask_idx[:, 1]] = 1 plt.imshow(mask); mask = np.zeros([1024, 1024]) amask = mask.copy() amask[100:200, 100:200] = 1 idx = np.argwhere(amask==1) mask[idx[:, 0], idx[:, 1]] = 1 plt.imshow(mask) mask_idx = np.load('../data/npy_masks/1.2.276.0.7230010.3.1.4.8323329.1426.1517875167.704674.npy') mask_idx.shape mask_idx img = cv2.imread('../data/train_png/1.2.276.0.7230010.3.1.4.8323329.1000.1517875165.878027.png', cv2.IMREAD_GRAYSCALE) np.save('test.npy', img) # !du -sh test.npy # save indices where a mask is 1 as npy file df = pd.read_csv('../data/train.csv') img_npy_dir = 
'../data/npy_train_256/' img_dir = '../data/train_png' size = 256 gb = df.groupby('ImageId') unique_ids = list(gb.groups.keys()) for image_id in tqdm(unique_ids): img_npy_path = os.path.join(img_npy_dir, image_id + '.npy') img_path = os.path.join(img_dir, image_id + '.png') img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) img = cv2.resize(img, (size, size)) img = np.expand_dims(img, -1) # [10] np.save(img_npy_path, img) plt.imshow(img[:, :, 0], cmap='bone') # !ls ../external_data/messidor/ # read all three data labels files train1 = pd.read_csv('../data/train.csv') train2 = pd.read_csv('../data/train_old.csv') train3 = pd.read_csv('../data/train_messidor.csv') # concatenate into one train1['path'] = train1['id_code'].apply(lambda x: "../data/train_images/" + x + ".png") train2['path'] = train2['id_code'].apply(lambda x: "../external_data/train_images/" + x + ".jpeg") train3['path'] = train3['id_code'].apply(lambda x: "../external_data/messidor/train_images/" + x + ".tif") train_df = pd.concat([train1, train2, train3], axis=0) train_df.head() # + def load_rgb(path): image = cv2.imread(path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = cv2.resize(image, (IMG_SIZE, IMG_SIZE)) return image def load_ben_color(path, sigmaX=10 ): image = cv2.imread(path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #image = crop_image_from_gray(image) image = cv2.resize(image, (IMG_SIZE, IMG_SIZE)) image = cv2.addWeighted ( image,4, cv2.GaussianBlur( image , (0,0) , sigmaX) ,-4 ,128) return image def load_ben_color_cropped(path, IMG_SIZE, sigmaX=10, tol=7): image = cv2.imread(path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = crop_image_from_gray(image, tol=tol) image = cv2.resize(image, (IMG_SIZE, IMG_SIZE)) image = cv2.addWeighted ( image,4, cv2.GaussianBlur( image , (0,0) , sigmaX) ,-4 ,128) return image def load_ben_gray(path): image = cv2.imread(path) image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) image = cv2.resize(image, (IMG_SIZE, IMG_SIZE)) image = 
cv2.addWeighted( image, 4, cv2.GaussianBlur(image, (0, 0), IMG_SIZE / 10), -4, 128 ) # <NAME>'s preprocessing method [1] ## (IMG_SIZE, IMG_SIZE) -> (IMG_SIZE, IMG_SIZE, 3) image = image.reshape(IMG_SIZE, IMG_SIZE, 1) image = np.repeat(image, 3, axis=-1) return image def crop_image_from_gray(img,tol=7): # pdb.set_trace() if img.ndim ==2: mask = img>tol return img[np.ix_(mask.any(1),mask.any(0))] elif img.ndim==3: gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mask = gray_img>tol check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0] if (check_shape == 0): # image is too dark so that we crop out everything, return img # return original image else: img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))] img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))] img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))] # print(img1.shape,img2.shape,img3.shape) img = np.stack([img1,img2,img3],axis=-1) # print(img.shape) return img # - names = train_df.id_code.values save_folder = '../data/train_images/' IMG_SIZE = 300 mkdir('../data/train_images/bgcc300/') train_df.shape # this thing is slow at start (the png and tif images) then speeds up after ~5k iterations (the jpegs) for idx, row in tqdm(train_df.iterrows()): name = row.id_code path = row.path image = load_ben_color_cropped(path, IMG_SIZE, tol=8) np.save(os.path.join(save_folder, 'bgcc300', name + '.npy'), image) # ### Analysis image = np.load('../data/train_images/bgcc300/000c1434d8d7.npy') plt.imshow(image) npy_files = glob('../data/train_images/npy_bengrahm_color/*.npy') npy = np.random.choice(npy_files) print(npy) image = np.load(npy) plt.imshow(image); # idx=0 idx+=1 #print(idx) #fname = names[idx] fname = "20060523_50153_0100_PP" image_folder = '../external_data/messidor/train_images/' img_path = os.path.join(images_folder, fname + ".tif") image_org = load_ben_color(img_path) image_cropped = load_ben_color_cropped(img_path, tol=10) plt.figure(figsize=(15, 15)) plt.subplot(1, 2, 1) plt.imshow(image_org, cmap="gray") label 
= '' #label = str(train_df.iloc[idx]['diagnosis']) plt.title('label: ' + label) plt.subplot(1, 2, 2) plt.imshow(image_cropped, cmap="gray") plt.title('label: ' + label) plt.show(); image = cv2.imread(img_path) image = cv2.resize(image, (IMG_SIZE, IMG_SIZE)) gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) mask = gray_img> 8 plt.imshow(mask) check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0] if (check_shape == 0): # image is too dark so that we crop out everything, return img # return original image else: img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))] img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))] img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))] # print(img1.shape,img2.shape,img3.shape) img = np.stack([img1,img2,img3],axis=-1) # #### messidor xls to csv # + # combining messidors label xls files into a csv file import pandas as pd from glob import glob # filenames excel_names = glob('../external_data/messidor/*.xls') # read them in excels = [pd.ExcelFile(name) for name in excel_names] # turn them into dataframes frames = [x.parse(x.sheet_names[0], header=None,index_col=None) for x in excels] # delete the first row for all frames except the first # i.e. remove the header row -- assumes it's the first frames[1:] = [df[1:] for df in frames[1:]] # concatenate them.. combined = pd.concat(frames) # write it out #combined.to_excel("c.xlsx", header=False, index=False) # - combined.head() combined.shape combined.to_csv('../external_data/messidor/train.csv', header=None, index=False) combined['id_code'] = combined['Image name'].apply(lambda x: x.split('.')[0]) combined['diagnosis'] = combined['Retinopathy grade'] combined = combined.drop(columns=['Image name', 'Ophthalmologic department', 'Retinopathy grade', 'Risk of macular edema '], axis=0) combined.to_csv('../external_data/messidor/combinedsidor.csv')
notebooks/.ipynb_checkpoints/prepare_npy-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import ast

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns


class Metrics:
    """Parse a collection of training-log files and aggregate their metrics.

    Each log filename is expected to look like ``<iters>_<buffer_size>_<suffix>``;
    repeated runs that share the same (iters, buffer_size) pair are averaged
    when building the summary DataFrame.
    """

    def __init__(self, logs):
        # logs: iterable of log filenames in the current directory.
        self.logs = logs
        # Mapping (iters, buffer_size) -> list of per-run metric tuples.
        self.raw_metrics = self.calculate_metrics(logs)

    def extract_metrics(self, filename):
        """Return (accuracy, training_time, gpu_usage, gpu_mem) parsed from one log.

        Any metric whose marker line is absent from the log is returned as
        None instead of raising UnboundLocalError (a latent bug in the
        original, which only pre-initialized the GPU fields).
        """
        accuracy = None
        training_time = None
        gpu_usage = None
        gpu_mem = None
        # Context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open(filename, 'r') as f:
            for line in f:
                if "=> Average precision" in line:
                    accuracy = float(line.split(" ")[-1])
                if "=> Total training time" in line:
                    training_time = float(line.split(" ")[-2])
                if "GPU BEFORE EVALUATION:" in line:
                    # The log stores a literal "(usage, mem)" tuple after a
                    # 22-character marker; ast.literal_eval parses it without
                    # the arbitrary-code-execution risk of eval().
                    gpu_usage, gpu_mem = ast.literal_eval(line[22:])
        return (accuracy, training_time, gpu_usage, gpu_mem)

    def calculate_metrics(self, files):
        """Group per-file metric tuples by their (iters, buffer_size) key."""
        metrics = {}
        for file in files:
            iters, g_fc_uni, _ = file.split("_")
            key = (int(iters), int(g_fc_uni))
            metrics.setdefault(key, []).append(self.extract_metrics(file))
        return metrics

    def get_metrics_df(self):
        """Return a DataFrame with one row per key, averaging repeated runs."""
        rows = []
        for key, vals in self.raw_metrics.items():
            # zip(*vals) transposes [(acc, t, gpu, mem), ...] into one column
            # per metric so each metric can be averaged across runs.
            rows.append([key[0], key[1]] + [np.mean(col) for col in zip(*vals)])
        return pd.DataFrame(
            rows,
            columns=["iters", "buffer_size", "Accuracy", "Training time (s)",
                     "GPU Usage (%)", "GPU Memory (MB)"])


def plot_metric_heatmap(df, value):
    """Draw an iters x buffer_size heatmap of *value*.

    Shared by the three result sections below (the original repeated this
    cell verbatim three times).
    """
    fig, ax = plt.subplots(figsize=(8, 5))  # Sample figsize in inches
    sns.heatmap(df.pivot('iters', 'buffer_size', value),
                annot=True, linewidths=.5, ax=ax)
    # Source for the code below: https://github.com/mwaskom/seaborn/issues/1773
    # (works around matplotlib clipping the first/last heatmap rows).
    b, t = plt.ylim()
    b += 0.5
    t -= 0.5
    plt.ylim(b, t)
    ax.invert_yaxis()
    plt.show()


# # NR Tuning Results

# %cd /Users/samuilstoychev/Desktop/research_project_repo/research_project/experiments/vgg_nr_tuning/2021-04-30-03-17/

# logs = !ls
# NOTE(review): `logs` is produced by the `!ls` magic above, which jupytext
# comments out — this script only runs inside a notebook session.
metrics = Metrics(logs)
df = metrics.get_metrics_df()

# ## Accuracy

plot_metric_heatmap(df, 'Accuracy')

# ## GPU Memory Usage (MB)

plot_metric_heatmap(df, 'GPU Memory (MB)')

# ## Average GPU Usage (%)

plot_metric_heatmap(df, 'GPU Usage (%)')
analysis/VGG_NR_Tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] Collapsed="false" # # Building and Testing Recommender Systems With Surprise # + [markdown] Collapsed="false" # Surprise is a Python scikit building and analyzing recommender systems that deal with explicit rating data. # # <http://surpriselib.com/> # + Collapsed="false" import pandas as pd import matplotlib as plt import seaborn as sns # + Collapsed="false" movies = pd.read_csv(r'C:\Users\delchain_default\Documents\GitHub\Python-Notes\Machine Learning\Recommender System (Advanced)\Movie_data.csv') movies.head() # + Collapsed="false" users = movies.groupby('user_id')['rating'].count().reset_index().sort_values('rating', ascending=False) users # + [markdown] Collapsed="false" # Most of the users gave less than 5 ratings, and very few users gave many ratings, although the most productive user have given 13,602 ratings. # # I'm sure you have noticed that the above two charts share the same distribution. The number of ratings per movie and the bnumber of ratings per user decay exponentially. # # To reduce the dimensionality of the dataset, we will filter out rarely rated movies and rarely rating users. 
# + Collapsed="false" min_movie_ratings = 50 filter_movies = movies['title'].value_counts() > min_movie_ratings filter_movies = filter_movies[filter_movies].index.tolist() min_user_ratings = 50 filter_users = movies['user_id'].value_counts() > min_user_ratings filter_users = filter_users[filter_users].index.tolist() df = movies[(movies['title'].isin(filter_movies)) & (movies['user_id'].isin(filter_users))] print('The original data frame shape:\t{}'.format(movies.shape)) print('The new data frame shape:\t{}'.format(df.shape)) # + Collapsed="false" df['title'].nunique() # + Collapsed="false" df # + [markdown] Collapsed="false" # ## Surprise # + [markdown] Collapsed="false" # To load a data set from the above pandas data frame, we will use the load_from_df() method, we will also need a Reader object, and the rating_scale parameter must be specified. The data frame must have three columns, corresponding to the user ids, the item ids, and the ratings in this order. Each row thus corresponds to a given rating. # + [markdown] Collapsed="false" # # + Collapsed="false" from surprise import Reader from surprise import Dataset from surprise.model_selection import cross_validate from surprise import NormalPredictor from surprise import KNNBasic from surprise import KNNWithMeans from surprise import KNNWithZScore from surprise import KNNBaseline from surprise import SVD from surprise import BaselineOnly from surprise import SVDpp from surprise import NMF from surprise import SlopeOne from surprise import CoClustering from surprise.accuracy import rmse from surprise import accuracy from surprise.model_selection import train_test_split # + Collapsed="false" reader = Reader(rating_scale=(1, 5)) data = Dataset.load_from_df(df[['user_id', 'title', 'rating']], reader) # + [markdown] Collapsed="false" # The Reader class is used to parse a file containing ratings. 
# # Such a file is assumed to specify only one rating per line, and each line needs to respect the following structure: # + Collapsed="false" benchmark = [] # Iterate over all algorithms for algorithm in [SVD(), SVDpp(),SlopeOne(), NormalPredictor(), KNNBaseline(), KNNBasic(), KNNWithMeans(), KNNWithZScore(), BaselineOnly(), CoClustering()]: # NMF() # Perform cross validation results = cross_validate(algorithm, data, measures=['RMSE'], cv=3, verbose=False) # Get results & append algorithm name tmp = pd.DataFrame.from_dict(results).mean(axis=0) tmp = tmp.append(pd.Series([str(algorithm).split(' ')[0].split('.')[-1]], index=['Algorithm'])) benchmark.append(tmp) # + Collapsed="false" surprise_results = pd.DataFrame(benchmark).set_index('Algorithm').sort_values('test_rmse') surprise_results # + Collapsed="false" # + [markdown] Collapsed="false" # ## Train and Predict # + [markdown] Collapsed="false" # SVDpp produces the best results, however it it very time consuming. lets train and predict using KNNBaseline # + Collapsed="false" print('Using KNNBaseline') bsl_options = {'method': 'als', 'n_epochs': 5, 'reg_u': 12, 'reg_i': 5 } algo = KNNBaseline() cross_validate(algo, data, measures=['RMSE'], cv=3, verbose=False) # + [markdown] Collapsed="false" # We use the train_test_split() to sample a trainset and a testset with given sizes, and use the accuracy metric of rmse. We’ll then use the fit() method which will train the algorithm on the trainset, and the test() method which will return the predictions made from the testset. # + Collapsed="false" trainset, testset = train_test_split(data, test_size=0.25) algo = KNNBaseline() predictions = algo.fit(trainset).test(testset) accuracy.rmse(predictions) # + [markdown] Collapsed="false" # To inspect our predictions in details, we are going to build a pandas data frame with all the predictions. The following code were largely taken from this notebook. 
# + Collapsed="false"
trainset = algo.trainset
print(algo.__class__.__name__)

# + [markdown] Collapsed="false"
# To inspect our predictions in details, we are going to build a pandas data frame with all the predictions.

# + Collapsed="false"
def get_Iu(user_id):
    """Return the number of items rated by the given user.

    Args:
        user_id: the id of the user.

    Returns:
        The number of items the user has rated, or 0 if the user was not
        part of the trainset.
    """
    try:
        return len(trainset.ur[trainset.to_inner_uid(user_id)])
    except ValueError:
        # user was not part of the trainset
        return 0


def get_Ui(item_id):
    """Return the number of users that have rated the given item.

    Args:
        item_id: the title of the movie.

    Returns:
        The number of users that have rated the item, or 0 if the item was
        not part of the trainset.
    """
    try:
        return len(trainset.ir[trainset.to_inner_iid(item_id)])
    except ValueError:
        # item was not part of the trainset
        return 0


df1 = pd.DataFrame(predictions, columns=['user_id', 'item_id', 'real', 'est', 'details'])
# Column-name typo fixed: was 'Item_USer' (the column is only used within
# this notebook, so the rename is safe).
df1['Item_User'] = df1.user_id.apply(get_Iu)
df1['User_Item'] = df1.item_id.apply(get_Ui)
df1['err'] = abs(df1.est - df1.real)

# + Collapsed="false"
df1.head()

# + Collapsed="false"
# what is 'details' ?
# what i am wondering: if i create a user with a couple of movies ratings, how do i issue recommendations for this user?
# we used cross validation in order to validate
# how can we optimize the knn parameters? is there a GridSearch method available to improve the fit?

# + Collapsed="false"
best_predictions = df1.sort_values(by='err')[:20]
worst_predictions = df1.sort_values(by='err')[-20:]

# + Collapsed="false"
df1.shape

# + Collapsed="false"
best_predictions

# + Collapsed="false"
worst_predictions

# + [markdown] Collapsed="false"
#

# + Collapsed="false"

# + Collapsed="false"

# + [markdown] Collapsed="false"
#
Machine Learning/Recommender System (Advanced)/Recommender with Surprise - Movies.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import torch
import networkx as nx
from dgl.data import CoraGraphDataset
import matplotlib.pyplot as plt


def plot_graph(graph):
    """Draw the full DGL graph with networkx."""
    graph = graph.to_networkx()
    plt.figure(figsize=(26, 12))
    nx.draw(graph, node_size=50)
    plt.show()


dataset = CoraGraphDataset()
graph = dataset[0]
A = graph.adj().to_dense()  # dense (N, N) adjacency matrix
A.shape

plot_graph(graph)


def get_k_hop_neighbors(target, adj, level):
    """Return the ids of nodes reachable from *target* in *level* steps.

    adj**level counts walks of length `level`, so nonzero entries in the
    target's row mark the nodes a `level`-layer GCN message can reach.
    NOTE(review): this is "reachable in exactly `level` steps"; to get
    "within `level` steps" one would power (adj + I) instead — confirm
    which semantics the plots are meant to show.
    """
    walk_counts = torch.matrix_power(adj, level)
    return torch.nonzero(walk_counts[target], as_tuple=False).flatten()


# Backward-compatible alias for the original, misspelled name.
get_k_hop_neihgbors = get_k_hop_neighbors


def plot_k_neighbors(graph, target, adj, level):
    """Highlight *target* (orange) and its `level`-hop neighborhood (crimson).

    All other nodes are drawn in blue; the figure is also saved to
    ../images/gcn-<level>-layers.png.
    """
    gnx = graph.to_networkx().to_undirected()
    neighbors = get_k_hop_neighbors(target, adj, level)
    node_colors = []
    for node in gnx.nodes():
        if node == target:
            node_colors.append('darkorange')
        elif node in neighbors:
            node_colors.append('crimson')
        else:
            node_colors.append('royalblue')
    plt.figure(figsize=(26, 12))
    plt.title('What each node in GCN sees for {} layers'.format(level))
    nx.draw(gnx, node_size=60, node_color=node_colors)
    plt.savefig('../images/gcn-{}-layers.png'.format(level))
    plt.show()


plot_k_neighbors(graph, target=450, adj=A, level=2)

plot_k_neighbors(graph, target=450, adj=A, level=10)
notebooks/what-gcn-see.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # # Vector Multiplication # Vector multiplication can be performed in three ways: # # - Scalar Multiplication # - Dot Product Multiplication # - Cross Product Multiplication # # ## Scalar Multiplication # Let's start with *scalar* multiplication - in other words, multiplying a vector by a single numeric value. # # Suppose I want to multiply my vector by 2, which I could write like this: # # \begin{equation} \vec{w} = 2\vec{v}\end{equation} # # Note that the result of this calculation is a new vector named **w**. So how would we calculate this? # Recall that **v** is defined like this: # # \begin{equation}\vec{v} = \begin{bmatrix}2 \\ 1 \end{bmatrix}\end{equation} # # To calculate 2v, we simply need to apply the operation to each dimension value in the vector matrix, like this: # # \begin{equation}\vec{w} = \begin{bmatrix}2 \cdot 2 \\ 2 \cdot 1 \end{bmatrix}\end{equation} # # Which gives us the following result: # # \begin{equation}\vec{w} = \begin{bmatrix}2 \cdot 2 \\ 2 \cdot 1 \end{bmatrix} = \begin{bmatrix}4 \\ 2 \end{bmatrix}\end{equation} # # In Python, you can apply these sort of matrix operations directly to numpy arrays, so we can simply calculate **w** like this: # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import math v = np.array([2,1]) w = 2 * v print(w) # Plot w origin = [0], [0] plt.grid() plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0)) plt.quiver(*origin, *w, scale=10) plt.show() # - # The same approach is taken for scalar division. 
# # Try it for yourself - use the cell below to calculate a new vector named **b** based on the following definition: # # \begin{equation}\vec{b} = \frac{\vec{v}}{2}\end{equation} # + b = v / 2 print(b) # Plot b origin = [0], [0] plt.axis('equal') plt.grid() plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0)) plt.quiver(*origin, *b, scale=10) plt.show() # - # ## Dot Product Multiplication # So we've seen how to multiply a vector by a scalar. How about multiplying two vectors together? There are actually two ways to do this depending on whether you want the result to be a *scalar product* (in other words, a number) or a *vector product* (a vector). # # To get a scalar product, we calculate the *dot product*. This takes a similar approach to multiplying a vector by a scalar, except that it multiplies each component pair of the vectors and sums the results. To indicate that we are performing a dot product operation, we use the &bull; operator: # # \begin{equation} \vec{v} \cdot \vec{s} = (v_{1} \cdot s_{1}) + (v_{2} \cdot s_{2}) ... + \; (v_{n} \cdot s_{n})\end{equation} # # So for our vectors **v** (2,1) and **s** (-3,2), our calculation looks like this: # # \begin{equation} \vec{v} \cdot \vec{s} = (2 \cdot -3) + (1 \cdot 2) = -6 + 2 = -4\end{equation} # # So the dot product, or scalar product, of **v** &bull; **s** is **-4**. # # In Python, you can use the *numpy.**dot*** function to calculate the dot product of two vector arrays: # + import numpy as np v = np.array([2,1]) s = np.array([-3,2]) d = np.dot(v,s) print (d) # - # In Python 3.5 and later, you can also use the **@** operator to calculate the dot product: # + import numpy as np v = np.array([2,1]) s = np.array([-3,2]) d = v @ s print (d) # - # ### The Cosine Rule # An useful property of vector dot product multiplication is that we can use it to calculate the cosine of the angle between two vectors. 
We could write the dot products as: # # $$ \vec{v} \cdot \vec{s} = \|\vec{v} \|\|\vec{s}\| \cos (\theta) $$ # # Which we can rearrange as: # # $$ \cos(\theta) = \frac{\vec{v} \cdot \vec{s}}{\|\vec{v} \|\|\vec{s}\|} $$ # # So for our vectors **v** (2,1) and **s** (-3,2), our calculation looks like this: # # $$ \cos(\theta) = \frac{(2 \cdot-3) + (-3 \cdot 2)}{\sqrt{2^{2} + 1^{2}} \times \sqrt{-3^{2} + 2^{2}}} $$ # # So: # # $$\cos(\theta) = \frac{-4}{8.0622577483}$$ # # Which calculates to: # # $$\cos(\theta) = -0.496138938357 $$ # # So: # # $$\theta \approx 119.74 $$ # # Here's that calculation in Python: # + import math import numpy as np # define our vectors v = np.array([2,1]) s = np.array([-3,2]) # get the magnitudes vMag = np.linalg.norm(v) sMag = np.linalg.norm(s) # calculate the cosine of theta cos = (v @ s) / (vMag * sMag) # so theta (in degrees) is: theta = math.degrees(math.acos(cos)) print(theta) # - # ## Cross Product Multiplication # To get the *vector product* of multipying two vectors together, you must calculate the *cross product*. The result of this is a new vector that is at right angles to both the other vectors in 3D Euclidean space. This means that the cross-product only really makes sense when working with vectors that contain three components. # # For example, let's suppose we have the following vectors: # # \begin{equation}\vec{p} = \begin{bmatrix}2 \\ 3 \\ 1 \end{bmatrix}\;\; \vec{q} = \begin{bmatrix}1 \\ 2 \\ -2 \end{bmatrix}\end{equation} # # To calculate the cross product of these vectors, written as **p** x **q**, we need to create a new vector (let's call it **r**) with three components (r<sub>1</sub>, r<sub>2</sub>, and r<sub>3</sub>). 
# The values for these components are calculated like this:
#
# \begin{equation}r_{1} = p_{2}q_{3} - p_{3}q_{2}\end{equation}
# \begin{equation}r_{2} = p_{3}q_{1} - p_{1}q_{3}\end{equation}
# \begin{equation}r_{3} = p_{1}q_{2} - p_{2}q_{1}\end{equation}
#
# So in our case:
#
# \begin{equation}\vec{r} = \vec{p} \times \vec{q} = \begin{bmatrix}(3 \cdot -2) - (1 \cdot 2) \\ (1 \cdot 1) - (2 \cdot -2) \\ (2 \cdot 2) - (3 \cdot 1) \end{bmatrix} = \begin{bmatrix}-6 - 2 \\ 1 - -4 \\ 4 - 3 \end{bmatrix} = \begin{bmatrix}-8 \\ 5 \\ 1 \end{bmatrix}\end{equation}
#
# In Python, you can use the *numpy.**cross*** function to calculate the cross product of two vector arrays:

# +
import numpy as np

# Build the two three-component vectors and let numpy compute the cross product.
p = np.array([2, 3, 1])
q = np.array([1, 2, -2])

r = np.cross(p, q)
print(r)
# -
MathsToML/Module03-Vectors and Matrices/03-02-Vector Multiplication.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Physical Distancing Detector

# Importing Libraries - Make sure opencv, tensorflow and scipy is installed
import os
import cv2 as cv
import numpy as np
from scipy.spatial.distance import cdist
import argparse
import itertools
import tensorflow as tf

# ## Person Detection

# + active=""
# Input - Tensorflow object detection model(.tflite)
# Optional Input(s) - person detection thresold value
#                     tensor input shape - current model uses 320x320 image
# Output - Prediction dictionary with detection boxes, confidence scores and object classes
# -

class Person_detection(object):
    """Thin wrapper around a TFLite object-detection model.

    Loads ``models/<model_name>`` and exposes :meth:`predict`, which maps a
    single frame to a dictionary of boxes, classes and scores.
    """

    def __init__(self, model_name, min_threshold=0.40, input_shape=(320, 320)):
        # initialize threshold values, interpreter and tensor details
        self.min_score_threshold = min_threshold
        self.model = os.path.join('models', model_name)
        self.interpreter = tf.lite.Interpreter(model_path=self.model)
        self.input_tensor = self.interpreter.get_input_details()
        self.output_tensor = self.interpreter.get_output_details()
        self.interpreter.allocate_tensors()

    def predict(self, frame):
        """Run inference on one frame.

        Returns a dict with 'det_boxes' (normalized coordinates),
        'det_classes' and 'det_scores' for every detection.
        """
        # match the dtype the interpreter expects (float vs quantized model)
        if self.input_tensor[0]['dtype'] == np.float32:
            dtype_model = tf.float32
        else:
            dtype_model = tf.uint8
        input_tensor = tf.convert_to_tensor(frame, dtype=dtype_model)
        input_tensor = input_tensor[tf.newaxis, ...]
        self.interpreter.set_tensor(self.input_tensor[0]['index'], input_tensor)
        self.interpreter.invoke()
        det_box = tf.convert_to_tensor(self.interpreter.get_tensor(self.output_tensor[0]['index']))
        det_class = tf.convert_to_tensor(self.interpreter.get_tensor(self.output_tensor[1]['index']))
        det_score = tf.convert_to_tensor(self.interpreter.get_tensor(self.output_tensor[2]['index']))
        # convert tensor objects to numpy arrays; the +1 shifts class ids so
        # that "person" is class 1 (matches the check in cleanup())
        det_class = tf.squeeze(det_class, axis=[0]).numpy().astype(np.int64) + 1
        det_box = tf.squeeze(det_box, axis=[0]).numpy()
        det_score = tf.squeeze(det_score, axis=[0]).numpy()
        return {
            'det_boxes': det_box,
            'det_classes': det_class,
            'det_scores': det_score
        }


# Physical distancing detection function which uses euclidean distance
# between the centroids of each person.
def physical_distance_detection(prediction, dist_threshold, frame):
    """Flag every pair of people closer than ``dist_threshold`` pixels.

    Draws a red arrowed line on ``frame`` between each breaching pair and
    returns a list of booleans (one per detection) marking who is in breach.
    """
    detection = [False] * len(prediction['det_boxes'])
    centroids = []
    red_color = (0, 0, 255)
    # calculate centroid value of each bounding box / person
    # (boxes are [y_min, x_min, y_max, x_max] in pixels after cleanup())
    for boxes in prediction['det_boxes']:
        centroids.append(((boxes[1] + boxes[3]) / 2, (boxes[0] + boxes[2]) / 2))
    # calculate the euclidean distance between each pair of centroids
    for ((x, x1), (y, y1)) in itertools.combinations(enumerate(centroids), 2):
        if detection[x] and detection[y]:
            # both persons already flagged; skip to avoid redundant drawing
            continue
        if cdist([x1], [y1], 'euclidean')[0][0] < dist_threshold:
            detection[x] = True
            detection[y] = True
            frame = cv.arrowedLine(frame, (int(x1[0]), int(x1[1])),
                                   (int(y1[0]), int(y1[1])), red_color, 6)
    return detection


# ## Post Processing

# Clean Up code to remove unwanted detections - like low confidence scores,
# objects other than person and objects with a bounding box too large
# considering the frame and camera view
def cleanup(prediction, image_w, image_h):
    """Filter a raw prediction dict in place and scale boxes to pixels.

    Drops non-person classes, detections scoring below 0.5, and boxes too
    large for the camera view; rescales the surviving boxes from normalized
    coordinates to [y_min, x_min, y_max, x_max] pixels.
    """
    # a set avoids the duplicate indices the old list-based version collected
    delete_ids = set()
    for i in range(len(prediction['det_classes'])):
        # select only person objects (class 1)
        if prediction['det_classes'][i] != 1:
            delete_ids.add(i)
        # select only objects with scores greater than threshold
        if prediction['det_scores'][i] < 0.5:
            delete_ids.add(i)
        x_min, y_min = int(prediction['det_boxes'][i][1] * image_w), int(prediction['det_boxes'][i][0] * image_h)
        x_max, y_max = int(prediction['det_boxes'][i][3] * image_w), int(prediction['det_boxes'][i][2] * image_h)
        prediction['det_boxes'][i] = [y_min, x_min, y_max, x_max]
        # reject boxes implausibly large for a single person in this view
        if (x_max - x_min > image_w / 3) or (y_max - y_min > image_h / 2):
            delete_ids.add(i)
    cleaned_list = sorted(delete_ids)
    prediction['det_classes'] = np.delete(prediction['det_classes'], cleaned_list, axis=0)
    prediction['det_boxes'] = np.delete(prediction['det_boxes'], cleaned_list, axis=0)
    prediction['det_scores'] = np.delete(prediction['det_scores'], cleaned_list, axis=0)
    return prediction


# Draw bounding box rectangles around detected persons using OpenCV functions
def draw_rect(image, box, image_w, image_h, detection=False):
    """Draw one bounding box: red if the person breaches distancing, else green."""
    red_color = (0, 0, 255)
    green_color = (0, 255, 0)
    # clamp the box to the image bounds
    y_min = int(max(1, box[0]))
    x_min = int(max(1, box[1]))
    y_max = int(min(image_h, box[2]))
    x_max = int(min(image_w, box[3]))
    # draw a rectangle on the image
    if detection:
        cv.rectangle(image, (x_min, y_min), (x_max, y_max), red_color, 2)
    else:
        cv.rectangle(image, (x_min, y_min), (x_max, y_max), green_color, 2)


# ## Main Function

# Entry point
# Can be executed as a script with 3 optional arguments
# args [-d] - predefined threshold for physical distancing (in pixel, depending on the video or stream source and fov)
#      [-i] - input video filename with path
#      [-o] - output video filename
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', type=int, required=False, dest='dist', default=150,
                        help="physical distancing threshold distance in pixel")
    parser.add_argument('-i', type=str, required=False, dest='video',
                        default='PDD_demo.avi', help="input video file name")
    parser.add_argument('-o', type=str, required=False, dest='op',
                        default='PDD_ouput_demo.avi', help="output video file name")
    args, unknown = parser.parse_known_args()
    dist_threshold = args.dist
    input_video = args.video

    # load the input Video
    vid = cv.VideoCapture(input_video)
    # Calculate the height and width of the stream
    image_w = int(vid.get(cv.CAP_PROP_FRAME_WIDTH))
    image_h = int(vid.get(cv.CAP_PROP_FRAME_HEIGHT))
    # Storing the output to an avi video
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    out = cv.VideoWriter(args.op, fourcc, 24, (image_w, image_h), True)
    # Load the detection model with threshold
    people_model = Person_detection('model.tflite', 0.5)
    try:
        while vid.isOpened():
            ret_val, frame = vid.read()
            if frame is None or frame.size == 0:
                break
            # Resize the image to the tensor shape expected by the loaded model
            prediction = people_model.predict(cv.resize(frame, (320, 320)))
            # Cleanup non person and weak predictions
            person_prediction = cleanup(prediction, image_w, image_h)
            # Run the physical distance detector for each person
            # dist_threshold is the minimum distance between persons to consider breach
            detection = physical_distance_detection(person_prediction, dist_threshold, frame)
            # Draw the rectangle bounding boxes
            for i in range(len(person_prediction['det_boxes'])):
                draw_rect(frame, person_prediction['det_boxes'][i], image_w, image_h, detection[i])
            # Display the frame
            cv.imshow('PDD_DEMO', frame)
            out.write(frame)
            if cv.waitKey(1) == 27 or ret_val is False:
                break
    finally:
        # BUG FIX: the capture device was never released in the original;
        # destroyAllWindows() is also safe when the window was never created
        vid.release()
        if out is not None:
            out.release()
        cv.destroyAllWindows()
PDD_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![image](https://colab.research.google.com/assets/colab-badge.svg)](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/18_point_layer.ipynb) # [![image](https://binder.pangeo.io/badge_logo.svg)](https://gishub.org/leafmap-pangeo) # # **Adding a point layer with popup attributes to the map** # # The notebook requires the ipyleaflet plotting backend. Folium is not supported. The point dataset can be any geopandas-supported file stored locally or online. # # # Uncomment the following line to install [leafmap](https://leafmap.org) if needed. # + # # !pip install leafmap # - from leafmap import leafmap # Use the toolbar GUI to open a point-type vector dataset. m = leafmap.Map() m # Display a single popup attribute. m = leafmap.Map() m.add_point_layer("../data/us_cities.geojson", popup="name", layer_name="US Cities") m # Display multiple popup attributes. m = leafmap.Map() url= "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_cities.geojson" m.add_point_layer(url, popup=["name", "pop_max"], layer_name="US Cities") m # ![](https://i.imgur.com/1QVEtlN.gif)
examples/notebooks/18_point_layer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import random
from functools import reduce


# # 1. Compute area of this.
# ![area](area.png)
def computeArea(x):
    """Return the shaded area for side length x: the square's area minus one eighth of it."""
    return x**2 - x**2 / 8


# # 2. Get Fibonacci at given number.
def Fib(n=None):
    """Return the first ``n`` Fibonacci numbers.

    Backward-compatible generalization: ``Fib()`` still prompts interactively,
    while ``Fib(n)`` can now be called programmatically. Mirrors the original
    contract exactly: n == 0 or n == 1 returns the scalar 1, while n >= 2
    returns the full list of n terms.
    """
    if n is None:
        # original behavior: ask the user when no count is supplied
        n = int(input("number of iteration of fibonacci: "))
    a = [1, 1]
    if n == 0:
        return a[0]
    elif n == 1:
        return a[1]
    for i in range(2, n):
        a.append(a[i - 1] + a[i - 2])
    return a


# # 3. Generate list of unique random numbers.
def uniqueRandom():
    """Draw 300 random ints in [1, 99] and return the unique values in first-seen order."""
    draws = [random.randrange(1, 100) for _ in range(300)]
    # dict keys preserve insertion order (Python 3.7+), giving the same
    # first-seen-order result as the old reduce()/append trick
    return list(dict.fromkeys(draws))


# # Result
# Guarded so that importing this module no longer blocks on input().
if __name__ == "__main__":
    in_x = int(input("calculate area of x: "))
    print(computeArea(in_x))
    print(Fib())
    print("Unique Random: ")
    print(uniqueRandom())
class2/Functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Audio file analysis # # This notebook does some very basic analysis of an audio file like # waveform plotting, spectrum display, etc. The content is # inspired by the video lectures of <NAME> from The Sound of AI. # # A 25 second sample of the song "I ran (So Far away)" by "A Flock of Seagulls" # was taken from the song's Wikipedia page, the link of which is given below. # # Following operations are performed in this notebook: # * Waveform plotting # * PSD plot # * Spectrogram # * MFCC # + pycharm={"name": "#%%\n", "is_executing": false} import librosa, librosa.display import matplotlib.pyplot as plt import numpy as np # Audio file source: https://en.wikipedia.org/wiki/File:A_Flock_of_Seagulls-I_Ran_So_Far_Away_sample.ogg file = "data/A_Flock_of_Seagulls-I_Ran_So_Far_Away_sample.ogg" # + [markdown] pycharm={"name": "#%% md\n"} # ## Simple Waveform plotting # ### Time vs Amplitude # + pycharm={"name": "#%%\n", "is_executing": false} # Simple Waveform plotting # Sample rate sr is 22050 # Total no. 
of samples = sr * T = 22050 per sec * 25 sec signal, sr = librosa.load(file) print("Sample rate: ", sr) librosa.display.waveplot(signal, sr=sr) plt.xlabel("Time") plt.ylabel("Amplitude") plt.savefig("img/plot_wave.png", format="png", dpi=200) plt.show() # - # ## PSD plot # ### Frequency vs Magnitude # * #### Full plot # + pycharm={"name": "#%%\n", "is_executing": false} fft = np.fft.fft(signal) magnitude = np.abs(fft) frequency = np.linspace(0, sr, len(magnitude)) plt.plot(frequency, magnitude) plt.xlabel("Frequency") plt.ylabel("Magnitude") plt.title("PSD Full") plt.savefig("img/plot_PSD-Full.png", format="png", dpi=200) plt.show() # - # * #### Half plot # <p> # The graph repeats itself (mirrors) after half the frequency, # so we only take the left half of the complete frequency spectrum. # </p> # + pycharm={"name": "#%%\n", "is_executing": false} fft = np.fft.fft(signal) magnitude = np.abs(fft) frequency = np.linspace(0, sr, len(magnitude)) left_frequency = frequency[:int(len(frequency)/2)] left_magnitude = magnitude[:int(len(magnitude)/2)] plt.plot(left_frequency, left_magnitude) plt.xlabel("Frequency") plt.ylabel("Magnitude") plt.title("PSD Half") plt.savefig("img/plot_PSD-Half.png", format="png", dpi=200) plt.show() # - # ## Spectrogram # #### Frequency vs Time vs Magnitude # # We take the STFT of the signal, with `n_fft` number of points, # and a window length of `hop_length`. # # * ##### Without dB scale # + pycharm={"name": "#%%\n", "is_executing": false} n_fft = 2048 # No. 
of FFT points hop_length = 512 # Window length stft = librosa.core.stft(signal, hop_length=hop_length, n_fft=n_fft) spectrogram = np.abs(stft) librosa.display.specshow(spectrogram, sr=sr, hop_length=hop_length, x_axis='time', y_axis='linear') plt.xlabel("Time") plt.ylabel("Frequency") plt.title("Spectrogram") plt.colorbar() plt.savefig("img/plot_spectrogram.png", format="png", dpi=200) plt.show() # + [markdown] pycharm={"name": "#%% md\n"} # * ##### With dB scale # + pycharm={"name": "#%%\n", "is_executing": false} n_fft = 2048 # No. of FFT points hop_length = 512 # Window length stft = librosa.core.stft(signal, hop_length=hop_length, n_fft=n_fft) spectrogram = np.abs(stft) log_spectrogram = librosa.amplitude_to_db(spectrogram) librosa.display.specshow(log_spectrogram, sr=sr, hop_length=hop_length, x_axis='time', y_axis='log') plt.xlabel("Time") plt.ylabel("Frequency") plt.title("Spectrogram") plt.colorbar() plt.savefig("img/plot_log-spectrogram.png", format="png", dpi=200) plt.show() # + [markdown] pycharm={"name": "#%% md\n", "is_executing": false} # ## Mel Spectrogram # # A Mel Spectrogram is like a normal spectrogram, # but with mel filter banks as its y axis instead of frequency in Hz. # The normal frequency axis is scaled to this special mel filter banks by the following steps: # # * Calculate the spectrogram of signal # * Calculate the mel filter banks # * Multiply mel filter banks with spectrogram # - # First we get the mel filter banks: # + pycharm={"name": "#%%\n", "is_executing": false} n_mels=6 mel = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels) librosa.display.specshow(mel, sr=sr, hop_length=hop_length, x_axis='linear') plt.title("Mel Filter") plt.ylabel('Mel banks') plt.colorbar() #plt.colorbar(format='%+2.0f dB') plt.savefig("img/plot_mel.png", format="png", dpi=200) plt.show() # + [markdown] pycharm={"name": "#%% md\n"} # * The Mel filter banks have a shape of $[n\_mels, \frac{n\_fft}{2} + 1]$. 
# * There are $n\_mels$ number of bands, each consisting of $\frac{n\_fft}{2} + 1$ # (1025 in our case) number of points. # * All the bands can be visualized by plotting a graph for each: # + pycharm={"name": "#%%\n", "is_executing": false} idxs_to_plot = [0, 1, 2, 3, 4, 5] for i in idxs_to_plot: plt.plot(mel[i]) plt.title("Mel plot") plt.ylabel("Weight of Mel band") plt.savefig("img/plot_melplot.png", format="png", dpi=200) plt.show() # - # * The bands only cover a specific range of frequency points. # Rest all the points of any band have 0 values. # # We finally plot the Mel spectrogram by multiplying the # Mel filter banks with spectrogram of our signal. # ### Plotting Mel Spectrogram # We first plot Mel spectrogram using librosa's built-in function # `melspectrogram()` # # #### Using Built-in Function # * The signal is passed through `melspectrogram()`, which returns a spectrogram of mel type. # * We then take the absolute value and scale it to dB (log scale) # # + pycharm={"name": "#%%\n", "is_executing": false} # Using built-in melspectrogram function mel_spectrogram = librosa.feature.melspectrogram(signal, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels) log_melspectrogram = librosa.power_to_db(abs(mel_spectrogram)) librosa.display.specshow(log_melspectrogram, sr=sr, hop_length=hop_length, x_axis="time", y_axis="mel") plt.title("Mel spectrogram") plt.colorbar(format='%+2.0f dB') plt.savefig("img/plot_melspec_auto.png", format="png", dpi=200) plt.show() # - # #### Using Manual Method # # We now plot mel spectogram maually, without using the built-in function: # # * We calculate the Fourier transform of the signal frame by frame (STFT) # (as done previously) # * We then take the absolute value and then square it to obtain `spectro` # * The mel filter banks are calculated (as previously done) # and stored in `mel` variable # * The matrices `mel` (mel filter banks) and `spectro` # (the square of absolute of STFT) are multiplied in order to obtain # the 
final mel spectrogram `mat_mul` # * The multiplied result `mat_mul` is scaled to dB (log scale) to obtain `log_spec` (for plotting purposes) # + pycharm={"name": "#%%\n", "is_executing": false} # Manual method stft = librosa.core.stft(signal, hop_length=hop_length, n_fft=n_fft) spectro = np.power(np.abs(stft), 2) mat_mul = mel.dot(spectro) log_spec = librosa.power_to_db(mat_mul) # print("Shape of maunal matrix product result:", mat_mul.shape) # print("Shape of automatic mel spectogram", mel_spectrogram.shape) librosa.display.specshow(log_spec, sr=sr, x_axis="time", y_axis="mel") plt.title("Mel spectrogram") plt.colorbar(format='%+2.0f dB') plt.savefig("img/plot_melspec_manual.png", format="png", dpi=200) plt.show() # Check both automatic and manual matrices' dimensions # and values to be equal print("Automatic and manual method match: ", np.allclose(mat_mul, mel_spectrogram)) # - # ## MFCC # # Calculating MFCCs using librosa's built-in function `mfcc()` # + pycharm={"name": "#%%\n", "is_executing": false} mfcc = librosa.feature.mfcc(signal, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mfcc=13) print(len(mfcc.tolist())) librosa.display.specshow(mfcc, sr=sr, hop_length=hop_length, x_axis="time") #plt.xlabel("Time") plt.ylabel("MFCC") plt.title("MFCC") plt.colorbar(format="%+2.0f") plt.savefig("img/plot_mfcc.png", format="png", dpi=200) plt.show() # + pycharm={"name": "#%%\n", "is_executing": false} print((mfcc.tolist())) # + pycharm={"name": "#%%\n"}
1-basic-input-data-visualization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from selenium import webdriver
from bs4 import BeautifulSoup as soup
import re
import time
import csv
import datetime


def bookings_today(borough):
    """
    bookings_today(): given borough, visits current OpenTable search results
    page for that borough and extracts the number of bookings today for every
    restaurant

    args:
        borough: string, 'manhattan', 'bronx', 'queens', 'staten_island', or 'brooklyn'

    output:
        csv file named 'bookings_<borough>_<date>', where date format is 'YYYY-mm-dd'
        column headers are 'url' and <date>
    """
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    tomorrow = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    # dict of string to 'regionId' identifier number to be used in OpenTable search results url
    boroughs = {'manhattan': '16', 'bronx': '324', 'queens': '17',
                'staten_island': '18', 'brooklyn': '24'}
    sel_borough = boroughs[borough]

    url = f'https://www.opentable.com/s?dateTime={tomorrow}T22%3A00%3A00&covers=1&metroId=8&regionIds%5B0%5D={sel_borough}&neighborhoodIds%5B0%5D=&term=&page=1'
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        html = driver.page_source
        time.sleep(0.1)
    finally:
        # quit() (not close()) so the chromedriver process is terminated
        # even when an exception is raised above
        driver.quit()

    # calculates how many pages of search results there are
    results_soup = soup(html, 'html.parser')
    total_restaurants = int(re.search(r'\d+', results_soup.find('h3', attrs={"class": "_6X5n-Vu8eAbxx_nrEuxjc", "data-test": "multi-search-total-count"}).string).group(0))
    if total_restaurants % 100 == 0:
        num_results_pages = (total_restaurants / 100)
    else:
        num_results_pages = int((total_restaurants / 100) + 1)
    print(num_results_pages, ' pages of results')

    bookings_list = []
    # visits each search results page
    for i in range(0, int(num_results_pages)):
        page_i = f'https://www.opentable.com/s?dateTime={tomorrow}T22%3A00%3A00&covers=1&metroId=8&regionIds%5B0%5D={sel_borough}&neighborhoodIds%5B0%5D=&term=&page={i+1}'
        driver = webdriver.Chrome()
        try:
            driver.get(page_i)
            # scroll down page incrementally to load restaurant elements
            y = 500
            for timer in range(0, 70):
                driver.execute_script("window.scrollTo(0, " + str(y) + ")")
                y += 500
                time.sleep(0.05)
            page_i_html = driver.page_source
            time.sleep(0.1)
        finally:
            driver.quit()

        text = soup(page_i_html, 'html.parser')
        restaurants = text.find_all('div', attrs={"class": "_3uVfVbI1iLfMbszbU6KoOL"})
        for restaurant in restaurants:
            restaurant_child = restaurant.find('a', attrs={"class": "_1e9PcCDb012hY4BcGfraQB"})
            # get restaurant url
            rest_url = restaurant_child.get('href')
            # get number of bookings per day
            booked_raw = restaurant.find_all('span', attrs={"class": "_2VIffaVUDxw_-tEh-6XOB_ _2EluNCOTdgGq9H4SxGZwUg"})
            booked_today = 0
            if not (booked_raw is None):
                for span in booked_raw:
                    if 'Booked' in span.string:
                        booked_today = int(re.search(r'\d+', span.string).group(0))
            # zip url and bookings into dict
            bookings_keys = ['url', today]
            bookings_dict = dict(zip(bookings_keys, [None] * 2))
            bookings_dict['url'] = rest_url
            bookings_dict[today] = booked_today
            # append current restaurant dict to bookings_list
            bookings_list.append(bookings_dict)
        # NOTE: the original also incremented ``i`` by hand here; that was
        # dead code because ``for`` rebinds ``i`` each iteration — removed.

    # write bookings_list to csv
    headings_list = ['url', today]
    with open(f'bookings_{borough}_{today}.csv', 'w', encoding='utf-8', newline='') as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(headings_list)
        for item in bookings_list:
            csv_writer.writerow(item.values())


# Guarded so that importing this module does not launch a browser session.
if __name__ == "__main__":
    boroughs_list = ['manhattan', 'bronx', 'queens', 'staten_island', 'brooklyn']
    for item in boroughs_list:
        bookings_today(item)
scraper/nyc_daily_bookings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/microprediction/precise/blob/main/examples_colab_notebooks/partial_moments_cov_estimation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="U6Mky28wX5y8" # !pip install git+https://github.com/microprediction/precise.git # + [markdown] id="fY3DQOlwX-MU" # Illustrates use of the precise package # # # # 1. Pick a skater # 2. Run it # # # + colab={"base_uri": "https://localhost:8080/"} id="Gbi01UFRYJwb" outputId="a8accdd1-3f49-4080-fcbb-40208b247d3c" from precise.skatertools.syntheticdata.miscellaneous import create_correlated_dataset from precise.skaters.covariance.ewapm import ewa_pm_emp_scov_r01 # <- skater from pprint import pprint if __name__=='__main__': ys = create_correlated_dataset(n=500) s = {} # <-- Note the initialization of state as empty dict for y in ys: x, x_cov, s = ewa_pm_emp_scov_r01(s=s, y=y) pprint(x_cov) # + [markdown] id="M2UPPFKeYcTj" # If you care to peek you can look at the state too. In particular, this skater maintains four different covariance matrices using data conditioned on up/down moves. 
# + colab={"base_uri": "https://localhost:8080/"} id="G2Klm0PPYbUL" outputId="69452018-3f78-4759-877c-c7e9d954bae6" partials = ['cu','du','dl','cl'] for pt in partials: pprint((pt,s[pt]['scov'])) # <-- Partial sample covariance # + colab={"base_uri": "https://localhost:8080/"} id="C92FvAgXbK8T" outputId="4614e31c-567b-4125-d86a-12f77dc0b80d" # The reported cov estimate is almost the sum of these four import numpy as np c_check = np.zeros((3,3)) for pt in partials: c_check = c_check + s[pt]['scov'] pprint(c_check) print(c_check/x_cov) # + [markdown] id="ZydBwfuHdAhD" # You can find the code pretty easily, and there is a utility for that # + colab={"base_uri": "https://localhost:8080/"} id="I1Q6B70wb5nN" outputId="bb15c8cb-7684-4fe8-cc6f-90a4b9208eb0" from precise.whereami import url_from_skater_name print(url_from_skater_name('ewa_pm_emp_scov_r01')) # + [markdown] id="tAGehCACdZpG" # If you don't like it, pull requests are welcome! Here's a complete list of other covariance skaters you can try # + colab={"base_uri": "https://localhost:8080/"} id="XtrsbS1TdgRD" outputId="cd8550b8-382c-4ee8-b2fd-b0f964572230" from precise.skaters.covariance.allcovskaters import cov_skater_manifest pprint(cov_skater_manifest())
examples_colab_notebooks/partial_moments_cov_estimation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.4 64-bit (''base'': conda)' # name: python37464bitbaseconda8cbf85cc44b94ed2a5c37d93e746daff # --- # # 8. Web Scraping # # ## What is Web Scraping? # # The dictionary meaning of word ‘Scrapping’ implies getting something from the web. Here two questions arise: What we can get from the web and How to get that. # # The answer to the first question is ‘data’. Data is indispensable for any programmer and the basic requirement of every programming project is the large amount of useful data. # # The answer to the second question is a bit tricky, because there are lots of ways to get data. In general, we may get data from a database or data file and other sources. But what if we need large amount of data that is available online? One way to get such kind of data is to manually search (clicking away in a web browser) and save (copy-pasting into a spreadsheet or file) the required data. This method is quite tedious and time consuming. Another way to get such data is using web scraping. # # Web scraping, also called web data mining or web harvesting, is the process of constructing an agent which can extract, parse, download and organize useful information from the web automatically. In other words, we can say that instead of manually saving the data from websites, the web scraping software will automatically load and extract data from multiple websites as per our requirement. # ## Beautiful Soup # # Beautiful Soup is a Python library for getting data out of HTML, XML, and other markup languages. Say you’ve found some webpages that display data relevant to your research, such as date or address information, but that do not provide any way of downloading the data directly. Beautiful Soup helps you pull particular content from a webpage, remove the HTML markup, and save the information. 
# It is a tool for web scraping that helps you clean up and parse the documents you have pulled down from the web.

# +
from bs4 import BeautifulSoup
import requests
import random


def get_imd_movies(url):
    """Fetch the IMDb top-chart page and return its movie cells in random order."""
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    movies = soup.find_all("td", class_="titleColumn")
    random.shuffle(movies)
    return movies


def get_imd_summary(url):
    """Fetch a single movie page and return its plot-summary text."""
    movie_page = requests.get(url)
    soup = BeautifulSoup(movie_page.text, 'html.parser')
    return soup.find("div", class_="summary_text").contents[0].strip()


def get_imd_movie_info(movie):
    """Extract (title, year, absolute url) from one top-chart cell."""
    movie_title = movie.a.contents[0]
    movie_year = movie.span.contents[0]
    movie_url = 'http://www.imdb.com' + movie.a['href']
    return movie_title, movie_year, movie_url


def imd_movie_picker():
    """Print title, year and summary for ten randomly chosen top-chart movies."""
    print("--------------------------------------------")
    # slice replaces the original hand-rolled counter/break at 10
    for movie in get_imd_movies('http://www.imdb.com/chart/top')[:10]:
        movie_title, movie_year, movie_url = get_imd_movie_info(movie)
        movie_summary = get_imd_summary(movie_url)
        print(movie_title, movie_year)
        print(movie_summary)
        print("--------------------------------------------")


if __name__ == '__main__':
    imd_movie_picker()
# -

# ## Scrapy
#
# Scrapy is an application framework for crawling web sites and extracting structured data which can be used for a wide range of useful applications, like data mining, information processing or historical archival.
#
# Even though Scrapy was originally designed for web scraping, it can also be used to extract data using APIs (such as Amazon Associates Web Services) or as a general purpose web crawler.
#
# **Walk-through of an example spider**
#
# In order to show you what Scrapy brings to the table, we’ll walk you through an example of a Scrapy Spider using the simplest way to run a spider.
# # Here’s the code for a spider that scrapes famous quotes from website http://quotes.toscrape.com, following the pagination: # + import scrapy class QuotesSpider(scrapy.Spider): name = 'quotes' start_urls = [ 'http://quotes.toscrape.com/tag/humor/', ] def parse(self, response): for quote in response.css('div.quote'): yield { 'author': quote.xpath('span/small/text()').get(), 'text': quote.css('span.text::text').get(), } next_page = response.css('li.next a::attr("href")').get() if next_page is not None: yield response.follow(next_page, self.parse) # - # Put this in a text file, name it to something like quotes_spider.py and run the spider using the runspider command: # ``` # scrapy runspider quotes_spider.py -o quotes.json # ``` # When this finishes you will have in the quotes.json file a list of the quotes in JSON format, containing text and author, looking like this (reformatted here for better readability): # ``` # [{ # "author": "<NAME>", # "text": "\u201cThe person, be it gentleman or lady, who has not pleasure in a good novel, must be intolerably stupid.\u201d" # }, # { # "author": "<NAME>", # "text": "\u201cOutside of a dog, a book is man's best friend. Inside of a dog it's too dark to read.\u201d" # }, # { # "author": "<NAME>", # "text": "\u201cA day without sunshine is like, you know, night.\u201d" # }, # ...] # ```
8 Web Scraping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MichaelSumagui/Linear-Algebra-58020/blob/main/Prelim_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="p4eVDW4PlrFL"
# Question 1

# + colab={"base_uri": "https://localhost:8080/"} id="WzzwlvLWkJDa" outputId="66efe69e-1776-4b5e-96b6-adbc143ba12d"
import numpy as np

# a 4x4 identity matrix
C = np.eye(4)
print("C", C)

# + [markdown] id="Np4Cwu1kmizy"
# Question 2

# + colab={"base_uri": "https://localhost:8080/"} id="EnIst79Dmie5" outputId="17aee079-7200-4d36-e74e-d8ac82ce0338"
import numpy as np

# doubling the identity matrix element-wise
C = np.eye(4)
print(C * 2)

# + [markdown] id="u3IPStZpkFx-"
# Question 3

# + colab={"base_uri": "https://localhost:8080/"} id="CMID7EkQi87J" outputId="271ddd90-afaf-4d14-9e56-d79daa1ff252"
import numpy as np

# cross product of two 3-component vectors
A = [2, 7, 4]
B = [3, 9, 8]
cross = np.cross(A, B)
print(cross)
Prelim_Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Superflux onsets # # This notebook demonstrates how to recover the Superflux onset detection algorithm of # `Bo<NAME>, 2013 <http://dafx13.nuim.ie/papers/09.dafx2013_submission_12.pdf>`_ # from librosa. # # This algorithm improves onset detection accuracy in the presence of vibrato. # # + # Code source: <NAME> # License: ISC # - # We'll need numpy and matplotlib for this example # # # + from __future__ import print_function import numpy as np import matplotlib.pyplot as plt import librosa import librosa.display # - # We'll load in a five-second clip of a track that has # noticeable vocal vibrato. # The method works fine for longer signals, but the # results are harder to visualize. # # y, sr = librosa.load('audio/Karissa_Hobbs_-_09_-_Lets_Go_Fishin.mp3', sr=44100, duration=5, offset=35) # These parameters are taken directly from the paper # # n_fft = 1024 hop_length = int(librosa.time_to_samples(1./200, sr=sr)) lag = 2 n_mels = 138 fmin = 27.5 fmax = 16000. max_size = 3 # The paper uses a log-frequency representation, but for # simplicity, we'll use a Mel spectrogram instead. # # # + S = librosa.feature.melspectrogram(y, sr=sr, n_fft=n_fft, hop_length=hop_length, fmin=fmin, fmax=fmax, n_mels=n_mels) plt.figure(figsize=(6, 4)) librosa.display.specshow(librosa.power_to_db(S, ref=np.max), y_axis='mel', x_axis='time', sr=sr, hop_length=hop_length, fmin=fmin, fmax=fmax) plt.tight_layout() # - # Now we'll compute the onset strength envelope and onset events # using the librosa defaults. 
# # odf_default = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length) onset_default = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length, units='time') # And similarly with the superflux method # # # + odf_sf = librosa.onset.onset_strength(S=librosa.power_to_db(S, ref=np.max), sr=sr, hop_length=hop_length, lag=lag, max_size=max_size) onset_sf = librosa.onset.onset_detect(onset_envelope=odf_sf, sr=sr, hop_length=hop_length, units='time') # - # If you look carefully, the default onset detector (top sub-plot) has # several false positives in high-vibrato regions, eg around 0.62s or # 1.80s. # # The superflux method (middle plot) is less susceptible to vibrato, and # does not detect onset events at those points. # # # + # sphinx_gallery_thumbnail_number = 2 plt.figure(figsize=(6, 6)) frame_time = librosa.frames_to_time(np.arange(len(odf_default)), sr=sr, hop_length=hop_length) ax = plt.subplot(2, 1, 2) librosa.display.specshow(librosa.power_to_db(S, ref=np.max), y_axis='mel', x_axis='time', sr=sr, hop_length=hop_length, fmin=fmin, fmax=fmax) plt.xlim([0, 5.0]) plt.axis('tight') plt.subplot(4, 1, 1, sharex=ax) plt.plot(frame_time, odf_default, label='Spectral flux') plt.vlines(onset_default, 0, odf_default.max(), label='Onsets') plt.xlim([0, 5.0]) plt.legend() plt.subplot(4, 1, 2, sharex=ax) plt.plot(frame_time, odf_sf, color='g', label='Superflux') plt.vlines(onset_sf, 0, odf_sf.max(), label='Onsets') plt.xlim([0, 5.0]) plt.legend() plt.tight_layout() plt.show()
0.7.2/_downloads/fec4b8b0e79dada6a1874e401ce5c881/plot_superflux.ipynb