code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Seldon Core Real Time Stream Processing with KNative Eventing # # In this example we will show how you can enable real time stream processing in Seldon Core by leveraging the KNative Eventing integration. # # In this example we will deploy a simple model containerised with Seldon Core and we will leverage the basic Seldon Core integration with KNative Eventing which will allow us to connect it so it can receive cloud events as requests and return a cloudevent-enabled response which can be collected by other components. # ## Pre-requisites # # You will require the following in order to go ahead: # * Istio 1.42+ Installed ([Documentation Instructions](https://istio.io/latest/docs/setup/install/)) # * KNative Eventing 0.13 installed ([Documentation Instructions](https://knative.dev/docs/admin/install/)) # * Seldon Core v1.1+ installed with Istio Ingress Enabled ([Documentation Instructions](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html#ingress-support)) # ## Deploy your Seldon Model # # We will first deploy our model using Seldon Core. In this case we'll use one of the [pre-packaged model servers](https://docs.seldon.io/projects/seldon-core/en/latest/servers/overview.html). # # We first createa configuration file: # + # %%writefile ./assets/simple-iris-deployment.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: iris-deployment spec: predictors: - graph: implementation: SKLEARN_SERVER modelUri: gs://seldon-models/v1.14.0-dev/sklearn/iris name: simple-iris-model children: [] name: default replicas: 1 # - # ### Run the model in our cluster # # Now we run the Seldon Deployment configuration file we just created. 
# !kubectl apply -f assets/simple-iris-deployment.yaml # ### Check that the model has been deployed # # # !kubectl get pods | grep iris # ## Create a Trigger to reach our model # We want to create a trigger that is able to reach directly to the service. # # We will be using the following seldon deployment: # !kubectl get sdep | grep iris # ### Create trigger configuration # + # %%writefile ./assets/seldon-knative-trigger.yaml apiVersion: eventing.knative.dev/v1beta1 kind: Trigger metadata: name: seldon-eventing-sklearn-trigger spec: broker: default filter: attributes: type: seldon.iris-deployment.default.request subscriber: ref: apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment name: iris-deployment # - # Create this trigger file which will send all cloudevents of type `"seldon.<deploymentName>.request"`. # !kubectl apply -f assets/seldon-knative-trigger.yaml # CHeck that the trigger is working correctly (you should see "Ready: True"), together with the URL that will be reached. # !kubectl get trigger # ### Send a request to the KNative Eventing default broker # # To send requests we can do so by sending a curl command from a pod inside of the cluster. # !kubectl run --quiet=true -it --rm curl --image=radial/busyboxplus:curl --restart=Never -- \ # curl -v "default-broker.default.svc.cluster.local" \ # -H "Ce-Id: 536808d3-88be-4077-9d7a-a3f162705f79" \ # -H "Ce-specversion: 0.3" \ # -H "Ce-Type: seldon.iris-deployment.default.request" \ # -H "Ce-Source: seldon.examples.streaming.curl" \ # -H "Content-Type: application/json" \ # -d '{"data": { "ndarray": [[1,2,3,4]]}}' # ### Check our model has received it # # We can do this by checking the logs (we can query the logs through the service name) and see that the request has been processed # !kubectl logs svc/iris-deployment-default simple-iris-model | tail -6 # ## Connect a source to listen to the results of the seldon model # # Our Seldon Model is producing results which are sent back to KNative. 
# # This means that we can connect other subsequent services through a trigger that filters for those response cloudevents. # # ### First create the service that willl print the results # # This is just a simple pod that prints all the request data into the console. # + # %%writefile ./assets/event-display-deployment.yaml # event-display app deploment apiVersion: apps/v1 kind: Deployment metadata: name: event-display spec: replicas: 1 selector: matchLabels: &labels app: event-display template: metadata: labels: *labels spec: containers: - name: helloworld-python image: gcr.io/knative-releases/github.com/knative/eventing-sources/cmd/event_display --- # Service that exposes event-display app. # This will be the subscriber for the Trigger kind: Service apiVersion: v1 metadata: name: event-display spec: selector: app: event-display ports: - protocol: TCP port: 80 targetPort: 8080 # - # ### Now run the event display resources # !kubectl apply -f assets/event-display-deployment.yaml # ### Check that the event display has been deployed # !kubectl get pods | grep event # ### Create trigger for event display # # We now can create a trigger that sends all the requests of the type and source created by the seldon deployment to our event display pod # + # %%writefile ./assets/event-display-trigger.yaml # Trigger to send events to service above apiVersion: eventing.knative.dev/v1alpha1 kind: Trigger metadata: name: event-display spec: broker: default filter: attributes: type: seldon.iris-deployment.default.response source: seldon.iris-deployment subscriber: ref: apiVersion: v1 kind: Service name: event-display # - # ### Apply that trigger # !kubectl apply -f assets/event-display-trigger.yaml # ### Check our triggers are correctly set up # # We now should see the event trigger available. # !kubectl get trigger # ## Send a couple of requests more # # We can use the same process we outlined above to send a couple more events. 
# # !kubectl run --quiet=true -it --rm curl --image=radial/busyboxplus:curl --restart=Never -- \ # curl -v "default-broker.default.svc.cluster.local" \ # -H "Ce-Id: 536808d3-88be-4077-9d7a-a3f162705f79" \ # -H "Ce-Specversion: 0.3" \ # -H "Ce-Type: seldon.iris-deployment.default.request" \ # -H "Ce-Source: dev.knative.samples/helloworldsource" \ # -H "Content-Type: application/json" \ # -d '{"data": { "ndarray": [[1,2,3,4]]}}' # ### Visualise the requests that come from the service # !kubectl logs svc/event-display | tail -40
examples/streaming/knative-eventing/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Setup # # Load libraries, set matplotlib to inline plotting, and create constants for analysis # + # %matplotlib inline from io import BytesIO from zipfile import ZipFile from urllib.request import urlopen import pyodbc as db import numpy as np import pandas as pd from pandas import DataFrame, Series import matplotlib.pyplot as plt import seaborn as sns start_year = 2000 end_year = 2005 years = [i for i in range(start_year, end_year + 1)] # key columns indices = ["unitid", "date_key", "year", "cipcode", "awlevel", "majornum"] # value columns cols = ["cnralm", "cnralw", "cunknm", "cunknw", "chispm", "chispw", "caianm", "caianw", "casiam", "casiaw", "cbkaam", "cbkaaw", "cnhpim", "cnhpiw", "cwhitm", "cwhitw", "c2morm", "c2morw"] def fix_cols(dat, year): dat.columns = [colname.lower() for colname in list(dat.columns.values)] if year < 2001: dat["majornum"] = 1 dat["cnralm"] = pd.to_numeric(dat["crace01"], errors = "coerce", downcast = "integer") dat["cnralw"] = pd.to_numeric(dat["crace02"], errors = "coerce", downcast = "integer") dat["cunknm"] = pd.to_numeric(dat["crace13"], errors = "coerce", downcast = "integer") dat["cunknw"] = pd.to_numeric(dat["crace14"], errors = "coerce", downcast = "integer") dat["chispm"] = pd.to_numeric(dat["crace09"], errors = "coerce", downcast = "integer") dat["chispw"] = pd.to_numeric(dat["crace10"], errors = "coerce", downcast = "integer") dat["caianm"] = pd.to_numeric(dat["crace05"], errors = "coerce", downcast = "integer") dat["caianw"] = pd.to_numeric(dat["crace06"], errors = "coerce", downcast = "integer") dat["casiam"] = pd.to_numeric(dat["crace07"], errors = "coerce", downcast = "integer") dat["casiaw"] = pd.to_numeric(dat["crace08"], errors = "coerce", downcast = "integer") dat["cbkaam"] = 
pd.to_numeric(dat["crace03"], errors = "coerce", downcast = "integer") dat["cbkaaw"] = pd.to_numeric(dat["crace04"], errors = "coerce", downcast = "integer") dat["cnhpim"] = 0 dat["cnhpiw"] = 0 dat["cwhitm"] = pd.to_numeric(dat["crace11"], errors = "coerce", downcast = "integer") dat["cwhitw"] = pd.to_numeric(dat["crace12"], errors = "coerce", downcast = "integer") dat["c2morm"] = 0 dat["c2morw"] = 0 elif year in range(2001, 2008): dat["cnralm"] = pd.to_numeric(dat["crace01"], errors = "coerce", downcast = "integer") dat["cnralw"] = pd.to_numeric(dat["crace02"], errors = "coerce", downcast = "integer") dat["cunknm"] = pd.to_numeric(dat["crace13"], errors = "coerce", downcast = "integer") dat["cunknw"] = pd.to_numeric(dat["crace14"], errors = "coerce", downcast = "integer") dat["chispm"] = pd.to_numeric(dat["crace09"], errors = "coerce", downcast = "integer") dat["chispw"] = pd.to_numeric(dat["crace10"], errors = "coerce", downcast = "integer") dat["caianm"] = pd.to_numeric(dat["crace05"], errors = "coerce", downcast = "integer") dat["caianw"] = pd.to_numeric(dat["crace06"], errors = "coerce", downcast = "integer") dat["casiam"] = pd.to_numeric(dat["crace07"], errors = "coerce", downcast = "integer") dat["casiaw"] = pd.to_numeric(dat["crace08"], errors = "coerce", downcast = "integer") dat["cbkaam"] = pd.to_numeric(dat["crace03"], errors = "coerce", downcast = "integer") dat["cbkaaw"] = pd.to_numeric(dat["crace04"], errors = "coerce", downcast = "integer") dat["cnhpim"] = 0 dat["cnhpiw"] = 0 dat["cwhitm"] = pd.to_numeric(dat["crace11"], errors = "coerce", downcast = "integer") dat["cwhitw"] = pd.to_numeric(dat["crace12"], errors = "coerce", downcast = "integer") dat["c2morm"] = 0 dat["c2morw"] = 0 elif year in range(2008, 2011): dat["cnralm"] = pd.to_numeric(dat["cnralm"], errors = "coerce", downcast = "integer") dat["cnralw"] = pd.to_numeric(dat["cnralw"], errors = "coerce", downcast = "integer") dat["cunknm"] = pd.to_numeric(dat["cunknm"], errors = "coerce", 
downcast = "integer") dat["cunknw"] = pd.to_numeric(dat["cunknw"], errors = "coerce", downcast = "integer") dat["chispm"] = pd.to_numeric(dat["dvchsm"], errors = "coerce", downcast = "integer") dat["chispw"] = pd.to_numeric(dat["dvchsw"], errors = "coerce", downcast = "integer") dat["caianm"] = pd.to_numeric(dat["dvcaim"], errors = "coerce", downcast = "integer") dat["caianw"] = pd.to_numeric(dat["dvcaiw"], errors = "coerce", downcast = "integer") dat["casiam"] = pd.to_numeric(dat["dvcapm"], errors = "coerce", downcast = "integer") dat["casiaw"] = pd.to_numeric(dat["dvcapw"], errors = "coerce", downcast = "integer") dat["cbkaam"] = pd.to_numeric(dat["dvcbkm"], errors = "coerce", downcast = "integer") dat["cbkaaw"] = pd.to_numeric(dat["dvcbkw"], errors = "coerce", downcast = "integer") dat["cnhpim"] = 0 dat["cnhpiw"] = 0 dat["cwhitm"] = pd.to_numeric(dat["dvcwhm"], errors = "coerce", downcast = "integer") dat["cwhitw"] = pd.to_numeric(dat["dvcwhw"], errors = "coerce", downcast = "integer") dat["c2morm"] = pd.to_numeric(dat["c2morm"], errors = "coerce", downcast = "integer") dat["c2morw"] = pd.to_numeric(dat["c2morw"], errors = "coerce", downcast = "integer") elif year > 2010: dat["cnralm"] = pd.to_numeric(dat["cnralm"], errors = "coerce", downcast = "integer") dat["cnralw"] = pd.to_numeric(dat["cnralw"], errors = "coerce", downcast = "integer") dat["cunknm"] = pd.to_numeric(dat["cunknm"], errors = "coerce", downcast = "integer") dat["cunknw"] = pd.to_numeric(dat["cunknw"], errors = "coerce", downcast = "integer") dat["chispm"] = pd.to_numeric(dat["chispm"], errors = "coerce", downcast = "integer") dat["chispw"] = pd.to_numeric(dat["chispw"], errors = "coerce", downcast = "integer") dat["caianm"] = pd.to_numeric(dat["caianm"], errors = "coerce", downcast = "integer") dat["caianw"] = pd.to_numeric(dat["caianw"], errors = "coerce", downcast = "integer") dat["casiam"] = pd.to_numeric(dat["casiam"], errors = "coerce", downcast = "integer") dat["casiaw"] = 
pd.to_numeric(dat["casiaw"], errors = "coerce", downcast = "integer") dat["cbkaam"] = pd.to_numeric(dat["cbkaam"], errors = "coerce", downcast = "integer") dat["cbkaaw"] = pd.to_numeric(dat["cbkaaw"], errors = "coerce", downcast = "integer") dat["cnhpim"] = pd.to_numeric(dat["cnhpim"], errors = "coerce", downcast = "integer") dat["cnhpiw"] = pd.to_numeric(dat["cnhpiw"], errors = "coerce", downcast = "integer") dat["cwhitm"] = pd.to_numeric(dat["cwhitm"], errors = "coerce", downcast = "integer") dat["cwhitw"] = pd.to_numeric(dat["cwhitw"], errors = "coerce", downcast = "integer") dat["c2morm"] = pd.to_numeric(dat["c2morm"], errors = "coerce", downcast = "integer") dat["c2morw"] = pd.to_numeric(dat["c2morw"], errors = "coerce", downcast = "integer") years # + active="" # # Read Data # # Read data file from NCES website for each year selected, set column names to lower case for sanity, reduce to needed columns, and fill NaN with zero. Show data frame shape (to # + df = DataFrame() for year in years: # prior to 2014, admissions was reported in the IPEDS-IC survey rather than IPEDS-ADM url = "https://nces.ed.gov/ipeds/datacenter/data/c" + str(year) + "_a.zip" file_name = "c" + str(year) + "_a.csv" resp = urlopen(url) zipfile = ZipFile(BytesIO(resp.read())) myfile = zipfile.open(file_name) temp = pd.read_csv(myfile, low_memory = True, encoding = "iso-8859-1") fix_cols(temp, year) temp["year"] = year temp["date_key"] = (year * 10000) + 1015 temp = pd.concat((temp[indices], temp[cols]), axis = 1) df = pd.concat([df, temp], sort = True) temp = None # replace NaN with zero df = df.fillna(0) df.shape # + active="" # # Look At Summaries # - df.info() # + active="" # Look at first and last cases # - df.iloc[[0, 1, 2, 3, 4, -5, -4, -3, -2, -1],:] # + active="" # # Look for potential structural issues # + active="" # Zero applications, non-zero admissions # - # zero apps, non-zero admits df.loc[(df["applcn"] == 0) & (df["admssn"] > 0), :] # + active="" # Zero admissions, non-zero 
enrollment # - # zero admits, non-zero enrollment df.loc[(df["admssn"] == 0) & (df["enrlt"] > 0), :] # + active="" # Sum of men and women applications is greater than total applications # - # total applications less than sum of parts df.loc[df["applcn"] < df["applcnm"] + df["applcnw"],:] # + active="" # # Calculate unknown sex categories # # Calculate an unknown variable to accomodate those institutions where the total applications, admissions, and enrollment are greater than the sum of their headcounts of men and women. This will ensure that the details roll up to the total in the final long format data frame. # + # calculate unknowns df["applcnu"] = df["applcn"] - (df["applcnm"] + df["applcnw"]) df["admssnu"] = df["admssn"] - (df["admssnm"] + df["admssnw"]) df["enrlu"] = df["enrlt"] - (df["enrlm"] + df["enrlw"]) df.loc[df["applcnu"] > 0, ["applcn", "applcnm", "applcnw", "applcnu"]].head() # + active="" # # Convert From Wide to Long # # Melt the DataFrame, pivoting value columns into a single column. # Create a field column to identify type of value. # Create a sex column to identify values by sex. # + # reshape from wide to long format adm_long = pd.melt(df, id_vars = ["unitid", "date_key"], value_vars = ["applcnm", "applcnw", "applcnu", "admssnm", "admssnw", "admssnu", "enrlm", "enrlw", "enrlu"], value_name = "count") # field indicator adm_long["field"] = np.where(adm_long["variable"].str.slice(0, 3) == "app", "applications", "unknown") adm_long["field"] = np.where(adm_long["variable"].str.slice(0, 3) == "adm", "admissions", adm_long["field"]) adm_long["field"] = np.where(adm_long["variable"].str.slice(0, 3) == "enr", "enrollment", adm_long["field"]) # sex indicator adm_long["sex"] = np.where(adm_long["variable"].str.slice(-1) == "w", "women", "unknown") adm_long["sex"] = np.where(adm_long["variable"].str.slice(-1) == "m", "men", adm_long["sex"]) adm_long.iloc[[0, 1, 2, 3, 4, -5, -4, -3, -2, -1],:] # + active="" # # Inspect Field Values # # Check for unknown. 
If there is an unknown value here, something has changed in naming conventions. # - adm_long["field"].value_counts() # + active="" # # Add Demographic Key # # This adds a demographic key for warehousing. The first 5 characters are all set to "unkn" because IPEDS-ADM does not collect race/ethnicity. # + adm_long["demographic_key"] = "unknu" adm_long["demographic_key"] = np.where(adm_long["sex"] == "men", "unknm", adm_long["demographic_key"]) adm_long["demographic_key"] = np.where(adm_long["sex"] == "women", "unknw", adm_long["demographic_key"]) adm_long["demographic_key"].value_counts() # + active="" # # Pivot Long Data to Final Format # # Pivot and aggregate (sum) the count column, converting the field variable back into three measures: applications, admissions, and enrollment. For warehousing, we will eventually drop the sex field, but it is kept here for data checking purposes. # + adm = adm_long.pivot_table(index=["unitid", "date_key", "demographic_key", "sex"], columns='field', values='count', aggfunc = np.sum, fill_value = 0).reset_index() # remove institutions with no applications adm = adm.loc[adm["applications"] > 0] adm.iloc[[0, 1, 2, 3, 4, -5, -4, -3, -2, -1],:] # + active="" # # Write Data to Warehouse (later) # + active="" # # Create Some Variables and Do Basic Exploration # + adm["acceptance_rate"] = adm["admissions"] / adm["applications"] adm["yield_rate"] = adm["enrollment"] / adm["admissions"] adm["isUNL"] = np.where(adm["unitid"] == 181464, "UNL", "Others") # & (adm["acceptance_rate"] > 0) & (adm["acceptance_rate"] <= 1.0) & (adm["yield_rate"] > 0) & (adm["yield_rate"] <= 1.0) cases = (adm["date_key"] == 20171015) & (adm["acceptance_rate"] < 1.0) & (adm["yield_rate"] < 1.0) viz_set = adm[cases] viz_set.shape # + # Set theme sns.set_style('darkgrid') sns.jointplot(x = "acceptance_rate", y = "yield_rate", data = viz_set, kind="hex", color="#4CB391") # -
ipeds-completions-reader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} evonormlayer = EvoNormLayer(128) test_volume = torch.randn(128,128,128).unsqueeze(0).unsqueeze(1) for i in range(9): evonormlayer.mutate() model = RegUNet(3, 1, 1, 2, evonorm=evonormlayer) model(test_volume) # + pycharm={"name": "#%%\n"} evonormlayer.adjacency_list # + pycharm={"name": "#%%\n"}
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt from ml.visualization import plot_confusion_matrix, plot_learning_curve from sklearn.datasets import load_wine from sklearn.linear_model import SGDClassifier from sklearn.metrics import accuracy_score, classification_report, confusion_matrix from sklearn.model_selection import GridSearchCV, train_test_split np.random.seed(1234) # %matplotlib inline # - # # Mรฉtricas y validaciรณn de resultados # # ## Carga de datos # + wine_data = load_wine() X = wine_data['data'] y = wine_data['target'] print(wine_data['DESCR']) # - # ## Conjunto de datos de entrenamiento y evaluaciรณn X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # ## Bรบsqueda de parรกmetros # + plt.figure(figsize=(14, 4), dpi= 80, facecolor='w', edgecolor='k') for idx, loss in enumerate(('hinge', 'log', 'perceptron'), start=1): exploring_params = { 'learning_rate': ['constant'], 'eta0': [0.1, 0.01, 0.001], # Tasa de entrenamiento 'alpha': [0.1, 0.01, 0.001] # Tasa de regularizaciรณn } m = SGDClassifier(loss=loss, tol=1e-3) model = GridSearchCV(m, exploring_params, cv=5, scoring='accuracy') model.fit(X_train, y_train) print("# Exploraciรณn de hiperparรกmetros para funciรณn de coste \"%s\"" % loss, end="\n\n") print("Mejor conjunto de parรกmetros:") print(model.best_params_, end="\n\n") print("Puntajes de la grilla:", end="\n\n") means = model.cv_results_['mean_test_score'] stds = model.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, model.cv_results_['params']): print("Exactitud: %0.3f (+/-%0.03f) para los parรกmetros %r" % (mean, std ** 2, params)) print() print("Reporte de clasificaciรณn para el mejor clasificador (sobre conjunto de evaluaciรณn):", end="\n\n") y_true, y_pred = y_test, 
model.predict(X_test) print(classification_report(y_true, y_pred), end="\n\n") print("================================================", end="\n\n") plt.subplot(1, 3, idx) plot_confusion_matrix(confusion_matrix(y_true, y_pred), classes=wine_data.target_names, title="Matriz de confusiรณn para %s" % loss)
03 Metricas y validacion de resultados.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <div class="contentcontainer med left" style="margin-left: -50px;"> # <dl class="dl-horizontal"> # <dt>Title</dt> <dd> HeatMap Element</dd> # <dt>Dependencies</dt> <dd>Plotly</dd> # <dt>Backends</dt> <dd><a href='../bokeh/HeatMap.ipynb'>Bokeh</a></dd> <dd><a href='../matplotlib/HeatMap.ipynb'>Matplotlib</a></dd> <dd><a href='./HeatMap.ipynb'>Plotly</a></dd> # </dl> # </div> import numpy as np import holoviews as hv hv.extension('plotly') # ``HeatMap`` visualises tabular data indexed by two key dimensions as a grid of colored values. This allows spotting correlations in multivariate data and provides a high-level overview of how the two variables are plotted. # # The data for a ``HeatMap`` may be supplied as 2D tabular data with one or more associated value dimensions. The first value dimension will be colormapped, but further value dimensions may be revealed using the hover tool. # %%opts HeatMap (cmap='RdBu_r') data = [(chr(65+i), chr(97+j), i*j) for i in range(5) for j in range(5) if i!=j] hv.HeatMap(data) # It is important to note that the data should be aggregated before plotting as the ``HeatMap`` cannot display multiple values for one coordinate and will simply use the first value it finds for each combination of x- and y-coordinates. heatmap = hv.HeatMap([(0, 0, 0), (0, 0, 10), (1, 0, 2), (1, 1, 3)]) heatmap + heatmap.aggregate(function=np.max).opts(plot=dict(colorbar=True)) # As the above example shows before aggregating the second value for the (0, 0) is ignored unless we aggregate the data first. 
# # To reveal the values of a ``HeatMap`` we can enable a ``colorbar`` and if you wish to have interactive hover information, you can use the hover tool in the [Bokeh backend](../bokeh/HeatMap.ipynb): # %%opts HeatMap [colorbar=True] hv.HeatMap((np.random.randint(0, 10, 100), np.random.randint(0, 10, 100), np.random.randn(100), np.random.randn(100)), vdims=['z', 'z2']).redim.range(z=(-2, 2)).sort() # For full documentation and the available style and plot options, use ``hv.help(hv.HeatMap).``
examples/reference/elements/plotly/HeatMap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import os import numpy as np import json from utils.augment import DataAugmentForObjectDetection path = '../datasets/tt100k/test_new' image_path = os.path.join(path,'3236.jpg') annotations_path = os.path.join(path,'annotations.json') image_id = '3236' annotations = json.loads(open(annotations_path).read()) types = annotations['types'] labels = [] for obj in annotations['imgs'][image_id]['objects']: category = obj['category'] category_id = types.index(category) xmin = int(obj['bbox']['xmin']) ymin = int(obj['bbox']['ymin']) xmax = int(obj['bbox']['xmax']) ymax = int(obj['bbox']['ymax']) label = [xmin,ymin,xmax,ymax,category_id] labels.append(label) labels = np.array(labels) print(labels) print(labels.shape) bboxes = labels[:,:4].tolist() print(bboxes) print(len(bboxes)) img = cv2.imread(image_path) aug = DataAugmentForObjectDetection() img_new,bboxes_new = aug.letterbox(img,bboxes) print(img_new.shape) print(len(bboxes_new)) print(bboxes_new) for bbox in bboxes_new: x_min = bbox[0] y_min = bbox[1] x_max = bbox[2] y_max = bbox[3] draw = cv2.rectangle(img_new,(int(x_min), int(y_min)), (int(x_max), int(y_max)), (0, 255, 0), 1) cv2.namedWindow('pic', 0) # 1่กจ็คบๅŽŸๅ›พ cv2.moveWindow('pic', 0, 0) cv2.resizeWindow('pic', 800, 800) # ๅฏ่ง†ๅŒ–็š„ๅ›พ็‰‡ๅคงๅฐ cv2.imshow('pic', draw) cv2.waitKey(0) cv2.destroyAllWindows() import numpy as np import torch from torch.autograd import Variable list = [] a = np.array(list) print(a) b = Variable(torch.from_numpy(a).type(torch.FloatTensor)) print(b) print(b.shape) for t in range(0): print(t) str = '10056.jpg' line=str.split() # box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]]) for box in line[1:]: box.split(',') print(box) import cv2 import os import numpy as np import json 
from utils.augment import DataAugmentForObjectDetection import torch from torch.autograd import Variable from utils.dataloader import random_crop,random_perspective,letterbox,augment_hsv,box_candidates,switch_targets def draw(image,bboxes): for bbox in bboxes: x_min = bbox[0] y_min = bbox[1] x_max = bbox[2] y_max = bbox[3] image = cv2.rectangle(image,(int(x_min), int(y_min)), (int(x_max), int(y_max)), (0, 255, 0), 2) cv2.namedWindow('pic', 0) # 1่กจ็คบๅŽŸๅ›พ cv2.moveWindow('pic', 0, 0) cv2.resizeWindow('pic', 800, 800) # ๅฏ่ง†ๅŒ–็š„ๅ›พ็‰‡ๅคงๅฐ cv2.imshow('pic', image) cv2.waitKey(0) cv2.destroyAllWindows() with open('tt100k_train.txt') as f: lines = f.readlines() line = lines[11].split() image = cv2.imread(line[0]) targets = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]]) # xyxy,cls n = len(targets) bboxes = targets[:, :4] cls = targets[:, -1].reshape(n,1) targets = switch_targets(targets,1) print(image.shape) print(targets) image, targets = random_crop(image,targets,shape=(608,608),area_thr=0.8) print(image.shape) print(targets) draw(image,targets[:,1:5].tolist()) line = lines[18].split() image = cv2.imread(line[0]) targets = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]]) # xyxy,cls print(targets) targets = switch_targets(targets,1) print(targets) # 1ใ€้šๆœบ่ฃๅ‰ช๏ผŒๆ›ดๆ–ฐimageใ€bboxesๅ’Œtargets image, bboxes = random_crop(image, bboxes) targets = np.concatenate((cls,bboxes),axis = 1) print(image.shape) print(targets) w = targets[:,3]-targets[:,1] h = targets[:,4]-targets[:,2] print(w) print(h) # + # 2ใ€letterbox๏ผŒ่พ“ๅ‡บ416x416 image, targets = letterbox(image, targets, new_shape=(416,416),auto = False) print(image.shape) print(targets) w = targets[:,3]-targets[:,1] h = targets[:,4]-targets[:,2] print(w) print(h) # + # 3ใ€้šๆœบ้€่ง†ๅ˜ๆข image, targets = random_perspective(image, targets) print(image.shape) print(targets) w = targets[:,3]-targets[:,1] h = targets[:,4]-targets[:,2] print(w) print(h) # - 
# 4ใ€่‰ฒๅŸŸๅ˜ๆข augment_hsv(image, hgain=0.1, sgain=0.5, vgain=0.5) draw(image,targets[:,1:5]) # 5ใ€targets็”ฑcls,xyxy่ฝฌไธบxyxy,cls cls = targets[:, 0].reshape(len(targets),1) print(cls) bboxes = targets[:, 1:5] print(bboxes) targets = np.concatenate((bboxes, cls), axis=1) print(targets) import random a=random.randint(0,0) a
sometest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide # %load_ext autoreload # %autoreload 2 # # Install # `pip install pytorch-to-tflite` # # or # # `pip install git+https://github.com/anhvth/pytorch_to_tflite/` # # How to use # ## Pytorch to Onnx # + from pytorch_to_tflite.pytorch_to_tflite import * import torch import yaml import os import mmcv from nanodet.model.arch import build_model PATH_TO_CONFIG = '/gitprojects/nano-det-parkingline/config/nanodet-g.yml' cfg = yaml.safe_load(open(PATH_TO_CONFIG)) cfg = mmcv.Config(cfg) model = build_model(cfg.model) img = torch.randn(1,3,416,416) out = model(img) # !mkdir -p cache/ onnx_out_path = 'cache/out.onnx' torch.onnx.export(model, img, onnx_out_path) # - # # ONNX to Tensorflow onnx_path = onnx_out_path tf_path = onnx_path + '.tf' onnx_to_tf(onnx_path=onnx_path, tf_path=tf_path) assert os.path.exists(tf_path) # # Tensorflow to tflite tflite_path = tf_path+'.tflite' tf_to_tf_lite(tf_path, tflite_path) assert os.path.exists(tflite_path) tflite_path
nbs/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split from sklearn.neural_network import MLPClassifier from matplotlib import pyplot as plt import pandas as pd import numpy as np from random import randint import joblib columns = [] for i in range(783): if i < 10: columns.append("a0" + str(i) ) else: columns.append("a" + str(i)) #print(columns[:10]) df = pd.read_csv("mnist.csv") y = df["class"] X = df X.drop("class", axis=1, inplace=True) print(X) X = np.array(X) plt.matshow(X[randint(0, 62000)].reshape(28, 28)) plt.show() X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=randint(10, 40), test_size=0.3) model = MLPClassifier() model.fit(X_train, y_train) print(model) print(model.score(X_test, y_test)) # + active="" # evaluate # + active="" # save # - joblib.dump(model, "hand_written_digits_recognition.joblib")
numbers_recognition_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # import everything and define a test runner function from importlib import reload from time import sleep import bloomfilter, merkleblock from block import Block from bloomfilter import ( BloomFilter, BIP37_CONSTANT, ) from ecc import PrivateKey from helper import ( bit_field_to_bytes, decode_base58, hash160, hash256, little_endian_to_int, murmur3, run, SIGHASH_ALL, ) from merkleblock import MerkleBlock from network import ( GetDataMessage, GetHeadersMessage, HeadersMessage, SimpleNode, FILTERED_BLOCK_DATA_TYPE, TX_DATA_TYPE, ) from script import p2pkh_script from tx import ( Tx, TxIn, TxOut, ) # + # Example Bloom Filter bit_field_size = 10 bit_field = [0] * bit_field_size h256 = hash256(b'hello world') bit = int.from_bytes(h256, 'big') % bit_field_size bit_field[bit] = 1 print(bit_field) # + # Example Bloom Filter 2 bit_field_size = 10 bit_field = [0] * bit_field_size h = hash256(b'hello world') bit = int.from_bytes(h, 'big') % bit_field_size bit_field[bit] = 1 h = hash256(b'goodbye') bit = int.from_bytes(h, 'big') % bit_field_size bit_field[bit] = 1 print(bit_field) # + # Example Bloom Filter 3 bit_field_size = 10 bit_field = [0] * bit_field_size phrase1 = b'hello world' h1 = hash256(phrase1) bit1 = int.from_bytes(h1, 'big') % bit_field_size bit_field[bit1] = 1 h2 = hash160(phrase1) bit2 = int.from_bytes(h2, 'big') % bit_field_size bit_field[bit2] = 1 phrase2 = b'goodbye' h1 = hash256(phrase2) bit1 = int.from_bytes(h1, 'big') % bit_field_size bit_field[bit1] = 1 h2 = hash160(phrase2) bit2 = int.from_bytes(h2, 'big') % bit_field_size bit_field[bit2] = 1 print(bit_field) # + # Example BIP0037 Bloom Filter field_size = 2 num_functions = 2 tweak = 42 bit_field_size = field_size * 8 bit_field = [0] * bit_field_size for phrase in (b'hello world', 
b'goodbye'): for i in range(num_functions): seed = i * BIP37_CONSTANT + tweak h = murmur3(phrase, seed=seed) bit = h % bit_field_size bit_field[bit] = 1 print(bit_field) # - # ### Exercise 1 # # #### 1.1 Given a Bloom Filter with these parameters: size=10, function count=5, tweak=99, which bits are set after adding these items? # # * `b'Hello World'` # * `b'Goodbye!'` # # #### 1.2. Make [this test](/edit/session8/bloomfilter.py) pass. # + # Exercise 1.1 field_size = 10 function_count = 5 tweak = 99 items = (b'Hello World', b'Goodbye!') # bit_field_size is 8 * field_size bit_field_size = field_size * 8 # create a bit field with the appropriate size bit_field = [0] * bit_field_size # for each item you want to add to the filter for item in items: # iterate function_count number of times for i in range(function_count): # BIP0037 spec seed is i*BIP37_CONSTANT + tweak seed = i * BIP37_CONSTANT + tweak # get the murmur3 hash given that seed h = murmur3(item, seed=seed) # set the bit to be h mod the bit_field_size bit = h % bit_field_size # set the bit_field at the index bit to be 1 bit_field[bit] = 1 # print the bit field converted to bytes using bit_field_to_bytes in hex print(bit_field_to_bytes(bit_field).hex()) # + # Exercise 1.2 reload(bloomfilter) run(bloomfilter.BloomFilterTest('test_add')) # - # ### Exercise 2 # # #### 2.1. Make [this test](/edit/session8/bloomfilter.py) pass. # # ``` # bloomfilter.py:BloomFilterTest:test_filterload # ``` # # #### 2.2. Do the following: # # * Connect to a testnet node # * Load a filter for your testnet address # * Send a request for transactions from the block which had your previous testnet transaction # * Receive the merkleblock and tx messages. 
# + # Exercise 2.1 reload(bloomfilter) run(bloomfilter.BloomFilterTest('test_filterload')) # + # Exercise 2.2 from bloomfilter import BloomFilter block_hash = bytes.fromhex('00000000537878<PASSWORD>') # FILL THIS IN passphrase = b'<PASSWORD>' # FILL THIS IN secret = little_endian_to_int(hash256(passphrase)) private_key = PrivateKey(secret=secret) addr = private_key.point.address(testnet=True) print(addr) filter_size = 30 filter_num_functions = 5 filter_tweak = 90210 # FILL THIS IN # get the hash160 of the address using decode_base58 h160 = decode_base58(addr) # create a bloom filter using the filter_size, filter_num_functions and filter_tweak above bf = BloomFilter(filter_size, filter_num_functions, filter_tweak) # add the h160 to the bloom filter bf.add(h160) # connect to tbtc.programmingblockchain.com in testnet mode, logging True node = SimpleNode('tbtc.programmingblockchain.com', testnet=True, logging=True) # complete the handshake node.handshake() # send the filterload message node.send(bf.filterload()) # create a getdata message getdata = GetDataMessage() # add_data (FILTERED_BLOCK_DATA_TYPE, block_hash) to request the block getdata.add_data(FILTERED_BLOCK_DATA_TYPE, block_hash) # send the getdata message node.send(getdata) # wait for the merkleblock command mb = node.wait_for(MerkleBlock) # wait for the tx command tx_obj = node.wait_for(Tx) # print the envelope payload in hex print(tx_obj.serialize().hex()) # - # ### Exercise 3 # # #### 3.1. Make [this test](/edit/session8/merkleblock.py) pass. # + # Exercise 3.1 reload(merkleblock) run(merkleblock.MerkleBlockTest('test_is_valid')) # - # ### Exercise 4 # # #### 4.1. You have been sent some unknown amount of testnet bitcoins to your address. # # Send all of it back (minus fees) to `mwJn1YPMq7y5F8J3LkC5Hxg9PHyZ5K4cFv` using only the networking protocol. 
# + # Exercise 4.1 from merkleblock import MerkleBlock, MerkleTree last_block_hex = '000000000d65610b5af03d73ed67704713c9b734d87cf4b970d39a0416dd80f9' # FILL THIS IN last_block = bytes.fromhex(last_block_hex) passphrase = b'<PASSWORD>' # FILL THIS IN secret = little_endian_to_int(hash256(passphrase)) private_key = PrivateKey(secret=secret) addr = private_key.point.address(testnet=True) print(addr) h160 = decode_base58(addr) target_address = 'mwJn1YPMq7y5F8J3LkC5Hxg9PHyZ5K4cFv' filter_size = 30 filter_num_functions = 5 filter_tweak = 90210 # FILL THIS IN target_h160 = decode_base58(target_address) target_script = p2pkh_script(target_h160) fee = 5000 # fee in satoshis # connect to tbtc.programmingblockchain.com in testnet mode, logging True node = SimpleNode('tbtc.programmingblockchain.com', testnet=True, logging=True) # create a bloom filter using variables above bf = BloomFilter(filter_size, filter_num_functions, filter_tweak) # add the h160 to the bloom filter bf.add(h160) # complete the handshake node.handshake() # send the 'filterload' message node.send(bf.filterload()) # create GetHeadersMessage with the last_block as the start_block getheaders = GetHeadersMessage(start_block=last_block) # send a getheaders message node.send(getheaders) # wait for the headers message headers = node.wait_for(HeadersMessage) # initialize the GetDataMessage getdata = GetDataMessage() # loop through the headers in the headers message for header in headers.headers: # check that the proof of work on the block is valid if not header.check_pow(): raise RuntimeError # check that this block's prev_block is the last block if last_block is not None and header.prev_block != last_block: raise RuntimeError # set the last block to the current hash last_block = header.hash() # add_data(FILTERED_BLOCK_DATA_TYPE, last_block) to get_data_message getdata.add_data(FILTERED_BLOCK_DATA_TYPE, last_block) # send the getdata message node.send(getdata) # initialize prev_tx to None prev_tx = None # while 
prev_tx is None while prev_tx is None: # wait for the merkleblock or tx commands message = node.wait_for(MerkleBlock, Tx) # if we have the merkleblock command if message.command == b'merkleblock': # check that the MerkleBlock is valid if not message.is_valid(): raise RuntimeError # else we have the tx command else: # set message.testnet=True message.testnet = True # loop through the enumerated tx outs (enumerate(message.tx_outs)) for i, tx_out in enumerate(message.tx_outs): # if our output has the same address as our address (addr) we found it if tx_out.script_pubkey.address(testnet=True) == addr: # we found our utxo. set prev_tx, prev_index, prev_amount prev_tx = message.hash() prev_index = i prev_amount = tx_out.amount # break break # create tx_in tx_in = TxIn(prev_tx, prev_index) # calculate the output amount (prev_amount - fee) output_amount = prev_amount - fee # create tx_out tx_out = TxOut(output_amount, target_script) # create transaction on testnet tx_obj = Tx(1, [tx_in], [tx_out], 0, testnet=True) # sign the one input we have tx_obj.sign_input(0, private_key) # serialize and hex to see what it looks like print(tx_obj.serialize().hex()) # send this signed transaction on the network node.send(tx_obj) # wait a sec so this message goes through to the other node sleep(1) sleep(1) # now ask for this transaction from the other node # create a GetDataMessage getdata = GetDataMessage() # add_data (TX_DATA_TYPE, tx_obj.hash()) to get data message getdata.add_data(TX_DATA_TYPE, tx_obj.hash()) # send the GetDataMessage node.send(getdata) # now wait for a response got = node.wait_for(Tx) if got.id() == tx_obj.id(): # yes! we got to what we wanted print('success!') print(tx_obj.id())
session8/complete/session8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Calculating Transit Timing Variations (TTV) with REBOUND # The following code finds the transit times in a two planet system. The transit times of the inner planet are not exactly periodic, due to planet-planet interactions. # First, let's import the REBOUND and numpy packages. import rebound import numpy as np # Let's set up a coplanar two planet system. sim = rebound.Simulation() sim.add(m=1) sim.add(m=1e-5, a=1,e=0.1,omega=0.25) sim.add(m=1e-5, a=1.757) sim.move_to_com() # We're now going to integrate the system forward in time. We assume the observer of the system is in the direction of the positive x-axis. We want to meassure the time when the inner planet transits. In this geometry, this happens when the y coordinate of the planet changes sign. Whenever we detect a change in sign between two steps, we try to find the transit time, which must lie somewhere within the last step, by bisection. N=174 transittimes = np.zeros(N) p = sim.particles i = 0 while i<N: y_old = p[1].y - p[0].y # (Thanks to <NAME> for pointing out a bug in this line!) t_old = sim.t sim.integrate(sim.t+0.5) # check for transits every 0.5 time units. Note that 0.5 is shorter than one orbit t_new = sim.t if y_old*(p[1].y-p[0].y)<0. and p[1].x-p[0].x>0.: # sign changed (y_old*y<0), planet in front of star (x>0) while t_new-t_old>1e-7: # bisect until prec of 1e-5 reached if y_old*(p[1].y-p[0].y)<0.: t_new = sim.t else: t_old = sim.t sim.integrate( (t_new+t_old)/2.) transittimes[i] = sim.t i += 1 sim.integrate(sim.t+0.05) # integrate 0.05 to be past the transit # Next, we do a linear least square fit to remove the linear trend from the transit times, thus leaving us with the transit time variations. 
# Least-squares fit of a linear ephemeris t_i = c + m*i to the transit times.
# Design matrix A has a column of ones (intercept c) and the transit indices
# (slope m = mean period in code units).
A = np.vstack([np.ones(N), range(N)]).T
# rcond=None opts into NumPy's current conditioning cutoff and silences the
# FutureWarning raised when rcond is left unspecified (legacy default).
c, m = np.linalg.lstsq(A, transittimes, rcond=None)[0]

# Finally, let us plot the TTVs (residuals of the transit times from the
# fitted linear ephemeris).

# %matplotlib inline
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(10, 5))
ax = plt.subplot(111)
ax.set_xlim([0, N])
ax.set_xlabel("Transit number")
ax.set_ylabel("TTV [hours]")
# Conversion factor: in code units (G = M_star = a_inner = 1) one inner orbit
# is 2*pi time units; assuming that orbit corresponds to one year, one code
# time unit is 365*24/(2*pi) hours — TODO confirm the intended unit mapping.
plt.scatter(range(N), (transittimes - m*np.array(range(N)) - c)*(24.*365./2./np.pi));
rebound/ipython_examples/TransitTimingVariations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fichiers et boucles # ## Ouvrir et lire un fichier # ### Ouverture d'un fichier #open() open("python.txt", "r") f = open("python.txt", "r") print(f) # ### Lecture d'un fichier #read() g = f.read() print(g) # ### Training f = open("departements_fr.csv", "r", encoding="UTF-8") print(f) data = f.read() print(data) # ## Sรฉparation des รฉlรฉments # ### Crรฉation d'une liste ร  partir d'une chaine de caractรจres type(data) #split() names = "Tom,Seb,Bob" split_list = names.split(",") print(split_list) # ### Training #split() rows = data.split("\n") print(rows[0:5]) # ## Les boucles / Loops # + cities = ["Paris", "Madrid", "Rome"] for city in cities: print(city) # - # ### Training # + ten_rows = rows[0:10] for row in ten_rows: print (row) # - # ## Liste de listes # + cities_number = ["Paris,45", "Madrid,171", "Rome,12"] final_list = [] for row in cities_number: split_list = row.split(",") final_list.append(split_list) print(final_list) # - print(final_list[0]) print(final_list[1]) print(final_list[2]) # ### Training # + final_data = [] for row in rows: split_list = row.split(",") final_data.append(split_list) print(final_data[0:5]) # - # ## Rรฉcupรฉrer et afficher les รฉlรฉments d'un liste de liste first_list = final_data[0] print(first_list) first_list_first_value = first_list[0] print(first_list_first_value) first_list_first_value = final_data[0][0] #1รจre liste 1er รฉlรฉment print(first_list_first_value) second_list_first_value = final_data[1][0] #2รจme liste 1er รฉlรฉment print(second_list_first_value) second_list_second_value = final_data[1][1] #2รจme liste 2รจme รฉlรฉment print(second_list_second_value) # ### Boucle ร  travers une liste de liste # + five_elements = final_data[0:5] departments_list = [] 
departments_list.append(five_elements[0][0]) departments_list.append(five_elements[1][0]) departments_list.append(five_elements[2][0]) departments_list.append(five_elements[3][0]) departments_list.append(five_elements[4][0]) print(departments_list) # + department_populations = [] for row in five_elements: department_population = row[1] department_populations.append(department_population) print(department_populations) # - # ### Training # + departments_list = [] for row in final_data: dep = row[0] departments_list.append(dep) print(departments_list) # -
Fichiers et boucles - Cours Skilleos Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # make local files accessible to import statements import sys, os sys.path.insert(0, os.path.join(os.getcwd(), '..')) # TODO: make it a slideshow # # TODO: explain pint and dimensional_analysis # # TODO: list all constants from PySDM.simulation.physics.dimensional_analysis import DimensionalAnalysis from PySDM.simulation.physics import constants as const with DimensionalAnalysis(): print(const.p1000)
PySDM_tutorials/constants.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.4.5
#     language: julia
#     name: julia-0.4
# ---

using JuMP
using PyPlot
using ImplicitEquations

# +
# Small nonlinear program: minimise the squared distance to the point (-1, 1)
# subject to one equality and two inequality constraints.
m = Model()
@variable m x
@variable m y
@NLobjective(m, Min, (x+1)^2 + (y-1)^2)

# The constraints are also written as plain Julia functions so their regions
# can be drawn with ImplicitEquations below (only c2 is plotted; c1 and c3
# are defined for completeness).
c1(x, y) = 2*y - 1               # equality constraint: 2y - 1 == 0
c2(x, y) = (1-x)*(4-x^2-y^2)     # inequality constraint: <= 0
c3(x, y) = 100 - 2*x^2 - y^2     # inequality constraint: >= 0

@constraint(m, 2*y - 1 == 0)
@NLconstraint(m, (1-x)*(4-x^2-y^2) <= 0)
@NLconstraint(m, 100 - 2*x^2 - y^2 >= 0)

# Starting point handed to the nonlinear solver.
setvalue(x, -2)
setvalue(y, 0)
m
# -

solve(m)
println("got ", getobjectivevalue(m), " at ", [getvalue(x),getvalue(y)])

# Plot the region of the plane where constraint c2 holds.
plot(c2 <= 0)
scripts/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Importing the libraries from RBM import RBM import torch import torchvision from torchvision import datasets,transforms from torch.utils.data import Dataset,DataLoader import matplotlib import matplotlib.pyplot as plt import math import numpy as np # - #Loading MNIST dataset mnist_data = datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose( [transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])) # Need to convert th data into binary variables mnist_data.train_data = (mnist_data.train_data.type(torch.FloatTensor)/255).bernoulli() #Lets us visualize a number from the data set idx = 5 img = mnist_data.train_data[idx] print("The number shown is the number: {}".format(mnist_data.train_labels[idx]) ) plt.imshow(img , cmap = 'gray') plt.show() # If we train on the whole set we expect it to learn to detect edges. batch_size= 10 tensor_x = mnist_data.train_data.type(torch.FloatTensor) # transform to torch tensors tensor_y = mnist_data.train_labels.type(torch.FloatTensor) _dataset = torch.utils.data.TensorDataset(tensor_x,tensor_y) # create your datset train_loader = torch.utils.data.DataLoader(_dataset, batch_size=batch_size, shuffle=True,drop_last = True) # + # I have have set these hyper parameters although you can experiment with them to find better hyperparameters. 
# Hyper-parameters for the RBM trained on the full MNIST training set.
visible_units = 28*28
hidden_units = 500
k = 3                           # number of Gibbs sampling steps (CD-k)
learning_rate = 0.01
learning_rate_decay = True
xavier_init = True
increase_to_cd_k = False
use_gpu = False

rbm_mnist = RBM(visible_units, hidden_units, k,
                learning_rate, learning_rate_decay, xavier_init,
                increase_to_cd_k, use_gpu)

# +
epochs = 30

rbm_mnist.train(train_loader, epochs, batch_size)
# -

# Visualise the filters learned by the first 25 hidden units: each row of W
# is reshaped back into a 28x28 image.
learned_weights = rbm_mnist.W.transpose(0, 1).numpy()
plt.show()
fig = plt.figure(3, figsize=(10, 10))
for i in range(25):
    sub = fig.add_subplot(5, 5, i+1)
    sub.imshow(learned_weights[i, :].reshape((28, 28)), cmap=plt.cm.gray)
plt.show()

# +
# This is an unsupervised learning algorithm, so let us try training on one
# particular digit. But first we need to separate the data.
number = 5  # a digit between 0 and 9
particular_mnist = []
limit = mnist_data.train_data.shape[0]
# limit = 60000
for i in range(limit):
    if mnist_data.train_labels[i] == number:
        particular_mnist.append(mnist_data.train_data[i].numpy())
# particular_mnist = np.array(particular_mnist)
len(particular_mnist)
# mnist_data.train_data
# -

tensor_x = torch.stack([torch.Tensor(i) for i in particular_mnist]).type(torch.FloatTensor)
# BUG FIX: the original `torch.Tensor(number)` allocates an *uninitialized*
# tensor of length `number` (here 5), not a scalar label, so every target was
# a 5-element garbage vector. torch.full yields one scalar label per image.
tensor_y = torch.full((len(particular_mnist),), float(number)).type(torch.FloatTensor)
mnist_particular_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)
mnist_particular_dataloader = torch.utils.data.DataLoader(mnist_particular_dataset,
                                                          batch_size=batch_size,
                                                          drop_last=True,
                                                          num_workers=0)

# +
# Same architecture, retrained from scratch on the single-digit subset
# (learning-rate decay disabled this time).
visible_units = 28*28
hidden_units = 500
k = 3
learning_rate = 0.01
learning_rate_decay = False
xavier_init = True
increase_to_cd_k = False
use_gpu = False

rbm_mnist = RBM(visible_units, hidden_units, k,
                learning_rate, learning_rate_decay, xavier_init,
                increase_to_cd_k, use_gpu)

epochs = 10

rbm_mnist.train(mnist_particular_dataloader, epochs)

# +
# This shows the weights for the hidden neurons (the first 25 are plotted
# below) and gives an idea of how each neuron is activated.
learned_weights = rbm_mnist.W.transpose(0,1).numpy() plt.show() fig = plt.figure(3, figsize=(10,10)) for i in range(25): sub = fig.add_subplot(5, 5, i+1) sub.imshow(learned_weights[i, :].reshape((28,28)), cmap=plt.cm.gray) plt.show() # + #Lets try reconstructing a random number from this model which has learned 5 idx = 7 img = mnist_data.train_data[idx] reconstructed_img = img.view(-1).type(torch.FloatTensor) # _ , reconstructed_img = rbm_mnist.to_hidden(reconstructed_img) # _ , reconstructed_img = rbm_mnist.to_visible(reconstructed_img) _,reconstructed_img = rbm_mnist.reconstruct(reconstructed_img,1) # print(reconstructed_img) reconstructed_img = reconstructed_img.view((28,28)) print("The original number: {}".format(mnist_data.train_labels[idx])) plt.imshow(img , cmap = 'gray') plt.show() print("The reconstructed image") plt.imshow(reconstructed_img , cmap = 'gray') plt.show() # -
mnist_tutorial_RBM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp models.layers # - # # Layers # # > Helper function used to build PyTorch timeseries models. #export from fastai2.torch_core import Module from tsai.imports import * #export def noop(x): return x # + #export # <NAME>. (2019). Mish: A Self Regularized Non-Monotonic Neural Activation Function. arXiv preprint arXiv:1908.08681. # https://arxiv.org/abs/1908.08681 # GitHub: https://github.com/digantamisra98/Mish @torch.jit.script def mish(input): '''Applies the mish function element-wise: mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))''' return input * torch.tanh(F.softplus(input)) class Mish(Module): def forward(self, input): return mish(input) # - class Swish(Module): def __init__(self): self.sigmoid = torch.sigmoid def forward(self, x): return x.mul_(self.sigmoid(x)) #export def get_act_layer(act_fn, act_kwargs={}): act_fn = act_fn.lower() assert act_fn in ['relu', 'leakyrelu', 'prelu', 'elu', 'mish', 'swish'], 'incorrect act_fn' if act_fn == 'relu': return nn.ReLU() elif act_fn == 'leakyrelu': return nn.LeakyReLU(**act_kwargs) elif act_fn == 'prelu': return nn.PReLU(**act_kwargs) elif act_fn == 'elu': return nn.ELU(**act_kwargs) elif act_fn == 'mish': return Mish() elif act_fn == 'swish': return Swish() t = torch.rand(2, 3, 4) for act_fn in ['relu', 'leakyrelu', 'prelu', 'elu', 'mish', 'swish']: test_eq(get_act_layer(act_fn)(t).shape, t.shape) # + #export def same_padding1d(seq_len, ks, stride=1, dilation=1): effective_ks = (ks - 1) * dilation + 1 out_dim = (seq_len + stride - 1) // stride p = max(0, (out_dim - 1) * stride + effective_ks - seq_len) padding_before = p // 2 padding_after = p - padding_before return padding_before, padding_after class Pad1d(nn.ConstantPad1d): def __init__(self, padding, value=0.): 
super().__init__(padding, value) class Conv1dSame(Module): "Conv1d with padding='same'" def __init__(self, c_in, c_out, ks=3, stride=1, dilation=1, **kwargs): self.ks, self.stride, self.dilation = ks, stride, dilation self.conv1d_same = nn.Conv1d(c_in, c_out, ks, stride=stride, dilation=dilation, **kwargs) self.pad = Pad1d def forward(self, x): self.padding = same_padding1d(x.shape[-1],self.ks,stride=self.stride,dilation=self.dilation) return self.conv1d_same(self.pad(self.padding)(x)) # - bs = 2 c_in = 3 c_out = 5 seq_len = 6 t = torch.rand(bs, c_in, seq_len) test_eq(Conv1dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, seq_len)) # + #export # https://github.com/locuslab/TCN/blob/master/TCN/tcn.py class Chomp1d(Module): def __init__(self, chomp_size): self.chomp_size = chomp_size def forward(self, x): return x[:, :, :-self.chomp_size].contiguous() class Conv1dCausal(Module): def __init__(self, c_in, c_out, ks, stride=1, dilation=1, **kwargs): padding = (ks - 1) * dilation self.conv = nn.Conv1d(c_in, c_out, ks, stride=stride, padding=padding, dilation=dilation, **kwargs) self.chomp = Chomp1d(math.ceil(padding / stride)) def forward(self, x): return self.chomp(self.conv(x)) # - bs = 2 c_in = 3 c_out = 5 seq_len = 512 t = torch.rand(bs, c_in, seq_len) stride = 1 dilation = 1 test_eq(Conv1dCausal(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape) stride = 1 dilation = 2 test_eq(Conv1dCausal(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape) stride = 2 dilation = 1 test_eq(Conv1dCausal(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape) stride = 2 dilation = 4 test_eq(Conv1dCausal(c_in, c_out, ks=3, stride=stride, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, 
stride=stride, dilation=dilation)(t).shape) #export def Conv1d(c_in, c_out, ks=3, stride=1, padding='same', dilation=1, bias=True, act_fn='relu', act_kwargs={}, bn_before_conv=False, bn_before_act=True, bn_after_act=False, zero_bn=False, **kwargs): '''conv1d with default padding='same', bn and act_fn (default = 'relu')''' layers = [] if bn_before_conv: layers.append(nn.BatchNorm1d(c_in)) if padding == 'same': layers.append(Conv1dSame(c_in, c_out, ks, stride=stride, dilation=dilation, bias=bias, **kwargs)) elif padding == 'causal': layers.append(Conv1dCausal(c_in, c_out, ks, stride=stride, dilation=dilation, bias=bias, **kwargs)) else: if padding == 'valid': padding = 0 layers.append(nn.Conv1d(c_in, c_out, ks, stride=stride, padding=padding, dilation=dilation, bias=bias, **kwargs)) if bn_before_act: layers.append(nn.BatchNorm1d(c_out)) if act_fn: layers.append(get_act_layer(act_fn, act_kwargs)) if bn_after_act: bn = nn.BatchNorm1d(c_out) nn.init.constant_(bn.weight, 0. if zero_bn else 1.) 
layers.append(bn) return nn.Sequential(*layers) bs = 2 c_in = 3 c_out = 5 seq_len = 6 ks = 3 t = torch.rand(bs, c_in, seq_len) test_eq(Conv1d(c_in, c_out, ks=ks, padding=0)(t).shape, (bs, c_out, seq_len - (2 * (ks//2)))) test_eq(Conv1d(c_in, c_out, ks=ks, padding='valid')(t).shape, (bs, c_out, seq_len - (2 * (ks//2)))) test_eq(Conv1d(c_in, c_out, ks=ks, padding='same')(t).shape, (bs, c_out, seq_len)) test_eq(Conv1d(c_in, c_out, ks=ks, padding='causal')(t).shape, (bs, c_out, seq_len)) Conv1d(c_in, c_out, ks=ks, padding='same') Conv1d(c_in, c_out, ks=ks, padding='causal') #export class CoordConv1D(Module): def forward(self, x): bs, _, seq_len = x.size() cc = torch.arange(seq_len, device=device, dtype=torch.float) / (seq_len - 1) cc = cc * 2 - 1 cc = cc.repeat(bs, 1, 1) x = torch.cat([x, cc], dim=1) return x bs = 2 c_in = 3 c_out = 5 seq_len = 6 t = torch.rand(bs, c_in, seq_len) test_eq(CoordConv1D()(t).shape, (bs, c_in + 1, seq_len)) #export class LambdaPlus(Module): def __init__(self, func, *args, **kwargs): self.func,self.args,self.kwargs=func,args,kwargs def forward(self, x): return self.func(x, *self.args, **self.kwargs) # + #export class Flatten(Module): def forward(self, x): return x.view(x.size(0), -1) class Squeeze(Module): def __init__(self, dim=-1): self.dim = dim def forward(self, x): return x.squeeze(dim=self.dim) class Unsqueeze(Module): def __init__(self, dim=-1): self.dim = dim def forward(self, x): return x.unsqueeze(dim=self.dim) class YRange(Module): def __init__(self, y_range:tuple): self.y_range = y_range self.sigmoid = torch.sigmoid def forward(self, x): x = self.sigmoid(x) return x * (self.y_range[1] - self.y_range[0]) + self.y_range[0] class Temp(Module): def __init__(self, temp): self.temp = float(temp) self.temp = nn.Parameter(torch.Tensor(1).fill_(self.temp).to(device)) def forward(self, x): return x.div_(self.temp) # - #hide out = create_scripts() beep(out)
nbs/100_layers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Solving ODEs with scipy.integrate.solve_ivp # ## Solving ordinary differential equations (ODEs) # # Here we will revisit the differential equations solved in 5300_Jupyter_Python_intro_01.ipynb with `odeint`, only now we'll use `solve_ivp` from Scipy. We'll compare the new and old solutions as we go. # ### First-order ODE # + # Import the required modules import numpy as np import matplotlib.pyplot as plt from scipy.integrate import solve_ivp # Now preferred to odeint # - # Let's try a one-dimensional first-order ODE, say: # # $\begin{align} # \quad # \frac{dv}{dt} = -g, \quad \mbox{with} \quad v(0) = 10 # \end{align}$ # # in some appropriate units (we'll use MKS units by default). This ODE can be separated and directly integrated: # # $\begin{align} # \int_{v_0=10}^{v} dv' = - g \int_{0}^{t} dt' # \quad\Longrightarrow\quad # v - v_0 = - g (t - 0) # \quad\Longrightarrow\quad # v(t) = 10 - gt # \end{align}$ # # # The goal is to find the solution $v(t)$ as an array `v_pts` at the times in the array `t_pts`. # + # Define a function which calculates the derivative def dv_dt_new(t, v, g=9.8): """Returns the right side of a simple first-order ODE with default g.""" return -g t_start = 0. t_end = 10. t_pts = np.linspace(t_start, t_end, 20) # 20 points between t=0 and t=10. v_0 = np.array([10.0]) # initial condition, in form of a list or numpy array abserr = 1.e-8 relerr = 1.e-8 solution = solve_ivp(dv_dt_new, (t_start, t_end), v_0, t_eval=t_pts, rtol=relerr, atol=abserr) # solve_ivp( function for rhs with (t, v) argument (cf. 
(v,t) for odeint), # tspan=(starting t value, ending t value), # initial value of v(t), array of points we want to know v(t), # method='RK45' is the default method, # rtol=1.e-3, atol=1.e-6 are default tolerances # ) v_pts = solution.y # array of results at t_pts # - v_pts.shape # 1 x 100 matrix (row vector) # Here's how we did it before with odeint: # + from scipy.integrate import odeint # Define a function which calculates the derivative def dv_dt(v, t, g=9.8): """Returns the right side of a simple first-order ODE with default g.""" return -g t_pts = np.linspace(0., 10., 20) # 20 points between t=0 and t=10. v_0 = 10. # the initial condition v_pts_odeint = odeint(dv_dt, v_0, t_pts) # odeint( function for rhs, # initial value of v(t), # array of t values ) # - v_pts_odeint.shape # 100 x 1 matrix (column vector) # Make a table comparing results (using `flatten()` to make the matrices into arrays): print(' t v(t) [solve_ivp] v(t) [odeint]') for t, v_solve_ivp, v_odeint in zip(t_pts, v_pts.flatten(), v_pts_odeint.flatten()): print(f' {t:6.3f} {v_solve_ivp:12.7f} {v_odeint:12.7f}') # Differences between `solve_ivp` and `odeint`: # * `dv_dt(t, v)` vs. `dv_dt(v, t)`, i.e., the function definitions have the arguments reversed. # * With `odeint`, you only specify the full array of $t$ points you want to know $v(t)$ at. With `solve_ivp`, you first specify the starting $t$ and ending $t$ as a tuple: `(t_start, t_end)` and then (optionally) specify `t_eval=t_pts` to evaluate $v$ at the points in the `t_pts` array. # * `solve_ivp` returns an object from which $v(t)$ (and other results) can be found, while `ode_int` returns $v(t)$. # * For this single first-order equation, $v(t)$ is returned for the $N$ requested $t$ points as a $1 \times N$ two-dimensional array by `solve_ivp` and as a $N \times 1$ array by `odeint`. # * `odeint` has no choice of solver while the `solve_ivp` solver can be set by `method`. 
The default is `method='RK45'`, which is good, general-purpose Runge-Kutta solver. # ### Second-order ODE # Suppose we have a second-order ODE such as: # # $$ # \quad y'' + 2 y' + 2 y = \cos(2x), \quad \quad y(0) = 0, \; y'(0) = 0 # $$ # # We can turn this into two first-order equations by defining a new dependent variable. For example, # # $$ # \quad z \equiv y' \quad \Rightarrow \quad z' + 2 z + 2y = \cos(2x), \quad z(0)=y(0) = 0. # $$ # # Now introduce the vector # # $$ # \mathbf{U}(x) = \left(\begin{array}{c} # y(x) \\ # z(x) # \end{array} # \right) # \quad\Longrightarrow\quad # \frac{d\mathbf{U}}{dx} = \left(\begin{array}{c} # z \\ # -2 y' - 2 y + \cos(2x) # \end{array} # \right) # $$ # # We can solve this system of ODEs using `solve_ivp` with lists, as follows. We will try it first without specifying the relative and absolute error tolerances rtol and atol. # + # Define a function for the right side def dU_dx_new(x, U): """Right side of the differential equation to be solved. U is a two-component vector with y=U[0] and z=U[1]. Thus this function should return [y', z'] """ return [U[1], -2*U[1] - 2*U[0] + np.cos(2*x)] # initial condition U_0 = [y(0)=0, z(0)=y'(0)=0] U_0 = [0., 0.] x_pts = np.linspace(0, 15, 20) # Set up the mesh of x points result = solve_ivp(dU_dx_new, (0, 15), U_0, t_eval=x_pts) y_pts = result.y[0,:] # Ok, this is tricky. For each x, result.y has two # components. We want the first component for all # x, which is y(x). The 0 means the first index and # the : means all of the x values. # - # Here's how we did it before with `odeint`: # + # Define a function for the right side def dU_dx(U, x): """Right side of the differential equation to be solved. U is a two-component vector with y=U[0] and z=U[1]. Thus this function should return [y', z'] """ return [U[1], -2*U[1] - 2*U[0] + np.cos(2*x)] # initial condition U_0 = [y(0)=0, z(0)=y'(0)=0] U_0 = [0., 0.] 
x_pts = np.linspace(0, 15, 20) # Set up the mesh of x points U_pts = odeint(dU_dx, U_0, x_pts) # U_pts is a 2-dimensional array y_pts_odeint = U_pts[:,0] # Ok, this is tricky. For each x, U_pts has two # components. We want the upper component for all # x, which is y(x). The : means all of the first # index, which is x, and the 0 means the first # component in the other dimension. # - # Make a table comparing results (using `flatten()` to make the matrices into arrays): print(' x y(x) [solve_ivp] y(x) [odeint]') for x, y_solve_ivp, y_odeint in zip(x_pts, y_pts.flatten(), y_pts_odeint.flatten()): print(f' {x:6.3f} {y_solve_ivp:12.7f} {y_odeint:12.7f}') # Not very close agreement by the end. Run both again with greater accuracy. # + relerr = 1.e-10 abserr = 1.e-10 result = solve_ivp(dU_dx_new, (0, 15), U_0, t_eval=x_pts, rtol=relerr, atol=abserr) y_pts = result.y[0,:] U_pts = odeint(dU_dx, U_0, x_pts, rtol=relerr, atol=abserr) y_pts_odeint = U_pts[:,0] print(' x y(x) [solve_ivp] y(x) [odeint]') for x, y_solve_ivp, y_odeint in zip(x_pts, y_pts.flatten(), y_pts_odeint.flatten()): print(f' {x:6.3f} {y_solve_ivp:12.7f} {y_odeint:12.7f}') # - # Comparing the results from when we didn't specify the errors we see that the default error tolerances for solve_ivp were insufficient. Moral: specify them explicitly.
week_4/ODEs_with_solve_ivp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Classifying Galaxies
#
# We want to classify each galaxy as either containing an AGN or not containing
# an AGN, assuming that all galaxies are independent.

# +
import sys

from astropy.coordinates import SkyCoord
import h5py
import matplotlib.pyplot as plt
import numpy
import sklearn.linear_model
import sklearn.ensemble
import sklearn.metrics
import sklearn.neighbors

sys.path.insert(1, '..')
import crowdastro

NORRIS_DAT_PATH = '../data/norris_2006_atlas_classifications_ra_dec_only.dat'
TRAINING_H5_PATH = '../data/training.h5'

# %matplotlib inline
# -

# ## How many infrared objects are there?

# NOTE(review): `.value` is the h5py<3 dataset-read API; under h5py>=3 this
# would need to become `[()]`.  Left as-is to match the pinned environment.
with h5py.File(TRAINING_H5_PATH, 'r') as training_f:
    n_objects = training_f['features'].shape[0]
    print('Total:', n_objects)
    print('Testing:', training_f['is_ir_test'].value.sum(),
          '({:.02%})'.format(training_f['is_ir_test'].value.sum() / n_objects))
    print('Training:', training_f['is_ir_train'].value.sum(),
          '({:.02%})'.format(training_f['is_ir_train'].value.sum() / n_objects))

    atlas_test = training_f['is_atlas_test'].value.sum()
    atlas_train = training_f['is_atlas_train'].value.sum()
    atlas_total = atlas_test + atlas_train
    print('Testing:', atlas_test, '({:.02%})'.format(atlas_test / atlas_total))
    # Bug fix: this line previously printed the label 'Testing:' for the
    # training split of the ATLAS objects.
    print('Training:', atlas_train, '({:.02%})'.format(atlas_train / atlas_total))

# ## Training the classifier

with h5py.File(TRAINING_H5_PATH, 'r') as training_f:
    # L1-regularised logistic regression; class_weight='balanced' compensates
    # for the rarity of positive (AGN-host) examples in the training set.
    lr = sklearn.linear_model.LogisticRegression(n_jobs=-1, class_weight='balanced',
                                                 C=100.0, penalty='l1')
    x = training_f['features'][training_f['is_ir_train'].value, :]
    y = training_f['labels'][training_f['is_ir_train'].value]
    lr.fit(x, y)

# ## Testing the classifier

# +
# Load Norris labels.
with h5py.File(TRAINING_H5_PATH, 'r') as training_f:
    ir_positions = training_f['positions'].value
    ir_tree = sklearn.neighbors.KDTree(ir_positions)

with open(NORRIS_DAT_PATH, 'r') as norris_dat:
    norris_coords = [r.strip().split('|') for r in norris_dat]

norris_labels = numpy.zeros((len(ir_positions)))
for ra, dec in norris_coords:
    # Find a neighbour: the nearest infrared object to this classification.
    skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
    ra = skycoord.ra.degree
    dec = skycoord.dec.degree
    ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
    # NOTE(review): assumes the KDTree distance is in degrees and 0.1 deg is an
    # acceptable match radius — confirm against the catalogue resolution.
    if dist < 0.1:
        norris_labels[ir] = 1
# -

# Predict.
with h5py.File(TRAINING_H5_PATH, 'r') as training_f:
    test_indices = training_f['is_ir_test'].value
    x = training_f['features'][test_indices, :]
    t = norris_labels[test_indices]
    y = lr.predict(x)

# ### Accuracy

# Raw accuracy.
sklearn.metrics.accuracy_score(t, y)

# Balanced accuracy: row-normalise the confusion matrix so each true class
# contributes equally, then average the per-class recalls (trace / 2).
cm = sklearn.metrics.confusion_matrix(t, y).astype(float)
cm /= cm.sum(axis=1).reshape((-1, 1))
cm.trace() / 2

# ##
notebooks/51_thesis_galaxy_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''qutip-env'': conda)' # name: python388jvsc74a57bd0d9fe01cc93207f9d85c02c9a2b51282a64a6c5e7b236d31b93012e1f9403b53c # --- # # Quantum Tomography # ## Introduction # # Quantum tomography is an experimental procedure to reconstruct a description of part of quantum system from the measurement outcomes of a specific set of experiments. In Qiskit we implement the following types of tomography: # # 1. **Quantum state tomography**: Given a state-preparation circuit that prepares a system in a state, reconstruct a description of the density matrix $\rho$ of the actual state obtained in the system. # 2. **Quantum process tomography**: Given a circuit, reconstruct a description of the quantum channel $\mathcal{E}$ that describes the circuit's operator when running on the system. # 3. **Quantum gate set tomography**: Performs process tomography on a set of gates in a self-consistent manner, meaning quantum noises on gates used by the tomography process itself is also taken into account. # # This notebook gives examples for how to use the ``ignis.verification.tomography`` modules. 
# + # Needed for functions import numpy as np import time from copy import deepcopy # Import Qiskit classes import qiskit import qiskit.quantum_info as qi from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer from qiskit.providers.aer import noise from qiskit.compiler import assemble # Tomography functions from qiskit.ignis.verification.tomography import state_tomography_circuits, StateTomographyFitter from qiskit.ignis.verification.tomography import process_tomography_circuits, ProcessTomographyFitter from qiskit.ignis.verification.tomography import gateset_tomography_circuits, GatesetTomographyFitter import qiskit.ignis.mitigation.measurement as mc # Auxiliary methods from qiskit.quantum_info import Choi, Kraus from qiskit.extensions import HGate, XGate # - # ## Initial examples # ### 2-Qubit state tomography Example # In the below example we want to perform state tomography on a 2Q Bell state between qubits 3 and 5. To make the reference circuit we generate the expected statevector using ``statevector_simulator`` between qubits 0 and 1. # + # Create the expected statevector q2 = QuantumRegister(2) bell = QuantumCircuit(q2) bell.h(q2[0]) bell.cx(q2[0], q2[1]) print(bell) target_state_bell = qi.Statevector.from_instruction(bell) print(target_state_bell) # - # Create the actual circuit q2 = QuantumRegister(6) bell = QuantumCircuit(q2) bell.h(q2[3]) bell.cx(q2[3], q2[5]) print(bell) # Here we are going to generate and run the state tomography circuits. By only passing in the 2 registers we want to measure the state tomography will only run on that reduced $2^2$ Hilbert space. However, if we pass the whole register in the state tomography module will try and fit the full $2^6$ space. # + # Generate circuits and run on simulator t = time.time() # Generate the state tomography circuits. 
qst_bell = state_tomography_circuits(bell, [q2[3], q2[5]]) # Execute job = qiskit.execute(qst_bell, Aer.get_backend('qasm_simulator'), shots=5000) print('Time taken:', time.time() - t) # Fit result tomo_fitter_bell = StateTomographyFitter(job.result(), qst_bell) # - # The fitter will output a density matrix ordered according to how we passed in the registers to ``state_tomography_circuits``. # Perform the tomography fit # which outputs a density matrix rho_fit_bell = tomo_fitter_bell.fit(method='lstsq') F_bell = qi.state_fidelity(rho_fit_bell, target_state_bell) print('State Fidelity: F = {:.5f}'.format(F_bell)) # ### Repeat the Example with Measurement Noise # + #Add measurement noise noise_model = noise.NoiseModel() for qubit in range(6): read_err = noise.errors.readout_error.ReadoutError([[0.75, 0.25],[0.1,0.9]]) noise_model.add_readout_error(read_err,[qubit]) #generate the calibration circuits meas_calibs, state_labels = mc.complete_meas_cal(qubit_list=[3,5]) backend = Aer.get_backend('qasm_simulator') job_cal = qiskit.execute(meas_calibs, backend=backend, shots=15000, noise_model=noise_model) job_tomo = qiskit.execute(qst_bell, backend=backend, shots=15000, noise_model=noise_model) meas_fitter = mc.CompleteMeasFitter(job_cal.result(),state_labels) tomo_bell = StateTomographyFitter(job_tomo.result(), qst_bell) #no correction rho_bell = tomo_bell.fit(method='lstsq') F_bell = qi.state_fidelity(rho_bell, target_state_bell) print('State fidelity (no correction): F = {:.5f}'.format(F_bell)) #correct data correct_tomo_results = meas_fitter.filter.apply(job_tomo.result(), method='least_squares') tomo_bell_mit = StateTomographyFitter(correct_tomo_results, qst_bell) rho_fit_bell_mit = tomo_bell_mit.fit(method='lstsq') F_bell_mit = qi.state_fidelity(rho_fit_bell_mit, target_state_bell) print('State fidelity (w/ correction): F = {:.5f}'.format(F_bell_mit)) # - state_labels # ## 1-qubit process tomography example # + # Process tomography of a Hadamard gate q = 
QuantumRegister(1) circ = QuantumCircuit(q) # circ.h(q[0]) circ.ry(np.pi/2, q[0]) # Get the ideal unitary operator target_unitary = qi.Operator(circ) # Generate process tomography circuits and run on qasm simulator qpt_circs = process_tomography_circuits(circ, q) job = qiskit.execute(qpt_circs, Aer.get_backend('qasm_simulator'), shots=4000) # Extract tomography data so that counts are indexed by measurement configuration qpt_tomo = ProcessTomographyFitter(job.result(), qpt_circs) qpt_tomo.data # + # Tomographic reconstruction t = time.time() choi_fit_lstsq = qpt_tomo.fit(method='lstsq') print('Fit time:', time.time() - t) print('Average gate fidelity: F = {:.5f}'.format(qi.average_gate_fidelity(choi_fit_lstsq, target=target_unitary))) # - # ## 1-qubit process tomography of two-qubit swap gate # # We will prepare qubit-0 and measure qubit-1 so the reconstructed channel should be an identity. # + # Process tomography of a Hadamard gate q = QuantumRegister(2) circ = QuantumCircuit(q) circ.swap(q[0], q[1]) # Generate process tomography circuits and run on qasm simulator # We use the optional prepared_qubits kwarg to specify that the prepared qubit was different to measured qubit qpt_circs = process_tomography_circuits(circ, q[1], prepared_qubits=q[0]) job = qiskit.execute(qpt_circs, Aer.get_backend('qasm_simulator'), shots=2000) # Extract tomography data so that counts are indexed by measurement configuration qpt_tomo = ProcessTomographyFitter(job.result(), qpt_circs) qpt_tomo.data # + # Tomographic reconstruction t = time.time() choi_fit = qpt_tomo.fit(method='lstsq') print('Fit time:', time.time() - t) print('Average gate fidelity: F = {:.5f}'.format(qi.average_gate_fidelity(choi_fit))) # - # ## Advances examples # ## Generating and fitting random states # # We now test the functions on the state generated by a circuit consisting of a layer of random single qubit unitaries u3. 
def random_u_tomo(nq, shots): def rand_angles(): return tuple(2 * np.pi * np.random.random(3) - np.pi) q = QuantumRegister(nq) circ = QuantumCircuit(q) for j in range(nq): circ.u(*rand_angles(), q[j]) target_state = qi.Statevector.from_instruction(circ) qst_circs = state_tomography_circuits(circ, q) job = qiskit.execute(qst_circs, Aer.get_backend('qasm_simulator'), shots=shots) tomo_data = StateTomographyFitter(job.result(), qst_circs) rho_fit = tomo_data.fit(method='lstsq') print('F = {:.5f}'.format(qi.state_fidelity(rho_fit, target_state))) for j in range(5): print('Random single-qubit unitaries: set {}'.format(j)) random_u_tomo(3, 5000) # ## 5-Qubit Bell State # + # Create a state preparation circuit q5 = QuantumRegister(5) bell5 = QuantumCircuit(q5) bell5.h(q5[0]) for j in range(4): bell5.cx(q5[j], q5[j + 1]) # Get ideal output state target_state_bell5 = qi.Statevector.from_instruction(bell5) # Generate circuits and run on simulator t = time.time() qst_bell5 = state_tomography_circuits(bell5, q5) job = qiskit.execute(qst_bell5, Aer.get_backend('qasm_simulator'), shots=5000) # Extract tomography data so that counts are indexed by measurement configuration tomo_bell5 = StateTomographyFitter(job.result(), qst_bell5) print('Time taken:', time.time() - t) # - t = time.time() rho_fit_bell5 = tomo_bell5.fit(method='lstsq') print('Time taken:', time.time() - t) print('State fidelity: F = {:.5f}'.format(qi.state_fidelity(rho_fit_bell5, target_state_bell5))) # ## 2-Qubit Conditional State Tomography # In this example, we have a three-qubit system where one of the qubits will be an ancilla for performing state tomography, i.e. only perform tomography when the third qubit is in the state "1". The circuit is setup in such a way that after conditional tomography we will get a Bell state on the first two qubits. # # First make a 3Q GHZ state with no classical measurements. 
# Create the actual circuit q2 = QuantumRegister(3) ghz = QuantumCircuit(q2) ghz.h(q2[0]) ghz.cx(q2[0], q2[1]) ghz.cx(q2[1], q2[2]) ghz.h(q2[2]) print(ghz) # Here we are going to generate and run the state tomography circuits. Only pass the registers we want to perform state tomography on. The code will generate a new classical register for only those measurements. qst_ghz = state_tomography_circuits(ghz, [q2[0],q2[1]]) print(qst_ghz[0]) # Now make a copy of this circuit (we will need it for the fitter) and make a new circuit with an ancilla measurement attached (this is what will be run): #Make a copy without the ancilla register qst_ghz_no_anc = deepcopy(qst_ghz) ca = ClassicalRegister(1) for qst_ghz_circ in qst_ghz: qst_ghz_circ.add_register(ca) qst_ghz_circ.measure(q2[2],ca[0]) #Run in Aer job = qiskit.execute(qst_ghz, Aer.get_backend('qasm_simulator'), shots=10000) raw_results = job.result() # Before sending the results to the state tomography fitter we must strip the register for the Q2 measurement and only keep the results when that register is 1. 
# + new_result = deepcopy(raw_results) for resultidx, _ in enumerate(raw_results.results): old_counts = raw_results.get_counts(resultidx) new_counts = {} #change the size of the classical register new_result.results[resultidx].header.creg_sizes = [new_result.results[resultidx].header.creg_sizes[0]] new_result.results[resultidx].header.clbit_labels = new_result.results[resultidx].header.clbit_labels[0:-1] new_result.results[resultidx].header.memory_slots = 2 for reg_key in old_counts: reg_bits = reg_key.split(' ') if reg_bits[0]=='1': new_counts[reg_bits[1]]=old_counts[reg_key] new_result.results[resultidx].data.counts = new_counts # - tomo_bell = StateTomographyFitter(new_result, qst_ghz_no_anc) # Perform the tomography fit # which outputs a density matrix rho_fit_bell = tomo_bell.fit(method='lstsq') np.around(rho_fit_bell, 3) # # Gate set tomography # ## 1-Qubit gate set tomography Examples # The main difference between gate set tomography and process tomography is that in gate set tomography, the input consists of a gate set basis: A set of gates that are both used in the initialization/measurement phase of the tomography, and are being reconstructed. # # Qiskit supplies a default gateset basis; in order to use this gateset basis in order to reconstruct another gate, this gate should be added to the basis. 
We use the following method to simplify the process: # + from qiskit.ignis.verification.tomography.basis import default_gateset_basis def collect_tomography_data(shots=10000, noise_model=None, gateset_basis='Standard GST'): backend_qasm = Aer.get_backend('qasm_simulator') circuits = gateset_tomography_circuits(gateset_basis=gateset_basis) qobj = assemble(circuits, shots=shots) result = backend_qasm.run(qobj, noise_model=noise_model).result() fitter = GatesetTomographyFitter(result, circuits, gateset_basis) return fitter def gate_set_tomography(gate, noise_model=None): basis = default_gateset_basis() basis.add_gate(gate) fitter = collect_tomography_data(shots=10000, noise_model=noise_model, gateset_basis=basis) result_gates = fitter.fit() result_gate = result_gates[gate.name] return Choi(result_gate) # - # ### Noiseless 1-qubit gate set tomography target_unitary = qi.Operator(HGate()) t = time.time() channel_fit = gate_set_tomography(HGate()) print('fit time:', time.time() - t) print('Average gate fidelity: F = {:.5f}'.format(qi.average_gate_fidelity(channel_fit, target_unitary))) import qiskit.tools.jupyter # %qiskit_version_table # %qiskit_copyright
tomography.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Remotes in GitHub # + [markdown] slideshow={"slide_type": "slide"} # ## Overview # - **Teaching:** 30 min # - **Exercises:** 0 min # # **Questions** # - How do I share my changes with others (on the web)? # # **Objectives** # - Explain what remote repositories are and why they are useful. # - Push to or pull from a remote repository. # + [markdown] slideshow={"slide_type": "slide"} # Version control really comes into its own when we begin to collaborate with other people. We already have most of the machinery we need to do this; the only thing missing is to copy changes from one repository to another. # # Systems like Git allow us to move work between any two repositories. In practice, though, itโ€™s easiest to use one copy as a central hub, and to keep it on the web rather than on someoneโ€™s laptop. Most programmers use hosting services like [GitHub](https://github.com), [BitBucket](https://bitbucket.com) or [GitLab](https://gitlab.com) to hold those master copies; weโ€™ll explore the pros and cons of this in the final section of this lesson. # # Letโ€™s start by sharing the changes weโ€™ve made to our current project with the world. Log in to GitHub, then click on the icon in the top right corner to create a new repository called planets: # # ![Github create repository](github-create-repo-01.png) # # Name your repository planets, add an optional description and the click `Create Repository`. 
# # ![Github name and describe repository](github-create-repo-02.png) # + [markdown] slideshow={"slide_type": "slide"} # As soon as the repository is created, GitHub displays a page with a URL and some information on how to configure your local repository: # # ![Github quick start](github-create-repo-03.png) # # which effectively does the following on GitHubโ€™s servers: # ```bash # % mkdir planets # % cd planets # % git init # ``` # # Our local repository contains our earlier work on `mars.txt`, but the remote repository on GitHub doesnโ€™t contain any files yet: # ![Local and remote repo states](../images/git-freshly-made-github-repo.svg) # + [markdown] slideshow={"slide_type": "slide"} # The next step is to connect the two repositories. We do this by making the GitHub repository a remote for the local repository. The home page of the repository on GitHub includes the string we need to identify it: # ![Copy repository string](github-find-repo-string.png) # # Click on the `HTTPS` link to change the protocol from `SSH` to `HTTPS` if needed. # + [markdown] slideshow={"slide_type": "slide"} # ## Pin: HTTPS vs. SSH # We use HTTPS here because it does not require additional configuration. After the workshop you may want to set up SSH access, which is a bit more secure, by following one of the great tutorials from [GitHub](https://help.github.com/articles/generating-ssh-keys), [Atlassian/BitBucket](https://confluence.atlassian.com/display/BITBUCKET/Set+up+SSH+for+Git) and [GitLab](https://about.gitlab.com/2014/03/04/add-ssh-key-screencast/) (this one has a screencast). # + [markdown] slideshow={"slide_type": "slide"} # Copy that URL from the browser, go into the local `planets` repository, and run this command: # ```bash # % git remote add origin https://github.com/vlad/planets.git # ``` # # Make sure to use the URL for your repository rather than Vladโ€™s: the only difference should be your username instead of `vlad`. 
# # We can check that the command has worked by running `git remote -v`: # ```bash # % git remote -v # ``` # ```brainfuck # origin https://github.com/vlad/planets.git (push) # origin https://github.com/vlad/planets.git (fetch) # ``` # + [markdown] slideshow={"slide_type": "slide"} # The name `origin` is a local nickname for your remote repository. We could use something else if we wanted to, but `origin` is by far the most common choice. # # Once the nickname `origin` is set up, this command will push the changes from our local repository to the repository on GitHub: # ```bash # % git push origin master # ``` # ```brainfuck # Counting objects: 9, done. # Delta compression using up to 4 threads. # Compressing objects: 100% (6/6), done. # Writing objects: 100% (9/9), 821 bytes, done. # Total 9 (delta 2), reused 0 (delta 0) # To https://github.com/vlad/planets # * [new branch] master -> master # Branch master set up to track remote branch master from origin. # ``` # + [markdown] slideshow={"slide_type": "slide"} # Our local and remote repositories are now in this state: # # ![Remote and local repos after first push](../images/github-repo-after-first-push.svg) # + [markdown] slideshow={"slide_type": "slide"} # ## Pin: The `-u` Flag # You may see a `-u` option used with git push in some documentation. This option is synonymous with the `--set-upstream-to` option for the git branch command, and is used to associate the current branch with a remote branch so that the `git pull` command can be used without any arguments. To do this, simply use `git push -u origin master` once the remote has been set up. # + [markdown] slideshow={"slide_type": "slide"} # We can pull changes from the remote repository to the local one as well: # ```bash # % git pull origin master # ``` # ```brainfuck # From https://github.com/vlad/planets # * branch master -> FETCH_HEAD # Already up-to-date. # ``` # # Pulling has no effect in this case because the two repositories are already synchronized. 
If someone else had pushed some changes to the repository on GitHub, though, this command would download them to our local repository. # + [markdown] slideshow={"slide_type": "slide"} # ## Exercise: GitHub GUI # Browse to your planets repository on GitHub. Under the Code tab, find and click on the text that says โ€œXX commitsโ€ (where โ€œXXโ€ is some number). Hover over, and click on, the three buttons to the right of each commit. What information can you gather/explore from these buttons? How would you get that same information in the shell? # + [markdown] slideshow={"slide_type": "slide"} # ## Solution: GitHub GUI # The left-most button (with the picture of a clipboard) copies the full identifier of the commit to the clipboard. In the shell, git log will show you the full commit identifier for each commit. # # When you click on the middle button, youโ€™ll see all of the changes that were made in that particular commit. Green shaded lines indicate additions and red ones removals. In the shell we can do the same thing with git diff. In particular, `git diff ID1..ID2` where `ID1` and `ID2` are commit identifiers (e.g. `git diff a3bf1e5..041e637`) will show the differences between those two commits. # # The right-most button lets you view all of the files in the repository at the time of that commit. To do this in the shell, weโ€™d need to checkout the repository at that particular time. We can do this with `git checkout ID` where ID is the identifier of the commit we want to look at. If we do this, we need to remember to put the repository back to the right state afterwards! # + [markdown] slideshow={"slide_type": "slide"} # ## Exercise: GitHub Timestamp # Create a remote repository on GitHub. Push the contents of your local repository to the remote. Make changes to your local repository and push these changes. Go to the repo you just created on GitHub and check the timestamps of the files. How does GitHub record times, and why? 
# + [markdown] slideshow={"slide_type": "slide"} # ## Solution: Github Timestamp # GitHub displays timestamps in a human readable relative format (i.e. “22 hours ago” or “three weeks ago”). However, if you hover over the timestamp, you can see the exact time at which the last change to the file occurred. # + [markdown] slideshow={"slide_type": "slide"} # ## Exercise: Push vs. Commit # In this lesson, we introduced the `git push` command. How is `git push` different from `git commit`? # + [markdown] slideshow={"slide_type": "slide"} # ## Solution: Push vs. Commit # Commit updates your local repository. When we push changes, we’re interacting with a remote repository to update it with the changes we’ve made locally (often this corresponds to sharing the changes we’ve made with others). Commit only updates your local repository. # + [markdown] slideshow={"slide_type": "slide"} # ## Pin: Proxy # If the network you are connected to uses a proxy, there is a chance that your last command failed with “Could not resolve hostname” as the error message. To solve this issue, you need to tell Git about the proxy: # ```bash # % git config --global http.proxy http://user:password@proxy.url # % git config --global https.proxy http://user:password@proxy.url # ``` # When you connect to another network that doesn’t use a proxy, you will need to tell Git to disable the proxy using: # ```bash # % git config --global --unset http.proxy # % git config --global --unset https.proxy # ``` # On `linux.bath` the proxy should be configured to allow you to use `git` with remote repositories over `http`, so ask a demonstrator if you have any issues. # + [markdown] slideshow={"slide_type": "slide"} # ## Pin: Password Managers # If your operating system has a password manager configured, `git push` will try to use it when it needs your username and password. For example, this is the default behavior for Git Bash on Windows.
If you want to type your username and password at the terminal instead of using a password manager, type: # ``` # % unset SSH_ASKPASS # ``` # in the terminal, before you run `git push`. Despite the name, git uses `SSH_ASKPASS` for all credential entry, so you may want to unset `SSH_ASKPASS` whether you are using git via SSH or https. # # You may also want to add unset `SSH_ASKPASS` at the end of your `~/.bashrc` to make git default to using the terminal for usernames and passwords. # + [markdown] slideshow={"slide_type": "slide"} # ## Key Points # - A local Git repository can be connected to one or more remote repositories. # - Use the HTTPS protocol to connect to remote repositories until you have learned how to set up SSH. # - `git push` copies changes from a local repository to a remote repository. # - `git pull` copies changes from a remote repository to a local repository.
notebooks_plain/08_episode.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Create gifs
# > Create gifs with PIL. The sliding windows and ROIs were created with py and opencv. This image has 3 hierachical pyramids. The 1st pyramids contains 24 ROIs. The 2nd pyramid contains 6 ROIs. The 3rd pyramid contains 2 ROIs. No algorithm was used to detrmined the ROIs, this is just a sliding window.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [Image Processing, PIL]
# - image: images/chart-preview.png

# +
# Bug fix: `glob` was imported twice; the duplicate import is removed.
import glob

from PIL import Image

# Filepaths: animate the sliding-window frames into a single gif.
fp_in = "images/keras_detection/clone_p*.jpg"
fp_out = "images/keras_detection/sliding_window_sw.gif"

# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#gif
# NOTE(review): the star-unpack raises ValueError if the glob matches nothing —
# assumed the frames are always present.
img, *imgs = [Image.open(f) for f in sorted(glob.glob(fp_in))]
img.save(fp=fp_out, format='GIF', append_images=imgs,
         save_all=True, duration=2000, loop=0)

# +
# Filepaths: same animation, but for the extracted ROI frames.
fp_in = "images/keras_detection/roiOrig_p*.jpg"
fp_out = "images/keras_detection/sliding_window_rois.gif"

# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#gif
img, *imgs = [Image.open(f) for f in sorted(glob.glob(fp_in))]
img.save(fp=fp_out, format='GIF', append_images=imgs,
         save_all=True, duration=2000, loop=0)
# -

# ### Sliding window and ROIs for an image with three hierachical pyramids
#
# ![](images/keras_detection/sliding_window_sw.gif)
# ![](images/keras_detection/sliding_window_rois.gif)
_notebooks/2020-12-23-Gifs_PIL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#############################################################
# Author(s): Debaditya, <NAME>                              #
#############################################################
# -

import numpy as np
import csv
import glob
from itertools import compress

# +
# Enter path to data folder:
data_root = '/NMA/Mapping Brain Networks/data/allData'

# Get list of files in glob
session_paths = glob.glob(data_root + '/*')
# -

#@title groupings of brain regions
regions = ["vis ctx", "thal", "hipp", "other ctx", "midbrain", "basal ganglia", "subplate"]

# NOTE(review): " TT" below carries a leading space — probably meant "TT";
# confirm against the Allen ontology labels before fixing, since it is compared
# against strings read from channels.brainLocation.tsv.
brain_groups = [["VISa", "VISam", "VISl", "VISp", "VISpm", "VISrl"],  # visual cortex
                ["CL", "LD", "LGd", "LH", "LP", "MD", "MG", "PO", "POL", "PT", "RT", "SPF", "TH", "VAL", "VPL", "VPM"],  # thalamus
                ["CA", "CA1", "CA2", "CA3", "DG", "SUB", "POST"],  # hippocampal
                ["ACA", "AUD", "COA", "DP", "ILA", "MOp", "MOs", "OLF", "ORB", "ORBm", "PIR", "PL", "SSp", "SSs", "RSP", " TT"],  # non-visual cortex
                ["APN", "IC", "MB", "MRN", "NB", "PAG", "RN", "SCs", "SCm", "SCig", "SCsg", "ZI"],  # midbrain
                ["ACB", "CP", "GPe", "LS", "LSc", "LSr", "MS", "OT", "SNr", "SI"],  # basal ganglia
                ["BLA", "BMA", "EP", "EPd", "MEA"]  # cortical subplate
                ]

sessions = [11, 12]  # Pick which sessions to load.


# +
def get_session_info(root, path):
    '''
    Return the date on which a session was carried out and the mouse's name.

    Args:
        root - [string] The root directory path (the data folder).
        path - [string] The path to the session directory.

    Returns:
        date - [string] Date the experiment was conducted in YYYY-MM-DD format.
        name - [string] Name of the mouse.
    '''
    # Strip the root prefix, leaving "<date>_<name>".
    # Bug fix: this previously read the global `data_root` instead of the
    # `root` parameter, which was silently unused.
    # NOTE(review): the '\\' separator assumes Windows paths; on POSIX the
    # replace is a no-op — confirm on the target platform.
    name_date = path.replace(root + '\\', '')
    # Date is everything before the trailing "_<10-character name>".
    date = name_date[:-11]
    # Name is the final 10 characters.
    name = name_date[-10:]
    return date, name


def get_cluster_info(path):
    '''
    Return information about clusters.

    Args:
        path - [string] The path to the session directory.

    Returns:
        good_clusters     - [ndarray] Boolean flags, True where a cluster is 'good'.
        cluster_locations - [list] Brain region each cluster is located in.
    '''
    # A cluster counts as 'good' when its phy annotation is >= 2.
    good_clusters = (np.load(path + '/clusters._phy_annotation.npy') >= 2).flatten()
    # Peak channel per cluster; the file is 1-indexed, so shift to 0-indexed.
    cluster_channels = (np.load(path + '/clusters.peakChannel.npy').astype(int) - 1).flatten()
    # Brain region of every recording channel, in channel order.
    brain_regions = []
    with open(path + '/channels.brainLocation.tsv') as tsvfile:
        reader = csv.DictReader(tsvfile, dialect='excel-tab')
        for row in reader:
            brain_regions.append(row['allen_ontology'])
    # Map each cluster to the region of its peak channel.
    cluster_locations = []
    for cluster_channel in cluster_channels:
        cluster_locations.append(brain_regions[cluster_channel])
    return good_clusters, cluster_locations


def get_cluster_spikes(path):
    '''
    Return the spike times grouped by cluster.

    Args:
        path - [string] The path to the session directory.

    Returns:
        clusters_spikes - [list] One array of spike times per cluster id,
                          indexed by cluster id.
    '''
    # All spike times and, in parallel, the cluster id of each spike.
    spikes = np.load(path + '/spikes.times.npy', allow_pickle=True).flatten()
    cluster_ids = np.load(path + '/spikes.clusters.npy', allow_pickle=True).flatten()
    # Group the spike times by cluster id.
    clusters_spikes = []
    for cluster_id in range(np.max(cluster_ids) + 1):
        clusters_spikes.append(spikes[np.where(cluster_ids == cluster_id)])
    # Bug fix: previously returned `cluster_spikes` — the last cluster's spikes
    # only — instead of the full per-cluster list built above.
    return clusters_spikes


def get_trial_info(path):
    '''
    Return all the per-trial event information for a session.

    Args:
        path - [string] The path to the session directory.

    Returns:
        trial_intervals, visualStim_times, goCue_times, response_times,
        feedback_times, feedback_type - [ndarray] One entry per trial.
    '''
    trial_intervals = np.load(path + '/trials.intervals.npy', allow_pickle=True)
    visualStim_times = np.load(path + '/trials.visualStim_times.npy', allow_pickle=True)
    goCue_times = np.load(path + '/trials.goCue_times.npy', allow_pickle=True)
    response_times = np.load(path + '/trials.response_times.npy', allow_pickle=True)
    feedback_times = np.load(path + '/trials.feedback_times.npy', allow_pickle=True)
    feedback_type = np.load(path + '/trials.feedbackType.npy', allow_pickle=True)
    return trial_intervals, visualStim_times, goCue_times, response_times, feedback_times, feedback_type
# -

# Load the selected sessions into a dict keyed by session index.
dat = {}
for session in sessions:
    dat[session] = {}
    path = session_paths[session]
    session_date, mouse_name = get_session_info(data_root, path)
    good_clusters, cluster_locations = get_cluster_info(path)
    cluster_spikes = get_cluster_spikes(path)
    trial_intervals, visualStim_times, goCue_times, response_times, feedback_times, feedback_type = get_trial_info(path)
    dat[session]['session_date'] = session_date
    dat[session]['mouse_name'] = mouse_name
    dat[session]['good_clusters'] = good_clusters
    dat[session]['cluster_locations'] = cluster_locations
    dat[session]['cluster_spikes'] = cluster_spikes
    dat[session]['trial_intervals'] = trial_intervals
    dat[session]['visualStim_times'] = visualStim_times
    dat[session]['goCue_times'] = goCue_times
    dat[session]['response_times'] = response_times
    dat[session]['feedback_times'] = feedback_times
    dat[session]['feedback_type'] = feedback_type
    print('Data for session', session, 'loaded.')

print(dat[11].keys())

# +
# TODO:
# from neo.core import *
# blks = {}
# for session in sessions:
# -

# +
## This part of the code can be used to save the data currently in memory
def save_data(filename, objects):
    '''
    Save the given objects into the named file.

    Args:
        filename - [string] The name of the file that you want to save your
                   data into. (include extension .pkl)
        objects  - [list] The list of objects you want to store from memory
                   into the file.

    Returns:
        void

    Usage Example:
        save_data('data.pkl', [no_of_sessions, spontaneous_intervals,
                               trials_intervals, channel_brainLocations,
                               clusters_phy_annotation, clusters_peakChannel,
                               spikes_amps, spikes_clusters, spikes_depths,
                               spikes_times])
    '''
    # Grab dependencies
    import pickle
    # Open the file and dump the objects.
    # Bug fix: this previously pickled an undefined global `data` instead of
    # the `objects` parameter, raising NameError at call time.
    with open(filename, 'wb') as f:
        pickle.dump(objects, f)
# -

# (A large commented-out prototype cell was removed here: it duplicated, line
# for line, the loading logic now encapsulated in the get_* helpers above.)
code/.ipynb_checkpoints/loadData-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Designing Analysis Classes # # We will now learn how to create your own Analysis class by going through the implementation of the `BasicBrainAnalysis` class from the "Applying Analysis Classes" notebook. # # There are three key components of an Analysis class: # # * Data specification # * Parameter specification # * Pipeline constructor methods # # The data specification defines all inputs, outputs and intermediate derivatives. The parameter specification specifies the free (meta) parameters that can be used to customise the analysis. Pipeline constructor methods are just regular methods of the class that return a `Pipeline` that generates one or more derivatives. # # ## Base and Meta-classes and Inheritance # # Every Analysis class should (at least indirectly) inherit from the `arcana.Analysis` base class. This contains methods to perform a lot of the "magic" that Arcana does. # # Arcana is designed to utilise inheritance of Analysis classes, but since Analysis classes specify a number of class attributes (i.e. for the data and parameter specifications) all Analysis classes need to be constructed by a special "meta-class", `arcana.AnalysisMetaClass`. However, you don't need to understand what is going on behind the scenes (or what a meta-class is even), just simply define your class like this # # + from arcana import Analysis, AnalysisMetaClass class MyBasicBrainAnalysis(Analysis, metaclass=AnalysisMetaClass): # Stuff goes here pass # - # ## Data specification # # The data specification is the place to start designing an Analysis class. 
As the name suggests, it specifies all inputs, outputs and intermediate derivatives of the class via a list of "data-spec" objects: # # * `FilesetSpec` (intermediate file-set derivatives) # * `InputFilesetSpec` # * `OutputFilesetSpec` # * `FieldSpec` (intermediate field derivatives) # * `InputFieldSpec` # * `OutputFieldSpec` # # Instead of setting the data specification directly, data-spec objects are appended to the specifications of base classes (by the meta-class) by defining the `add_data_specs` class attribute. This enables the data specifications of base classes to be altered and overwritten. # # Each data-spec object is given a name (i.e. the one that appears on the class "menu") and assigned a file-format (filesets) or data type (fields). The key difference between input and output (and intermediate) data-specs is that output data-specs refer to the "pipeline constructor method" that constructs a pipeline to generate them. Intermediate specs are equivalent to output specs in every respect except they appear don't appear on the class menu by default. # # Typically, the only thing you will ever need to do with a data-spec is initialise it and append it to the `add_data_specs` list, and the best place to see the available initialisation options is the doc strings, i.e. from arcana import InputFilesetSpec, InputFieldSpec, FilesetSpec, FieldSpec print('InputFilesetSpec:\n', InputFilesetSpec.__doc__) print('InputFieldSpec:\n', InputFieldSpec.__doc__) print('FilesetSpec:\n', FilesetSpec.__doc__) print('FieldSpec:\n', FieldSpec.__doc__) # Key parameters to note are: # # #### file_format/datatype # # The format/data-type that the item will be converted to when it is stored in the dataset # # #### frequency # # Where the "data-items" sit in the dataset tree, i.e. whether there is one for every session, subject, visit or the whole dataset. 
Valid values for `frequency` are # # * 'per_session' # * 'per_subject' # * 'per_visit' # * 'per_dataset' # # #### pipeline_getter # # The name of the method in the class that constructs the pipeline to generate the derivatives # # ### Example # # In the Basic-Brain Analysis class we define three output, one input and one intermediate fileset specs as such # + from arcana import InputFilesetSpec, FilesetSpec, OutputFilesetSpec from banana.file_format import nifti_gz_format class MyBasicBrainAnalysis(Analysis, metaclass=AnalysisMetaClass): add_data_specs = [ InputFilesetSpec('magnitude', nifti_gz_format, desc="A magnitude image (e.g. T1w, T2w, etc..)"), OutputFilesetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline', desc="Skull-stripped magnitude image"), FilesetSpec('brain_mask', nifti_gz_format, 'brain_extraction_pipeline', desc="Brain mask used for skull-stripping"), OutputFilesetSpec('smooth', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed magnitude image"), OutputFilesetSpec('smooth_masked', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed and masked magnitude image")] def brain_extraction_pipeline(self, **name_maps): "We'll define this later" def smooth_mask_pipeline(self, **name_maps): "We'll define this later" print(MyBasicBrainAnalysis.static_menu(full=True)) # - # **Note** how the 'pipeline_getter' parameters of the spec objects reference the name of a "pipeline constructor" method defined in the class. The matches between these names are checked by the metaclass so if we don't define them Arcana will throw an error. # # Since `MyBasicBrainAnalysis` inherits directly from `Analysis` the only specs in the data specification are those in `add_data_specs`. However, if we would like to extend `MyBasicBrainAnalysis` to make a new class `MyExtendedBasicBrainAnalysis` we can add to and override the specs from `MyBasicBrainAnalysis`. 
# + from banana.file_format import mrtrix_image_format class MyExtendedBasicBrainAnalysis(MyBasicBrainAnalysis, metaclass=AnalysisMetaClass): add_data_specs = [ OutputFilesetSpec('smooth', mrtrix_image_format, 'smooth_mask_pipeline', desc="Smoothed magnitude image in Mrtrix format"), OutputFilesetSpec('thresholded', nifti_gz_format, 'threshold_pipeline', desc="Thresholded smoothed magnitude image")] def threshold_pipeline(self, **name_maps): "We'll define this later" print(MyExtendedBasicBrainAnalysis.static_menu(full=True)) # - # As you can see, there is now a `thresholded` output, and the `smooth` image is now of `mrtrix_image` format instead of `nifti_gz`. # # **Note**: we can get away with changing the format of the `smooth` from zipped NiFTI to MRtrix because if the output format of the pipeline doesn't match the format of the specification it will be automatically converted before it is stored in the dataset (as long as a converter exists). # ## Parameter Specification # # The parameter specification works very much like the data specification but for parameters. "Param-specs" can be either of class `ParamSpec` or `SwitchSpec` type. # # `SwitchSpec`s are used to qualitatively change the analysis performed, e.g. using FSL for non-linear registration to a template (i.e. FNIRT) instead of ANTs. `ParamSpec`s are used to quantitatively change the analysis, e.g. change the required threshold value. # + from arcana import ParamSpec, SwitchSpec print('ParamSpec:\n', ParamSpec.__doc__) print('SwitchSpec:\n', SwitchSpec.__doc__) # - # As with the data specification, instead of setting the parameter specification directly it is added to the class via `add_param_specs` to allow manipulation by subclasses. # # Returning to the `BasicBrainAnalysis` example we add in the FWHM parameter used in the smoothing pipeline. # + class BasicBrainAnalysis(Analysis, metaclass=AnalysisMetaClass): """ A baisc example that demonstrates how Analysis classes work. 
""" add_data_specs = [ InputFilesetSpec('magnitude', nifti_gz_format, desc="A magnitude image (e.g. T1w, T2w, etc..)"), OutputFilesetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline', desc="Skull-stripped magnitude image"), FilesetSpec('brain_mask', nifti_gz_format, 'brain_extraction_pipeline', desc="Brain mask used for skull-stripping"), OutputFilesetSpec('smooth', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed magnitude image"), OutputFilesetSpec('smooth_masked', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed and masked magnitude image")] add_param_specs = [ ParamSpec('smoothing_fwhm', 4.0, desc=("The full-width-half-maxium radius of the smoothing " "kernel"))] def brain_extraction_pipeline(self, **name_maps): "We'll define this later" def smooth_mask_pipeline(self, **name_maps): "We'll define this later" print(BasicBrainAnalysis.static_menu()) # - # ## Pipeline Constructor Methods # # Pipeline constructor methods are where the action happens in Analysis classes. They return `Pipeline` objects (which are just thin wrappers around `nipype.Workflows`) that link the data specification together by taking one or more data-specs as inputs and generating one or more as outputs. # # ### Initialising a pipeline # # The basic form of a pipeline constructor method is as follows # + from banana.citation import fsl_cite def smooth_mask_pipeline(self, **name_maps): pipeline = self.new_pipeline( 'smooth_mask', desc="Smooths and masks a brain image", name_maps=name_maps, citations=[fsl_cite]) return pipeline # - # where the `new_pipeline` creates the `Pipeline` object, which is returned at the end of the method. Looking at the doc string of the `new_pipeline` we can see the parameters that you need/can pass to it print(Analysis.new_pipeline.__doc__) # The `name` parameter is just used internally to distinguish working directories between pipeline nodes. 
There aren't any requirements on it except that it needs to be unique amongst all pipelines that can be generated by the Analysis instance.
passed from param-spec inputs={ 'in_file': ('magnitude', nifti_gz_format)}, # Input from data-spec outputs={ 'smooth': ('out_file', nifti_gz_format)}, # Output to data-spec requirements=[ fsl_req.v('5.0.10')]) # Requires FSL >= 5.0.10 pipeline.add( 'mask', fsl.ApplyMask( output_datatype=int), # Fixed param of pipeline inputs={ 'in_file': (smooth, 'out_file'), # Input from previous node 'mask_file': ('brain_mask', nifti_gz_format)}, # Input from data-spec outputs={ 'smooth_masked': ('out_file', nifti_gz_format)}, # Output to data-spec requirements=[ fsl_req.v('5.0.10')]) # Requires FSL >= 5.0.10 return pipeline # Key points to note are: # # * All nodes need a unique name within the pipeline (as in Nipype) # * Stylistic convention dictates that constant interface traits are set when the interface is initialised # * `inputs` is a dictionary that maps the name of an input-trait of the interface to a 2-tuple consisting of either # * a data-spec name (i.e. inputs of the pipeline) and the file-format/datatype the input is expected in (the format will be automatically converted if required) # * an upstream node and name of the trait to connect from the upstream node # * `outputs` is a dictionary that maps data-spec names (i.e. outputs of the pipeline) to a 2-tuple consisting of the name of an output-trait and the file-format/datatype it is produced in. # * `requirements` are a list of `arcana.Requirement` objects that specify versions of external packages (e.g. FSL, SPM, MRtrix) that are required for the node to run. # # ### Merging and Splitting Pipelines Across Subjects/Visits # # Arcana handles iteration over subjects and sessions in the background as implicitly specifed by the frequencies of inputs and outputs of the pipeline. However, in some cases you may need to join over all subjects/visits to create a summary statistic (e.g. mean), and then potentially use this variable back on an individual subject/visit level again (e.g. normalisation). 
As we saw in the morning "Advanced Nipype" section, these cases are handled by Nipype using iterators, map nodes and join nodes.
#
# In Arcana join nodes are specified by providing the `joinsource` and `joinfield` parameters when adding a node to a pipeline.
""" add_data_specs = [ InputFilesetSpec('magnitude', nifti_gz_format, desc="A magnitude image (e.g. T1w, T2w, etc..)"), OutputFilesetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline', desc="Skull-stripped magnitude image"), FilesetSpec('brain_mask', nifti_gz_format, 'brain_extraction_pipeline', desc="Brain mask used for skull-stripping"), OutputFilesetSpec('smooth', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed magnitude image"), OutputFilesetSpec('smooth_masked', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed and masked magnitude image")] add_param_specs = [ ParamSpec('smoothing_fwhm', 4.0, desc=("The full-width-half-maxium radius of the smoothing " "kernel"))] def brain_extraction_pipeline(self, **name_maps): pipeline = self.new_pipeline( 'brain_extraction', desc="Extracts brain from full-head image", name_maps=name_maps, citations=[fsl_cite]) pipeline.add( 'bet', fsl.BET( mask=True), inputs={ 'in_file': ('magnitude', nifti_gz_format)}, outputs={ 'brain': ('out_file', nifti_gz_format), 'brain_mask': ('mask_file', nifti_gz_format)}, requirements=[ fsl_req.v('5.0.10')]) return pipeline def smooth_mask_pipeline(self, **name_maps): pipeline = self.new_pipeline( 'smooth_mask', desc="Smooths and masks a brain image", name_maps=name_maps, citations=[fsl_cite]) # Smoothing process smooth = pipeline.add( 'smooth', fsl.IsotropicSmooth( fwhm=self.parameter('smoothing_fwhm')), inputs={ 'in_file': ('magnitude', nifti_gz_format)}, outputs={ 'smooth': ('out_file', nifti_gz_format)}, requirements=[ fsl_req.v('5.0.10')]) pipeline.add( 'mask', fsl.ApplyMask(), inputs={ 'in_file': (smooth, 'out_file'), 'mask_file': ('brain_mask', nifti_gz_format)}, outputs={ 'smooth_masked': ('out_file', nifti_gz_format)}, requirements=[ fsl_req.v('5.0.10')]) return pipeline # ## Methods to Create Publication Outputs # # While not necessary, if you are creating a new Analysis class for your specific study, it is a nice idea to implement additional methods to generate all 
your publication outputs (figures, stats, etc...) within the Analysis class. # # For example in the `BasicBrainAnalysis` class we have the method `plot_comparison`, which is implemented as follows # + import matplotlib.pyplot as plt class BasicBrainAnalysis(Analysis, metaclass=AnalysisMetaClass): """ A baisc analysis class that demonstrates how Analysis classes work. """ add_data_specs = [ InputFilesetSpec('magnitude', nifti_gz_format, desc="A magnitude image (e.g. T1w, T2w, etc..)"), OutputFilesetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline', desc="Skull-stripped magnitude image"), FilesetSpec('brain_mask', nifti_gz_format, 'brain_extraction_pipeline', desc="Brain mask used for skull-stripping"), OutputFilesetSpec('smooth', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed magnitude image"), OutputFilesetSpec('smooth_masked', nifti_gz_format, 'smooth_mask_pipeline', desc="Smoothed and masked magnitude image")] add_param_specs = [ ParamSpec('smoothing_fwhm', 4.0, desc=("The full-width-half-maxium radius of the smoothing " "kernel"))] def brain_extraction_pipeline(self, **name_maps): pipeline = self.new_pipeline( 'brain_extraction', desc="Extracts brain from full-head image", name_maps=name_maps, citations=[fsl_cite]) pipeline.add( 'bet', fsl.BET( mask=True), inputs={ 'in_file': ('magnitude', nifti_gz_format)}, outputs={ 'brain': ('out_file', nifti_gz_format), 'brain_mask': ('mask_file', nifti_gz_format)}, requirements=[ fsl_req.v('5.0.10')]) return pipeline def smooth_mask_pipeline(self, **name_maps): pipeline = self.new_pipeline( 'smooth_mask', desc="Smooths and masks a brain image", name_maps=name_maps, citations=[fsl_cite]) # Smoothing process smooth = pipeline.add( 'smooth', fsl.IsotropicSmooth( fwhm=self.parameter('smoothing_fwhm')), inputs={ 'in_file': ('magnitude', nifti_gz_format)}, outputs={ 'smooth': ('out_file', nifti_gz_format)}, requirements=[ fsl_req.v('5.0.10')]) pipeline.add( 'mask', fsl.ApplyMask(), inputs={ 'in_file': (smooth, 
'out_file'), 'mask_file': ('brain_mask', nifti_gz_format)}, outputs={ 'smooth_masked': ('out_file', nifti_gz_format)}, requirements=[ fsl_req.v('5.0.10')]) return pipeline def plot_comparision(self, figsize=(12, 4)): for subj_i in self.subject_ids: for visit_i in self.visit_ids: f = plt.figure(figsize=figsize) f.suptitle('Subject "{}" - Visit "{}"'.format(subj_i, visit_i)) for i, spec_name in enumerate(['magnitude', 'smooth', 'brain_mask', 'smooth_masked']): f.add_subplot(1, 4, i + 1) self._plot_slice(spec_name, subj_i, visit_i) plt.title(spec_name) plt.show() def _plot_slice(self, spec_name, subject_id=None, visit_id=None): # Load the image data = self.data(spec_name, derive=True).item( subject_id=subject_id, visit_id=visit_id).get_array() # Cut in the middle of the brain cut = int(data.shape[-1] / 2) + 10 # Plot the data plt.imshow(np.rot90(data[..., cut]), cmap="gray") plt.gca().set_axis_off() # - # Notice how the `_plot_slice` method access the derived data using the `Analysis.data` method like we did in the "Applying Analysis Class" notebook. From the `FilesetSlice` it returns you can access a single data "item" using the `item` method. In Banana, the data array of `Fileset`s in standard image format can be accessed using the `get_arrray` method, which we then plot with Matplotlib. # ## Exercise # # Extend the `example.analaysis.BasicBrainAnalysis` class to add the `image_std` data-spec using the `nipype.interfaces.fsl.ImageStats` interface, which is the standard deviation of the smooth-masked image. Then run this analysis on the Tw-weighted images in the 'output/sample-datasets/depth1' dataset created in the "Applying Analysis Classes" notebook. # + solution2="hidden" solution2_first=true ## Write your solution here # + solution2="hidden" # ! 
fslstats -h

# + solution2="hidden"
from arcana import OutputFieldSpec
from example.analysis import BasicBrainAnalysis
from banana.requirement import fsl_req
from banana.citation import fsl_cite


class MyExtendedBasicBrainAnalysis(BasicBrainAnalysis,
                                   metaclass=AnalysisMetaClass):
    """Extends BasicBrainAnalysis with the standard deviation of the
    smooth-masked image (exercise solution)."""

    add_data_specs = [
        OutputFilesetSpec('smooth', mrtrix_image_format, 'smooth_mask_pipeline',
                          desc="Smoothed magnitude image in Mrtrix format"),
        OutputFieldSpec('image_std', float, 'image_std_pipeline',
                        desc=("Standard deviation of the smoothed masked "
                              "image"))]

    def image_std_pipeline(self, **name_maps):
        """Construct the pipeline that derives 'image_std' from
        'smooth_masked' using FSL's ImageStats."""
        pipeline = self.new_pipeline(
            'image_std_pipeline',
            desc=("Calculates the standard deviation of the smooth masked "
                  "image"),
            name_maps=name_maps,
            citations=[fsl_cite])
        pipeline.add(
            'stats',  # renamed from copy-pasted 'mask' to say what it does
            fsl.ImageStats(
                op_string='-s'),  # '-s' -> standard deviation
            inputs={
                'in_file': ('smooth_masked', nifti_gz_format)},
            outputs={
                # BUGFIX: 'image_std' is declared as a float OutputFieldSpec,
                # so the output datatype must be float, not a file format
                # (compare the 'average'/'std_dev' outputs of the
                # ToyAnalysis statistics_pipeline shown earlier).
                'image_std': ('out_stat', float)},
            requirements=[
                fsl_req.v('5.0.10')])
        return pipeline


print(MyExtendedBasicBrainAnalysis.static_menu(full=True))

# + solution2="hidden"
from arcana import Dataset, FilesetFilter

my_analysis = MyExtendedBasicBrainAnalysis(
    'my_extended_analysis',  # The name needs to be the same as the previous version
    dataset=Dataset('output/sample-datasets/depth1', depth=1),
    processor='work',
    inputs=[
        FilesetFilter('magnitude', '.*T1w$', is_regex=True)])

my_analysis.derive('image_std')

# + solution2="hidden"
for std in my_analysis.data('image_std'):
    print('Subject/visit ({}/{}): {} '.format(std.subject_id, std.visit_id,
                                              std.value))
notebooks/arcana_design.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tictactoe
#
# Nbviewer [https://nbviewer.jupyter.org/github/shaundsouza/deep-learning/blob/master/notebooks/tictactoe.ipynb](https://nbviewer.jupyter.org/github/shaundsouza/deep-learning/blob/master/notebooks/tictactoe.ipynb)
#
# Code [https://nbviewer.jupyter.org/format/script/github/shaundsouza/deep-learning/blob/master/notebooks/tictactoe.ipynb](https://nbviewer.jupyter.org/format/script/github/shaundsouza/deep-learning/blob/master/notebooks/tictactoe.ipynb)

# +
import numpy as np
import math
import pickle
import operator
import random
import time
import sys


class tictactoe:
    # Tabular TD(0) value-learning player for 3x3 tic-tac-toe, trained by
    # random self-play.
    # NOTE(review): these are class attributes shared between instances
    # (in particular the mutable V dict); this script only ever creates a
    # single instance, so that is harmless here.
    N = 3                 # board edge length
    board = None          # flat length-N*N array of "O"/"X"/"-"
    V = dict()            # state-value table: board tuple -> value in [0, 1]
    alpha = 0.3           # TD learning rate
    epsilon = 0.2         # exploration probability in (non-user) play
    player1 = "O"
    player2 = "X"
    empty = "-"
    player = None         # symbol whose turn it is
    moves = None          # assigned in init_board but never read afterwards

    def init_board(self):
        # Reset the board to all-empty and pick a random starting player.
        self.board = np.empty(self.N**2, dtype=object)
        self.board[:] = '-'
        self.moves = set()
        if not self.V:
            # Seed the value table with the empty-board state.
            self.V[self.pos_board()] = 0.5
        self.player = random.choice([self.player1, self.player2])

    def pos_board(self):
        # Hashable key representing the current board position.
        # return np.array_str(self.board)[1:-1].strip()
        return tuple(self.board)

    def print_board(self):
        # Pretty-print the board as an N x N grid.
        board_2d = np.reshape(self.board, (self.N, self.N))
        sys.stdout.flush()
        print(board_2d, flush=True)
        print()

    def flip_player(self):
        # Return the other player's symbol.
        if self.player == self.player1:
            return self.player2
        else:
            return self.player1

    def game_win(self, player):
        # True if either player has completed a row, column or diagonal.
        # The `player` argument is unused -- callers always pass None.
        # Lines are detected by summing character codes: a line sums to
        # ord(symbol) * N only when all N cells hold that symbol (no mix of
        # ord("O")=79, ord("X")=88, ord("-")=45 reaches 237 or 264 for N=3).
        board_2d = np.reshape([ord(item) for item in self.board], (self.N, self.N))
        # self.print_board()
        row = np.sum(board_2d, axis=0)
        col = np.sum(board_2d, axis=1)
        # if player == -1:
        #     if row.min() == -3 or col.min() == -3:
        #         return "Win"
        # else:
        #     if row.max() == 3 or col.max() == 3:
        #         return "Win"
        return np.any(row == ord(self.player1) * self.N) or np.any(col == ord(self.player1) * self.N)\
            or np.any(row == ord(self.player2) * self.N) or np.any(col == ord(self.player2) * self.N)\
            or np.trace(board_2d) == ord(self.player1) * self.N or np.trace(board_2d) == ord(self.player2) * self.N\
            or np.trace(np.fliplr(board_2d)) == ord(self.player1) * self.N or np.trace(np.fliplr(board_2d)) == ord(self.player2) * self.N

    def game_play_train(self):
        # Play one self-play game with uniformly random moves, updating the
        # value table V by TD(0) after every move.  Returns the number of
        # moves played (minus the terminal one, since count is incremented
        # only when the game continues).
        self.init_board()
        count = 0
        while True:
            s = self.pos_board()
            end_game = False
            # Sample random empty squares until a legal move is found.
            pos = np.random.randint(self.N**2, size=1)
            while self.board[pos] != self.empty:
                pos = np.random.randint(self.N**2, size=1)
            self.board[pos] = self.player
            # self.print_board()
            s_ = self.pos_board()
            # print(pos)
            if self.game_win(None):
                # Terminal win state: value 1.  The assert documents that a
                # previously-seen terminal state can only have been stored
                # with this exact value (non-terminal states enter V via the
                # elif below with 0.5 and can never later become terminal
                # entries).
                if s_ in self.V:
                    assert self.V[s_] == 1
                self.V[s_] = 1
                # self.V[s] = self.V[s] + self.alpha * (self.V[s_] - self.V[s])
                # print("Win Player " + str(self.player))
                end_game = True
            elif not len(np.where(self.board == self.empty)[0]):
                # Board full with no winner: terminal draw state, value 0.
                if s_ in self.V:
                    assert self.V[s_] == 0
                self.V[s_] = 0
                # self.V[s] = self.V[s] + self.alpha * (self.V[s_] - self.V[s])
                # print("Draw")
                end_game = True
            elif not s_ in self.V:
                # First visit to a non-terminal state: optimistic-neutral init.
                self.V[s_] = 0.5
            # TD(0) update towards the successor state's value.
            # NOTE(review): a single shared value table is updated regardless
            # of whose move it was (symmetric self-play).
            self.V[s] = self.V[s] + self.alpha * (self.V[s_] - self.V[s])
            if end_game:
                break
            self.player = self.flip_player()
            count = count + 1
            if count % 1000 == 0:
                print("Train", count)
        return count

    def value_move(self, player):
        # Greedy move: evaluate every empty square by looking up the value
        # of the resulting position (0.5 for unseen positions) and return a
        # random choice among the highest-valued squares.
        b = np.array(self.pos_board())
        # print(b)
        m = []       # candidate move indices
        m_V = []     # corresponding successor-state values
        ii = np.where(b == self.empty)
        # print(ii)
        # print(player)
        for i in ii[0]:
            c = list(b)
            c[i] = player
            # print(i, c)
            m.append(i)
            try:
                m_V.append(self.V[tuple(c)])
            except:
                # Unseen successor state: fall back to the neutral value.
                m_V.append(0.5)
        m = np.array(m)
        m_V = np.array(m_V)
        # print(m, m_V)
        # print(max(m_V))
        ii = np.where(m_V == max(m_V))
        # print(ii)
        # print(random.choice(m[ii[0]]))
        return random.choice(m[ii])

    def game_play(self, user = False):
        # Play one game using the learned values.  If `user` is True the
        # human plays player2 ("X") by typing "row col"; otherwise both
        # sides play greedily with epsilon-random exploration.
        self.init_board()
        count = 0
        while True:
            s = self.pos_board()
            end_game = False
            if user:
                if self.player == self.player2:
                    # Prompt until the human enters a legal empty square.
                    pos = -1
                    while pos < 0 or pos >= self.N**2 or self.board[pos] != self.empty:
                        # print(pos)
                        pos_str = input("Enter valid move? ")
                        pos_i = pos_str.split()
                        pos = self.N * int(pos_i[0]) + int(pos_i[1])
                else:
                    pos = self.value_move(self.player)
            else:
                pos = np.random.randint(self.N**2, size=1)
                if random.random() < self.epsilon:
                    # Explore: random legal move.
                    while self.board[pos] != self.empty:
                        # print(pos)
                        pos = np.random.randint(self.N**2, size=1)
                else:
                    # Exploit: greedy move from the value table.
                    pos = self.value_move(self.player)
            # print(pos)
            self.board[pos] = self.player
            self.print_board()
            # print(self.game_win(None))
            # print(self.board,np.where(self.board == 0))
            if self.game_win(None):
                print("Player Win", self.player)
                end_game = True
            elif not len(np.where(self.board == self.empty)[0]):
                print("Draw")
                end_game = True
            if end_game:
                break
            self.player = self.flip_player()
            count = count + 1
            # time.sleep(.5)
        return count


if __name__ == "__main__":
    a = tictactoe()
    # Training phase (toggle by editing the constant condition), then the
    # value table is pickled so it can be reloaded for play.
    if True:
        count = 0
        N = 10000
        x = 0
        for i in range(N):
            count = count + a.game_play_train()
            x = x + 1
            if x % 1000 == 0:
                print("Train ", x)
        print("Avg ", count / N)
        output = open('V.pkl', 'wb')
        pickle.dump(a.V, output)
        output.close()
    # Reload the trained value table and play one interactive game.
    pkl_file = open('V.pkl', 'rb')
    a.V = pickle.load(pkl_file)
    # for i in a.V:
    #     print(i, a.V[i])
    count = 0
    N = 1
    for i in range(N):
        count = count + a.game_play(True)
    print("Avg ", count / N)
# -
notebooks/tictactoe.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Latin Hypercube Sampling

import numpy as np
import pandas as pd
import pyDOE
from scipy.stats.distributions import uniform
from math import *
from numpy import *

# +
# Default values for the 76 model parameters (order matches the column
# names assigned to X_data below).
default = np.array([25.85e3, 2.098e3, 25.85e-6, 2.098e-6, 0.95e-4, 1.5, 10,
                    5.4, 140, 70e-3, 0.8, 0.035, 0.583, 0.417, 0.007, 0.024,
                    0.00145, 0.2, 0.00008015, 0.0000324, 0.000138, 65, 1.2,
                    0.065, 0.02, 9.13e-4, 50000, 87.5, 1.38, 0.35, 0.1,
                    38.5e-3, 0.45e-3, 0.5e-3, 0.0035e-3, 0.5e-3, 2.6875e-8,
                    1.8951e-5, 2.382e-3, 50e-3, 100, 0.2, 2, 8e-3, 2e-3,
                    1.75e-3, 3, 0.15, 3, 1.05e-3, 0.85, -4, 4.9, 56.2, 0.35,
                    -29, 138, 129, 0.03, 0.13, 0.625, -2, 7, 2.35, 1, 650,
                    2.43, 0.05, 0.012, 41e-3, 0.22e-3, 0.0625, 14, 0.01, 100,
                    2.100])
# -

default.shape

# Spot-check the negative-valued parameters (their sampling interval has to
# be flipped, which build_sample_range now handles automatically).
default[51]

default[55]

default[61]

# +
pc = 0.25


def build_sample_range(defaults, pc):
    """Return per-parameter (loc, scale) rows for ``scipy.stats.uniform``.

    For each default value ``d`` the sampled interval is
    ``[pc*d, (1+pc)*d]``.  ``uniform(loc, scale)`` samples
    ``[loc, loc + scale]`` with ``scale >= 0``, so for negative defaults
    the endpoints must be swapped: ``loc = min(pc*d, (1+pc)*d)`` and
    ``scale = |d|`` (the interval width is always ``|d|``).

    This replaces 76 hand-written rows that special-cased the negative
    parameters (previously indices 51, 55 and 61); it produces identical
    values and generalises to any number of parameters of either sign.

    Args:
        defaults: 1-D array-like of default parameter values (any sign).
        pc: fractional offset defining the interval
            (pc=0.25 -> [0.25*d, 1.25*d]).

    Returns:
        (n, 2) ndarray: column 0 is ``loc``, column 1 is ``scale``.
    """
    defaults = np.asarray(defaults, dtype=float)
    loc = np.minimum(defaults * pc, defaults * (1 + pc))
    scale = np.abs(defaults)
    return np.column_stack((loc, scale))


sample_range = build_sample_range(default, pc)
# -

sample_range.shape

# ## Create a uniform distribution
uniform_dist = uniform(sample_range[:, 0], sample_range[:, 1])

# initialization of LHS -- one factor per parameter (was hard-coded to 76)
lh = pyDOE.lhs(default.size, samples=10000)

# Creating samples by mapping the LHS quantiles through each uniform
# distribution's inverse CDF
samples = uniform_dist.ppf(lh)

samples.shape

# +
# np.savetxt('values_code_20.csv', samples, delimiter=",")
# -

X_data = pd.DataFrame(samples)

X_data

X_data.columns = [
    "V_myo(um3)", "V_SR(um3)", "V_myo_uL(uL)", "V_SR_uL(uL)", "i_NaK_max(uA)",
    "K_m_K(mM)", "K_m_Na(mM)", "K_o(millimolar)", "Na_o(millimolar)",
    "Ca_TRPN_Max(mM)", "g_Na(microS)", "g_t(microS)", "a_endo", "b_endo",
    "g_ss(microS)", "g_K1(microS)", "g_f(microS)", "f_Na", "g_B_Na(microS)",
    "g_B_Ca(microS)", "g_B_K(microS)", "E_Ca(millivolt)", "Ca_o(millimolar)",
    "g_D(um3_per_ms)", "J_R(um3_per_ms)", "J_L(um3_per_ms)", "N",
    "K_mNa(mM)", "K_mCa(mM)", "eta", "k_sat", "g_NCX(mM_per_ms)",
    "g_SERCA(mM_per_ms)", "K_SERCA(mM)", "g_pCa(mM_per_ms)", "K_mpCa(mM)",
    "g_CaB(mM_per_mV_ms)", "g_SRl(per_ms)", "k_CMDN(mM)", "B_CMDN(mM)",
    "k_on(per_mM_per_ms)", "k_Ref_off(per_ms)", "gamma_trpn",
    "alpha_0(per_ms)", "alpha_r1(per_ms)", "alpha_r2(per_ms)", "n_Rel",
    "K_z", "n_Hill", "Ca_50ref(mM)", "z_p", "beta_1", "beta_0",
    "T_ref(N_per_mm2)", "a in component Cross_Bridges", "A_1", "A_2", "A_3",
    "alpha_1(per_ms)", "alpha_2(per_ms)", "alpha_3(per_ms)", "V_L(mV)",
    "del_VL(mV)", "phi_L", "t_L(ms)", "tau_L(ms)", "tau_R(ms)", "phi_R",
    "theta_R", "K_RyR(mM)", "K_L(mM)", "a", "b", "c", "d", "tau_s_ss"]

X_data

X_data.to_csv("samples_data_25.csv", index=False)

X_data.describe()
Data Generation/Latin Hypercube Sampling to generate samples for simulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> <tr> # <td style="background-color:#ffffff;"> # <a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td> # <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;"> # prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>) # </td> # </tr></table> # <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} 
$ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ # <h2>Quantum Tomography</h2> # # [Watch Lecture](https://youtu.be/mIEiWCJ6R58) # # We study a simplified version of quantum tomography here. # # It is similar to learn the bias of a coin by collecting statistics from tossing this coin many times. But, only making measurement may not be enough to make a good guess. # # Suppose that you are given 1000 copies of a qubit and your task is to learn the state of this qubit. We use a python class called "unknown_qubit" for doing our quantum experiments. # # Please run the following cell before continuing. 
# +
# class unknown_qubit
#     available_qubit = 1000 -> you get at most 1000 qubit copies
#     get_qubits(number_of_qubits) -> you get the specified number of qubits for your experiment
#     measure_qubits() -> your qubits are measured and the result is returned as a dictionary variable
#                      -> after measurement, these qubits are destroyed
#     rotate_qubits(angle) -> your qubits are rotated with the specified angle in radian
#     compare_my_guess(my_angle) -> your guess in radian is compared with the real angle

from random import randrange
from math import pi
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer


class unknown_qubit:
    """Simulate a budget of 1000 identical qubits prepared in the hidden state
    (cos(theta), sin(theta)), where theta in [0, pi) is drawn uniformly at
    random (resolution pi/18000) and kept secret from the experimenter.
    """

    def __init__(self):
        # hidden angle in [0, pi), resolution pi/18000
        self.__theta = randrange(18000)/18000*pi
        # total unused qubit budget
        self.__available_qubits = 1000
        # qubits currently allocated to an experiment (not yet measured)
        self.__active_qubits = 0
        print(self.__available_qubits,"qubits are created")

    def get_qubits(self,number_of_qubits=None):
        """Allocate `number_of_qubits` fresh copies of the hidden state.

        Builds a 1-qubit circuit rotated by 2*theta (qiskit's ry uses
        half-angle convention), deducts the request from the remaining
        budget, and makes the copies the "active" qubits for subsequent
        rotate/measure calls.  Prints an error/warning instead of raising.
        """
        if number_of_qubits is None or isinstance(number_of_qubits,int) is False or number_of_qubits < 1:
            print()
            print("ERROR: the method 'get_qubits' takes the number of qubit(s) as a positive integer, i.e., get_qubits(100)")
        elif number_of_qubits <= self.__available_qubits:
            self.__qc = QuantumCircuit(1,1)
            # ry(2*theta) maps |0> to (cos(theta), sin(theta))
            self.__qc.ry(2 * self.__theta,0)
            self.__active_qubits = number_of_qubits
            self.__available_qubits = self.__available_qubits - self.__active_qubits
            print()
            print("You have",number_of_qubits,"active qubits that are set to (cos(theta),sin(theta))")
            self.available_qubits()
        else:
            print()
            print("WARNING: you requested",number_of_qubits,"qubits, but there is not enough available qubits!")
            self.available_qubits()

    def measure_qubits(self):
        """Measure all active qubits and return the counts dictionary.

        One simulator shot is run per active qubit; the active qubits are
        consumed (destroyed) by the measurement.  Implicitly returns None
        when there are no active qubits.
        """
        if self.__active_qubits > 0:
            self.__qc.measure(0,0)
            job = execute(self.__qc,Aer.get_backend('qasm_simulator'),shots=self.__active_qubits)
            counts = job.result().get_counts(self.__qc)
            print()
            print("your",self.__active_qubits,"qubits are measured")
            print("counts = ",counts)
            self.__active_qubits = 0
            return counts
        else:
            print()
            print("WARNING: there is no active qubits -- you might first execute 'get_qubits()' method")
            self.available_qubits()

    def rotate_qubits(self,angle=None):
        """Rotate every active qubit by `angle` radians (applies ry(2*angle))."""
        if angle is None or (isinstance(angle,float) is False and isinstance(angle,int) is False):
            print()
            print("ERROR: the method 'rotate_qubits' takes a real-valued angle in radian as its parameter, i.e., rotate_qubits(1.2121)")
        elif self.__active_qubits > 0:
            self.__qc.ry(2 * angle,0)
            print()
            print("your active qubits are rotated by angle",angle,"in radian")
        else:
            print()
            print("WARNING: there is no active qubits -- you might first execute 'get_qubits()' method")
            self.available_qubits()

    def compare_my_guess(self,my_angle):
        """Reveal the hidden angle, report the error of `my_angle` in degrees,
        and end the experiment by zeroing the remaining qubit budget.
        """
        if my_angle is None or (isinstance(my_angle,float) is False and isinstance(my_angle,int) is False):
            print("ERROR: the method 'compare_my_guess' takes a real-valued angle in radian as your guessed angle, i.e., compare_my_guess(1.2121)")
        else:
            # comparing ends the game: no further qubits may be requested
            self.__available_qubits = 0
            diff = abs(my_angle-self.__theta)
            print()
            print(self.__theta,"is the original",)
            print(my_angle,"is your guess")
            print("the angle difference between the original theta and your guess is",diff/pi*180,"degree")
            print("-->the number of available qubits is (set to) zero, and so you cannot make any further experiment")

    def available_qubits(self):
        """Print how many unused qubits remain in the budget."""
        print("--> the number of available unused qubit(s) is",self.__available_qubits)
# -

# class unknown_qubit:
#
#     available_qubit = 1000 -> you get at most 1000 qubit copies
#     get_qubits(number_of_qubits) -> you get the specified number of qubits for your experiment
#     measure_qubits() -> your qubits are measured and the result is returned as a dictionary variable
#                      -> after measurement, these qubits are destroyed
#     rotate_qubits(angle) -> your qubits are rotated with the specified angle in radian
#     compare_my_guess(my_angle) -> your guess in radian is compared with the real angle

# <h3> Task 1 </h3>
#
# You are given 1000 copies of the identical qubits which are in the same quantum state lying in the first or second quadrant of the unit circle.
# # This quantum state is represented by an angle $ \theta \in [0,\pi) $, and your task is to guess this angle. # # You use the class __unknown_qubit__ and its methods for your experiments. # # _Remark that the measurement outcomes of the quantum states with angles $ \pi \over 3 $ and $ 2 \pi \over 3 $ are identical even though they are different quantum states. Therefore, getting 1000 qubits and then measuring them does not guarantee the correct answer._ # # Test your solution at least ten times. # + from math import pi, cos, sin, acos, asin # an angle theta is randomly picked and it is fixed througout the experiment my_experiment = unknown_qubit() # # my_experiment.get_qubits(number_of_qubits) # my_experiment.rotate_qubits(angle) # my_experiment.measure_qubits() # my_experiment.compare_my_guess(my_angle) # # # your solution is here # # - for i in range(10): my_experiment = unknown_qubit() # # your solution # # [click for our solution](B48_Quantum_Tomography_Solution.ipynb#task1) # <h3> Task 2 (extra) </h3> # # You are given 1000 identical quantum systems with two qubits that are in states $ \myvector{\cos \theta_1 \\ \sin \theta_1} $ and $ \myvector{\cos \theta_2 \\ \sin \theta_2} $, where $ \theta_1,\theta_2 \in [0,\pi) $. # # Your task is to guess the values of $ \theta_1 $ and $ \theta_2 $. # # Create a quantum circuit with two qubits. # # Randomly pick $\theta_1$ and $ \theta_2 $ and set the states of qubits respectively. (Do not use $ \theta_1 $ and $ \theta_2 $ except initializing the qubits.) # # Do experiments (making measurements and/or applying basic quantum operators) with your circuit(s). You may create more than one circuit. # # Assume that the total number of shots does not exceed 1000 throughout the whole experiment. 
# # _Since you have two qubits, your measurement outcomes will be '00', '01', '10', and '11'._ # # your solution # # <h3> Task 3 (Discussion) </h3> # # If the angle in Task 1 is picked in range $ [0,2\pi) $, then can we determine its quadrant correctly? # <h3> Global phase </h3> # # Suppose that we have a qubit and its state is either $ \ket{0} $ or $ -\ket{0} $. # # Is there any sequence of one-qubit gates such that we can measure different results after applying them? # # All one-qubit gates are $ 2 \times 2 $ matrices, and their application is represented by a single matrix: $ A_n \cdot \cdots \cdot A_2 \cdot A_1 = A $. # # By linearity, if $ A \ket{0} = \ket{u} $, then $ A (- \ket{0}) = -\ket{u} $. Thus, after measurement, the probabilities of observing state $ \ket{0} $ and state $ \ket{1} $ are the same for $ \ket{u} $ and $ -\ket{u} $. Therefore, we cannot distinguish them. # # Even though the states $ \ket{0} $ and $ -\ket{0} $ are different mathematically, they are assumed as identical from the physical point of view. # # The minus sign in front of $ -\ket{0} $ is called as a global phase. # # In general, a global phase can be a complex number with magnitude 1.
bronze/B48_Quantum_Tomography.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simple Seq2Seq machine translation using GRU based encoder-decoder architecture

import torch
from torch import nn
import numpy as np
import time

# ## Preparing the dataset for Machine Translation

from ProcessData import *

# ## Encoder

class Seq2SeqEncoder(nn.Module):
    """The RNN encoder for sequence to sequence learning.

    Embeds source-token ids and runs them through a (possibly multi-layer)
    GRU; the final hidden state is later handed to the decoder as its
    initial state.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0):
        super(Seq2SeqEncoder, self).__init__()
        # Embedding layer: token ids -> dense vectors
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=dropout, batch_first=True)

    def forward(self, X):
        # Input X: (`batch_size`, `num_steps`) of integer token ids
        X = self.embedding(X)
        # After embedding X: (`batch_size`, `num_steps`, `embed_size`)
        # When batch_first is True:
        #    the first axis corresponds to batch_size
        #    the second axis corresponds to num_steps
        #    the third axis corresponds to embed_size
        # When state is not mentioned, it defaults to zeros
        output, state = self.rnn(X)
        # `output` shape: (`batch_size`, `num_steps`, `num_hiddens`)
        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)
        return output, state

# +
# Smoke test: batch of 5 sequences, 4 timesteps each (all-zero token ids).
encoder = Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=1)
encoder.eval()
X = torch.zeros((5, 4), dtype=torch.long)
enc_output, enc_state = encoder(X)
print(enc_output.shape)
print(enc_state.shape)
# -

# ## Decoder

class Seq2SeqDecoder(nn.Module):
    """The RNN decoder for sequence to sequence learning.

    At every timestep the embedded input token is concatenated with a fixed
    "context" vector (the encoder's last-layer final hidden state) before
    entering the GRU; a linear layer maps GRU outputs to vocabulary logits.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0):
        super(Seq2SeqDecoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # GRU input = embedded token + context, hence embed_size + num_hiddens
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers, dropout=dropout,batch_first=True)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def forward(self, X, state):
        # Inputs:
        #   X     : (`batch_size`, `num_steps`) of integer token ids
        #   state : (`num_layers`, `batch_size`, `num_hiddens`) initial hidden
        #           state; this comes from the hidden state output from encoder.
        X = self.embedding(X)
        # After embedding X: (`batch_size`, `num_steps`, `embed_size`)
        # Context is the last-layer hidden state from the last timestep of the
        # encoder (i.e. the last layer of the `state` argument passed in).
        last_layer_state = state[-1]  # shape (`batch_size`, `num_hiddens`)
        # Broadcast `context` so it has the same `num_steps` as `X`
        context = last_layer_state.repeat(X.shape[1], 1, 1).permute(1,0,2)
        # context now has shape (`batch_size`, `num_steps`, `num_hiddens`)
        # concat(X, context) -> (`batch_size`, `num_steps`, `embed_size + num_hiddens`)
        X_and_context = torch.cat((X, context), 2)
        output, state = self.rnn(X_and_context, state)
        # output : (`batch_size`, `num_steps`, `num_hiddens`)
        # state  : (`num_layers`, `batch_size`, `num_hiddens`), the decoder's
        #          final-timestep hidden state
        output = self.dense(output)
        # final output of decoder:
        # `output` shape: (`batch_size`, `num_steps`, `vocab_size`)
        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)
        return output, state

# +
# Smoke test: reuse the encoder's final state as the decoder's initial state.
decoder = Seq2SeqDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=1)
decoder.eval()
output, state = decoder(X, enc_state)
output.shape, state.shape

# +
# You can feed input consisting of a single timestep as well.
dec_X = torch.from_numpy(np.zeros((5,1))).long()
print(dec_X.shape)  # batch_size=5, num_steps=1
output, state = decoder(dec_X, enc_state)
output.shape, state.shape
# -

# ## Putting encoder and decoder together

# +
class EncoderDecoder(nn.Module):
    """The base class for the encoder-decoder architecture.

    Runs `enc_X` through the encoder and feeds the encoder's final hidden
    state to the decoder as its initial state (the encoder outputs
    themselves are not used further in this simple model).
    """

    def __init__(self, encoder, decoder):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X):
        enc_output, enc_state = self.encoder(enc_X)
        return self.decoder(dec_X, enc_state)


encoder_decoder = EncoderDecoder(encoder, decoder)
encoder_decoder.eval()
# -

# ## Allow parts of sequence to be masked as we have variable length sequences

# +
def sequence_mask(X, valid_len, value=0):
    """Mask irrelevant entries in sequences.

    X         : (`batch_size`, `num_steps`) tensor, modified IN PLACE
    valid_len : (`batch_size`,) count of valid leading entries per row
    value     : fill value for the masked-out (padding) positions
    Returns X with every position at index >= valid_len set to `value`.
    """
    maxlen = X.size(1)
    # (1, maxlen) < (batch, 1) broadcasts to a (batch, maxlen) boolean mask
    mask = torch.arange((maxlen), device=X.device)[None, :] < valid_len[:, None]
    X[~mask] = value
    return X


X = torch.tensor([[1, 2, 3], [4, 5, 6]])
valid_lens = torch.tensor([1, 2])
print('Input has 2 sequences :\n', X)
print('Assume that first sequence has 1 valid elements, second sequence has 2 valid elements', valid_lens)
print('After masking:\n', sequence_mask(X, valid_lens))
# -

# ## Build cross entropy loss using masked sequences

# +
class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
    """The softmax cross-entropy loss with masks.

    Padding positions (beyond each sequence's valid length) are weighted
    zero, so they contribute nothing to the per-sequence loss.
    """

    # `pred` shape: (`batch_size`, `num_steps`, `vocab_size`)
    # `label` shape: (`batch_size`, `num_steps`)
    # `valid_len` shape: (`batch_size`,)
    def forward(self, pred, label, valid_len):
        # 1 for real tokens, 0 for padding
        weights = torch.ones_like(label)
        weights = sequence_mask(weights, valid_len).float()
        # keep per-position losses so they can be weighted individually
        self.reduction = 'none'
        # CrossEntropyLoss expects (`batch_size`, `vocab_size`, `num_steps`)
        unweighted_loss = super(MaskedSoftmaxCELoss, self).forward(
            pred.permute(0, 2, 1), label)
        weighted_loss = (unweighted_loss * weights).mean(dim=1)
        # `weighted_loss` shape: (`batch_size`,)
        return weighted_loss


loss = MaskedSoftmaxCELoss()
# -

# ## Prepare for training

# +
embed_size = 32
num_hiddens = 32
num_layers = 2
dropout = 0.1
batch_size = 64
num_steps = 10  # every source/target sequence is truncated/padded to this length
lr = 0.005
num_epochs = 300
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device=', device)

data_path = '../data/fra-eng/fra.txt'
train_iter, src_vocab, tgt_vocab = load_data_nmt(data_path, batch_size, num_steps)

encoder = Seq2SeqEncoder(len(src_vocab), embed_size, num_hiddens, num_layers, dropout)
decoder = Seq2SeqDecoder(len(tgt_vocab), embed_size, num_hiddens, num_layers, dropout)
net = EncoderDecoder(encoder, decoder)
net.eval()
# -

# ## Initialize weights in GRU layers of encoder and decoder

# +
def xavier_init_weights(m):
    """Xavier-initialize Linear and GRU weights and zero the GRU biases.

    Applied to every submodule via `net.apply(...)`.
    """
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)
    if type(m) == nn.GRU:
        # initialize biases and weights
        for name, param in m.named_parameters():
            if 'bias' in name:
                # was the deprecated `nn.init.constant` (removed in newer
                # torch releases); `constant_` is the supported in-place API
                nn.init.constant_(param, 0.0)
            elif 'weight' in name:
                # initialize the yielded parameter directly instead of
                # reaching into the private `m._parameters` mapping
                # (it is the same tensor object)
                nn.init.xavier_uniform_(param)


net.apply(xavier_init_weights)
net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
loss = MaskedSoftmaxCELoss()
net.train()
# -

# ## Training

# +
class Accumulator:
    """For accumulating sums over `n` variables."""

    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


def grad_clipping(net, theta):
    """Clip the gradient so its global L2 norm is at most `theta`."""
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm


cum_losses = []
for epoch in range(num_epochs):
    start_time = time.time()
    metric = Accumulator(2)  # Sum of training loss, no. of tokens
    for batch in train_iter:
        X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
        bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],
                           device=device).reshape(-1, 1)
        dec_input = torch.cat([bos, Y[:, :-1]], 1)  # Teacher forcing
        # BUG FIX: gradients must be reset every iteration; without this
        # call the gradients of all previous batches keep accumulating
        # into every optimizer step.
        optimizer.zero_grad()
        Y_hat, _ = net(X, dec_input)
        l = loss(Y_hat, Y, Y_valid_len)
        l.sum().backward()  # Make the loss scalar for `backward`
        grad_clipping(net, 1)
        num_tokens = Y_valid_len.sum()
        optimizer.step()
        with torch.no_grad():
            metric.add(l.sum(), num_tokens)
    if (epoch + 1) % 10 == 0:
        print(epoch + 1, metric[0] / metric[1])
    cum_losses.append(metric[0] / metric[1])
    elapsed_time = time.time() - start_time
print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / elapsed_time:.1f} '
      f'tokens/sec on {str(device)}')
# -

# ## Plot the loss over epochs

# +
import matplotlib.pyplot as plt

X = range(len(cum_losses))
plt.plot(X, cum_losses)
plt.show()
# -

# ## Prediction

def translate_a_sentence(src_sentence, src_vocab, bos_token, num_steps):
    """Greedy-decode the translation of one source sentence.

    src_sentence : source sentence, tokens separated by single spaces
    src_vocab    : source-language vocabulary (token -> id lookup)
    bos_token    : id of '<bos>' in the target vocabulary (kept for
                   backward compatibility; the id is re-looked-up below)
    num_steps    : fixed sequence length used at training time
    Returns the list of predicted target-token ids, without '<eos>'.
    NOTE: uses the globals `net`, `decoder`, `tgt_vocab` and `device`.
    """
    # First process the src_sentence: tokenize it, then truncate/pad it.
    src_sentence_words = src_sentence.lower().split(' ')
    print('src sentence words = ', src_sentence_words)
    src_tokens = src_vocab[src_sentence.lower().split(' ')]
    src_tokens = src_tokens + [src_vocab['<eos>']]
    print('src_tokens = ', src_tokens)
    # Truncate the sentence to num_steps if it is longer; pad it if shorter.
    print('Truncating/padding to length', num_steps)
    padding_token = src_vocab['<pad>']
    if len(src_tokens) > num_steps:
        # BUG FIX: this used to read `line[:num_steps]`, which referenced an
        # undefined name and discarded the slice, so long sentences raised a
        # NameError instead of being truncated.
        src_tokens = src_tokens[:num_steps]  # Truncate
    # Pad (a non-positive repeat count yields an empty list, so this is a
    # no-op for sentences that were truncated above)
    src_tokens = src_tokens + [padding_token] * (num_steps - len(src_tokens))
    print('After truncating/padding', src_tokens, '\n')
    # Convert src_tokens to a tensor and add the batch axis
    enc_X = torch.unsqueeze(
        torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)
    # Now shape of enc_X : (`batch_size`, `num_steps`) = (1, 10)
    # Pass it through the encoder
    enc_output, enc_state = net.encoder(enc_X)
    # Feed the decoder one token at a time, starting with '<bos>'
    dec_X = torch.unsqueeze(
        torch.tensor([tgt_vocab['<bos>']], dtype=torch.long, device=device), dim=0)
    # Initialize the decoder state with the encoder's final-timestep state
    dec_input_state = enc_state
    output_token_seq = []
    for _ in range(num_steps):
        curr_output, curr_dec_state = decoder(dec_X, dec_input_state)
        dec_input_state = curr_dec_state
        # curr_output shape: (`batch_size`, 1, `len(tgt_vocab)`)
        # Use the token with the highest prediction likelihood as the input
        # of the decoder for the next time step
        dec_X = curr_output.argmax(dim=2)  # next timestep input for decoder
        # remove the batch dimension as we are working with single sentences
        pred = dec_X.squeeze(dim=0).type(torch.int32).item()
        # '<eos>' predicted: stop
        if pred == tgt_vocab['<eos>']:
            break
        output_token_seq.append(pred)
    return output_token_seq


# ### Let's look at some translations

# +
english_batch = ['go .', "i lost .", 'he\'s calm .', 'i\'m home .']
french_batch = ['va !', 'j\'ai perdu .', 'il est calme .', 'je suis chez moi .']

bos_token = tgt_vocab['<bos>']
for eng_sent, fr_sent in zip(english_batch, french_batch):
    fr_sent_predicted_tokens = translate_a_sentence(eng_sent, src_vocab,
                                                    bos_token, num_steps)
    fr_sent_predicted = ' '.join(tgt_vocab.to_tokens(fr_sent_predicted_tokens))
    print(f'Actual translation: english:{eng_sent} => french:{fr_sent}')
    print(f'Predicted translation: english:{eng_sent} => french:{fr_sent_predicted}')
    print('-------------------------------------------------------------------')
data/Seq2Seq_Simple.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <img style="float: left; padding-right: 10px; width: 45px" src="https://github.com/Harvard-IACS/2018-CS109A/blob/master/content/styles/iacs.png?raw=true"> CS109A Introduction to Data Science
#
#
# ## Pre - Lab 3: `numpy`, plotting
# ## <font color='red'> PRE-LAB : DO THIS PART BEFORE COMING TO LAB</font>
#
# **Harvard University**<br>
# **Fall 2019**<br>
# **Instructors:** <NAME>, <NAME>, and <NAME><br>
#
# **Material prepared by**: <NAME>, <NAME>, <NAME>, and <NAME>
#
# ---

# RUN THIS CELL: downloads and applies the course's notebook stylesheet
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)

# ## Numerical Python: `numpy`

# Review the concepts on numpy: Scientific `Python` code uses a fast array structure, called the `numpy` array. Those who have worked in `Matlab` will find this very natural. For reference, the `numpy` documentation can be found here: [`numpy`](http://www.numpy.org/).
#
#
# Let's make a numpy array.

import numpy as np

my_array = np.array([1,4,9,16])
my_array

# Numpy arrays support the same operations as lists! Below we compute length, slice, and iterate.

# +
print("len(array):", len(my_array))  # Length of array
print("array[2:4]:", my_array[2:4])  # A slice of the array

# Iterate over the array
for ele in my_array:
    print("element:", ele)
# -

# **In general you should manipulate numpy arrays by using numpy module functions** (e.g. `np.mean`). This is for efficiency purposes, and a discussion follows below this section.
#
# You can calculate the mean of the array elements either by calling the method `.mean` on a numpy array or by applying the function `np.mean` with the `numpy` array as an argument.

# +
# Two ways of calculating the mean
print(my_array.mean())
print(np.mean(my_array))
# -

# The way we constructed the `numpy` array above seems redundant. After all we already had a regular `python` list. Indeed, it is the other ways we have to construct `numpy` arrays that make them super useful.
#
# There are many such `numpy` array *constructors*. Here are some commonly used constructors. Look them up in the documentation.

zeros = np.zeros(10)  # generates 10 floating point zeros
zeros

# `Numpy` gains a lot of its efficiency from being strongly typed. That is, all elements in the array have the same type, such as integer or floating point. The default type, as can be seen above, is a float of size appropriate for the machine (64 bit on a 64 bit machine).

zeros.dtype

np.ones(10, dtype='int')  # generates 10 integer ones

# If the elements of an array are of a different type, `numpy` will force them into the same type (the longest in terms of bytes)

# +
mixed = np.array([1, 2.3, 'eleni', True])  # elements are of different type

print(type(1), type(2.3), type('eleni'), type(True))

# all elements will become strings
mixed
# -

# Often you will want random numbers. Use the `random` constructor!

np.random.rand(10)  # uniform on [0,1]

# You can generate random numbers from a normal distribution with mean $0$ and variance $1$ using `np.random.randn`:

# NOTE(review): "devation" below is a typo inside the printed string; a
# documentation-only pass must not alter runtime output, so it is left as-is.
normal_array = np.random.randn(10000)
print("The sample mean and standard devation are {0:17.16f} and {1:17.16f}, respectively.".format(np.mean(normal_array), np.std(normal_array)))

# #### `numpy` supports vector operations
#
# What does this mean? It means that to add two arrays instead of looping over each element (e.g. via a list comprehension as in base Python) you get to simply put a plus sign between the two arrays.

ones_array = np.ones(5)
twos_array = 2*np.ones(5)
ones_array + twos_array

# Note that this behavior is very different from `python` lists, which just get longer when you try to + them.

first_list = [1., 1., 1., 1., 1.]
second_list = [2., 2., 2., 2., 2.]
first_list + second_list  # not what you want

# On some computer chips numpy's addition actually happens in parallel, so speedups can be high. But even on regular chips, the advantage of greater readability is important.

# `Numpy` supports a concept known as *broadcasting*, which dictates how arrays of different sizes are combined together. There are too many rules to list all of them here. Here are two important rules:
#
# 1. Multiplying an array by a number multiplies each element by the number
# 2. Adding a number adds the number to each element.

ones_array + 1

5 * ones_array

# This means that if you wanted the distribution $N(5, 7)$ you could do:

# +
normal_5_7 = 5.0 + 7.0 * normal_array

np.mean(normal_5_7), np.std(normal_5_7)
# -

# Now you have seen how to create and work with simple one dimensional arrays in `numpy`. You have also been introduced to some important `numpy` functionality (e.g. `mean` and `std`).
#
# Next, we push ahead to two-dimensional arrays and begin to dive into some of the deeper aspects of `numpy`.

# ### 2D arrays

# We can create two-dimensional arrays without too much fuss.

# +
# create a 2d-array by handing a list of lists
my_array2d = np.array([
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12]
])

# you can do the same without the pretty formatting (decide which style you like better)
my_array2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])

# 3 x 4 array of ones
ones_2d = np.ones([3, 4])
print(ones_2d, "\n")

# 3 x 4 array of ones with random noise
ones_noise = ones_2d + 0.01*np.random.randn(3, 4)
print(ones_noise, "\n")

# 3 x 3 identity matrix
my_identity = np.eye(3)
print(my_identity, "\n")
# -

# Like lists, `numpy` arrays are $0$-indexed. Thus we can access the $n$th row and the $m$th column of a two-dimensional array with the indices $[n - 1, m - 1]$.
print(my_array2d)
print("element [2,3] is:", my_array2d[2, 3])

# Numpy arrays can be sliced, and can be iterated over with loops. Below is a schematic illustrating slicing two-dimensional arrays.
#
# <img src="../images/2dindex_v2.png" alt="Drawing" style="width: 500px;"/>
#
# Notice that the list slicing syntax still works!
# `array[2:,3]` says "in the array, get rows 2 through the end, column 3]"
# `array[3,:]` says "in the array, get row 3, all columns".

# Numpy functions will by default work on the entire array:

np.sum(ones_2d)

# The axis `0` is the one going downwards (i.e. the rows), whereas axis `1` is the one going across (the columns). You will often use functions such as `mean` or `sum` along a particular axis. If you `sum` along axis 0 you are summing across the rows and will end up with one value per column. As a rule, any axis you list in the axis argument will disappear.

np.sum(ones_2d, axis=0)

np.sum(ones_2d, axis=1)

# <div class="exercise"><b>Exercise</b></div>
#
# Create a two-dimensional array of size $3\times 5$ and do the following:
# * Print out the array
# * Print out the shape of the array
# * Create two slices of the array:
#     1. The first slice should be the last row and the third through last column
#     2. The second slice should be rows $1-3$ and columns $3-5$
# * Square each element in the array and print the result
#
# (*solutions follow but try not to look at them!*)

# your code here

# +
# Solution
A = np.array([
    [5, 4, 3, 2, 1],
    [1, 2, 3, 4, 5],
    [1.1, 2.2, 3.3, 4.4, 5.5]
])
print(A, "\n")

# set length(shape)
dims = A.shape
print(dims, "\n")

# slicing
print(A[-1, 2:], "\n")
print(A[1:3, 3:5], "\n")

# squaring
A2 = A * A
print(A2)
# -

# #### `numpy` supports matrix operations
# 2d arrays are numpy's way of representing matrices. As such there are lots of built-in methods for manipulating them

# Earlier when we generated the one-dimensional arrays of ones and random numbers, we gave `ones` and `random` the number of elements we wanted in the arrays. In two dimensions, we need to provide the shape of the array, i.e., the number of rows and columns of the array.

three_by_four = np.ones([3,4])

three_by_four

# You can transpose the array:

three_by_four.shape

four_by_three = three_by_four.T
four_by_three.shape

# Matrix multiplication is accomplished by `np.dot`. The `*` operator will do element-wise multiplication.

print(np.dot(three_by_four, four_by_three))  # 3 x 3 matrix

np.dot(four_by_three, three_by_four)  # 4 x 4 matrix

# ### `Numpy` Arrays vs. `Python` Lists?
#
# 1. Why the need for `numpy` arrays? Can't we just use `Python` lists?
# 2. Iterating over `numpy` arrays is slow. Slicing is faster.
#
# `Python` lists may contain items of different types. This flexibility comes at a price: `Python` lists store *pointers* to memory locations. On the other hand, `numpy` arrays are typed, where the default type is floating point. Because of this, the system knows how much memory to allocate, and if you ask for an array of size $100$, it will allocate one hundred contiguous spots in memory, where the size of each spot is based on the type. This makes access extremely fast.
#
# <img src="https://jakevdp.github.io/PythonDataScienceHandbook/figures/array_vs_list.png" alt="Drawing" style="width: 500px;"/>
#
# (image from <NAME>'s Data Science Handbook)
#
# Unfortunately, looping over an array slows things down. In general you should not access `numpy` array elements by iteration. This is because of type conversion. `Numpy` stores integers and floating points in `C`-language format. When you operate on array elements through iteration, `Python` needs to convert that element to a `Python` `int` or `float`, which is a more complex beast (a `struct` in `C` jargon). This has a cost.
# # <img src="https://jakevdp.github.io/PythonDataScienceHandbook/figures/cint_vs_pyint.png" alt="Drawing" style="width: 500px;"/> # # (image from Jake Vanderplas's Data Science Handbook) # # If you want to know more, we will suggest that you read # - [<NAME>'s Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/). # - [<NAME>'s Python for Data Analysis](https://hollis.harvard.edu/primo-explore/fulldisplay?docid=01HVD_ALMA512247401160003941&context=L&vid=HVD2&lang=en_US&search_scope=everything&adaptor=Local%20Search%20Engine&tab=everything&query=any,contains,Wes%20McKinney%27s%20Python%20for%20Data%20Analysis&sortby=rank&offset=0) (HOLLIS)<br> # You will find them both incredible resources for this class. # # Why is slicing faster? The reason is technical: slicing provides a *view* onto the memory occupied by a `numpy` array, instead of creating a new array. That is the reason the code above this cell works nicely as well. However, if you iterate over a slice, then you have gone back to the slow access. # # By contrast, functions such as `np.dot` are implemented at `C`-level, do not do this type conversion, and access contiguous memory. If you want this kind of access in `Python`, use the `struct` module or `Cython`. Indeed many fast algorithms in `numpy`, `pandas`, and `C` are either implemented at the `C`-level, or employ `Cython`. # ## 2 - Plotting with matplot lib (and beyond) # <img style="float: center" src="https://imgs.xkcd.com/comics/convincing.png"> # # Conveying your findings convincingly is an absolutely crucial part of any analysis. Therefore, you must be able to write well and make compelling visuals. Creating informative visuals is an involved process and we won't cover that in this lab. However, part of creating informative data visualizations means generating *readable* figures. If people can't read your figures or have a difficult time interpreting them, they won't understand the results of your work. 
# * Rectified Linear Unit:
# \begin{align*}
#   h\left(z\right) =
#   \left\{
#     \begin{array}{lr}
#       z, \quad z > 0 \\
#       \epsilon z, \quad z\leq 0
#     \end{array}
#   \right.
# \end{align*}
# where $\epsilon > 0$ is a small, positive parameter.
# Notice that $z$ is passed in as a `numpy` array and that the functions are returned as `numpy` arrays. Parameters are passed in as floats.
#
# You should write a function to compute the rectified linear unit. The input should be a `numpy` array for $z$ and a positive float for $\epsilon$.

# +
# Your code here

# +
# solution
import numpy as np

def logistic(z: np.ndarray, a: float, b: float) -> np.ndarray:
    """
    Compute the logistic function 1 / (1 + b * exp(-a*z)).

    Inputs:
       z: numpy array; domain over which to evaluate the function
       a: exponential parameter (controls steepness)
       b: exponential prefactor (horizontal shift / scaling)

    Outputs:
       f: numpy array of floats, logistic function evaluated at z
    """

    den = 1.0 + b * np.exp(-a * z)
    return 1.0 / den

def stretch_tanh(z: np.ndarray, a: float, b: float, c: float) -> np.ndarray:
    """
    Compute the stretched hyperbolic tangent b * tanh(a*z) + c.

    Inputs:
       z: numpy array; domain over which to evaluate the function
       a: horizontal stretch parameter (a>1 implies a horizontal squish)
       b: vertical stretch parameter
       c: vertical shift parameter

    Outputs:
       g: numpy array of floats, stretched tanh evaluated at z
    """
    return b * np.tanh(a * z) + c

def relu(z: np.ndarray, eps: float = 0.01) -> np.ndarray:
    """
    Compute the rectified linear unit (leaky variant).

    Returns z where z > 0 and eps*z elsewhere, i.e. a "leaky" ReLU;
    eps = 0 recovers the standard ReLU.

    Inputs:
       z: numpy array; domain over which to evaluate the function
       eps: small positive leak parameter (slope for z <= 0)

    Outputs:
       h: numpy array; relu evaluated at z
    """
    # np.fmax picks the elementwise maximum of z and eps*z:
    # for z > 0 that is z, for z <= 0 (and 0 < eps < 1) that is eps*z.
    return np.fmax(z, eps * z)
# -

# Now let's make some plots. First, let's just warm up and plot the logistic function.

# +
x = np.linspace(-5.0, 5.0, 100) # Equally spaced grid of 100 pts between -5 and 5

f = logistic(x, 1.0, 1.0) # Generate data
# +
import matplotlib.pyplot as plt
# This is only needed in Jupyter notebooks!  Displays the plots for us.
# %matplotlib inline

plt.plot(x, f); # Use the semicolon to suppress some iPython output (not needed in real Python scripts)
# -

# Wonderful! We have a plot. Let's clean it up a bit by putting some labels on it.

plt.plot(x, f)
plt.xlabel('x')
plt.ylabel('f')
plt.title('Logistic Function');

# Okay, it's getting better. Still super ugly. I see these kinds of plots at conferences all the time. Unreadable. We can do better. Much, much better. First, let's throw on a grid.
plt.plot(x, f) plt.xlabel('x') plt.ylabel('f') plt.title('Logistic Function') plt.grid(True) # At this point, our plot is starting to get a little better but also a little crowded. # # #### A note on gridlines # Gridlines can be very helpful in many scientific disciplines. They help the reader quickly pick out important points and limiting values. On the other hand, they can really clutter the plot. Some people recommend never using gridlines, while others insist on them being present. The correct approach is probably somewhere in between. Use gridlines when necessary, but dispense with them when they take away more than they provide. Ask yourself if they help bring out some important conclusion from the plot. If not, then best just keep them away. # # Before proceeding any further, I'm going to change notation. The plotting interface we've been working with so far is okay, but not as flexible as it can be. In fact, I don't usually generate my plots with this interface. I work with slightly lower-level methods, which I will introduce to you now. The reason I need to make a big deal about this is because the lower-level methods have a slightly different API. This will become apparent in my next example. # + fig, ax = plt.subplots(1,1) # Get figure and axes objects ax.plot(x, f) # Make a plot # Create some labels ax.set_xlabel('x') ax.set_ylabel('f') ax.set_title('Logistic Function') # Grid ax.grid(True) # - # Wow, it's *exactly* the same plot! Notice, however, the use of `ax.set_xlabel()` instead of `plt.xlabel()`. The difference is tiny, but you should be aware of it. I will use this plotting syntax from now on. # # What else do we need to do to make this figure better? Here are some options: # * Make labels bigger! # * Make line fatter # * Make tick mark labels bigger # * Make the grid less pronounced # * Make figure bigger # # Let's get to it. 
# + fig, ax = plt.subplots(1,1, figsize=(10,6)) # Make figure bigger ax.plot(x, f, lw=4) # Linewidth bigger ax.set_xlabel('x', fontsize=24) # Fontsize bigger ax.set_ylabel('f', fontsize=24) # Fontsize bigger ax.set_title('Logistic Function', fontsize=24) # Fontsize bigger ax.grid(True, lw=1.5, ls='--', alpha=0.75) # Update grid # - # Notice: # * `lw` stands for `linewidth`. We could also write `ax.plot(x, f, linewidth=4)` # * `ls` stands for `linestyle`. # * `alpha` stands for transparency. # Things are looking good now! Unfortunately, people still can't read the tick mark labels. Let's remedy that presently. # + fig, ax = plt.subplots(1,1, figsize=(10,6)) # Make figure bigger # Make line plot ax.plot(x, f, lw=4) # Update ticklabel size ax.tick_params(labelsize=24) # Make labels ax.set_xlabel(r'$x$', fontsize=24) # Use TeX for mathematical rendering ax.set_ylabel(r'$f(x)$', fontsize=24) # Use TeX for mathematical rendering ax.set_title('Logistic Function', fontsize=24) ax.grid(True, lw=1.5, ls='--', alpha=0.75) # - # The only thing remaining to do is to change the $x$ limits. Clearly these should go from $-5$ to $5$. # + fig, ax = plt.subplots(1,1, figsize=(10,6)) # Make figure bigger # Make line plot ax.plot(x, f, lw=4) # Set axes limits ax.set_xlim(x.min(), x.max()) # Update ticklabel size ax.tick_params(labelsize=24) # Make labels ax.set_xlabel(r'$x$', fontsize=24) # Use TeX for mathematical rendering ax.set_ylabel(r'$f(x)$', fontsize=24) # Use TeX for mathematical rendering ax.set_title('Logistic Function', fontsize=24) ax.grid(True, lw=1.5, ls='--', alpha=0.75) # - # You can play around with figures forever making them perfect. At this point, everyone can read and interpret this figure just fine. Don't spend your life making the perfect figure. Make it good enough so that you can convey your point to your audience. Then save if it for later. fig.savefig('logistic.png') # Done! Let's take a look. 
# ![](../images/logistic.png) # #### Resources # If you want to see all the styles available, please take a look at the documentation. # * [Line styles](https://matplotlib.org/2.0.1/api/lines_api.html#matplotlib.lines.Line2D.set_linestyle) # * [Marker styles](https://matplotlib.org/2.0.1/api/markers_api.html#module-matplotlib.markers) # * [Everything you could ever want](https://matplotlib.org/2.0.1/api/lines_api.html#matplotlib.lines.Line2D.set_marker) # # We haven't discussed it yet, but you can also put a legend on a figure. You'll do that in the next exercise. Here are some additional resources: # * [Legend](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html) # * [Grid](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.grid.html) # <div class="exercise"><b>Exercise</b></div> # # Do the following: # * Make a figure with the logistic function, hyperbolic tangent, and rectified linear unit. # * Use different line styles for each plot # * Put a legend on your figure # # Here's an example of a figure: # ![](../images/nice_plots.png) # # You don't need to make the exact same figure, but it should be just as nice and readable. # + # your code here # First get the data f = logistic(x, 2.0, 1.0) g = stretch_tanh(x, 2.0, 0.5, 0.5) h = relu(x) fig, ax = plt.subplots(1,1, figsize=(10,6)) # Create figure object # Make actual plots # (Notice the label argument!) 
# There are many more things you can do to the figure to spice it up. Remember, there must be a tradeoff between making a figure look good and the time you put into it.
# ### No Excuses
# With all of these resources, there is no reason to have a bad figure.
labs/lab03/notebook/cs109a_lab3_prelab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# -*- coding: utf-8 -*-
# User-based collaborative filtering recommendation algorithm.

import random
import pandas as pd
import math
from operator import itemgetter

import sys

# Module-level user-user similarity matrix, filled by UserSimilarity() and
# read by recommend().
# NOTE(review): kept as a global for backward compatibility with existing
# callers; an instance attribute would be the cleaner design.
user_sim_mat = {}


class UserBasedCF(object):
    ''' TopN recommendation - User Based Collaborative Filtering '''

    def __init__(self):
        self.trainset = {}    # training set: {user: {movie: rating}}
        self.testset = {}     # test set: {user: {movie: rating}}
        self.initialset = {}  # full rating history for users to recommend for

        self.n_sim_user = 30   # K: number of nearest-neighbour users used
        self.n_rec_movie = 10  # N: number of movies to recommend

        self.movie_popular = {}  # movie -> number of users who rated it
        self.movie_count = 0     # total number of distinct movies

        print('Similar user number = %d' % self.n_sim_user, file=sys.stderr)
        print('recommended movie number = %d' %
              self.n_rec_movie, file=sys.stderr)

    @staticmethod
    def loadfile(filename):
        ''' Load a file and yield its lines one at a time (a generator). '''
        # 'with' guarantees the handle is closed even if the consumer stops
        # iterating early or an exception is raised (the original code could
        # leak the file handle in those cases).
        with open(filename, 'r', encoding='UTF-8') as fp:
            for line in fp:
                yield line.strip('\r\n')
        print('load %s success' % filename, file=sys.stderr)

    def initial_dataset(self, filename1):
        ''' Read the complete "user,movie,rating" history into initialset. '''
        initialset_len = 0
        for lines in self.loadfile(filename1):
            users, movies, ratings = lines.split(',')
            self.initialset.setdefault(users, {})
            # Ratings are kept as strings, matching generate_dataset().
            self.initialset[users][movies] = ratings
            initialset_len += 1

    def generate_dataset(self, filename2, pivot=1.0):
        ''' Load rating data and split it into a training set and a test set.

        pivot is the training fraction: pivot=0.7 yields roughly a 7:3
        train/test split.  The default of 1.0 sends (almost) every record
        to the training set.
        '''
        trainset_len = 0
        testset_len = 0
        for line in self.loadfile(filename2):
            user, movie, rating = line.split(',')
            # Randomly route each record according to the pivot fraction.
            if random.random() < pivot:
                self.trainset.setdefault(user, {})
                self.trainset[user][movie] = rating
                trainset_len += 1
            else:
                self.testset.setdefault(user, {})
                self.testset[user][movie] = rating
                testset_len += 1
        print('split training set and test set successful!', file=sys.stderr)
        print('train set = %s' % trainset_len, file=sys.stderr)
        print('test set = %s' % testset_len, file=sys.stderr)

    def UserSimilarity(self):
        ''' Build the user-user similarity matrix from the training set. '''
        # Inverted index: movie -> set of users who rated it.
        movie2users = dict()

        for user, movies in self.trainset.items():
            for movie in movies:
                if movie not in movie2users:
                    movie2users[movie] = set()
                movie2users[movie].add(user)
                # Track how many users rated each movie (popularity).
                if movie not in self.movie_popular:
                    self.movie_popular[movie] = 0
                self.movie_popular[movie] += 1

        self.movie_count = len(movie2users)
        print('total movie number = %d' % self.movie_count, file=sys.stderr)

        # Accumulate co-rating weight for every user pair.  Each shared movie
        # contributes 1/log(1 + |raters|), so very popular movies count less
        # toward similarity (popularity penalty).
        usersim_mat = user_sim_mat
        for movie, users in movie2users.items():
            for u in users:
                for v in users:
                    if u == v:
                        continue
                    usersim_mat.setdefault(u, {})
                    usersim_mat[u].setdefault(v, 0)
                    usersim_mat[u][v] += 1 / math.log(1 + len(users))

        # Normalise by the geometric mean of the two users' activity
        # (cosine-style normalisation over rated-movie counts).
        simfactor_count = 0
        for u, related_users in usersim_mat.items():
            for v, count in related_users.items():
                usersim_mat[u][v] = count / math.sqrt(
                    len(self.trainset[u]) * len(self.trainset[v]))
                simfactor_count += 1

    def recommend(self, user):
        ''' Find the K most similar users and recommend N unseen movies.

        Returns a list of movie ids (imdb ids), best first.
        NOTE(review): raises KeyError if `user` is absent from the training
        set / similarity matrix — presumably callers always pass a known
        user; confirm before hardening.
        '''
        matrix = []                     # recommended movie ids, best first
        K = self.n_sim_user
        N = self.n_rec_movie
        rank = dict()                   # candidate movie -> interest score
        watched_movies = self.trainset[user]  # movies already seen (train set)

        # Walk the K most similar users (itemgetter(1): sort by similarity,
        # descending) and collect their movies the target user hasn't seen.
        for similar_user, similarity_factor in sorted(
                user_sim_mat[user].items(),
                key=itemgetter(1), reverse=True)[0:K]:
            for imdbid in self.trainset[similar_user]:
                if imdbid in watched_movies:
                    continue  # skip movies the user has already watched
                rank.setdefault(imdbid, 0)
                # A movie's score is the sum of the similarities of the
                # neighbours who rated it.
                rank[imdbid] += similarity_factor

        # Keep the N highest-scoring candidates, best first.
        rank_ = sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]
        for key, value in rank_:
            matrix.append(key)
        print(matrix)
        return matrix
# -


if __name__ == '__main__':
    rating_file = 'C:\\Users\\tong\\Desktop\\movies\\ml-latest-small\\rating.csv'
    userID = input('่ฏท่พ“ๅ…ฅ็”จๆˆท็ผ–ๅท๏ผš')
    userCF = UserBasedCF()
    userCF.generate_dataset(rating_file)
    userCF.UserSimilarity()
    print('ไธบ่ฏฅ็”จๆˆทๆŽจ่็š„่ฏ„ๅˆ†ๆœ€้ซ˜็š„10้ƒจ็”ตๅฝฑๆ˜ฏ๏ผš'.center(30, '='))
    userCF.recommend(userID)
algorithm/UserBasedCF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # <img src="https://img.icons8.com/bubbles/100/000000/3d-glasses.png" style="height:50px;display:inline"> EE 046746 - Technion - Computer Vision # --- # #### <a href="https://taldatech.github.io/"><NAME></a> # # ## Tutorial 07 - Deep Object Tracking # --- # # <img src="./assets/tut_track_anim.gif" style="height:200px"> # # * <a href="https://www.imageannotation.ai/blog/object-tracking-in-videos">Image Source</a> # + [markdown] slideshow={"slide_type": "slide"} # ### <img src="https://img.icons8.com/bubbles/50/000000/checklist.png" style="height:50px;display:inline"> Agenda # --- # # * [What is the Object Tracking Task?](#-What-is-Object-Tracking?) # * [Object Tracking Vs. Object Detection](#-Object-Tracking-Vs.-Object-Detection) # * [Under the Hood of Object Tracking](#-Under-the-Hood-of-Object-Tracking) # * [Motion Model](#-Motion-Model) # * [Visual Appearance Model](#-Visual-Appearance-Model) # * [Object Tracking Procedure](#-Object-Tracking-Procedure) # * [Types of Tracking Algorithms](#-Types-of-Tracking-Algorithms) # * [From Classic to Deep Learning Algorithms](#-From-Classic-to-Deep-Learning-Algorithms) # * [Deep Object Tracking Algorithms](#-Deep-Object-Tracking-Algorithms) # * [Recommended Videos](#-Recommended-Videos) # * [Credits](#-Credits) # + slideshow={"slide_type": "skip"} # imports for the tutorial import numpy as np import matplotlib.pyplot as plt import cv2 import os # + [markdown] slideshow={"slide_type": "slide"} # ## <img src="https://img.icons8.com/dusk/50/000000/accuracy.png" style="height:50px;display:inline"> What is Object Tracking? # --- # * Object tracking in videos, or video object tracking, is the process of **detecting an object as it moves through space** in a video. 
# * Another example would be tracking shoppers' paths in a mall and analyzing the number of people that entered/exited the mall.
# * **Motion Blur** - object is blurred due to the motion of the object or camera. Hence, visually, the object doesnโ€™t look the same anymore. # * **Viewpoint Variation** - different viewpoint of an object may look very different visually and without the context. It becomes very difficult to identify the object using only visual detection. # * **Scale Change** - huge changes in object scale may cause a failure in detection. # + [markdown] slideshow={"slide_type": "subslide"} # * **Background Clutters** - background near object has similar color or texture as the target object. Hence, it may become harder to separate the object from the background. # * **Illumination Variation** - illumination near the target object is significantly changed. Hence, it may become harder to visually identify it. # * **Low Resolution** - when the number of pixels inside the ground truth bounding box is low, it may be too hard to detect the objects visually. # + [markdown] slideshow={"slide_type": "subslide"} # ### <img src="https://img.icons8.com/dusk/64/000000/car-service.png" style="height:50px;display:inline"> Under the Hood of Object Tracking # --- # * There are various techniques and algorithms which try to solve the tracking problem in many different ways. # * A good object tracker has two basic models: **Motion Model** and **Visual Appearance Model**. # # <img src="./assets/tut_track_optical_flow.gif" style="height:250px"> # # * <a href="https://nanonets.com/blog/optical-flow/">Image Source - Introduction to Motion Estimation with Optical Flow</a> # + [markdown] slideshow={"slide_type": "subslide"} # #### <img src="https://img.icons8.com/officel/48/000000/motion-detector.png" style="height:50px;display:inline"> Motion Model # --- # * The ability to understand and model the motion of the object. # * A good motion model captures the dynamic behavior of an object. # * It predicts the potential position of objects in the future frames, hence, reducing the search space. 
# * However, the motion model alone can fail in scenarios where motion is caused by things that are not in a video or abrupt direction and speed change. # * Some of the classic methods understand the motion pattern of the object and try to predict that. # * However, the problem with such approaches is that they canโ€™t predict the abrupt motion and direction changes. # * Examples of such techniques are Optical Flow, Kalman Filtering, Kanade-Lucas-Tomashi (KLT) feature tracker, mean shift tracking. # + [markdown] slideshow={"slide_type": "subslide"} # #### <img src="https://img.icons8.com/doodle/48/000000/interior-mirror.png" style="height:50px;display:inline"> Visual Appearance Model # --- # * The ability to understand the appearance of the object that is tracked. # * Trackers need to learn to discriminate the object from the background. # * In single object trackers (one object), visual appearance alone could be enough to track the object across frames, while In multiple-object trackers, visual appearance alone is not enough. # + [markdown] slideshow={"slide_type": "slide"} # ## <img src="https://img.icons8.com/dusk/64/000000/workflow.png" style="height:50px;display:inline"> Object Tracking Procedure # --- # In general, the object tracking procedure is composed of **4 main modules**: # * **Target initialization/object detection**: an initial set of object detections is created. This is typically done by taking a set of bounding box coordinates and using them as inputs for the network. # * The idea is to draw bounding box of the target in the initial frame of the video and tracker has to estimate the target position in the remaining frames in the video. # * **Appearance modeling**: learning the visual appearance of the object by using (deep) learning techniques. In this phase, the model learns the visual features of the object while in motion, various view-points, scale, illuminations etc. 
# + [markdown] slideshow={"slide_type": "subslide"} # * **Motion estimation**: the objective of motion estimation is learning to predict a zone where the target is most likely to be present in the subsequent frames. # * **Target positioning**: motion estimation predicts the possible region where the target could be present, thus, yielding an area to search to lock down the exact location of the target. # # It is usually the case that tracking algorithms donโ€™t try to learn all the variations of the object. Hence, most of the tracking algorithms are much faster than regular object detection. # # <img src="./assets/tut_track_cars.gif" style="height:200px"> # # * <a href="https://www.move-lab.com/blog/tracking-things-in-object-detection-videos">Image Source - Tracking Things In Object Detection Videos</a> # + [markdown] slideshow={"slide_type": "slide"} # ## <img src="https://img.icons8.com/nolan/64/categorize.png" style="height:50px;display:inline"> Types of Tracking Algorithms # --- # We can classifiy object trackers according to whether they are based on automatic object detection or manual, whether they track a single object or capable of tracking multi objects and whether they operate *online* or *offline*. # + [markdown] slideshow={"slide_type": "subslide"} # ### <img src="https://img.icons8.com/nolan/64/rectangle.png" style="height:50px;display:inline"> Detection-Based Vs. Detection-Free # --- # * **Detection-based**: the consecutive video frames are given to a pretrained object detector that forms a detection hypothesis which in turn is used to form tracking trajectories. # * It is more popular because new objects are detected and disappearing objects are terminated automatically. # * In these approaches, the tracker is used for the failure cases of object detection. # * In an another approach, object detector is run every $n$ frames and the remaining predictions are done using the tracker. # * Suitable approach for tracking for a long time. 
# + [markdown] slideshow={"slide_type": "subslide"} # * **Detection-free**: requires manual initialization of a fixed number of objects in the first frame. It then localizes these objects in the subsequent frames. # * Cannot deal with the case where new objects appear in the middle frames. # # <img src="./assets/tut_track_manual.jpg" style="height:250px"> # # # * <a href="https://www.freecodecamp.org/news/object-detection-in-colab-with-fizyr-retinanet-efed36ac4af3/">Image Source</a> # + [markdown] slideshow={"slide_type": "subslide"} # ### <img src="https://img.icons8.com/nolan/64/group-objects.png" style="height:50px;display:inline"> Single Object Vs. Multi Object # --- # * **Single Object Tracking**: only a single object is tracked even if the environment has multiple objects in it. The object to be tracked is determined by the initialization in the first frame. # * **Multi Object Tracking**: all the objects present in the environment are tracked over time. If a detection based tracker is used it can even track new objects that emerge in the middle of the video. # + [markdown] slideshow={"slide_type": "subslide"} # ### <img src="https://img.icons8.com/color/96/000000/wi-fi-connected.png" style="height:50px;display:inline"> Offline Vs. Online # --- # * **Offline Trackers** - used when you have to track an object in a recorded stream. # * For example if you have recorded videos of a soccer game of an opponent team which needs to be analyzed for strategic analysis. In such case, you can not only use the past frames but also future frames to make more accurate tracking predictions. # * **Online Trackers** - online trackers are used where predictions are available immediately and hence, they canโ€™t use future frames to improve the results. # # + [markdown] slideshow={"slide_type": "subslide"} # * **Offline Learning Trackers** - the training of these trackers only happen offline. 
# * As opposed to online learning trackers, these trackers donโ€™t learn anything during run time. # * We can train a tracker to identify persons and then these trackers can be used to continuously track all the persons in a video stream. Pre-trained. # * **Online Learning Trackers** - typically learn about the object to track using the initialization frame and few subsequent frames, making these trackers more general because you can just draw a bounding box around any object and track it. # * For example, if you want to track a person with red shirt in the airport, you can just draw a bounding box around that person in 1 or few frames. The tracker would learn about the object using these frames and would continue to track that person. # + [markdown] slideshow={"slide_type": "subslide"} # * In online learning trackers, Center Red box is specified by the user, it is taken as the positive example and all the boxes surrounding the object are taken as negative class and a classifier is trained which learns to distinguish the object from the background. # # <img src="./assets/tut_track_online.jpeg" style="height:250px"> # + [markdown] slideshow={"slide_type": "slide"} # ## <img src="https://img.icons8.com/cute-clipart/64/000000/horizontal-settings-mixer.png" style="height:50px;display:inline"> From Classic to Deep Learning Algorithms # --- # * Most of the classic trackers are not very accurate due to the limitations mentioned in the beginning. # * However, some times they can be useful to run in a resource constraint environment like an embedded system. # * **Kernelized Correlation Filters (KCF)** tracker is a very fast, well-performing tracker. Read more: <a href="https://arxiv.org/abs/1404.7584">High-Speed Tracking with Kernelized Correlation Filters</a> # * A lot of classic tracking algorithms are integrated in <a href="https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/">OpenCVโ€™s tracking API</a>. 
# * A very simple tracking technique is based on finding centroids and can be easily implemented with OpenCV. # * <a href="https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/">Simple object tracking with OpenCV</a> (Code is available). # * Deep learning based trackers are now miles ahead of traditional trackers in terms of accuracy. # # <a href="https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/"><img src="./assets/tut_track_centeroids_opencv.gif" style="height:250px"></a> # + [markdown] slideshow={"slide_type": "slide"} # ## <img src="https://img.icons8.com/cotton/64/000000/magic-crystal-ball.png" style="height:50px;display:inline"> Deep Object Tracking Algorithms # --- # * We will present 3 popular deep learning based trackers, but there are more. # * Only one of them is implemented in OpenCV, but we will provide PyTorch code for the rest. # + [markdown] slideshow={"slide_type": "subslide"} # ### <img src="https://img.icons8.com/nolan/64/network.png" style="height:50px;display:inline"> Generic Object Tracking Using Regression Networks (GOTURN) # --- # * GOTURN was introduced by <NAME>, <NAME>, <NAME> in their paper titled <a href="http://davheld.github.io/GOTURN/GOTURN.pdf">โ€œLearning to Track at 100 FPS with Deep Regression Networksโ€</a>. # * GOTURN uses deep neural networks to track objects in an **offline** fashion. # * Most tracking algorithms train *online*, which is to say the algorithm learns how the object appears *only at runtime*. # * In contrast, GOTURN is trained on thousands of chunks of video before runtime, and as a result, it doesnโ€™t need to train at all during runtime. # * GOTURN is trained using a pair of *cropped* frames from thousands of videos and outputs the bounding box around the object in the second frame. 
# # <img src="./assets/tut_track_goturn.jpg" style="height:250px"> # + [markdown] slideshow={"slide_type": "subslide"} # * In the first frame (also referred to as the previous frame), the location of the object is known, and the frame is cropped to two times the size of the bounding box around the object. # * The object in the first cropped frame is *always centered*. # * The location of the object in the second frame (also referred to as the current frame) needs to be predicted. # * The bounding box used to crop the first frame is also used to crop the second frame. Because the object might have moved, the object is not centered in the second frame. # * A **Convolutional Neural Network (CNN)** is trained to predict the location of the bounding box in the second frame. # + [markdown] slideshow={"slide_type": "subslide"} # <img src="./assets/tut_track_goturn_arch.jpg" style="height:300px"> # + [markdown] slideshow={"slide_type": "subslide"} # * GOTURN is the only deep-learning algorithm implemented in OpenCV. # * GOTURN can run very fast i.e. 100fps on a GPU powered machine. # * We will now see how it works! # * If you prefer to work with PyTorch, here are 2 repositories: # * <a href="https://github.com/amoudgl/pygoturn">Source 1</a> # * <a href="https://github.com/nrupatunga/PY-GOTURN/">Source 2</a> # + [markdown] slideshow={"slide_type": "slide"} # # ------------------------- <img src="https://img.icons8.com/color/96/000000/code.png" style="height:50px;display:inline"> CODE TIME ------------------------- # + [markdown] slideshow={"slide_type": "subslide"} # The first step is downloading the pre-trained model files. There are 2 ways to download it: # 1. Downloading the files separately and merging them locally, instructions can be found here: <a href="https://github.com/spmallick/goturn-files">LINK</a> # 2. 
# + slideshow={"slide_type": "subslide"}
# Create a GOTURN tracker. OpenCV loads `goturn.caffemodel` and
# `goturn.prototxt` from the current working directory (see download
# instructions above); creation fails if they are missing.
tracker = cv2.TrackerGOTURN_create()

# Read video
video = cv2.VideoCapture("./datasets/tracking/ball.mp4")

# Exit if video not opened
if not video.isOpened():
    print("Could not open video")
    raise SystemError

# Read first frame -- GOTURN is initialized from one frame plus a bounding box.
ok, frame = video.read()
if not ok:
    print("Cannot read video file")
    raise SystemError

# + slideshow={"slide_type": "subslide"}
# Define a bounding box
# bbox = (276, 23, 86, 320)  # OpenCV bounding box format: (x, y, width, height)
# The commented line above hard-codes a box; the line below lets the user draw one interactively.
bbox = cv2.selectROI(frame, False)  # False is to say that we want to draw rectangle from top left (and not from the center)

# + slideshow={"slide_type": "subslide"}
# Initialize tracker with first frame and bounding box
ok = tracker.init(frame, bbox)
# `ok` is a boolean which is False until the frame is tracked.

# close all windows
cv2.destroyAllWindows()
# + slideshow={"slide_type": "subslide"} while True: # this loop will stop once we reached the final frame # Read a new frame ok, frame = video.read() if not ok: break # Start timer timer = cv2.getTickCount() # Update tracker ok, bbox = tracker.update(frame) # bbox is the returned bounding box, you can do all sorts of stuff with it # Calculate Frames per second (FPS) fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer); # Draw bounding box if ok: # Tracking success p1 = (int(bbox[0]), int(bbox[1])) p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])) cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1) # mark the object # cv2.putText(frame, "Object", (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255),2) else: # Tracking failure cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255),2) # Display tracker type on frame cv2.putText(frame, "GOTURN Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50),2); # Display FPS on frame cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2); # Display result cv2.imshow("Tracking", frame) # Exit if ESC pressed k = cv2.waitKey(1) & 0xff if k == 27: break # + slideshow={"slide_type": "subslide"} # close all windows cv2.destroyAllWindows() # + [markdown] slideshow={"slide_type": "subslide"} # ### <img src="https://img.icons8.com/cotton/64/000000/network.png" style="height:50px;display:inline"> Multi-Domain Convolutional Neural Network Tracker (MDNet) # --- # * MDNet, an **online** video object tracker, was introduced by <NAME> and <NAME> in their paper titled <a href="https://arxiv.org/abs/1510.07945">โ€œLearning Multi-Domain Convolutional Neural Networks for Visual Trackingโ€</a>. # * Winner of VOT2015 challenge. # * Because training a deep neural network is computationally expensive, small networks are used for training around deployment time. 
# * In the $k^{th}$ iteration, the network is updated based on a minibatch that consists of the training samples from the $(k \bmod K)^{th}$ sequence, where only a single branch `fc6(k mod K)` is enabled.
# * Converting the in-between frames into an mp4 video
# * Deep SORT, an **online** multiple object tracker which is an extension of the SORT algorithm, was introduced by <NAME>, <NAME> and <NAME> in their paper titled <a href="https://arxiv.org/abs/1703.07402">&ldquo;Simple Online and Realtime Tracking with a Deep Association Metric&rdquo;</a>.
# * $u$ and $v$ represent the horizontal and vertical pixel location of the centre of the target, while $s$ and $r$ represent the scale (area) and the aspect ratio of the target&rsquo;s bounding box respectively.
# * If an object reappears, tracking will implicitly resume under a new identity. # + [markdown] slideshow={"slide_type": "subslide"} # * So where is the *deep learning* in all of that? # * Despite the effectiveness of Kalman filter, it fails in many of the real world scenarios, like occlusions, different view points etc. # * **Deep SORT** replaced the CNNs with YOLO (You Only Look Once) deep object detector and introduced another distance metric based on the *โ€œappearanceโ€* of the object. # * The idea is to obtain a vector that can describe all the features of a given image (here, the crop of the object is used). # * First, build a classifier over the dataset, train it till it achieves a reasonably good accuracy, and then strip the final classification layer. # * Assuming a classical architecture, we will be left with a dense layer producing a single feature vector, waiting to be classified. # * That feature vector becomes the โ€œappearance descriptorโ€ of the object. # * The loss of appearance vectors is added to the loss of the original SORT. # * <a href="https://github.com/ZQPei/deep_sort_pytorch">PyTorch Code (with YOLO)</a> # + [markdown] slideshow={"slide_type": "slide"} # ### <img src="https://img.icons8.com/bubbles/50/000000/video-playlist.png" style="height:50px;display:inline"> Recommended Videos # --- # #### <img src="https://img.icons8.com/cute-clipart/64/000000/warning-shield.png" style="height:30px;display:inline"> Warning! # * These videos do not replace the lectures and tutorials. # * Please use these to get a better understanding of the material, and not as an alternative to the written material. 
# # #### Video By Subject # * Deep Video Object Tracking - <a href="https://www.youtube.com/watch?v=CYW6T2Q24z0">Deep Video Object Tracking - <NAME> - UPC Barcelona 2019</a> # * GOTURN - <a href="https://www.youtube.com/watch?v=SygkiWNSkWk">GOTURN : Deep Learning based Object Tracker </a> # * GOTURN - <a href="https://www.youtube.com/watch?v=kMhwXnLgT_I"> GOTURN - a neural network tracker</a> # * MDNet - <a href="https://www.youtube.com/watch?v=zYM7G5qd090">Learning Multi-Domain Convolutional Neural Networks for Visual Tracking (MDNet)</a> # + [markdown] slideshow={"slide_type": "skip"} # ## <img src="https://img.icons8.com/dusk/64/000000/prize.png" style="height:50px;display:inline"> Credits # --- # # * EE 046746 Spring 21 - <a href="https://taldatech.github.io/"><NAME></a> # * <a href="https://cv-tricks.com/object-tracking/quick-guide-mdnet-goturn-rolo/">Zero to Hero: A Quick Guide to Object Tracking: MDNET, GOTURN, ROLO - <NAME></a> # * <a href="https://www.imageannotation.ai/blog/object-tracking-in-videos">Object Tracking In Videos</a> # * <a href="https://www.learnopencv.com/goturn-deep-learning-based-object-tracking/">GOTURN : Deep Learning based Object Tracking</a> # * <a href="https://nanonets.com/blog/object-tracking-deepsort/#deep-sort">DeepSORT: Deep Learning to Track Custom Objects in a Video</a> # * Icons from <a href="https://icons8.com/">Icon8.com</a> - https://icons8.com
ee046746_tut_07_deep_object_tracking.ipynb
# &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;$splitInv:=splitInv+\underbrace{( \frac{n}{2}-i+1)}_\text{# left in C}$
algorithms/week01/merge-and-countSplitInv.ipynb
def f(X, X4, q):
    """Right-hand side for odeint, with X4 as the independent variable.

    `X` carries the current (X12, X16) pair; `derivatives_func` expects its
    arguments in the order (X4, X12, X16, q), so unpack and reorder here.
    """
    x12, x16 = X
    return derivatives_func(X4, x12, x16, q)
matplotlib.pyplot as plt X4 = np.linspace(1, 0, 1000) q_list = np.logspace(-3, np.log10(2), 500) results = [] # fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(10, 8)) # ax1.set_xlim(0, 1) # ax2.set_xlim(0, 1) # ax1.set_ylim(0, 1) # ax2.set_ylim(0, 1) for q in q_list: X = odeint(f, [0, 0], X4, args=(q,)) X12, X16 = X.T # ax1.plot(X4, X12, label=f"q: {q:.1f}") # ax2.plot(X4, X16, label=f"q: {q:.1f}") # ax2.set_xlabel("X4") # ax1.set_ylabel("X12") # ax2.set_ylabel("X16") # plt.plot(X4, X16) # plt.legend() results.append(X[-1]) results = np.array(results) X12, X16 = results.T plt.figure(figsize=(10, 10)) plt.plot(q_list, X12, label="X12") plt.plot(q_list, X16, label="X16") plt.xlabel("q") plt.xscale("log") plt.ylabel("X") plt.legend(loc='best') plt.xlim(q_list.min(), q_list.max()); plt.grid() plt.savefig("Reacts.png")
NuclearReactsOnly.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/Jason-Adam/msds-462/blob/master/fashion_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="6pa9_rrL5TRK" # # MSDS-462 Week 1: Fashion MNIST Classification # Objective: Build a classification model on the Kaggle Fashion MNIST Data Set # + [markdown] colab_type="text" id="c-nkiOUVtu_s" # # Initial Imports & Data Ingest # + [markdown] colab_type="text" id="i_9U1mvK_7YE" # ## Initial Imports # + colab={} colab_type="code" id="TrTJZEPO_9pZ" import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow.keras import layers # + [markdown] colab_type="text" id="b95XwBvK7uy-" # ## Wire Up Gdrive # + colab={"base_uri": "https://localhost:8080/", "height": 124} colab_type="code" id="BBH2gQVE8Bf-" outputId="807a3e53-1ad2-486b-d9b7-5451e3007fb5" # Mount Gdrive from google.colab import drive drive.mount("/content/gdrive") # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="xMiuyTvA9u55" outputId="bcad5ac4-1ef8-45a0-b2ee-c6819b638bf0" # !ls /content/gdrive/My\ Drive/data/fashion_mnist/ # + [markdown] colab_type="text" id="u9dUlufH_KIu" # ## Data Ingest # + colab={} colab_type="code" id="6YcN48Oe_oy7" train_df = pd.read_csv("/content/gdrive/My Drive/data/fashion_mnist/fashion-mnist_train.csv") test_df = pd.read_csv("/content/gdrive/My Drive/data/fashion_mnist/fashion-mnist_test.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="OYkGUMs1ApJZ" 
# + colab={} colab_type="code" id="65f67nKGEkN9"
# Fashion-MNIST images are 28x28 grayscale; Conv2D expects an explicit
# single channel dimension, hence input shape (28, 28, 1).
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)

# Separate dependent and independent vars
# (column 0 of the CSV is the class label, columns 1: are the 784 pixel values)
X = np.array(train_df.iloc[:, 1:])
y = tf.keras.utils.to_categorical(np.array(train_df.iloc[:, 0]))  # one-hot encode labels

# Split out validation set (80/20 split, fixed seed for reproducibility)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=13)

#Test data
X_test = np.array(test_df.iloc[:, 1:])
y_test = tf.keras.utils.to_categorical(np.array(test_df.iloc[:, 0]))

# Reshape flat 784-vectors into (N, 28, 28, 1) image tensors for the CNN
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)

# Convert to float
# NOTE(review): pixel values are cast to float32 but never scaled to [0, 1];
# presumably intentional, but normalizing usually helps training -- confirm.
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_val = X_val.astype('float32')
outputId="4f5e0a06-0595-496f-d655-37b82854fd43" # !pip list | grep pandas # + colab={} colab_type="code" id="J2WE4-BuwvrO" # !pip install pandas==0.25.1 # + colab={"base_uri": "https://localhost:8080/", "height": 394} colab_type="code" id="nPe7e7Ahu8MJ" outputId="90569267-9bf8-4ce4-b79a-db4544c51e28" train_df.groupby(["label"]).agg( { "label": "count" } ) # + [markdown] colab_type="text" id="281qJbWDfYnH" # # Modeling # + [markdown] colab_type="text" id="AXuMUVUAxqj9" # ## First Model: 2 Convolutional Layers # + colab={"base_uri": "https://localhost:8080/", "height": 89} colab_type="code" id="caLBy9s-gmc_" outputId="aea2ccbe-4c1a-457a-a5ef-fb7777f9d291" # Model Params BATCH_SIZE = 256 NUM_CLASSES = 10 EPOCHS = 50 # Instantiate model object model = tf.keras.models.Sequential() # First Convolutional Layer model.add(layers.Conv2D( filters=32, kernel_size=(3, 3), activation="relu", input_shape=input_shape) ) model.add(layers.MaxPool2D()) model.add(layers.Dropout(0.2)) # Second Conv Layer model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu")) model.add(layers.MaxPool2D()) model.add(layers.Dropout(0.2)) # Flatten model.add(layers.Flatten()) model.add(layers.Dense(64, activation="relu")) model.add(layers.Dense(NUM_CLASSES, activation="softmax")) # Compile model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.RMSprop(), metrics=["accuracy"] ) # + colab={"base_uri": "https://localhost:8080/", "height": 469} colab_type="code" id="YqlyMMz_l57l" outputId="e66d0333-4074-4d49-8445-ddcdac37f12a" model.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="2_Oo_lwYmAIJ" outputId="2627dfaf-4012-49a7-a0f4-3056d182d65e" # Fit model m1_history = model.fit( x=X_train, y=y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(X_val, y_val) ) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="sgpiFIudmapg" 
def plot_accuracy(history):
    """Plot training vs. validation accuracy, then loss, from a Keras fit history.

    Parameters
    ----------
    history : tf.keras.callbacks.History
        The object returned by ``model.fit``; its ``.history`` dict must hold
        per-epoch "loss"/"val_loss" and an accuracy metric.

    Depending on the TensorFlow/Keras version, ``metrics=["accuracy"]`` records
    the metric under "acc"/"val_acc" (TF 1.x) or "accuracy"/"val_accuracy"
    (TF 2.x), so resolve the key at runtime instead of hard-coding "acc".
    """
    hist = history.history
    acc_key = "acc" if "acc" in hist else "accuracy"
    val_acc_key = "val_" + acc_key
    epochs = range(len(hist[acc_key]))

    # Accuracy
    plt.plot(epochs, hist[acc_key], "bo", label="Training Accuracy")
    plt.plot(epochs, hist[val_acc_key], "g", label="Validation Accuracy")
    plt.title("Training vs Validation Accuracy")
    plt.legend()

    # Loss (separate figure so the two y-scales don't fight each other)
    plt.figure()
    plt.plot(epochs, hist["loss"], "bo", label="Training Loss")
    plt.plot(epochs, hist["val_loss"], "g", label="Validation Loss")
    plt.title("Training vs Validation Loss")
    plt.legend()
    plt.show()
# + [markdown] colab_type="text" id="-XcebQl3r44Q" # ## Second Model: 3 Convolutional Layers # + colab={} colab_type="code" id="dt24YXzHr9Xq" # model_2 Params BATCH_SIZE = 256 NUM_CLASSES = 10 EPOCHS = 20 # Instantiate model_2 object model_2 = tf.keras.models.Sequential() # First Convolutional Layer model_2.add(layers.Conv2D( filters=32, kernel_size=(3, 3), activation="relu", input_shape=input_shape) ) model_2.add(layers.MaxPool2D()) model_2.add(layers.Dropout(0.2)) # Second Conv Layer model_2.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu")) model_2.add(layers.MaxPool2D()) model_2.add(layers.Dropout(0.2)) # Second Conv Layer model_2.add(layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu")) model_2.add(layers.MaxPool2D()) model_2.add(layers.Dropout(0.2)) # Flatten model_2.add(layers.Flatten()) model_2.add(layers.Dense(128, activation="relu")) model_2.add(layers.Dense(64, activation="relu")) model_2.add(layers.Dense(NUM_CLASSES, activation="softmax")) # Compile model_2.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.RMSprop(), metrics=["accuracy"] ) # + colab={"base_uri": "https://localhost:8080/", "height": 729} colab_type="code" id="CtFw7vuMsz38" outputId="a7ecffb4-d042-4358-b645-8d261e01f0d9" # Fit model m2_history = model_2.fit( x=X_train, y=y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(X_val, y_val) ) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="LyuMjrXitCr-" outputId="5d69c54e-4bfc-42bc-8626-e9da68893529" m2_score = model_2.evaluate(X_test, y_test) # + colab={"base_uri": "https://localhost:8080/", "height": 545} colab_type="code" id="C3VuJhKltZVT" outputId="7093c2d8-71cb-4d0d-dffc-3c50c854dc9e" plot_accuracy(m2_history) # + [markdown] colab_type="text" id="ipb03xabtcX3" # # Conclusions / Recommendations # Overall, a very simple model architecture can produce accurate predictions of various clothing items. 
I would recommend to continue exploring model architecture in order to increase the accuracy without sacrificing an extensive amount of training time. It was evident that a relatively small number of epochs is needed to reach a plateau of sorts.
fashion_mnist.ipynb
# This notebook has all of the numbers and plots I used in the writeup. I tried to keep it reasonably organized but I was also a bit lazy, so some code might be sloppy and hard to follow... sorry.
# %load_ext rpy2.ipython # install R packages with conda: conda install -c r r-ggplot2 # + # %%capture # %%R library(ggplot2) library(dplyr) library(readr) library(tidyr) df <- read_csv("./guest_list.csv") %>% # these same filters are applied to the pandas dataframe filter(!is.na(video_id), guest != "holiday special", guest != "<NAME>") df$season <- factor(df$season) df$female_flag <- factor(df$female_flag) # - # # Chrissy Teigen examples # load comments file video_id = guest_df.loc[guest_df['guest'] == '<NAME>', 'video_id'].values[0] comment_file = os.path.join(utils.comment_dir, f'comments-{video_id}.json') comments = [c['commentText'] for c in json.load(open(comment_file, 'r')) if 'commentText' in c] def get_scores(text): sent = sia.polarity_scores(text)['compound'] analyze_request = { 'comment': {'text': text}, 'requestedAttributes': {'TOXICITY': {}, 'SEVERE_TOXICITY': {}}, 'languages': ['en'] } response = api.comments().analyze(body=analyze_request).execute() tox_score = response['attributeScores']['TOXICITY']['summaryScore']['value'] sev_tox_score = response['attributeScores']['SEVERE_TOXICITY']['summaryScore']['value'] out = f'\nsentiment score: {sent}' out += f'\ntoxicity: {tox_score}' out += f'\nsevere toxicity: {sev_tox_score}' return out c1 = comments[2288] print(c1) print(get_scores(c1)) c2 = comments[232] print(c2) print(get_scores(c2)) c3 = comments[6042] print(c3) print(get_scores(c3)) # # sentiment analysis # I'm using a metric that I'm calling positive ratio, defined as # # $$ positive\_ratio = \frac{\text{# of positive comments}}{\text{# of negative comments}} $$ # # where positive comments have sentiment scores greater than 0 and negative comments have scores less than 0. As an example, a video with twice as many positive comments as negative would have a positive ratio of 2. # # The benefit to using positive ratio is that it removes the effect of neutral comments. 
Many comments in the dataset have sentiment scores of exactly 0, and all of those 0 values can dilute a metric like average sentiment score. # # The downside to using positive ratio is that it ignores the magnitude of sentiment scores. For example, a comment with sentiment score 0.1 has the same effect on positive ratio as a comment with sentiment score 0.9. # # Due to the large number of neutral comments, I decided that positive ratio was the most appropriate metric for this dataset. In any case, the results are pretty similar with any metric. # + magic_args="-w 700 -h 350" language="R" # # p <- df %>% # mutate(female_flag = if_else(female_flag == 0, ' male', ' female')) %>% # filter(season != 1) %>% # ggplot() + # geom_density(aes(x=positive_ratio, color=female_flag, fill=female_flag), alpha=0.2) + # labs(title="Positive Ratio for Female vs. Male Guests", x="positive ratio") + # expand_limits(x=0) + # theme_light(base_size=14) + # theme(plot.title=element_text(hjust = 0.5), legend.title=element_blank()) # # ggsave(filename='./visualizations/positive_ratio_by_male_female.png', plot=p) # # p # + df = guest_df[guest_df['season'] != 1] f_vals = df[df['female_flag'] == 1]['positive_ratio'] m_vals = df[df['female_flag'] == 0]['positive_ratio'] print(f'female guest positive ratio: {round(f_vals.mean(), 3)}') print(f'male guest positive ratio : {round(m_vals.mean(), 3)}') # - ttest_ind(m_vals, f_vals) # # toxicity scores # + magic_args="-w 700 -h 350" language="R" # # p <- df %>% # mutate(female_flag = if_else(female_flag == 0, ' male', ' female')) %>% # filter(season != 1) %>% # rename(toxicity = mean_toxicity, # `severe toxicity` = mean_severe_toxicity) %>% # gather("metric", "value", c("toxicity", "severe toxicity")) %>% # mutate(metric = factor(metric, levels=c("toxicity", "severe toxicity"))) %>% # ggplot() + # geom_density(aes(x=value, color=female_flag, fill=female_flag), alpha=0.2) + # labs(title="Perspective Toxicity Scores for Female vs. 
Male Guests", x="score") + # expand_limits(x=0) + # expand_limits(x=0.5) + # facet_grid(. ~ metric) + # theme_light(base_size=14) + # theme(plot.title=element_text(hjust = 0.5), legend.title=element_blank()) # # ggsave(filename='./visualizations/toxicity_scores_by_male_female.png', plot=p) # # p # + df = guest_df[guest_df['season'] != 1] f_tox = df[df['female_flag'] == 1]['mean_toxicity'] m_tox = df[df['female_flag'] == 0]['mean_toxicity'] f_sev_tox = df[df['female_flag'] == 1]['mean_severe_toxicity'] m_sev_tox = df[df['female_flag'] == 0]['mean_severe_toxicity'] print(f'female guest average toxicity: {round(f_tox.mean(), 3)}') print(f'male guest average toxicity : {round(m_tox.mean(), 3)}') print(f'female guest average severe toxicity: {round(f_sev_tox.mean(), 3)}') print(f'male guest average severe toxicity : {round(m_sev_tox.mean(), 3)}') # - ttest_ind(m_tox, f_tox) ttest_ind(m_sev_tox, f_sev_tox) # ### why exclude season 1 from sentiment analysis? # Far lower average sentiment score than later seasons. Show was still finding its stride and had some structural and aesthetic differences from later seasons. Some major outliers (especially the infamous DJ Khaled episode). 
# + magic_args="-w 700 -h 350" language="R" # # p <- df %>% # ggplot() + # geom_density(aes(x=positive_ratio, color=season, fill=season), alpha=0.1) + # labs(title="Positive Ratio by Season", x="positive ratio") + # expand_limits(x=0) + # theme_light(base_size=14) + # theme(plot.title=element_text(hjust = 0.5)) # # ggsave(filename='./visualizations/positive_ratio_by_season.png', plot=p) # # p # - # # word usage pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.max_colwidth', 50) word_df = pickle.load(open('./data/gender_analysis_bigram.pickle', 'rb')) f_top_100 = word_df[['token', 'z_score']].rename(columns={'token': 'female'}).head(100) m_top_100 = word_df[['token', 'z_score']].rename(columns={'token': 'male'}).sort_index(ascending=False).reset_index(drop=True).head(100) f_top_100.join(m_top_100, lsuffix='_f', rsuffix='_m')
data_for_writeup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Identifying and Extracting Longitudinal Variables using R PIC-SURE API # This tutorial notebook will demonstrate how to identify and extract longitudinal variables using the R PIC-SURE API. Longitudinal variables are defined as containing multiple 'Exam' or 'Visit' descriptions within their concept path. # # In this example, we will find the patient level data for a lipid-related longitudinal variable within the Framingham Heart study. We will: # 1. Identify what longitudinal variables are associated with the keywords of interest (lipid, triglyceride), and how many exams / visits are associated with each one # 2. Select a longitudinal variable of interest from a specific study (Framingham heart study) # 3. Extract patient level data into a dataframe where each rows represent patients and columns represent visits # For a more basic introduction to the R PIC-SURE API, see the `1_PICSURE_API_101.ipynb` notebook. # # **Before running this notebook, please be sure to get a user-specific security token. 
For more information about how to proceed, see the "Get your security token" instructions in the [README.md](https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NHLBI_BioData_Catalyst#get-your-security-token).** # ## Environment Set-Up # ### System Requirements # R >= 3.4 # ### Install Packages source("R_lib/requirements.R") # Install latest R PIC-SURE API libraries from github Sys.setenv(TAR = "/bin/tar") options(unzip = "internal") install.packages("https://cran.r-project.org/src/contrib/Archive/devtools/devtools_1.13.6.tar.gz", repos=NULL, type="source") install.packages("https://cran.r-project.org/src/contrib/R6_2.5.1.tar.gz", repos=NULL, type="source") install.packages("https://cran.r-project.org/src/contrib/hash_2.2.6.1.tar.gz", repos=NULL, type="source") install.packages(c("urltools"),repos = "http://cran.us.r-project.org") devtools::install_github("hms-dbmi/pic-sure-r-client", force=T) devtools::install_github("hms-dbmi/pic-sure-r-adapter-hpds", force=T) devtools::install_github("hms-dbmi/pic-sure-biodatacatalyst-r-adapter-hpds", force=T) # Load user-defined functions source("R_lib/utils.R") # ## Connecting to a PIC-SURE Network # **Again, before running this notebook, please be sure to get a user-specific security token. 
For more information about how to proceed, see the "Get your security token" instructions in the [README.md](https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NHLBI_BioData_Catalyst#get-your-security-token).** PICSURE_network_URL <- "https://picsure.biodatacatalyst.nhlbi.nih.gov/picsure" resource_id <- "02e23f52-f354-4e8b-992c-d37c8b9ba140" token_file <- "token.txt" token <- scan(token_file, what = "character") myconnection <- picsure::connect(url = PICSURE_network_URL, token = token) resource <- bdc::get.resource(myconnection, resourceUUID = resource_id) # ## Longitudinal Lipid Variable Example # Example showing how to extract lipid measurements from multiple visits for different cohorts # # ### Access the data # First, we will create multiIndex variable dictionaries of all variables that contain 'lipid' or 'triglyceride'. We will then combine these multiIndex variable dictionaries into `lipid_vars`. # + lipid_varDict <- bdc::find.in.dictionary(resource, 'lipid') %>% bdc::extract.entries() triglyceride_varDict <- bdc::find.in.dictionary(resource, 'triglyceride') %>% bdc::extract.entries() lipid_multiindex <- get_multiIndex_variablesDict(lipid_varDict) triglyceride_multiindex <- get_multiIndex_variablesDict(triglyceride_varDict) # - lipid_vars <- rbind(lipid_multiindex, triglyceride_multiindex) lipid_vars # ### Identify the longitudinal lipid variables # This block of code does the following: # - uses the multiindex dataframe containing variables which are related to 'lipid' or 'triglyceride' # - filters for variables with keywords 'exam #' or 'visit #' # - extracts the exam number of each variable into column `exam_number` # - groups variables by study (`level_0`) and longitudinal variable (`longvar`) # - returns a table showing the variables that have more than one exam recorded # + longitudinal_lipid_vars <- lipid_vars %>% # Filter to variables containing exam # or visit # filter((grepl('exam \\d+', name, ignore.case=TRUE) | grepl('visit 
\\d+', name, ignore.case=TRUE))) %>% # Save exam # as exam_number and variable without exam # info as longvar mutate(exam_number = str_extract(name, regex("(exam \\d+)|(visit \\d+)", ignore_case=T)), longvar = tolower(str_replace_all(name, regex('(exam|visit) \\d+', ignore_case = T), 'exam'))) %>% # Group by level_0 (study) and longvar group_by(level_0, longvar) %>% # Count number of exams for each longvar summarise(n_exams = n_distinct(exam_number)) %>% # Find longvars with 2+ exams (longitudinal variables) filter(n_exams > 1) %>% arrange(desc(n_exams)) longitudinal_lipid_vars # - # *Note: Some variables have capitalization differences, which is why* `longvar` *has been changed to lowercase.* # Now that we know which longitudinal variables are available to us, we can choose a variable of interest and extract the patient and visit level data associated with it. # # However, note that the `longvar` we extracted is not equivalent to the actual PIC-SURE concept path needed to query for this variable. # *Now we can filter for specific studies and extract the longitudinal variable names. Note that* `longvar` *is not equivalent to the actual PIC-SURE concept path, we will need to use the original name from* `multiindex`*. You will not be able to use only the table above to get the variables of interest.* # ### Isolate variables of interest # # In this example, we will choose to further investigate the first longitudinal variable in the `longitudinal_lipid_vars` dataframe we generated above. my_variable <- longitudinal_lipid_vars$longvar[1] print(my_variable) # To add the longitudinal variable of interest to our PIC-SURE query, we will need to search for our variable within the overall multiindex data dictionary we created before (`multiindex`) # *Note: There are some variables that have minor text differences. The workaround here is to separate the variable into parts. Here, we separate* `longvar` *where it says "exam" or "visit" into the variable* `keywords`*. 
Then we check to see if each of these parts are in the variable name.* # # *This workaround does not work for every variable, so be sure to double check that you are selecting all longitudinal variables of interest.* # + # Getting rid of punctuation that gives R trouble fixed_my_variable <- str_replace_all(my_variable, '[[:punct:]]', '') # Split the fixed_my_variable into separate strings wherever 'exam' or 'visit' is keywords <- unlist(strsplit(fixed_my_variable, c('exam','visit'))) keywords # - # Filter the lipid_vars to get query variables query_vars <- lipid_vars %>% mutate(new_name = tolower(str_replace_all(name, '[[:punct:]]', '')), # Get rid of punctuation from concept path and make lowercase test_val = sapply(keywords, # For each string in keywords, grepl, # see if it is in... new_name, # the concept path ignore.case=TRUE), other = apply(test_val, 1, sum)) %>% # Count the number of "TRUE", or times that theres a keywords & new_name match filter(other == length(keywords)) %>% # Keep only rows where all keywords matched new_name pull(name) # Return only full concept paths query_vars # The resulting `query_vars` variable contains the variables we will want to add to our query. # ### Create & run query # First, we will create a new query object. my_query <- bdc::new.query(resource = resource) # We will use the `bdc::query.anyof.add()` method. This will allow us to include all input variables, but only patient records that contain at least one non-null value for those variables in the output. See the `1_PICSURE_API_101.ipynb` notebook for a more in depth explanation of query methods. bdc::query.anyof.add(query = my_query, keys = lapply(query_vars, as.character)) # #### Update consent codes if necessary # Uncomment this code below and run as necessary to restrict your query to certain consent codes. # In the current example, the query is restricted to the 'phs000179.c2' consent code. 
# + # Delete current consents #bdc::query.filter.delete(query = my_query, # keys = "\\_consents\\") # Add in consents #bdc::query.filter.add(query = my_query, # keys = "\\_consents\\", # as.list(c("phs000179.c2"))) # - # We can now run our query: my_df <- bdc::query.run(my_query, result.type = "dataframe") # Our dataframe contains each exam / visit for the longitudinal variable of interest, with each row representing a patient. In order to be included in the output, each patient must have at least one reported value for one of the exams / visits for the variable of interest my_df
NHLBI_BioData_Catalyst/R/5_LongitudinalData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `cdotu(N, CX, INCX, CY, INCY)` # # Computes the dot product of a vector $\mathbf{x}$ and a vector $\mathbf{y}$. # # Operates on single-precision complex valued arrays. # # Input vector $\mathbf{x}$ is represented as a [strided array](../strided_arrays.ipynb) `CX`, spaced by `INCX`. # Input vector $\mathbf{y}$ is represented as a [strided array](../strided_arrays.ipynb) `CY`, spaced by `INCY`. # Both $\mathbf{x}$ and $\mathbf{y}$ are of size `N`. # ### Example usage # + jupyter={"source_hidden": true} import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(''), "..", ".."))) # - import numpy as np from pyblas.level1 import cdotu x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64) y = np.array([6+7j, 7+8j, 8+9j], dtype=np.complex64) N = len(x) incx = 1 incy = 1 cdotu(N, x, incx, y, incy) # ### Docstring # + jupyter={"source_hidden": true} help(cdotu) # - # ### Source code # + jupyter={"source_hidden": true} # cdotu??
docs/level1/cdotu.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Perform QC for the 62 primer pairs designed: # # - create degenerate primer sequences to assert coordinate and sequence ambiguity correctness # - get alignment statistics for internal part of product # - blast versus outgroup dipterans # + # read_table used by pybedtools import warnings warnings.simplefilter(action='ignore', category=FutureWarning) import os import re import sys import pandas as pd import numpy as np import pybedtools from Bio import AlignIO from Bio.Seq import Seq from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor from Bio.Blast.Applications import NcbiblastnCommandline from Bio.Blast import NCBIXML from collections import defaultdict # - AMPL_FILE = 'data/20180927_62_markers.xlsx' ANN_FILE = 'data/20180927_62_markers_ann.csv' # copied to ../../data/panel_info.csv ALN_FILE = 'data/20180927_62_markers.maf' DATA_DIR = "../../../data/" CHR_FILE = DATA_DIR + "comparative/AgamP3_C21/chr{}.maf" GENES_BED = pybedtools.BedTool(DATA_DIR + "genome/AgamP3/Anopheles-gambiae-PEST_BASEFEATURES_AgamP3.8.gff3") REPEATS_BED = pybedtools.BedTool(DATA_DIR + "genome/AgamP3/Anopheles-gambiae-PEST_REPEATFEATURES_AgamP3.gff3") COMB_DB = DATA_DIR + 'genome/AgAaCpDm' # ## Import amplicon data ampl_data = pd.read_excel(AMPL_FILE) ampl_data.head() # sort amplicons by chromosome and start ampl_data = ampl_data.sort_values(by=['chr','start']).reset_index(drop=True) ampl_data.head(10) # ## Extract alignments for amplicons def ann_to_aln_coord(aln, ann_coord, ref=0): ''' Transforms annotation coordinate into maf alignment position accounting for number of gaps in ref (i-th seq in alignment) ''' gapped_coord = ann_coord - aln[ref].annotations['start'] out_coord = 0 i = 0 while i < gapped_coord: if aln[ref][out_coord] != '-': i 
+= 1 out_coord += 1 return out_coord def get_subaln(alns, intervals): """ Given maf file with alignments and pandas iterrows object containing sorted starts and ends, yield list of subalignments matching these coordinates """ def se_from_row(row): return (row[1]['start'], row[1]['end']) subalns = [] (start, end) = se_from_row(next(intervals)) alns_iter = AlignIO.parse(alns, "maf") alignment = next(alns_iter) while True: # quickly skip alignments until end coordinate falls within alignment - assume slignments coordinate-sorted if alignment[0].annotations['start'] + alignment.get_alignment_length() < end: alignment = next(alns_iter) continue # more thorough check accounting for gaps in alignment if alignment[0].annotations['start'] \ + alignment.get_alignment_length() \ - (alignment[0].seq).count('-') < end: alignment = next(alns_iter) continue try: #print(alignment[0].annotations, str(alignment[0].seq)) in_start = ann_to_aln_coord(alignment, start) in_end = ann_to_aln_coord(alignment, end) except: raise ValueError('Coordniate error!\nSequence: {}\nAnnotation start: {}\n' 'Start: {}\nEnd: '.format(alignment[0].seq, alignment[0].annotations['start'], start, end)) subalns.append(alignment[:, in_start:in_end]) try: (start, end) = se_from_row(next(intervals)) except: return subalns if not os.path.isfile(ALN_FILE): x = get_subaln(CHR_FILE.format('2R'), ampl_data.iloc[21:24].iterrows()) print(x) # LONG RUN requires reading all mafs # extract alignments from amplicons if not os.path.isfile(ALN_FILE): ampl_alns = [] for seqid in ('2L', '2R', '3L', '3R', 'X'): ampl_alns.extend(get_subaln(CHR_FILE.format(seqid), ampl_data[ampl_data.chr==seqid].iterrows())) len(ampl_alns) else: ampl_alns = list(AlignIO.parse(ALN_FILE, "maf")) # write amplicons if not os.path.isfile(ALN_FILE): count = 0 with open(ALN_FILE, "w") as handle: for a in ampl_alns: count += AlignIO.write(a, handle, "maf") count # ## Re-create degenerate primers # + def get_primer(aln, primer, reverse=False, min_alts=2, 
debug=False): ''' Locate primer in first sequence in the alignment, (at start for F, at end for R) return primer (based on input primer length) with ambiguities coded as [ref/alt/...]. Parameters: aln - input alignment in MAF format primer - primer sequence used to regulate output primer length reverse - is primer located on reverse strand min_alts - minimum number of genomes with alt alleles to be treated as ambiguous. Positions with lower number of alts are converted to lowercase. ''' primer = primer.replace('-','') if debug: print(aln[0].seq, primer) seq = '' # get primer lengtb plen = len(primer) i = 0 if reverse: pos = aln.get_alignment_length() - 1 while i < plen: if aln[0][pos] != '-': i += 1 col = [str(Seq(nt).reverse_complement()) for nt in aln[:, pos]] ref = col[0] alt = set(col) alt.remove(ref) if len(alt) == 0: # no alts seq += ref elif len(alt) == 1 and col.count(''.join(alt)) < min_alts: # single occurence of alt in alignment seq += ref.lower() else: #print(''.join(col)) seq += '[{}/{}]'.format(ref, '/'.join(alt)) pos -= 1 # forward strand primer else: pos = 0 while i < plen: if aln[0][pos] != '-': i += 1 col = aln[:, pos] ref = col[0] alt = set(col) alt.remove(ref) if len(set(col)) == 1: seq += ref elif len(alt) == 1 and col.count(''.join(alt)) < min_alts: # single occurence of alt in alignment seq += ref.lower() else: alt = set(col) alt.remove(ref) seq += '[{}/{}]'.format(ref, '/'.join(alt)) pos += 1 # reverse return seq print(get_primer(ampl_alns[0], 'TGTSTACGGTCTGAAGAACATc', debug=True)) print(get_primer(ampl_alns[0], 'TTATCCGGCTCCAAGTTAAGG', reverse=True, debug=True)) # - ampl_data['F_deg'] = np.vectorize(get_primer)(ampl_alns, ampl_data['F']) ampl_data['R_deg'] = np.vectorize(get_primer)(ampl_alns, ampl_data['R'], reverse=True) ampl_data # ## Alignment statistics def identical_clusters(aln, fp, rp): ''' Given alignment, forward and reverse primers, for insert located between primers, return list of sets with species IDs with identical sequences 
and number of discriminated lineages''' ins = aln[:, len(fp.replace('-','')):-len(rp.replace('-',''))] ids = [set()] dm = DistanceCalculator('identity').get_distance(ins) dm.names = [n.split('.')[0] for n in dm.names] for i in range(len(dm)): for j in range(i + 1, len(dm)): if dm[i,j] == 0: n1 = dm.names[i] n2 = dm.names[j] for cl in ids: if (n1 in cl): if (n2 in cl): break if (n2 not in cl): cl.add(n2) break else: ids.append(set((n1, n2))) id_clusters = ids[1:] discrim = len(dm) - sum([len(cl)-1 for cl in id_clusters]) return (id_clusters, discrim) identical_clusters(ampl_alns[0], 'TGTSTACGGTCTGAAGAACATc', 'TTATCCGGCTCCAAGTTAAGG') ampl_data['identical_seq'],ampl_data['idenified_lineages'] = np.vectorize(identical_clusters)(ampl_alns, ampl_data['F'], ampl_data['R']) ampl_data # + ALL_SP = ['AgamP3', 'AgamS1', 'AgamM1', 'AmerM1', 'AaraD1', 'AquaS1', 'AmelC1', 'AchrA1', 'AepiE1', 'AminM1', 'AculA1', 'AfunF1', 'AsteS1', 'AsteI2', 'AmacM1', 'AfarF1', 'AdirW1', 'AsinS1', 'AatrE1', 'AdarC2', 'AalbS1'] def seq_repr(alignment): ''' Given multiple sequence alignment, return first sequence with Ns for ambiguous chars and X's for indels.''' seq = '' for i in range(alignment.get_alignment_length()): col = alignment[:, i] if '-' in col: # indel has higher priority than substitution seq += 'X' elif len(set(col)) == 1: seq += col[0] else: seq += 'N' return seq def get_aln_stats(aln, fp, rp): ins = aln[:, len(fp.replace('-','')):-len(rp.replace('-',''))] aln_seq = seq_repr(aln) ins_seq = seq_repr(ins) aln_sp = [seq.id.split('.')[0] for seq in aln] unaln_sp = ';'.join([sp for sp in ALL_SP if sp not in aln_sp]) return (len(aln), # number of aligned species unaln_sp, # unaligned species IDs aln.get_alignment_length(), len(str(aln[0].seq).replace('-','')), # agam amplicon length ins.get_alignment_length(), len(str(ins[0].seq).replace('-','')), # agam insert length aln_seq.count('N'), aln_seq.count('X'), ins_seq.count('N'), ins_seq.count('X'), ) get_aln_stats(ampl_alns[2], 
'CAGTCAAATTTCCAGACAATCT', 'CGGAAGTGCATTTGAAGG-AAaA') # - (ampl_data['aligned_spp'], ampl_data['unaligned_spp'], ampl_data['aligned_ampl_length'], ampl_data['agam_ampl_length'], ampl_data['aligned_insert_length'], ampl_data['agam_insert_length'], ampl_data['total_snvs'], ampl_data['total_indels'], ampl_data['insert_snvs'], ampl_data['insert_indels']) = np.vectorize(get_aln_stats)(ampl_alns, ampl_data['F'], ampl_data['R']) ampl_data.head() ampl_data['prop_id_lineages'] = ampl_data['idenified_lineages']/ampl_data['aligned_spp'] ampl_data.head() # ## Gene annotation # create list of BED intervals for amplicons amplicon_beds = ampl_data[['chr', 'start', 'end']].to_string(header=False, index=False).split('\n') amplicon_beds[0] # + def bt_to_df(bt): ''' Convert bedtool to pandas dataframe replacing empty files with None''' if len(bt) > 0: return bt.to_dataframe() else: return None def annotate_interval(bed_str, genes, repeats): ''' Annotate interval in string format genes and repats annotation tracks ''' def bt_to_df(bt): ''' Convert bedtool to pandas dataframe''' if len(bt) > 0: return bt.to_dataframe() else: return pd.DataFrame() def get_attrs(d, feature, attr_id): ''' From gff dataframe extract list of features by attribute ID Attribute string example for gene feature: ID=AGAP001235;biotype=protein_coding ''' out = [] try: for attr in d[d.feature == feature]['attributes']: for a in attr.split(';'): aa = a.split('=') if aa[0] == attr_id: out.append(aa[1]) if len(out) > 0: return ';'.join(out) except: # no annotations pass return 'None' attr_dict = dict() # intersect a_bed = pybedtools.BedTool(bed_str, from_string=True) ag_gff = genes.intersect(a_bed) ar_gff = repeats.intersect(a_bed) # convert annotations to dataframe ampl_annot = pd.concat([bt_to_df(ag_gff), bt_to_df(ar_gff)]) # convert gff coordinate to BED coordinate ampl_annot['start'] = ampl_annot.start - 1 # generate gene and exon beds gene_bed = ampl_annot.loc[ampl_annot.feature == 'gene', 
['seqname','start','end']].to_string(header=False, index=False) # intergenic, avoid empty bed operation if gene_bed.startswith('Empty'): attr_dict['intron'] = 'No' attr_dict['intergenic'] = 'Yes' else: gene_bed = pybedtools.BedTool(gene_bed, from_string=True) exon_bed = ampl_annot.loc[ampl_annot.feature == 'exon', ['seqname','start','end']].to_string(header=False, index=False) # intron, avoid empty bed operation if exon_bed.startswith('Empty'): attr_dict['intron'] = 'Yes' attr_dict['intergenic'] = 'No' else: exon_bed = pybedtools.BedTool(exon_bed, from_string=True) # generate intergenic bed based on gene bed subtraction from amplicon bed intergenic = bt_to_df(a_bed.subtract(gene_bed)) # generate intronic bed based on exon bed subtraction from amplicon bed intron = bt_to_df(a_bed.subtract(exon_bed)) # all non-exonic sequences are intergenic - exonic terminus only if (intron.shape[0] > 0) and intron.equals(intergenic): attr_dict['intron'] = 'No' attr_dict['intergenic'] = 'Yes' else: attr_dict['intron'] = ('Yes' if intron.shape[0] > 0 else 'No') attr_dict['intergenic'] = ('Yes' if intergenic.shape[0] > 0 else 'No') attr_dict.update({ 'gene': get_attrs(ampl_annot, 'gene', 'ID'), 'mRNA': get_attrs(ampl_annot, 'mRNA', 'ID'), 'exon': get_attrs(ampl_annot, 'exon', 'ID'), 'repeat': get_attrs(ampl_annot, 'repeat', 'Name'), }) # predict utr by name attr_dict['utr'] = ('Yes' if ('utr' in str(ampl_annot['feature'])) else 'No') return attr_dict annotate_interval(amplicon_beds[1], GENES_BED, REPEATS_BED) # - ann_dict = dict() for (i, bed) in enumerate(amplicon_beds): sys.stdout.write('\r{}'.format(i)) ann_dict[i] = annotate_interval(bed, GENES_BED, REPEATS_BED) # add annotation columns print('\nDone!') ampl_data = pd.concat([ampl_data, pd.DataFrame(ann_dict).T], axis=1) ampl_data['repeat'].value_counts() # genes ampl_data['gene'].value_counts() # intron ampl_data['intron'].value_counts() # intergenic ampl_data['intergenic'].value_counts() # exon ampl_data[(ampl_data.intron == 
'No') & (ampl_data.intergenic == 'No')].shape # both intron an intergenic spanned ampl_data[(ampl_data.intron == 'Yes') & (ampl_data.intergenic == 'Yes')] # ## BLAST vs diptera # + # extract agam sequences for markers removing alignment gaps # headers are indices of amplicons agam_fa = 'data/temp_ampl.fasta' with open('data/temp_ampl.fasta', 'w') as o: i = 0 for a in ampl_alns: o.write(">{}\n".format(i)) o.write(str(a[0].seq).replace('-','')) o.write('\n') i += 1 # !head {agam_fa} # - blast_file = 'data/temp_blast.xml' cline = NcbiblastnCommandline(cmd='blastn', out=blast_file, outfmt=5, query=agam_fa, db=COMB_DB, evalue=0.001) print(cline) # execute cline() # read blast output blast_records = list(NCBIXML.parse(open(blast_file))) # http://biopython.org/DIST/docs/api/Bio.Blast.Record.HSP-class.html # str not working because self.bits is not available for some reason # test_hsp = blast_records[1].alignments[0].hsps[0] # print(test_hsp) # print(test_hsp.identities, test_hsp.align_length) blast_records[1].alignments[0].hsps[0] # + # convert blast output bd = dict() species = ('AgamP3', 'CpipJ2', 'AaegL5', 'BDGP6') for record in blast_records: q = record.query bd[q] = defaultdict(list) for aln in record.alignments: #extract species from hit definition (s,c) = [l.split(':')[1:3] for l in aln.hit_def.split(' ') if len(l.split(':')) > 2][0] if s not in species: raise ValueError('Unknown genome ' + s) # if s not in bd[q].keys(): # bd[q][s] = defaultdict(list) for hsp in aln.hsps: if (hsp.num_alignments is not None): raise ValueError('Multiple alignmed fragments per HSP:\n' + str(hsp)) #bd[q][s + '_length'].append(hsp.align_length) bd[q][s + '_identity'].append(hsp.identities / hsp.align_length) bd[q][s + '_q_span'].append('{}-{}'.format(hsp.query_start, hsp.query_end)) bd[q][s + '_s_span'].append('{}:{}-{}'.format(c, hsp.sbjct_start, hsp.sbjct_end)) #bd[q][s + '_expect'].append(hsp.expect) i+=1 #bd[q][s + '_hits'] = len(bd[q][s + '_length']) for (k,v) in bd[q].items(): if 
type(bd[q][k]) is list: bd[q][k] = ';'.join([str(s) for s in v]) bd['0'] # - bd = {int(k): v for k, v in bd.items()} bd[1] comb_meta = pd.concat([ampl_data, pd.DataFrame(bd).T.fillna('')], axis=1) comb_meta # ## Check individual BLAST results def display_primer_matches(idx): """ For marker listed undex idx in annotation dataframe, get primer sequence identities in matching outgroup (not AgamP4) genomes. """ fp = comb_meta.iloc[idx]['F'] rp = comb_meta.iloc[idx]['R'] al = comb_meta.iloc[idx]['agam_ampl_length'] print(blast_records[idx].query) for aln in blast_records[idx].alignments: if 'AgamP3' not in aln.title: print(aln.title.split(':')[2]) a = aln.hsps[0] s = a.query_start - 1 e = a.query_end delta = al - a.align_length print(fp + '.'*(3 + delta) + str(Seq(rp).reverse_complement())) print('.' * s + a.query[:len(fp)] + '...' + a.query[-len(rp):]) print('.' * s + a.match[:len(fp)] + '...' + a.match[-len(rp):]) print('.' * s + a.sbjct[:len(fp)] + '...' + a.sbjct[-len(rp):]) display_primer_matches(22) display_primer_matches(50) comb_meta.columns # ## Write and clean up comb_meta.to_csv(ANN_FILE) # !rm {blast_file} {agam_fa} # ## TODO # - annotation code clean up
work/1_panel_design/20180927_62_marker_qc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Coronagraph Basics # This set of exercises guides the user through a step-by-step process of simulating NIRCam coronagraphic observations of the HR 8799 exoplanetary system. The goal is to familiarize the user with basic `pynrc` classes and functions relevant to coronagraphy. # + # Import the usual libraries import numpy as np import matplotlib import matplotlib.pyplot as plt # Enable inline plotting at lower left # %matplotlib inline from IPython.display import display, Latex, clear_output # + # jedi 0.14.1 tab completion fails; will be fixed in 0.14.2 import jedi if jedi.__version__ == '0.14.1': # %config Completer.use_jedi = False # Progress bar from tqdm.auto import trange, tqdm # - # We will start by first importing `pynrc` along with the `obs_hci` (High Contrast Imaging) class, which lives in the `pynrc.obs_nircam` module. # + import pynrc from pynrc import nrc_utils # Variety of useful functions and classes from pynrc.obs_nircam import obs_hci # High-contrast imaging observation class # Disable informational messages and only include warnings and higher pynrc.setup_logging(level='WARN') # - # ## Source Definitions # # The `obs_hci` class first requires two arguments describing the spectra of the science and reference sources (`sp_sci` and `sp_ref`, respectively. Each argument should be a Pysynphot spectrum already normalized to some known flux. `pynrc` includes built-in functions for generating spectra. The user may use either of these or should feel free to supply their own as long as it meets the requirements. # # 1. 
The `pynrc.stellar_spectrum` function provides the simplest way to define a new spectrum: # ```python # bp_k = pynrc.bp_2mass('k') # Define bandpass to normalize spectrum # sp_sci = pynrc.stellar_spectrum('F0V', 5.24, 'vegamag', bp_k) # ``` # You can also be more specific about the stellar properties with `Teff`, `metallicity`, and `log_g` keywords. # ```python # sp_sci = pynrc.stellar_spectrum('F0V', 5.24, 'vegamag', bp_k, # Teff=7430, metallicity=-0.47, log_g=4.35) # ``` # # 2. Alternatively, the `pynrc.source_spectrum` class ingests spectral information of a given target and generates a model fit to the known photometric SED. Two model routines can be fit. The first is a very simple scale factor that is applied to the input spectrum, while the second takes the input spectrum and adds an IR excess modeled as a modified blackbody function. The user can find the relevant photometric data at http://vizier.u-strasbg.fr/vizier/sed/ and click download data as a VOTable. # + # Define 2MASS Ks bandpass and source information bp_k = pynrc.bp_2mass('k') # Science source, dist, age, sptype, Teff, [Fe/H], log_g, mag, band args_sources = [('HR 8799', 39.0, 30, 'F0V', 7430, -0.47, 4.35, 5.24, bp_k)] # References source, sptype, Teff, [Fe/H], log_g, mag, band ref_sources = [('HD 220657', 'F8III', 5888, -0.01, 3.22, 3.04, bp_k)] # + name_sci, dist_sci, age, spt_sci, Teff_sci, feh_sci, logg_sci, mag_sci, bp_sci = args_sources[0] name_ref, spt_ref, Teff_ref, feh_ref, logg_ref, mag_ref, bp_ref = ref_sources[0] # For the purposes of simplicity, we will use pynrc.stellar_spectrum() sp_sci = pynrc.stellar_spectrum(spt_sci, mag_sci, 'vegamag', bp_sci, Teff=Teff_sci, metallicity=feh_sci, log_g=logg_sci) sp_sci.name = name_sci # And the refernece source sp_ref = pynrc.stellar_spectrum(spt_ref, mag_ref, 'vegamag', bp_ref, Teff=Teff_ref, metallicity=feh_ref, log_g=logg_ref) sp_ref.name = name_ref # + # Plot the two spectra fig, ax = plt.subplots(1,1, figsize=(8,5)) xr = [2.5,5.5] for sp in 
[sp_sci, sp_ref]: w = sp.wave / 1e4 ind = (w>=xr[0]) & (w<=xr[1]) sp.convert('Jy') f = sp.flux / np.interp(4.0, w, sp.flux) ax.semilogy(w[ind], f[ind], lw=1.5, label=sp.name) ax.set_ylabel('Flux (Jy) normalized at 4 $\mu m$') sp.convert('flam') ax.set_xlim(xr) ax.set_xlabel(r'Wavelength ($\mu m$)') ax.set_title('Spectral Sources') # Overplot Filter Bandpass bp = pynrc.read_filter('F444W', 'CIRCLYOT', 'MASK430R') ax2 = ax.twinx() ax2.plot(bp.wave/1e4, bp.throughput, color='C2', label=bp.name+' Bandpass') ax2.set_ylim([0,0.8]) ax2.set_xlim(xr) ax2.set_ylabel('Bandpass Throughput') ax.legend(loc='upper left') ax2.legend(loc='upper right') fig.tight_layout() # - # ## Initialize Observation # # Now we will initialize the high-contrast imaging class `pynrc.obs_hci` using the spectral objects and various other settings. The `obs_hci` object is a subclass of the more generalized `NIRCam` class. It implements new settings and functions specific to high-contrast imaging observations for corongraphy and direct imaging. # For this tutorial, we want to observe these targets using the `MASK430R` coronagraph in the `F444W` filter. All circular coronagraphic masks such as the `430R` (R=round) should be paired with the `CIRCLYOT` pupil element, whereas wedge/bar masks are paired with `WEDGELYOT` pupil. Observations in the LW channel are most commonly observed in `WINDOW` mode with a 320x320 detector subarray size. Full detector sizes are also available. # # The PSF simulation size (`fov_pix` keyword) should also be of similar size as the subarray window (recommend avoiding anything above `fov_pix=1024` due to computation time and memory usage). Use odd numbers to center the PSF in the middle of the pixel. If `fov_pix` is specified as even, then PSFs get centered at the corners. This distinction really only matter for unocculted observations, (ie., where the PSF flux is concentrated in a tight central core). 
# # We also need to specify a WFE drift value (`wfe_ref_drift` parameter), which defines the anticipated drift in nm between the science and reference sources. For the moment, let's intialize with a value of 0nm. This prevents an initially long process by which `pynrc` calculates changes made to the PSF over a wide range of drift values. This process only happens once, then stores the resulting coefficient residuals to disk for future quick retrieval. # # Extended disk models can also be specified upon initialization using the `disk_hdu` keyword. # + filt, mask, pupil = ('F444W', 'MASK430R', 'CIRCLYOT') wind_mode, subsize = ('WINDOW', 320) fov_pix, oversample = (320, 2) wfe_ref_drift = 0 obs = pynrc.obs_hci(sp_sci, sp_ref, dist_sci, filter=filt, mask=mask, pupil=pupil, wfe_ref_drift=wfe_ref_drift, fov_pix=fov_pix, oversample=oversample, wind_mode=wind_mode, xpix=subsize, ypix=subsize, verbose=True) # - # Some information for the reference observation is stored in the attribute `obs.nrc_ref`, which is simply it's own isolated `NIRCam` class. Currently, this is the easiest way to separate out the reference observations different MULTIACCUM ramp modes. Any changes made to the filter, detector size, etc on the parent class will be taken into account when simulating the reference slope images and nosie. In some ways, it's best to think of these as two separate classes, # ```python # obs_sci = obs # obs_ref = obs.nrc_ref # ``` # with some linked references between the two. # # Now that we've succesffully initialized the obs_hci observations, let's specify the `wfe_ref_drift`. If this is your first time, then the `nrc_utils.wfed_coeff` function is called to determine a relationship between PSFs in the presense of WFE drift. This relationship is saved to disk in the `PYNRC_DATA` directory as a set of polynomial coefficients. Future calculations utilize these coefficients to quickly generate a new PSF for any arbitary drift value. 
# + # WFE drift amount between rolls # This only gets called during gen_roll_image(). obs.wfe_roll_drift = 2 # Drift amount between Roll 1 and Reference. obs.wfe_ref_drift = 10 # - # You can also turn on `obs.wfe_field` in order to take into account field-dependent WFE variations to the PSF. This does not include field distortion that would shift the location of a star at the focal palne, but simply creates a field-dependent OPD map that will modify a given PSF for off-axis sources. Since we're mainly dealing with faint companions, this is an unnecessary feature here. # ## Exposure Settings # # Optimization of exposure settings are demonstrated in another tutorial, so we will not repeat that process here. We can assume the optimization process was performed elsewhere to choose the `DEEP8` pattern with 16 groups and 5 total integrations. These settings apply to each roll position of the science observation as well as the for the reference observation. # Update both the science and reference observations obs.update_detectors(read_mode='DEEP8', ngroup=16, nint=5, verbose=True) obs.nrc_ref.update_detectors(read_mode='DEEP8', ngroup=16, nint=5) # ## Add Planets # # There are four known giant planets orbiting HR 8799 at various locations. Ideally, we would like to position them at their predicted locations on the anticipated observation date. For this case, we choose a plausible observation date of November 1, 2019. To convert between $(x,y)$ and $(r,\theta)$, use the `nrc_utils.xy_to_rtheta` and `nrc_utils.rtheta_to_xy` functions. # # When adding the planets, it doesn't matter too much which exoplanet model spectrum we decide to use since the spectra are still fairly unconstrained at these wavelengths. We do know roughly the planets' luminosities, so we can simply choose some reasonable model and renormalize it to the appropriate filter brightness. Currently, the only exoplanet spectral models available to `pynrc` are those from Spiegel & Burrows (2012). 
# +
# Projected locations for date 11/01/2019
# These are preliminary positions, but within constrained orbital parameters
loc_list = [(-1.57, 0.64), (0.42, 0.87), (0.5, -0.45), (0.35, 0.20)]

# Estimated magnitudes within F444W filter
pmags = [16.0, 15.0, 14.6, 14.7]
# -

# Add planet information to observation class.
# These are stored in obs.planets.
# Can be cleared using obs.kill_planets().
obs.kill_planets()
for i, loc in enumerate(loc_list):
    obs.add_planet(mass=10, entropy=13, age=age, xy=loc, runits='arcsec',
                   renorm_args=(pmags[i], 'vegamag', obs.bandpass))

# Generate and plot a noiseless slope image to make sure things look right
PA1 = 85
im_planets = obs.gen_planets_image(PA_offset=PA1)

# +
from matplotlib.patches import Circle
from pynrc.nrc_utils import coron_ap_locs, build_mask_detid, xy_rot, plotAxes
from pynrc.maths.image_manip import fshift, pad_or_cut_to_size

fig, ax = plt.subplots(figsize=(6,6))

# Image extent in arcsec, centered on the detector subarray
xasec = obs.det_info['xpix'] * obs.pix_scale
yasec = obs.det_info['ypix'] * obs.pix_scale
extent = [-xasec/2, xasec/2, -yasec/2, yasec/2]
xylim = 4

vmin = 0
vmax = 0.5*im_planets.max()
ax.imshow(im_planets, extent=extent, vmin=vmin, vmax=vmax)

# Overlay the coronagraphic mask
detid = obs.Detectors[0].detid
im_mask = obs.mask_images[detid]
# Do some masked transparency overlays
masked = np.ma.masked_where(im_mask>0.99, im_mask)
#ax.imshow(1-masked, extent=extent, alpha=0.5)
ax.imshow(1-masked, extent=extent, alpha=0.3, cmap='Greys_r', vmin=-0.5)

# Circle each planet position, rotated by the PA offset used above
xc_off = obs.bar_offset
for loc in loc_list:
    xc, yc = loc
    xc, yc = xy_rot(xc, yc, PA1)
    xc += xc_off
    circle = Circle((xc,yc), radius=xylim/15., alpha=0.7, lw=1, edgecolor='red', facecolor='none')
    ax.add_artist(circle)

xlim = ylim = np.array([-1,1])*xylim
xlim = xlim + xc_off
ax.set_xlim(xlim)
ax.set_ylim(ylim)

ax.set_xlabel('Arcsec')
ax.set_ylabel('Arcsec')
ax.set_title('{} planets -- {} {}'.format(sp_sci.name, obs.filter, obs.mask))

color = 'grey'
ax.tick_params(axis='both', color=color, which='both')
for k in ax.spines.keys():
    ax.spines[k].set_color(color)

plotAxes(ax, width=1, headwidth=5, alength=0.15, angle=PA1, position=(0.25,0.9), label1='E', label2='N')

fig.tight_layout()
# -

# As we can see, even with "perfect PSF subtraction" and no noise, it's difficult to make out planet e. This is primarily due to its location relative to the occulting mask reducing throughput along with confusion of bright diffraction spots from nearby sources.

# ## Estimated Performance
#
# Now we are ready to determine contrast performance and sensitivities as a function of distance from the star.

# ### 1. Roll-Subtracted Images
#
# First, we will create a quick simulated roll-subtracted image using the `gen_roll_image` method. For the selected observation date of 11/1/2019, APT shows a PA range of 84$^{\circ}$ to 96$^{\circ}$. So, we'll assume Roll 1 has PA1=85, while Roll 2 has PA2=95. In this case, "roll subtraction" simply creates two science images observed at different parallactic angles, then subtracts the same reference observation from each. The two results are then de-rotated to a common PA=0 and averaged.
#
# There is also the option to create ADI images, where the other roll position becomes the reference star by setting `no_ref=True`.
# +
# Cycle through a few WFE drift values
wfe_list = [0, 5, 10]

# PA values for each roll
PA1, PA2 = (85, 95)

# A dictionary of HDULists
hdul_dict = {}
for i in trange(len(wfe_list)):
    wfe_drift = wfe_list[i]
    # Update WFE reference drift value
    obs.wfe_ref_drift = wfe_drift
    # Set the final output image to be oversampled
    hdulist = obs.gen_roll_image(PA1=PA1, PA2=PA2)
    hdul_dict[wfe_drift] = hdulist

# +
from pynrc.nb_funcs import plot_hdulist
from matplotlib.patches import Circle

fig, axes = plt.subplots(1, 3, figsize=(14, 4.3))

xylim = 2.5
xlim = ylim = np.array([-1, 1])*xylim

for j, wfe_drift in enumerate(wfe_list):
    ax = axes[j]
    hdul = hdul_dict[wfe_drift]

    plot_hdulist(hdul, xr=xlim, yr=ylim, ax=ax, vmin=0, vmax=8)

    # Location of planet
    for loc in loc_list:
        circle = Circle(loc, radius=xylim/15., lw=1, edgecolor='red', facecolor='none')
        ax.add_artist(circle)

    # Raw string avoids the invalid '\D' escape sequence (SyntaxWarning on Python 3.12+)
    ax.set_title(r'$\Delta$WFE = {:.0f} nm'.format(wfe_drift))

    nrc_utils.plotAxes(ax, width=1, headwidth=5, alength=0.15, position=(0.9, 0.7), label1='E', label2='N')

fig.suptitle('{} -- {} {}'.format(name_sci, obs.filter, obs.mask), fontsize=14)
fig.tight_layout()
fig.subplots_adjust(top=0.85)
# -

# **Note:** At first glance, it may appear as if the innermost Planet e is getting brighter with increased WFE drift, which would be understandably confusing. However, upon further investigation, there just happens to be a bright residual speckle that lines up well with Planet e when observed at this specific parallactic angle. This was verified by adjusting the observed PA as well as removing the planets from the simulations.

# ### 2. Contrast Curves
#
# Next, we will cycle through a few WFE drift values to get an idea of potential predicted sensitivity curves. The `calc_contrast` method returns a tuple of three arrays:
# 1. The radius in arcsec.
# 2. The n-sigma contrast.
# 3. The n-sigma magnitude sensitivity limit (vega mag).
# +
# Cycle through varying levels of WFE drift and calculate contrasts
wfe_list = [0,5,10]
nsig = 5

# PA values for each roll
PA1, PA2 = (85, 95)
roll_angle = np.abs(PA2 - PA1)

curves = []
for i in trange(len(wfe_list)):
    wfe_drift = wfe_list[i]
    # Set the assumed WFE drift between science and reference for this iteration
    obs.wfe_ref_drift = wfe_drift
    # Generate contrast curves
    result = obs.calc_contrast(roll_angle=roll_angle, nsig=nsig)
    curves.append(result)

# +
from pynrc.nb_funcs import plot_contrasts, plot_planet_patches, plot_contrasts_mjup, update_yscale
import matplotlib.patches as mpatches

# fig, ax = plt.subplots(figsize=(8,5))
fig, axes = plt.subplots(1,2, figsize=(14,4.5))
xr=[0,5]
yr=[24,8]

# 1a. Plot contrast curves and set x/y limits
ax = axes[0]
ax, ax2, ax3 = plot_contrasts(curves, nsig, wfe_list, obs=obs,
                              xr=xr, yr=yr, ax=ax, return_axes=True)
# 1b. Plot the locations of exoplanet companions
label = 'Companions ({})'.format(filt)
planet_dist = [np.sqrt(x**2+y**2) for x,y in loc_list]
ax.plot(planet_dist, pmags, marker='o', ls='None', label=label, color='k', zorder=10)
# 1c. Plot Spiegel & Burrows (2012) exoplanet fluxes (Hot Start)
plot_planet_patches(ax, obs, age=age, entropy=13, av_vals=None)
ax.legend(ncol=2)

# 2. Plot in terms of MJup using COND models
ax = axes[1]
ax1, ax2, ax3 = plot_contrasts_mjup(curves, nsig, wfe_list, obs=obs, age=age,
                                    ax=ax, twin_ax=True, xr=xr, yr=None, return_axes=True)
# Mark each companion's separation with a dashed vertical line
yr = [0.03,100]
for xval in planet_dist:
    ax.plot([xval,xval],yr, lw=1, ls='--', color='k', alpha=0.7)
update_yscale(ax1, 'log', ylim=yr)
# Twin axis shows Earth masses (1 MJup = 318 MEarth)
yr_temp = np.array(ax1.get_ylim()) * 318.0
update_yscale(ax2, 'log', ylim=yr_temp)
# ax.set_yscale('log')
# ax.set_ylim([0.08,100])
ax.legend(loc='upper right', title='BEX ({:.0f} Myr)'.format(age))

fig.suptitle('{} ({} + {})'.format(name_sci, obs.filter, obs.mask), fontsize=16)
fig.tight_layout()
fig.subplots_adjust(top=0.85, bottom=0.1 , left=0.05, right=0.97)
# -

# The innermost Planet e is right on the edge of the detection threshold as suggested by the earlier simulated images.

# ### 3. Saturation Levels
#
# Create an image showing level of saturation for each pixel. For NIRCam, saturation is important to track for purposes of accurate slope fits and persistence correction. In this case, we will plot the saturation levels both at `NGROUP=2` and `NGROUP=obs.det_info['ngroup']`. Saturation is defined at 80% well level, but can be modified using the `well_fill` keyword.
#
# We want to perform this analysis for both science and reference targets.

# +
# Saturation limits
ng_max = obs.det_info['ngroup']
sp_flat = pynrc.stellar_spectrum('flat')

print('NGROUP=2')
_ = obs.sat_limits(sp=sp_flat,ngroup=2,verbose=True)
print('')
print('NGROUP={}'.format(ng_max))
_ = obs.sat_limits(sp=sp_flat,ngroup=ng_max,verbose=True)

# NOTE(review): these rebind the `mag_sci`/`mag_ref` names that were unpacked from
# args_sources/ref_sources earlier in the notebook — confirm the earlier values
# are no longer needed past this point.
mag_sci = obs.star_flux('vegamag')
mag_ref = obs.star_flux('vegamag', sp=obs.sp_ref)
print('')
print('{} flux at {}: {:0.2f} mags'.format(obs.sp_sci.name, obs.filter, mag_sci))
print('{} flux at {}: {:0.2f} mags'.format(obs.sp_ref.name, obs.filter, mag_ref))
# -

# In this case, we don't expect HR 8799 to saturate. However, the reference source should have some saturated pixels before the end of an integration.
# +
# Well level of each pixel for science source
sci_levels1 = obs.saturation_levels(ngroup=2)
sci_levels2 = obs.saturation_levels(ngroup=ng_max)

# Which pixels are saturated? (80% well fill)
sci_mask1 = sci_levels1 > 0.8
sci_mask2 = sci_levels2 > 0.8

# +
# Well level of each pixel for reference source
ref_levels1 = obs.saturation_levels(ngroup=2, do_ref=True)
ref_levels2 = obs.saturation_levels(ngroup=ng_max, do_ref=True)

# Which pixels are saturated?
ref_mask1 = ref_levels1 > 0.8
ref_mask2 = ref_levels2 > 0.8

# +
# How many saturated pixels? (a boolean mask's sum is its True count)
nsat1_sci = int(sci_mask1.sum())
nsat2_sci = int(sci_mask2.sum())

print(obs.sp_sci.name)
print('{} saturated pixel at NGROUP=2'.format(nsat1_sci))
print('{} saturated pixel at NGROUP={}'.format(nsat2_sci, ng_max))

nsat1_ref = int(ref_mask1.sum())
nsat2_ref = int(ref_mask2.sum())

print('')
print(obs.sp_ref.name)
print('{} saturated pixel at NGROUP=2'.format(nsat1_ref))
print('{} saturated pixel at NGROUP={}'.format(nsat2_ref, ng_max))

# +
def _plot_sat_masks(sp, nrc, sat_mask1, sat_mask2, nsat2, ng_max):
    """Display early (NGROUP=2) and full-ramp saturation masks for one source.

    Only draws the figure when `nsat2` indicates saturated pixels exist;
    otherwise prints a short notice. Shared by the science and reference
    targets below (previously two duplicated cells).
    """
    if nsat2 > 0:
        fig, axes = plt.subplots(1, 2, figsize=(10, 5))

        xasec = nrc.det_info['xpix'] * nrc.pix_scale
        yasec = nrc.det_info['ypix'] * nrc.pix_scale
        extent = [-xasec/2, xasec/2, -yasec/2, yasec/2]

        axes[0].imshow(sat_mask1, extent=extent)
        axes[1].imshow(sat_mask2, extent=extent)

        axes[0].set_title('{} Saturation (NGROUP=2)'.format(sp.name))
        axes[1].set_title('{} Saturation (NGROUP={})'.format(sp.name, ng_max))

        for ax in axes:
            ax.set_xlabel('Arcsec')
            ax.set_ylabel('Arcsec')
            ax.tick_params(axis='both', color='white', which='both')
            for k in ax.spines.keys():
                ax.spines[k].set_color('white')

        fig.tight_layout()
    else:
        print('No saturation detected.')

# Saturation mask for science target
_plot_sat_masks(obs.sp_sci, obs, sci_mask1, sci_mask2, nsat2_sci, ng_max)

# +
# Saturation mask for reference target
_plot_sat_masks(obs.sp_ref, obs.nrc_ref, ref_mask1, ref_mask2, nsat2_ref, ng_max)
# -
notebooks/Coronagraph_Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Metropolis and Gibbs Sampling # ==== # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import seaborn as sns from functools import partial sns.set_context('notebook', font_scale=1.5) # ## Introduction to MCMC # In regular Markov chain models, we are usually interested in finding the equilibrium distribution $\pi$ at whihc $\pi^T T = \pi^T$ for a given transition kernel $T$. # # MCMC inverts this thinking - we fix the equilibrium distribution to be the posterior distribution # # $$ # p(\theta \mid X) = \frac{p(X \mid \theta) \, p(\theta)}{\int{p(X \mid \theta) \, p(\theta) d\theta}} # $$ # # and look for a transition kernel $T$ that will converge to this equilibrium distribution. # ### Island hopping # # We first provide an example to show the mechanics of the Metropolis algorithm concretely, then explore why it works. # [Kruschke's book](https://sites.google.com/site/doingbayesiandataanalysis/) begins with a fun example of a politician visiting a chain of islands to canvas support - being callow, the politician uses a simple rule to determine which island to visit next. Each day, the politician chooses a neighboring island and compares the populations there with the population of the current island. If the neighboring island has a larger population, the politician goes over. If the neighboring island has a smaller population, then the politician visits with probability $p = N_\text{neighbor} / N_\text{current}$ where $N$ is the island population; otherwise the politician stays on the same island. After doing this for many days, the politician will end up spending time on each island proportional to the population of each island - in other words, estimating the distribution of island populations correctly. 
# How a simple comparison of only two states at a time can lead to accurate estimation of a probability density is the topic of the next few lectures.

def make_islands(n, low=10, high=101):
    """Return n random island populations padded with zero-population sentinels at both ends."""
    islands = np.random.randint(low, high, n+2)
    islands[0] = 0
    islands[-1] = 0
    return islands

def hop(islands, start=1, niter=1000):
    """Simulate the island-hopping politician (Metropolis random walk over island indices)."""
    pos = start
    pop = islands[pos]
    thetas = np.zeros(niter+1, dtype='int')
    thetas[0] = pos
    for i in range(niter):
        # generate sample from proposal distribution
        # (scalar draw; the original size-1 array made `pos` an ndarray, relying
        # on deprecated array-to-scalar element assignment in modern NumPy)
        k = np.random.choice([-1, 1])
        next_pos = pos + k
        # evaluate unnormalized target distribution at proposed position
        next_pop = islands[next_pos]
        # calculate acceptance probability (sentinel islands have pop 0, so p=0 there)
        p = min(1, next_pop/pop)
        # use uniform random to decide accept/reject proposal
        if np.random.random() < p:
            pos = next_pos
            pop = next_pop
        thetas[i+1] = pos
    return thetas

islands = make_islands(10)
thetas = hop(islands, start=1, niter=10000)

# ### True population proportions

data = islands[1:-1]
data = data/data.sum()
sns.barplot(x=np.arange(len(data)), y=data)
pass

# ### Estimated population proportions

data = np.bincount(thetas)[1:]
data = data/data.sum()
sns.barplot(x=np.arange(len(data)), y=data)
pass

# ### Generic Metropolis scheme

def metroplis(start, target, proposal, niter, nburn=0):
    """Generic Metropolis sampler; returns the chain after discarding `nburn` samples.

    (Name misspelling kept for backward compatibility with existing callers.)
    """
    current = start
    post = [current]
    for i in range(niter):
        proposed = proposal(current)
        p = min(target(proposed)/target(current), 1)
        if np.random.random() < p:
            current = proposed
        post.append(current)
    return post[nburn:]

# ### Apply to island hopper

target = lambda x: islands[x]
proposal = lambda x: x + np.random.choice([-1, 1])
post = metroplis(1, target, proposal, 2000)
data = np.bincount(post)[1:]
data = data/data.sum()
sns.barplot(x=np.arange(len(data)), y=data)
pass

# Bayesian Data Analysis
# ----
#
# The fundamental objective of Bayesian data analysis is to determine the posterior distribution
#
# $$
# p(\theta \ | \ X) = \frac{p(X \ | \ \theta) p(\theta)}{p(X)}
# $$
#
# where the denominator is
#
# $$
# p(X) = \int d\theta^* p(X \ | \
\theta^*) p(\theta^*) # $$ # # Here, # # - $p(X \ | \ \theta)$ is the likelihood, # - $p(\theta)$ is the prior and # - $p(X)$ is a normalizing constant also known as the evidence or marginal likelihood # # The computational issue is the difficulty of evaluating the integral in the denominator. There are many ways to address this difficulty, including: # # - In cases with conjugate priors (with conjugate priors, the posterior has the same distribution family as the prior), we can get closed form solutions # - We can use numerical integration # - We can approximate the functions used to calculate the posterior with simpler functions and show that the resulting approximate posterior is "close" to true posterior (variational Bayes) # - We can use Monte Carlo methods, of which the most important is Markov Chain Monte Carlo (MCMC). In simple Monte Carlo integration, we want to estimate the integral $\int f(x) \, p(x) dx$. With Bayesian models, the distribution $p(x)$ in the integral is the posterior # # $$ # p(x) = p(\theta \ | \ X) = \frac{p(X \ | \ \theta) p(\theta)}{\int d\theta^* p(X \ | \ \theta^*) p(\theta^*) } # $$ # - MCMC allows to sample from the posterior distribution - the samples will not be independent unlike simple Monte Carlo integration, but this is OK as we can compensate for the auto-correlation by drawing a larger number of samples. # ### Motivating example # # We will use the toy example of estimating the bias of a coin given a sample consisting of $n$ tosses to illustrate a few of the approaches. # #### Analytical solution # # If we use a beta distribution as the prior, then the posterior distribution has a closed form solution. This is shown in the example below. Some general points: # # - We need to choose a prior distribution family (i.e. the beta here) as well as its parameters (here a=10, b=10) # - The prior distribution may be relatively uninformative (i.e. more flat) or informative (i.e. 
# more peaked)
# - The posterior depends on both the prior and the data
# - As the amount of data becomes large, the posterior approximates the MLE
# - An informative prior takes more data to shift than an uninformative one
# - Of course, it is also important that the model used (i.e. the likelihood) is appropriate for fitting the data
# - The mode of the posterior distribution is known as the maximum a posteriori (MAP) estimate (cf MLE which is the mode of the likelihood)

import scipy.stats as stats

# +
# Coin-bias example: n tosses, h heads, Beta(a, b) prior -> Beta(h+a, n-h+b) posterior
n = 100
h = 61
p = h/n
rv = stats.binom(n, p)
mu = rv.mean()

a, b = 10, 10
prior = stats.beta(a, b)
post = stats.beta(h+a, n-h+b)
ci = post.interval(0.95)

thetas = np.linspace(0, 1, 200)
plt.plot(thetas, prior.pdf(thetas), label='Prior', c='blue')
plt.plot(thetas, post.pdf(thetas), label='Posterior', c='red')
plt.plot(thetas, n*stats.binom(n, thetas).pmf(h), label='Likelihood', c='green')
plt.axvline((h+a-1)/(n+a+b-2), c='red', linestyle='dashed', alpha=0.4, label='MAP')
plt.axvline(mu/n, c='green', linestyle='dashed', alpha=0.4, label='MLE')
plt.xlim([0, 1])
# Bug fix: axhline interprets its xmin/xmax arguments as axes-fraction coordinates,
# so passing the credible-interval endpoints drew the line over the wrong span.
# hlines takes data coordinates, which is what is intended here.
plt.hlines(0.3, ci[0], ci[1], color='black', linewidth=2, label='95% CI')
plt.xlabel(r'$\theta$', fontsize=14)
plt.ylabel('Density', fontsize=16)
plt.legend(loc='upper left')
pass
# -

# #### Numerical integration
#
# One simple way of numerical integration is to estimate the values on a grid of values for $\theta$. To calculate the posterior, we find the prior and the likelihood for each value of $\theta$, and for the marginal likelihood, we replace the integral with the equivalent sum
#
# $$
# p(X) = \sum_{\theta^*} p(X | \theta^*) p(\theta^*)
# $$
#
# One advantage of this is that the prior does not have to be conjugate (although the example below uses the same beta prior for ease of comparison), and so we are not restricted in our choice of an appropriate prior distribution. For example, the prior can be a mixture distribution or estimated empirically from data.
# The disadvantage, of course, is that this is computationally very expensive when we need to estimate multiple parameters, since the number of grid points grows as $\mathcal{O}(n^d)$, where $n$ defines the grid resolution and $d$ is the size of $\theta$.

# +
thetas = np.linspace(0, 1, 200)
prior = stats.beta(a, b)
# Unnormalized posterior evaluated on the grid
post = prior.pdf(thetas) * stats.binom(n, thetas).pmf(h)
# Normalize so volume is 1 (divide by the Riemann sum over the grid)
post /= (post.sum() / len(thetas))

plt.plot(thetas, prior.pdf(thetas), label='Prior', c='blue')
plt.plot(thetas, n*stats.binom(n, thetas).pmf(h), label='Likelihood', c='green')
plt.plot(thetas, post, label='Posterior', c='red')
plt.xlim([0, 1])
plt.xlabel(r'$\theta$', fontsize=14)
plt.ylabel('Density', fontsize=16)
plt.legend()
pass
# -

# ### Markov Chain Monte Carlo (MCMC)
#
# This lecture will only cover the basic ideas of MCMC and the 3 common variants - Metropolis, Metropolis-Hastings and Gibbs sampling. All code will be built from the ground up to illustrate what is involved in fitting an MCMC model, but only toy examples will be shown since the goal is conceptual understanding. More realistic computational examples will be shown in coming lectures using the `pymc3` and `pystan` packages.
#
# In Bayesian statistics, we want to estimate the posterior distribution, but this is often intractable due to the high-dimensional integral in the denominator (marginal likelihood). A few other ideas we have encountered that are also relevant here are Monte Carlo integration with independent samples and the use of proposal distributions (e.g. rejection and importance sampling). As we have seen from the Monte Carlo integration lectures, we can approximate the posterior $p(\theta | X)$ if we can somehow draw many samples that come from the posterior distribution.
With vanilla Monte Carlo integration, we need the samples to be independent draws from the posterior distribution, which is a problem if we do not actually know what the posterior distribution is (because we cannot integrate the marginal likelihood). # # With MCMC, we draw samples from a (simple) proposal distribution so that each draw depends only on the state of the previous draw (i.e. the samples form a Markov chain). Under certain conditions, the Markov chain will have a unique stationary distribution. In addition, not all proposal draws are used - instead we set up acceptance criteria for each draw based on comparing successive states with respect to a target distribution that ensure that the stationary distribution is the posterior distribution of interest. The nice thing is that this target distribution only needs to be proportional to the posterior distribution, which means we don't need to evaluate the potentially intractable marginal likelihood, which is just a normalizing constant. We can find such a target distribution easily, since `posterior` $\propto$ `likelihood` $\times$ `prior`. After some time, the Markov chain of accepted draws will converge to the stationary distribution, and we can use those samples as (correlated) draws from the posterior distribution, and find functions of the posterior distribution in the same way as for vanilla Monte Carlo integration. # # There are several flavors of MCMC, but the simplest to understand is the Metropolis-Hastings random walk algorithm, and we will start there. 
# #### Metropolis-Hastings random walk algorithm for estimating the bias of a coin
#
# To carry out the Metropolis-Hastings algorithm, we need to draw random samples from the following distributions
#
# - the standard uniform distribution
# - a proposal distribution $p(x)$ that we choose to be $\mathcal{N}(0, \sigma)$
# - the target function $g(x)$ which is proportional to the posterior probability (the target function is essentially an unnormalized distribution)
#
# Given an initial guess for $\theta$ with positive probability of being drawn, the Metropolis-Hastings algorithm proceeds as follows
#
# - Choose a new proposed value ($\theta_p$) such that $\theta_p = \theta + \Delta\theta$ where $\Delta \theta \sim \mathcal{N}(0, \sigma)$
# - Calculate the ratio
#
# $$
# \rho = \frac{g(\theta_p \ | \ X)}{g(\theta \ | \ X)}
# $$
#
# where $g$ is the posterior probability.
#
# - If the proposal distribution is not symmetrical, we need to weight the acceptance probability to maintain detailed balance (reversibility) of the stationary distribution, and instead calculate
#
# $$
# \rho = \frac{g(\theta_p \ | \ X) p(\theta \ | \ \theta_p)}{g(\theta \ | \ X) p(\theta_p \ | \ \theta)}
# $$
#
# Note: The Metropolis algorithm refers to symmetrical proposals, and Metropolis-Hastings refers to this correction for asymmetrical proposals.
#
# Since we are taking ratios, the denominator cancels, so any distribution proportional to $g$ will also work - hence we can use
#
# $$
# \rho = \frac{p(X | \theta_p ) p(\theta_p)}{p(X | \theta ) p(\theta)}
# $$
#
# - If $\rho \ge 1$, then set $\theta = \theta_p$
# - If $\rho \lt 1$, then set $\theta = \theta_p$ with probability $\rho$, otherwise set $\theta = \theta$ (this is where we use the standard uniform distribution)
# - Repeat the earlier steps
#
# After some number of iterations $k$, the samples $\theta_{k+1}, \theta_{k+2}, \dots$ will be samples from the posterior distribution.
# Here are initial concepts to help your intuition about why this is so:
#
# - We accept a proposed move to $\theta_{k+1}$ whenever the density of the (unnormalized) target function at $\theta_{k+1}$ is larger than its density at $\theta_k$ - so $\theta$ will more often be found in places where the target function is denser
# - If this was all we accepted, $\theta$ would get stuck at a local mode of the target function, so we also accept occasional moves to lower density regions - it turns out that the correct probability of doing so is given by the ratio $\rho$
# - The acceptance criterion only looks at ratios of the target function, so the denominator cancels out and does not matter - that is why we only need the target function to be proportional to the posterior distribution
# - So, $\theta$ will be expected to bounce around in such a way that it spends its time in places proportional to the density of the posterior distribution - that is, $\theta$ is a draw from the posterior distribution.
#
# Additional notes:
#
# Different proposal distributions can be used for Metropolis-Hastings:
#
# - The independence sampler uses a proposal distribution that is independent of the current value of $\theta$. In this case the proposal distribution needs to be similar to the posterior distribution for efficiency, while ensuring that the acceptance ratio is bounded in the tail region of the posterior.
# - The random walk sampler (used in this example) takes a random step centered at the current value of $\theta$ - efficiency is a trade-off between small step size with high probability of acceptance and large step sizes with low probability of acceptance. Note (picture will be sketched in class) that the random walk may take a long time to traverse narrow regions of the probability distribution. Changing the step size (e.g. scaling $\Sigma$ for a multivariate normal proposal distribution) so that a target proportion of proposals are accepted is known as *tuning*.
# - Much research is being conducted on different proposal distributions for efficient sampling of the posterior distribution.
#
# We will first see a numerical example and then try to understand why it works.

# +
def target(lik, prior, n, h, theta):
    """Unnormalized posterior (likelihood x prior); zero outside the support [0, 1]."""
    if theta < 0 or theta > 1:
        return 0
    else:
        return lik(n, theta).pmf(h)*prior.pdf(theta)

n = 100
h = 61
a = 10
b = 10
lik = stats.binom
prior = stats.beta(a, b)
sigma = 0.3

# Freeze the proposal distribution once instead of rebuilding it every iteration
proposal_rv = stats.norm(0, sigma)

naccept = 0
theta = 0.1
niters = 10000
samples = np.zeros(niters+1)
samples[0] = theta
for i in range(niters):
    theta_p = theta + proposal_rv.rvs()
    rho = min(1, target(lik, prior, n, h, theta_p)/target(lik, prior, n, h, theta))
    u = np.random.uniform()
    if u < rho:
        naccept += 1
        theta = theta_p
    samples[i+1] = theta
nmcmc = len(samples)//2
print("Efficiency = ", naccept/niters)

# +
post = stats.beta(h+a, n-h+b)

# Bug fixes: `normed` was removed from matplotlib (`density` is the supported
# keyword), and the labels were swapped - `samples` holds the MCMC posterior
# draws while `prior.rvs` draws come from the prior.
plt.hist(samples[nmcmc:], 40, histtype='step', density=True, linewidth=1, label='Posterior');
plt.hist(prior.rvs(nmcmc), 40, histtype='step', density=True, linewidth=1, label='Prior');
plt.plot(thetas, post.pdf(thetas), c='red', linestyle='--', alpha=0.5, label='True posterior')
plt.xlim([0,1]);
plt.legend(loc='upper left')
pass
# -

# #### Assessing for convergence
#
# Trace plots are often used to informally assess for stochastic convergence. Rigorous demonstration of convergence is an unsolved problem, but simple ideas such as running multiple chains and checking that they are converging to similar distributions are often employed in practice.
def mh_coin(niters, n, h, theta, lik, prior, sigma):
    """Run one random-walk Metropolis chain for the coin-bias posterior.

    Starts at `theta` and draws until `niters` samples (including the start)
    have been collected, using a Normal(0, sigma) step proposal. Relies on
    the module-level target() defined earlier. Returns the list of samples.
    """
    samples = [theta]
    while len(samples) < niters:
        theta_p = theta + stats.norm(0, sigma).rvs()
        # Symmetric proposal, so the acceptance ratio needs no Hastings correction.
        rho = min(1, target(lik, prior, n, h, theta_p)/target(lik, prior, n, h, theta ))
        u = np.random.uniform()
        if u < rho:
            theta = theta_p
        samples.append(theta)
    return samples

# +
n = 100
h = 61
lik = stats.binom
prior = stats.beta(a, b)
sigma = 0.05
niters = 100

# Run several chains from different starting points to check convergence.
sampless = [mh_coin(niters, n, h, theta, lik, prior, sigma) for theta in np.arange(0.1, 1, 0.2)]

# +
# Convergence of multiple chains

for samples in sampless:
    plt.plot(samples, '-o')
plt.xlim([0, niters])
plt.ylim([0, 1]);
# -

# #### Why does Metropolis-Hastings work?
#
# There are two main ideas - first that the samples generated by MCMC constitute a Markov chain, and that this Markov chain has a unique stationary distribution that is always reached if we generate a very large number of samples. The second idea is to show that this stationary distribution is exactly the posterior distribution that we are looking for. We will only give the intuition here as a refresher.
#
# #### One: There is a unique stationary state
#
# Since possible transitions depend only on the current and the proposed values of $\theta$, the successive values of $\theta$ in a Metropolis-Hastings sample constitute a Markov chain. Recall that for a Markov chain with a transition matrix $A$
#
# $$
# \pi^T = \pi^T A
# $$
#
# means that $\pi$ is a stationary distribution. If it is possible to go from any state to any other state, then the matrix is irreducible. If in addition, it is not possible to get stuck in an oscillation, then the matrix is also aperiodic or mixing. For finite state spaces, irreducibility and aperiodicity guarantee the existence of a unique stationary state. For continuous state space, we need an additional property of positive recurrence - starting from any state, the expected time to come back to the original state must be finite.
# If we have all 3 properties of irreducibility, aperiodicity and positive recurrence, then there is a unique stationary distribution. The term ergodic is a little confusing - most standard definitions take ergodicity to be equivalent to irreducibility, but often Bayesian texts take ergodicity to mean irreducibility, aperiodicity and positive recurrence, and we will follow the latter convention. For another intuitive perspective, the random walk Metropolis-Hastings algorithm is analogous to a diffusion process. Since all states are communicating (by design), eventually the system will settle into an equilibrium state. This is analogous to converging on the stationary state.
#
# #### Two: The stationary state is the posterior probability distribution
#
# We will consider the simplest possible scenario for an explicit calculation. Suppose we have a two-state system where the posterior probabilities are $\theta$ and $1 - \theta$. Suppose $\theta \lt 0.5$. So we have the following picture with the Metropolis-Hastings algorithm:
#
# ![Markov chain](figs/mcmc.png)
#
# and we find the stationary distribution $\pi = \left( \begin{array}{cc} p & 1-p \end{array} \right)$ by solving
#
# $$
# \begin{align}
# \left( \begin{array}{cc} p & 1-p \end{array} \right) &=
# \left( \begin{array}{cc} p & 1-p \end{array} \right) \left(
# \begin{array}{cc}
# 0 & 1 \\
# \frac{\theta}{1-\theta} & 1-\frac{\theta}{1-\theta}
# \end{array}
# \right)
# \end{align}
# $$
#
# to be $\pi = \left( \begin{array}{cc} \theta & 1-\theta \end{array} \right)$, which is the posterior distribution.
#
# The final point is that we can find a stationary distribution using the detailed balance (reversibility) criterion that says that the probability of being in state $x$ and moving to state $y$ must be the same as the probability of being in state $y$ and moving to state $x$. Note that detailed balance is a sufficient but not necessary condition for $\pi$ to be a steady state distribution (assuming ergodicity).
Or, more briefly, # # $$ # \pi(x)T(x \to y) = \pi(y)T(y \to x) # $$ # # and the need to make sure that this condition is true accounts for the strange looking acceptance criterion # # $$ # \min \left(1, \frac{g(\theta_p \ | \ X) p(\theta \ | \ \theta_p)}{g(\theta \ | \ X) p(\theta_p \ | \ \theta)} \right) # $$ # # ### Intuition # # We want the stationary distribution $\pi(x)$ to be the posterior distribution $P(x)$. So we set # # $$ # P(x)T(x \to y) = P(y)T(y \to x) # $$ # # Rearranging, we get # # $$ # \frac{T(x \to y)}{T(y \to x)} = \frac{P(y)}{P(x)} # $$ # # We split the transition probability into separate proposal $q$ and acceptance $A$ parts, and after a little algebraic rearrangement get # # $$ # \frac{A(x \to y)}{A(y \to x)} = \frac{P(y) \, q(y \to x)}{P(x) \, q(x \to y)} # $$ # # An acceptance probability that meets this condition is # $$ # A(x \to y) = \min \left(1, \frac{P(y) \, q(y \to x)}{P(x) \, q(x \to y)} \right) # $$ # # since $A$ in the numerator and denominator are both bounded above by 1. # # See [Chib and Greenberg](https://eml.berkeley.edu/reprints/misc/understanding.pdf) for algebraic details. # ### The Gibbs sampler # # Suppose we have a vector of parameters $\theta = (\theta_1, \theta_2, \dots, \theta_k)$, and we want to estimate the joint posterior distribution $p(\theta | X)$. Suppose we can find and draw random samples from all the conditional distributions # # $$ # p(\theta_1 | \theta_2, \dots \theta_k, X) \\ # p(\theta_2 | \theta_1, \dots \theta_k, X) \\ # \dots \\ # p(\theta_k | \theta_1, \theta_2, \dots, X) # $$ # # With Gibbs sampling, the Markov chain is constructed by sampling from the conditional distribution for each parameter $\theta_i$ in turn, treating all other parameters as observed. When we have finished iterating over all parameters, we are said to have completed one cycle of the Gibbs sampler. 
Since hierarchical models are typically set up as products of conditional distributions, the Gibbs sampler is ubiquitous in Bayesian modeling. Where it is difficult to sample from a conditional distribution, we can sample using a Metropolis-Hastings algorithm instead - this is known as Metropolis within Gibbs. # # Gibbs sampling is a type of random walk through parameter space, and hence can be thought of as a Metropolis-Hastings algorithm with a special proposal distribution. At each iteration in the cycle, we are drawing a proposal for a new value of a particular parameter, where the proposal distribution *is* the conditional posterior probability of that parameter. This means that the proposal move is *always* accepted. Hence, if we can draw samples from the conditional distributions, Gibbs sampling can be much more efficient than regular Metropolis-Hastings. # More formally, we want to show that # # $$ # \frac{P(y) \, q(y \to x)}{P(x) \, q(x \to y)} = 1 # $$ # # We start by noting that $P(x_{-i})$ is the same as $P(y_{-i})$ since apart from the component $i$, the old state and the proposed new state are identical in Gibbs sampling. We also recall that # # $$P(x_i \mid x_{-i}) \, P(x_{-i}) = P(x_i, x_{-i}) = P(x)$$ # # by definition of conditional probability. 
# So we have
#
# $$
# \begin{align}
# \frac{P(y) \, q(y \to x)}{P(x) \, q(x \to y)} &= \frac{P(y_i \mid y_{-i}) \, P(y_{-i})\, P(x_i \mid x_{-i}) }{P(x_i \mid x_{-i}) \, P(x_{-i})\, P(y_i \mid y_{-i})} \\
# &= 1
# \end{align}
# $$
#
#
# **Advantages of Gibbs sampling**
#
# - No need to tune proposal distribution
# - Proposals are always accepted
#
# **Disadvantages of Gibbs sampling**
#
# - Need to be able to derive conditional probability distributions
# - Need to be able to (cheaply) draw random samples from conditional probability distributions
# - Can be very slow if parameters are correlated because you cannot take "diagonal" steps (draw picture to illustrate)

# ### Motivating example
#
# We will use the toy example of estimating the bias of two coins given sample pairs $(z_1, n_1)$ and $(z_2, n_2)$ where $z_i$ is the number of heads in $n_i$ tosses for coin $i$.

# #### Setup

def bern(theta, z, N):
    """Bernoulli likelihood of z successes in N trials at bias theta.

    Clipped to [0, 1] to guard against values of theta outside the unit
    interval producing out-of-range results.
    """
    return np.clip(theta**z * (1-theta)**(N-z), 0, 1)

def bern2(theta1, theta2, z1, z2, N1, N2):
    """Joint Bernoulli likelihood for two independent coins."""
    return bern(theta1, z1, N1) * bern(theta2, z2, N2)

def make_thetas(xmin, xmax, n):
    """Return the n-1 midpoints of an n-point uniform grid on [xmin, xmax]."""
    xs = np.linspace(xmin, xmax, n)
    widths = (xs[1:] - xs[:-1])/2.0
    thetas = xs[:-1] + widths
    return thetas

# +
from mpl_toolkits.mplot3d import Axes3D

def make_plots(X, Y, prior, likelihood, posterior, projection=None):
    """Plot prior, likelihood and posterior side by side.

    Draws 3D surfaces when projection == '3d', contour plots otherwise.
    """
    fig, ax = plt.subplots(1,3, subplot_kw=dict(projection=projection, aspect='equal'), figsize=(12,3))
    if projection == '3d':
        ax[0].plot_surface(X, Y, prior, alpha=0.3, cmap=plt.cm.jet)
        ax[1].plot_surface(X, Y, likelihood, alpha=0.3, cmap=plt.cm.jet)
        ax[2].plot_surface(X, Y, posterior, alpha=0.3, cmap=plt.cm.jet)
        for ax_ in ax:
            # Hide the 3D axes (private matplotlib attribute; works on the
            # matplotlib version this notebook targets).
            ax_._axis3don = False
    else:
        ax[0].contour(X, Y, prior, cmap=plt.cm.jet)
        ax[1].contour(X, Y, likelihood, cmap=plt.cm.jet)
        ax[2].contour(X, Y, posterior, cmap=plt.cm.jet)
    ax[0].set_title('Prior')
    ax[1].set_title('Likelihood')
    ax[2].set_title('Posterior')
plt.tight_layout() # - thetas1 = make_thetas(0, 1, 101) thetas2 = make_thetas(0, 1, 101) X, Y = np.meshgrid(thetas1, thetas2) # #### Analytic solution # + a = 2 b = 3 z1 = 11 N1 = 14 z2 = 7 N2 = 14 prior = stats.beta(a, b).pdf(X) * stats.beta(a, b).pdf(Y) likelihood = bern2(X, Y, z1, z2, N1, N2) posterior = stats.beta(a + z1, b + N1 - z1).pdf(X) * stats.beta(a + z2, b + N2 - z2).pdf(Y) make_plots(X, Y, prior, likelihood, posterior) make_plots(X, Y, prior, likelihood, posterior, projection='3d') # - # #### Grid approximation def c2d(thetas1, thetas2, pdf): width1 = thetas1[1] - thetas1[0] width2 = thetas2[1] - thetas2[0] area = width1 * width2 pmf = pdf * area pmf /= pmf.sum() return pmf _prior = bern2(X, Y, 2, 8, 10, 10) + bern2(X, Y, 8, 2, 10, 10) prior_grid = c2d(thetas1, thetas2, _prior) _likelihood = bern2(X, Y, 1, 1, 2, 3) posterior_grid = _likelihood * prior_grid posterior_grid /= posterior_grid.sum() make_plots(X, Y, prior_grid, likelihood, posterior_grid) make_plots(X, Y, prior_grid, likelihood, posterior_grid, projection='3d') # #### Metropolis # + a = 2 b = 3 z1 = 11 N1 = 14 z2 = 7 N2 = 14 prior = lambda theta1, theta2: stats.beta(a, b).pdf(theta1) * stats.beta(a, b).pdf(theta2) lik = partial(bern2, z1=z1, z2=z2, N1=N1, N2=N2) target = lambda theta1, theta2: prior(theta1, theta2) * lik(theta1, theta2) theta = np.array([0.5, 0.5]) niters = 10000 burnin = 500 sigma = np.diag([0.2,0.2]) thetas = np.zeros((niters-burnin, 2), np.float) for i in range(niters): new_theta = stats.multivariate_normal(theta, sigma).rvs() p = min(target(*new_theta)/target(*theta), 1) if np.random.rand() < p: theta = new_theta if i >= burnin: thetas[i-burnin] = theta # - kde = stats.gaussian_kde(thetas.T) XY = np.vstack([X.ravel(), Y.ravel()]) posterior_metroplis = kde(XY).reshape(X.shape) make_plots(X, Y, prior(X, Y), lik(X, Y), posterior_metroplis) make_plots(X, Y, prior(X, Y), lik(X, Y), posterior_metroplis, projection='3d') # #### Gibbs # + a = 2 b = 3 z1 = 11 N1 = 14 z2 = 7 N2 = 
14 prior = lambda theta1, theta2: stats.beta(a, b).pdf(theta1) * stats.beta(a, b).pdf(theta2) lik = partial(bern2, z1=z1, z2=z2, N1=N1, N2=N2) target = lambda theta1, theta2: prior(theta1, theta2) * lik(theta1, theta2) theta = np.array([0.5, 0.5]) niters = 10000 burnin = 500 sigma = np.diag([0.2,0.2]) thetas = np.zeros((niters-burnin,2), np.float) for i in range(niters): theta = [stats.beta(a + z1, b + N1 - z1).rvs(), theta[1]] theta = [theta[0], stats.beta(a + z2, b + N2 - z2).rvs()] if i >= burnin: thetas[i-burnin] = theta # - kde = stats.gaussian_kde(thetas.T) XY = np.vstack([X.ravel(), Y.ravel()]) posterior_gibbs = kde(XY).reshape(X.shape) make_plots(X, Y, prior(X, Y), lik(X, Y), posterior_gibbs) make_plots(X, Y, prior(X, Y), lik(X, Y), posterior_gibbs, projection='3d') # Hierarchical models # --- # Hierarchical models have the following structure - first we specify that the data come from a distribution with parameters $\theta$ # # $$ # X \sim f(X\ | \ \theta) # $$ # # and that the parameters themselves come from another distribution with hyperparameters $\lambda$ # # $$ # \theta \sim g(\theta \ | \ \lambda) # $$ # # and finally that $\lambda$ comes from a prior distribution # # $$ # \lambda \sim h(\lambda) # $$ # # More levels of hierarchy are possible - i.e you can specify hyper-hyperparameters for the distribution of $\lambda$ and so on. # # The essential idea of the hierarchical model is because the $\theta$s are not independent but rather are drawn from a common distribution with parameter $\lambda$, we can share information across the $\theta$s by also estimating $\lambda$ at the same time. # # As an example, suppose we have data about the proportion of heads after some number of tosses from several coins, and we want to estimate the bias of each coin. We also know that the coins come from the same mint and so might share some common manufacturing defect. 
# There are two extreme approaches - we could estimate the bias of each coin from its coin toss data independently of all the others, or we could pool the results together and estimate the same bias for all coins. Hierarchical models provide a compromise where we shrink individual estimates towards a common estimate.
#
# Note that because of the conditionally independent structure of hierarchical models, Gibbs sampling is often a natural choice for the MCMC sampling strategy.

# #### Gibbs sampler example from [<NAME>, 10.17](http://www.springer.com/statistics/statistical+theory+and+methods/book/978-0-387-21239-5)
#
# Suppose we have data of the number of failures ($y_i$) for each of 10 pumps in a nuclear plant. We also have the times ($t_i$) at which each pump was observed. We want to model the number of failures with a Poisson likelihood, where the expected number of failures $\lambda_i$ differs for each pump. Since the time over which we observed each pump is different, we need to scale each $\lambda_i$ by its observed time $t_i$.
#
# We now specify the hierarchical model - note change of notation from the overview above - that $\theta$ is $\lambda$ (parameter) and $\lambda$ is $\beta$ (hyperparameter) simply because $\lambda$ is traditional for the Poisson distribution parameter.
#
# The likelihood $f$ is
# $$
# \prod_{i=1}^{10} \text{Poisson}(\lambda_i t_i)
# $$
#
# We let the prior $g$ for $\lambda$ be
#
# $$
# \lambda \sim \text{Gamma}(\alpha, \beta)
# $$
# with $\alpha = 1.8$ (the value used in the code below)
#
# and let $\beta$ be a random variable to be estimated from the data
#
# $$
# \beta \sim \text{Gamma}(\gamma, \delta)
# $$
#
# with $\gamma = 0.01$ and $\delta = 1$.
#
# There are 11 unknown parameters (10 $\lambda$s and $\beta$) in this hierarchical model.
#
# The posterior is
# $$
# p(\lambda, \beta \ | \ y, t) = \prod_{i=1}^{10} \text{Poisson}(\lambda_i t_i) \times \text{Gamma}(\alpha, \beta) \times \text{Gamma}(\gamma, \delta)
# $$
#
# with the conditional distributions needed for Gibbs sampling given by
#
# $$
# p(\lambda_i \ | \ \lambda_{-i}, \beta, y, t) = \text{Gamma}(y_i + \alpha, t_i + \beta)
# $$
#
# and
#
# $$
# p(\beta \ | \ \lambda, y, t) = \text{Gamma}(10\alpha + \gamma, \delta + \sum_{i=1}^{10} \lambda_i)
# $$

from numpy.random import gamma as rgamma # rename so we can use gamma for parameter name

# +
def lambda_update(alpha, beta, y, t):
    """Gibbs step: draw each lambda_i from Gamma(y_i + alpha, rate t_i + beta).

    numpy's gamma is parameterized by scale, so we pass 1/rate.
    """
    return rgamma(size=len(y), shape=y+alpha, scale=1.0/(t+beta))

def beta_update(alpha, gamma, delta, lambd, y):
    """Gibbs step: draw beta from Gamma(n*alpha + gamma, rate delta + sum(lambda))."""
    return rgamma(size=1, shape=len(y) * alpha + gamma, scale=1.0/(delta + lambd.sum()))

def gibbs(niter, y, t, alpha, gamma, delta):
    """Run niter Gibbs cycles for the pump-failure hierarchical model.

    y, t are the failure counts and observation times; alpha, gamma, delta
    are the fixed hyperparameters. Returns (betas, lambdas) with the draws
    from every cycle (no burn-in removed).
    """
    # np.float was removed from NumPy (1.24+); the builtin float is equivalent.
    lambdas_ = np.zeros((niter, len(y)), float)
    betas_ = np.zeros(niter, float)

    lambda_ = y/t  # initialize at the empirical failure rates

    for i in range(niter):
        beta_ = beta_update(alpha, gamma, delta, lambda_, y)
        lambda_ = lambda_update(alpha, beta_, y, t)

        betas_[i] = beta_
        lambdas_[i,:] = lambda_

    return betas_, lambdas_
# -

# #### Setup

alpha = 1.8
gamma = 0.01
delta = 1.0
beta0 = 1  # NOTE(review): unused below — presumably a leftover initial value
y = np.array([5, 1, 5, 14, 3, 19, 1, 1, 4, 22], int)
t = np.array([94.32, 15.72, 62.88, 125.76, 5.24, 31.44, 1.05, 1.05, 2.10, 10.48], float)
niter = 1000

betas, lambdas = gibbs(niter, y, t, alpha, gamma, delta)
print('%.3f' % betas.mean())
print('%.3f' % betas.std(ddof=1))
print(lambdas.mean(axis=0))
print(lambdas.std(ddof=1, axis=0))

plt.figure(figsize=(8, 16))
for i in range(len(lambdas.T)):
    plt.subplot(5,2,i+1)
    plt.plot(lambdas[::10, i]);
    # raw string so the backslash in \lambda is not treated as an escape
    plt.title(r'Trace for $\lambda$%d' % i)
plt.tight_layout()
notebook/S14F_MCMC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- BRANCH = 'r1.0.0rc1' # + id="o_0K1lsW1dj9" colab_type="code" colab={} """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell # install NeMo # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp] # + pycharm={"name": "#%%\n"} id="uglDB-pVh__t" colab_type="code" colab={} # If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error: # 'ImportError: IProgress not found. Please update jupyter and ipywidgets.' # ! pip install ipywidgets # ! jupyter nbextension enable --py widgetsnbextension # Please restart the kernel after running this cell # + id="dzqD2WDFOIN-" colab_type="code" colab={} from nemo.utils.exp_manager import exp_manager from nemo.collections import nlp as nemo_nlp import os import wget import torch import pytorch_lightning as pl from omegaconf import OmegaConf # + [markdown] id="daYw_Xll2ZR9" colab_type="text" # # Task Description # Automatic Speech Recognition (ASR) systems typically generate text with no punctuation and capitalization of the words. # This tutorial explains how to implement a model in NeMo that will predict punctuation and capitalization for each word in a sentence to make ASR output more readable and to boost performance of the named entity recognition, machine translation or text-to-speech models. 
# We'll show how to train a model for this task using a pre-trained BERT model.
# For every word in our training dataset we're going to predict:
#
# - punctuation mark that should follow the word and
# - whether the word should be capitalized

# + [markdown] id="ZnuziSwJ1yEB" colab_type="text"
# # Dataset
# This model can work with any dataset as long as it follows the format specified below.
# The training and evaluation data is divided into *2 files: text.txt and labels.txt*.
# Each line of the **text.txt** file contains text sequences, where words are separated with spaces: [WORD] [SPACE] [WORD] [SPACE] [WORD], for example:

# + [markdown] id="TXFORGBv2Jqu" colab_type="text"
#
#
# ```
# when is the next flight to new york
# the next flight is ...
# ...
# ```
#
#

# + [markdown] id="Y7ci55rM2QH8" colab_type="text"
# The **labels.txt** file contains corresponding labels for each word in text.txt, the labels are separated with spaces. Each label in labels.txt file consists of 2 symbols:
#
# - the first symbol of the label indicates what punctuation mark should follow the word (where O means no punctuation needed);
# - the second symbol determines if a word needs to be capitalized or not (where U indicates that the word should be upper cased, and O - no capitalization needed.)
#
# In this tutorial, we are considering only commas, periods, and question marks; the rest of the punctuation marks were removed. To use more punctuation marks, update the dataset to include desired labels, no changes to the model needed.
#
# Each line of the **labels.txt** should follow the format:
# [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).
# For example, labels for the above text.txt file should be:

# + [markdown] id="-94C1-864EW1" colab_type="text"
#
#
# ```
# OU OO OO OO OO OO OU ?U
# OU OO OO OO ...
# ...
# ``` # # # + [markdown] id="VsEmwIPO4L4V" colab_type="text" # The complete list of all possible labels for this task used in this tutorial is: `OO, ,O, .O, ?O, OU, ,U, .U, ?U.` # + [markdown] id="SL58EWkd2ZVb" colab_type="text" # ## Download and preprocess the dataยถ # + [markdown] id="THi6s1Qx2G1k" colab_type="text" # In this notebook we are going to use a subset of English examples from the [Tatoeba collection of sentences](https://tatoeba.org/eng) this script will download and preprocess the Tatoeba data [NeMo/examples/nlp/token_classification/get_tatoeba_data.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/get_tatoeba_data.py). Note, for further experiments with the model, set NUM_SAMPLES=-1 and consider including other datasets to improve model performance. # # + id="n8HZrDmr12_-" colab_type="code" colab={} DATA_DIR = "PATH_TO_DATA" WORK_DIR = "PATH_TO_CHECKPOINTS_AND_LOGS" MODEL_CONFIG = "punctuation_capitalization_config.yaml" # model parameters BATCH_SIZE = 128 MAX_SEQ_LENGTH = 64 LEARNING_RATE = 0.00002 NUM_SAMPLES = 10000 # + id="UOeeeCGqI-9c" colab_type="code" colab={} ## download get_tatoeba_data.py script to download and preprocess the Tatoeba data os.makedirs(WORK_DIR, exist_ok=True) if not os.path.exists(WORK_DIR + '/get_tatoeba_data.py'): print('Downloading get_tatoeba_data.py...') wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/token_classification/data/get_tatoeba_data.py', WORK_DIR) else: print ('get_tatoeba_data.py is already exists') # + id="H0ulD2TL13DR" colab_type="code" colab={} # download and preprocess the data # --clean_dir flag deletes raw Tataoeba data, remove the flag to avoid multiple data downloads if you want to experiment with the dataset size # ! 
python $WORK_DIR/get_tatoeba_data.py --data_dir $DATA_DIR --num_sample $NUM_SAMPLES --clean_dir # + [markdown] id="pctMm2vsPlPT" colab_type="text" # after execution of the above cell, your data folder will contain the following 4 files needed for training (raw Tatoeba data could be present if `--clean_dir` was not used): # - labels_dev.txt # - labels_train.txt # - text_dev.txt # - text_train.txt # # + id="KKwxXXezPvXF" colab_type="code" colab={} # ! ls -l {DATA_DIR} # + id="6UDPgadLN6SG" colab_type="code" colab={} # let's take a look at the data print('Text:') # ! head -n 5 {DATA_DIR}/text_train.txt print('\nLabels:') # ! head -n 5 {DATA_DIR}/labels_train.txt # + [markdown] id="daludzzL2Jba" colab_type="text" # # Model Configuration # + [markdown] id="_whKCxfTMo6Y" colab_type="text" # In the Punctuation and Capitalization Model, we are jointly training two token-level classifiers on top of the pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) model: # - one classifier to predict punctuation and # - the other one - capitalization. # # The model is defined in a config file which declares multiple important sections. 
They are: # - **model**: All arguments that are related to the Model - language model, token classifiers, optimizer and schedulers, dataset and any other related information # # - **trainer**: Any argument to be passed to PyTorch Lightning # + id="T1gA8PsJ13MJ" colab_type="code" colab={} # download the model's configuration file config_dir = WORK_DIR + '/configs/' os.makedirs(config_dir, exist_ok=True) if not os.path.exists(config_dir + MODEL_CONFIG): print('Downloading config file...') wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/token_classification/conf/' + MODEL_CONFIG, config_dir) else: print ('config file is already exists') # + id="mX3KmWMvSUQw" colab_type="code" colab={} # this line will print the entire config of the model config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}' print(config_path) config = OmegaConf.load(config_path) print(OmegaConf.to_yaml(config)) # + [markdown] id="ZCgWzNBkaQLZ" colab_type="text" # # Setting up Data within the config # # Among other things, the config file contains dictionaries called dataset, train_ds and validation_ds. These are configurations used to setup the Dataset and DataLoaders of the corresponding config. # # If both training and evaluation files are located in the same directory, simply specify `model.dataset.data_dir`, like we are going to do below. # However, if your evaluation files are located in a different directory, or you want to use multiple datasets for evaluation, specify paths to the directory(ies) with evaluation file(s) in the following way: # # `model.validation_ds.ds_item=[PATH_TO_DEV1,PATH_TO_DEV2]` (Note no space between the paths and square brackets). # # Also notice that some configs, including `model.dataset.data_dir`, have `???` in place of paths, this values are required to be specified by the user. # # Let's now add the data directory path to the config. 
# + id="LQHCJN-ZaoLp" colab_type="code" colab={} # in this tutorial train and dev data is located in the same folder, so it is enought to add the path of the data directory to our config config.model.dataset.data_dir = DATA_DIR # + [markdown] id="nB96-3sTc3yk" colab_type="text" # # Building the PyTorch Lightning Trainer # # NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem! # # Lets first instantiate a Trainer object! # + id="1tG4FzZ4Ui60" colab_type="code" colab={} print("Trainer config - \n") print(OmegaConf.to_yaml(config.trainer)) # + id="knF6QeQQdMrH" colab_type="code" colab={} # lets modify some trainer configs # checks if we have GPU available and uses it cuda = 1 if torch.cuda.is_available() else 0 config.trainer.gpus = cuda config.trainer.precision = 16 if torch.cuda.is_available() else 32 # For mixed precision training, use precision=16 and amp_level=O1 # Reduces maximum number of epochs to 1 for a quick training config.trainer.max_epochs = 1 # Remove distributed training flags config.trainer.accelerator = None trainer = pl.Trainer(**config.trainer) # + [markdown] id="8IlEMdVxdr6p" colab_type="text" # # Setting up a NeMo Experimentยถ # # NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it! # + id="8uztqGAmdrYt" colab_type="code" colab={} exp_dir = exp_manager(trainer, config.get("exp_manager", None)) # the exp_dir provides a path to the current experiment for easy access exp_dir = str(exp_dir) exp_dir # + [markdown] id="6FI_nQsJo_11" colab_type="text" # # Model Training # + [markdown] id="8tjLhUvL_o7_" colab_type="text" # Before initializing the model, we might want to modify some of the model configs. For example, we might want to modify the pretrained BERT model. 
# + id="Xeuc2i7Y_nP5" colab_type="code" colab={} # complete list of supported BERT-like models nemo_nlp.modules.get_pretrained_lm_models_list() PRETRAINED_BERT_MODEL = "bert-base-uncased" # + id="RK2xglXyAUOO" colab_type="code" colab={} # add the specified above model parameters to the config config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL config.model.train_ds.batch_size = BATCH_SIZE config.model.validation_ds.batch_size = BATCH_SIZE config.model.optim.lr = LEARNING_RATE config.model.train_ds.num_samples = NUM_SAMPLES config.model.validation_ds.num_samples = NUM_SAMPLES # + [markdown] id="gYKcziSsiAAd" colab_type="text" # Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders we'll be prepared for training and evaluation. # Also, the pretrained BERT model will be downloaded, note it can take up to a few minutes depending on the size of the chosen BERT model. # + pycharm={"name": "#%%\n"} id="Yk2hJssviAAe" colab_type="code" colab={} # initialize the model # during this stage, the dataset and data loaders we'll be prepared for training and evaluation config.trainer.max_epochs = 3 model = nemo_nlp.models.PunctuationCapitalizationModel(cfg=config.model, trainer=trainer) # + [markdown] id="kQ592Tx4pzyB" colab_type="text" # ## Monitoring training progress # Optionally, you can create a Tensorboard visualization to monitor training progress. 
# + id="mTJr16_pp0aS" colab_type="code" colab={} try: from google import colab COLAB_ENV = True except (ImportError, ModuleNotFoundError): COLAB_ENV = False # Load the TensorBoard notebook extension if COLAB_ENV: # %load_ext tensorboard # %tensorboard --logdir {exp_dir} else: print("To use tensorboard, please use this notebook in a Google Colab environment.") # + id="hUvnSpyjp0Dh" colab_type="code" colab={} # start the training trainer.fit(model) # + [markdown] id="VPdzJVAgSFaJ" colab_type="text" # # Inference # # To see how the model performs, letโ€™s run inference on a few examples. # + id="DQhsamclRtxJ" colab_type="code" colab={} # define the list of queiries for inference queries = [ 'we bought four shirts and one mug from the nvidia gear store in santa clara', 'what can i do for you today', 'how are you', 'how is the weather in', ] inference_results = model.add_punctuation_capitalization(queries) print() for query, result in zip(queries, inference_results): print(f'Query : {query}') print(f'Combined: {result.strip()}\n') # + [markdown] id="ref1qSonGNhP" colab_type="text" # ## Training Script # # If you have NeMo installed locally, you can also train the model with [nlp/token_classification/punctuation_capitalization_train.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/punctuation_capitalization_train.py). # # To run training script, use: # # `python punctuation_and_capitalization_train.py model.dataset.data_dir=PATH_TO_DATA_DIR` # # Set NUM_SAMPLES=-1 and consider including other datasets to improve the performance of the model. # # # Finetuning model with your data # # When we were training the model from scratch, the datasets were prepared for training during the model initialization. When we are using a pretrained Punctuation and Capitalization model, before training, we need to setup training and evaluation data. 
# + id="4X1BahRlkaNf" colab_type="code" colab={} # let's reload our pretrained model pretrained_model = nemo_nlp.models.PunctuationCapitalizationModel.from_pretrained('Punctuation_Capitalization_with_DistilBERT') # setup train and validation Pytorch DataLoaders pretrained_model.update_data_dir(DATA_DIR) pretrained_model.setup_training_data() pretrained_model.setup_validation_data(data_dir=DATA_DIR) # and now we can create a PyTorch Lightning trainer and call `fit` again # for this tutorial we are setting fast_dev_run to True, and the trainer will run 1 training batch and 1 validation batch # for actual model training, disable the flag fast_dev_run = True trainer = pl.Trainer(gpus=1, fast_dev_run=fast_dev_run) trainer.fit(pretrained_model) # + id="l7A5FeiTl6Zd" colab_type="code" colab={}
tutorials/nlp/Punctuation_and_Capitalization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Artificial Intelligence Nanodegree # # ## Convolutional Neural Networks # # --- # # In this notebook, we train an MLP to classify images from the MNIST database. # # ### 1. Load MNIST Database # + from keras.datasets import mnist # use Keras to import pre-shuffled MNIST database (X_train, y_train), (X_test, y_test) = mnist.load_data() print("The MNIST database has a training set of %d examples." % len(X_train)) print("The MNIST database has a test set of %d examples." % len(X_test)) # - # ### 2. Visualize the First Six Training Images # + import matplotlib.pyplot as plt # %matplotlib inline import matplotlib.cm as cm import numpy as np # plot first six training images fig = plt.figure(figsize=(20,20)) for i in range(6): ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[]) ax.imshow(X_train[i], cmap='gray') ax.set_title(str(y_train[i])) # - # ### 3. View an Image in More Detail # + def visualize_input(img, ax): ax.imshow(img, cmap='gray') width, height = img.shape thresh = img.max()/2.5 for x in range(width): for y in range(height): ax.annotate(str(round(img[x][y],2)), xy=(y,x), horizontalalignment='center', verticalalignment='center', color='white' if img[x][y]<thresh else 'black') fig = plt.figure(figsize = (12,12)) ax = fig.add_subplot(111) visualize_input(X_train[0], ax) # - # ### 4. Rescale the Images by Dividing Every Pixel in Every Image by 255 # rescale [0,255] --> [0,1] X_train = X_train.astype('float32')/255 X_test = X_test.astype('float32')/255 # ### 5. 
Encode Categorical Integer Labels Using a One-Hot Scheme # + from keras.utils import np_utils # print first ten (integer-valued) training labels print('Integer-valued labels:') print(y_train[:10]) # one-hot encode the labels y_train = np_utils.to_categorical(y_train, 10) y_test = np_utils.to_categorical(y_test, 10) # print first ten (one-hot) training labels print('One-hot labels:') print(y_train[:10]) # - # ### 6. Define the Model Architecture # + from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten # define the model - this is the base model # Also lose accuracy when removing dropouts. 97.3700% test accuracy model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) """ # Also lose accuracy when removing dropouts. 97.3700% test accuracy model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(512, activation='relu')) model.add(Dense(512, activation='relu')) model.add(Dense(10, activation='softmax')) """ """ # With just one hidden layer got 98.1200% test accuracy. This was surprisingly good to me, given performance of two # hidden layers. In this case, this was the best performing! This data set must be simple enough that added hidden # layers causes overfitting model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) """ """ # When testing by adding a number of hidden layers, we seem to have also gotten overfitting. Our test accuracy # increased with each epoch but our validation accuracy remained fairly stagnant after the 3rd epoch. 
# Final test accuracy: 97.3600% model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) """ """ # When testing with 256 nodes - the model did not get to a good enough solution quick enough. Accuracy was increasing # throughout, but the validation loss stopped improving at the 5th epoch. # Final test accuracy: 97.8800% model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(256, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(256, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) """ """ # When testing with 1024 nodes - the model showed signs of overfitting. Starting in the 3rd epoch validation loss # stopped improving and validation accuracy began to slightly decline even as training accuracy continued to improve # Final test accuracy: 97.2200% model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(1024, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(1024, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) """ # summarize the model model.summary() # - # ### 7. Compile the Model # compile the model model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) # ### 8. Calculate the Classification Accuracy on the Test Set (Before Training) # + # evaluate test accuracy score = model.evaluate(X_test, y_test, verbose=0) accuracy = 100*score[1] # print test accuracy print('Test accuracy: %.4f%%' % accuracy) # - # ### 9. 
Train the Model # + from keras.callbacks import ModelCheckpoint # train the model # This checkpoiner capability allows us to take snapshots of the performance of the model over time with multiple # with multiple epochs. This helps us pick the right number of epochs and avoid overfitting checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True) # Note in this model.fit we are using validation_split. Validation_split gives us a subset of our training data # for validation. At each step, we add a validation checkpoint which freezes the parameters, including epoch, # and saves the validation scores - helping you determine which number of epochs and other parameters perform best # only then after that we test on the test data. This helps us avoid bias hist = model.fit(X_train, y_train, batch_size=128, epochs=10, validation_split=0.2, callbacks=[checkpointer], verbose=1, shuffle=True) # - # ### 10. Load the Model with the Best Classification Accuracy on the Validation Set # load the weights that yielded the best validation accuracy model.load_weights('mnist.model.best.hdf5') # ### 11. Calculate the Classification Accuracy on the Test Set # + # evaluate test accuracy score = model.evaluate(X_test, y_test, verbose=0) accuracy = 100*score[1] # print test accuracy print('Test accuracy: %.4f%%' % accuracy) # -
mnist-mlp/mnist_mlp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: MindSpore-1.3.0 # language: python # name: mindspore-1.3.0 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # ่‡ชๅฎšไน‰่ฎญ็ปƒ # # [![](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source.png)](https://gitee.com/mindspore/docs/blob/master/tutorials/source_zh_cn/intermediate/custom/train.ipynb)&emsp;[![](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_notebook.png)](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/master/tutorials/zh_cn/mindspore_train.ipynb)&emsp;[![](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_modelarts.png)](https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9taW5kc3BvcmUtd2Vic2l0ZS5vYnMuY24tbm9ydGgtNC5teWh1YXdlaWNsb3VkLmNvbS9ub3RlYm9vay9tYXN0ZXIvdHV0b3JpYWxzL3poX2NuL21pbmRzcG9yZV90cmFpbi5pcHluYg==&imageid=65f636a0-56cf-49df-b941-7d2a07ba8c8c) # # MindSporeๆไพ›ไบ†`model.train`ๆŽฅๅฃๆฅ่ฟ›่กŒๆจกๅž‹่ฎญ็ปƒใ€‚ไฝฟ็”จๆ–นๅผๅฏไปฅๅ‚่€ƒ[ๅˆ็บงๆ•™็จ‹-ๅˆๅญฆๅ…ฅ้—จ](https://www.mindspore.cn/tutorials/zh-CN/master/quick_start.html)ใ€‚ๆญคๅค–๏ผŒ่ฟ˜ๅฏไปฅไฝฟ็”จ`TrainOneStepCell`๏ผŒ่ฏฅๆŽฅๅฃๅฝ“ๅ‰ๆ”ฏๆŒGPUใ€Ascend็Žฏๅขƒใ€‚ # # ไฝœไธบ้ซ˜้˜ถๆŽฅๅฃ๏ผŒ`model.train`ๅฐ่ฃ…ไบ†`TrainOneStepCell`๏ผŒๅฏไปฅ็›ดๆŽฅๅˆฉ็”จ่ฎพๅฎšๅฅฝ็š„็ฝ‘็ปœใ€ๆŸๅคฑๅ‡ฝๆ•ฐไธŽไผ˜ๅŒ–ๅ™จ่ฟ›่กŒ่ฎญ็ปƒใ€‚็”จๆˆทไนŸๅฏไปฅ้€‰ๆ‹ฉไฝฟ็”จ`TrainOneStepCell`ๅฎž็Žฐๆ›ดๅŠ ็ตๆดป็š„่ฎญ็ปƒ๏ผŒไพ‹ๅฆ‚ๆŽงๅˆถ่ฎญ็ปƒๆ•ฐๆฎ้›†ใ€ๅฎž็Žฐๅคš่พ“ๅ…ฅๅคš่พ“ๅ‡บ็ฝ‘็ปœใ€ๆˆ–่‡ชๅฎšไน‰่ฎญ็ปƒ่ฟ‡็จ‹ใ€‚ # # ## TrainOneStepCell่ฏดๆ˜Ž # # `TrainOneStepCell`ไธญๅŒ…ๅซไธ‰็งๅ…ฅๅ‚๏ผš # # - network (Cell)๏ผšๅ‚ไธŽ่ฎญ็ปƒ็š„็ฝ‘็ปœ๏ผŒๅฝ“ๅ‰ไป…ๆŽฅๅ—ๅ•่พ“ๅ‡บ็ฝ‘็ปœใ€‚ # # - optimizer (Cell)๏ผšๆ‰€ไฝฟ็”จ็š„ไผ˜ๅŒ–ๅ™จใ€‚ # # - sens (Number)๏ผšๅๅ‘ไผ ๆ’ญ็š„็ผฉๆ”พๆฏ”ไพ‹ใ€‚ # # 
ไธ‹้ขไฝฟ็”จ`TrainOneStepCell`ๆ›ฟๆข`model.train`๏ผŒๅฎž็Žฐ็ฎ€ๅ•็š„็บฟๆ€ง็ฝ‘็ปœ่ฎญ็ปƒ่ฟ‡็จ‹ใ€‚ # # ## TrainOneStepCellไฝฟ็”จ็คบไพ‹ # # ### ๅˆ›ๅปบๆจกๅž‹ๅนถ็”Ÿๆˆๆ•ฐๆฎ # # > ๆœฌๅฐ่Š‚่ฏฆ็ป†่งฃ้‡Š่ฏดๆ˜Žๅฏๅ‚่€ƒ[ๅˆ็บงๆ•™็จ‹-ๅˆๅญฆๅ…ฅ้—จ](https://www.mindspore.cn/tutorials/zh-CN/master/quick_start.html)ใ€‚ # # ๅฎšไน‰็ฝ‘็ปœLinearNet๏ผŒๅ†…้ƒจๆœ‰ไธคๅฑ‚ๅ…จ่ฟžๆŽฅๅฑ‚็ป„ๆˆ็š„็ฝ‘็ปœ๏ผŒ ๅŒ…ๅซ5ไธชๅ…ฅๅ‚ๅ’Œ1ไธชๅ‡บๅ‚็š„็ฅž็ป็ฝ‘็ปœใ€‚ # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} import numpy as np from mindspore import Tensor import mindspore.nn as nn from mindspore.nn import Cell, Dense import mindspore.ops as ops import mindspore.dataset as ds from mindspore import ParameterTuple class LinearNet(Cell): def __init__(self): super().__init__() self.relu = nn.ReLU() self.dense1 = Dense(5, 32) self.dense2 = Dense(32, 1) def construct(self, x): x = self.dense1(x) x = self.relu(x) x = self.dense2(x) return x # + [markdown] pycharm={"name": "#%% md\n"} # ไบง็”Ÿ่พ“ๅ…ฅๆ•ฐๆฎใ€‚ # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} np.random.seed(4) class DatasetGenerator: def __init__(self): self.data = np.random.randn(5, 5).astype(np.float32) self.label = np.random.randn(5, 1).astype(np.int32) def __getitem__(self, index): return self.data[index], self.label[index] def __len__(self): return len(self.data) # + [markdown] pycharm={"name": "#%% md\n"} # ๆ•ฐๆฎๅค„็†ใ€‚ # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} # ๅฏน่พ“ๅ…ฅๆ•ฐๆฎ่ฟ›่กŒๅค„็† dataset_generator = DatasetGenerator() dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=True) dataset = dataset.batch(32) # ๅฎžไพ‹ๅŒ–็ฝ‘็ปœ net = LinearNet() # + [markdown] pycharm={"name": "#%% md\n"} # ### ๅฎšไน‰TrainOneStepCell # # ๅœจ`TrainOneStepCell`ไธญ๏ผŒๅฏไปฅๅฎž็Žฐๅฏน่ฎญ็ปƒ่ฟ‡็จ‹็š„ไธชๆ€งๅŒ–่ฎพๅฎšใ€‚ # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} class TrainOneStepCell(nn.Cell): def __init__(self, network, optimizer, sens=1.0): 
"""ๅ‚ๆ•ฐๅˆๅง‹ๅŒ–""" super(TrainOneStepCell, self).__init__(auto_prefix=False) self.network = network # ไฝฟ็”จtupleๅŒ…่ฃ…weight self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer # ๅฎšไน‰ๆขฏๅบฆๅ‡ฝๆ•ฐ self.grad = ops.GradOperation(get_by_list=True, sens_param=True) self.sens = sens def construct(self, data, label): """ๆž„ๅปบ่ฎญ็ปƒ่ฟ‡็จ‹""" weights = self.weights loss = self.network(data, label) # ไธบๅๅ‘ไผ ๆ’ญ่ฎพๅฎš็ณปๆ•ฐ sens = ops.Fill()(ops.DType()(loss), ops.Shape()(loss), self.sens) grads = self.grad(self.network, weights)(data, label, sens) return loss, self.optimizer(grads) # + [markdown] pycharm={"name": "#%% md\n"} # ### ็ฝ‘็ปœ่ฎญ็ปƒ # # ๅœจไฝฟ็”จ`TrainOneStepCell`ๆ—ถ๏ผŒ้œ€่ฆๅˆฉ็”จ`WithLossCell`ๆŽฅๅฃๅผ•ๅ…ฅๆŸๅคฑๅ‡ฝๆ•ฐ๏ผŒๅ…ฑๅŒๅฎŒๆˆ่ฎญ็ปƒ่ฟ‡็จ‹ใ€‚ไธ‹้ขๅˆฉ็”จไน‹ๅ‰่ฎพๅฎšๅฅฝ็š„ๅ‚ๆ•ฐ่ฎญ็ปƒLeNet็ฝ‘็ปœ๏ผŒๅนถ่Žทๅ–lossๅ€ผใ€‚ # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} # ่ฎพๅฎšๆŸๅคฑๅ‡ฝๆ•ฐ crit = nn.MSELoss() # ่ฎพๅฎšไผ˜ๅŒ–ๅ™จ opt = nn.Adam(params=net.trainable_params()) # ๅผ•ๅ…ฅๆŸๅคฑๅ‡ฝๆ•ฐ net_with_criterion = nn.WithLossCell(net, crit) # ่‡ชๅฎšไน‰็ฝ‘็ปœ่ฎญ็ปƒ train_net = TrainOneStepCell(net_with_criterion, opt) # ่Žทๅ–่ฎญ็ปƒ่ฟ‡็จ‹ๆ•ฐๆฎ for i in range(300): for d in dataset.create_dict_iterator(): train_net(d["data"], d["label"]) print(net_with_criterion(d["data"], d["label"]))
tutorials/source_zh_cn/intermediate/custom/train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Exploring data using Pandas # # ![Pandas](pandas.jpg) # # So far we explored Python and a few native libraries. Now we will play a little to simplify our life with tools to conduct some **data analysis**. # # **Pandas** is the most popular library (so far) to import and handle data in Python. # # ### Let's import some data from a CSV file # # **When downloading my ipynb, remember to also get the `commits_pr.csv` file** import pandas cpr = pandas.read_csv("commits_pr.csv") # It became this easy to read a CSV file!!! # And more... Look at what my `cpr` is: type(cpr) # Yes! A DataFrame. And it reads really nice, look: cpr.tail() ### We can use head() and tail() functions to see a bit less # Before moving forward... Explaining a little about this dataset. # # This dataset represents a series of Pull Requests made to a subset of projects hosted by GitHub. We worked on this data to capture a specific type of contributor, which we called *casual contributor*. These contributors are known by having a single pull request accepted in a project and not coming back (i.e., they have no long-term commitment to the project). # # In this specific dataset, you will find the following columns: # # * `user`: represent a user in GitHub (anonymized here) # * `project_name`: the name of GitHub project in which the pull request was accepted # * `prog_lang`: programming language of the project # * `pull_req_num`: unique identifier of the pull request # * `num_commits`: number of commits sent within that specific pull request # # # ### Some information about the dataframe # Dimensions/shape of the dataset (lines vs. columns) cpr.shape # What about the column names? cpr.columns # And the datatype per column? 
cpr.dtypes # Some more information: `info()` method prints information including the index dtype and column dtypes, non-null values and memory usage. cpr.info() # What is the type of a specific column??? type(cpr["num_commits"]) # A *serie* is a list, with one dimension, indexed. Each column of a dataframe is a series # Before moving ahead, we can use the types to filter some columns. # # Let's say we want only the columns that store `int`: int_columns = cpr.dtypes[cpr.dtypes == "int64"].index int_columns # Now... I just want to see these columns... **BOOM** cpr[int_columns].head() # ### What about statistical information about my DataFrame? # # `describe()` method provides a summary of numeric values in your dataset: mean, standard deviation, minimum, maximum, 1st quartile, 2nd quartile (median), 3rd quartile of the columns with numeric values. It also counts the number of variables in the dataset (are there missing variables?) cpr.describe() # We can do it for a Series... #cpr["num_commits"].describe() cpr.num_commits.describe() #LOOK at this with a non-numeric column cpr.prog_lang.describe() #either way work. # And we can get specific information per column cpr.num_commits.median() cpr.num_commits.mean() cpr.num_commits.std() # ### --------------#### # ### Playing with the data: sorting # # We can sort our data easily using pandas. 
# # In this example, sorting by Programming Language cpr.sort_values("num_commits", ascending=False).head(10) # We can sort using *many columns*, by using a list (sort will happen from the first item to the last) cpr.sort_values(["prog_lang", "project_name", "num_commits"], ascending=False).head(10) cpr.head(10) # If you want to keep the sorted version, you can use the parameter `inplace`: cpr.sort_values(["prog_lang", "project_name", "num_commits"], ascending=False, inplace=True) cpr.head(10) #cpr = pandas.read_csv("commits_pr.csv") #--> to return to the original order # ### Counting the occurences of variables # # So, to count the occurrences in a column we have to select the column first, and use the method `value_counts()` cpr.prog_lang.value_counts() # But... I just want to know what are the languages out there. Is there a way? # # *Always* cpr["prog_lang"].unique() # ## OK! Let's do something else... Like, selecting columns and filtering data # # Let's say that I just want to look at the columns programming language, project name and number of commits. # # I can select them and create a new DF selected_columns = ["prog_lang", "project_name", "num_commits"] my_subset = cpr[selected_columns] my_subset.head() # What if now I want to filter those projects written in `C` language? only_C = cpr[(cpr["prog_lang"]=='C') & (cpr["num_commits"]==2)] only_C.describe() # We can filter whatever we want: single_commit = cpr[cpr["num_commits"] == 1] # We can create filters in variables, and use whenever we want, as well one_commit = cpr["num_commits"]==1 language_C = cpr["prog_lang"]=="C" multi_commit = cpr["num_commits"]>1 cpr[one_commit & language_C].head(10) # And... we can use OR (|) and AND(&) to play! cpr[one_commit & language_C].head(10) # #### What if we want the pull requests with more than one commit for the projects written in "C" and those with 2 commits for the projects written in "typescript"??? # # Let's do it! 
# # + ##### two_commits = cpr["num_commits"]==2 language_typescript = cpr["prog_lang"]=="typescript" cpr[(one_commit & language_C) | (two_commits & language_typescript)] # - # What if I wanted to convert number of commits into a feature by creating bands of values that we define: # * 1 commit = group 1 # * 2 - 5 commits = group 2 # * 6 - 20 commits = group 3 # * more than 20 = group 4 cpr.loc[cpr["num_commits"]==1, "group_commit"]=1 cpr.loc[(cpr["num_commits"]>1) & (cpr["num_commits"]<=5), "group_commit"]=2 cpr.loc[(cpr["num_commits"]>5) & (cpr["num_commits"]<=20), "group_commit"]=3 cpr.loc[cpr["num_commits"]>20, "group_commit"]=4 cpr.group_commit = cpr.group_commit.astype('int32') cpr.head() # ### I challenge you: # # What if: I wanted to know how the average of num_commits for those pull requests in group_commit 4??? # + # - # ### I challenge you (2): # # Can you do that average per language? # cpr[cpr["prog_lang"] == "typescript"].quantile(0.75) # # # # # # # # # # # # # # ### Some more... # Let's work with a new dataset... # # This is not only related to casual contributors, but all contributors commits_complete = pandas.read_csv('commit_complete.csv') commits_complete.sort_values('num_commits', ascending=False).head(10) commits_complete['num_commits'].corr(commits_complete['additions']) commits_complete.corr() commits_complete.corr(method='pearson').style.background_gradient(cmap='coolwarm') # ### Can we play with graphics? 
# **Plot types:** # - 'line' : line plot (default) # - 'bar' : vertical bar plot # - 'barh' : horizontal bar plot # - 'hist' : histogram # - 'box' : boxplot # - 'kde' : Kernel Density Estimation plot # - 'density' : same as 'kde' # - 'area' : area plot # - 'pie' : pie plot # - 'scatter' : scatter plot # - 'hexbin' : hexbin plot # **Histogram** cpr.num_commits.plot.hist(bins=200) cpr[cpr["prog_lang"]=="C"].num_commits.plot.hist(bins=20, color="red", alpha=0.5) cpr[cpr["prog_lang"]=="java"].num_commits.plot.hist(bins=20, alpha=0.5).legend(["C", "Java"]) cpr['prog_lang'].value_counts().plot.bar() cpr[cpr["prog_lang"]== "C"].project_name.value_counts().plot.bar() commits_complete.plot.scatter(x = "files_changed", y = "num_commits") # + lang_c = cpr.prog_lang=="C" lang_java = cpr.prog_lang=="java" lang_php = cpr.prog_lang=="php" cpr[(lang_c) | (lang_java) | (lang_php)].boxplot(by='prog_lang', column=['num_commits']) # + plot = cpr[(lang_c) | (lang_java) | (lang_php)].boxplot(by='prog_lang', column=['num_commits'], showfliers=False, grid=False) plot.set_xlabel("Language") plot.set_ylabel("# of commits") plot.set_title("") # - # **Just to show...** # # that it is possible to do statistical analysis # + from scipy import stats stats.mannwhitneyu(cpr[(lang_c)].num_commits, cpr[(lang_java)].num_commits) # - # ### Exporting my_subset.to_dict() cpr.to_csv('test.csv', sep=',') # ## Go for the HW
notebooks/PandasKickoff.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Matrix Equivalents of Hypercomplex Numbers # # Two numbers from a hypercomplex algebra $z, w \in \mathbb{H}^N$ can be multiplied such that the result $m = zw, m \in \mathbb{H}^N$ is an element of the same algebra. The space is closed under addition as well, and so hypercomplex algebras are also vector spaces. # # The map between an elementwise product of two hypercomplex vector $\hat{z}, \hat{w} \in \mathbb{H}^{M \times N}$ can be viewed as as a linear transformation on the elements of the right factor of the product $\hat{m} = \hat{z} \hat{w} = A \hat{w}$. # # Here the matrix $A$ depends on the entries of the geometric vector $\hat{z}$. In this notebook, we explore a systematic way to generate these transformation matrices. # # $\hat{z}, \hat{w} \in \mathbb{H}^{M \times N}$ # # $\hat{m} \in \mathbb{H}^{M \times N}, \hat{z} \hat{w} = \hat{m} $ # # frame as a linear transformation with matrix $A_{[\hat{z}]}$ conitioned on $\hat{z}$ # # $ A_{[\hat{z}]} \hat{w} = \hat{m} $ # # estimate $A_{[\hat{z}]}$ given $\hat{w}, \hat{m}$ with a static $\hat{z}$ by randomly sampling $\hat{w}$ and computing $\hat{m}$ # # $ A_{[\hat{z}]}[t] \in \mathbb{R}^{N \times N}, A_{[\hat{z}]}[t + 1] = (\hat{y}[t] \hat{x}[t]^{T} + \gamma A_{[\hat{z}]}[t])(\hat{x}[t] \hat{x}[t]^{T} + \gamma I)^{-1} $ # # reduce equation in terms of $A_{[\hat{z}]}[0]$ # # $ A_{[\hat{z}]}[t] = \gamma^{t} A_{[\hat{z}]}[0] (\hat{x}[t] \hat{x}[t]^{T} + \gamma I)^{-t} + \sum_{i = 1}^{t} \gamma^{i - 1} \hat{y}[i] \hat{x}[i]^{T} \prod_{j = i}^{N - i + 1} (\hat{x}[j] \hat{x}[j]^{T} + \gamma I)^{-1} $ # # assume $ |\gamma| < 1 $ and take limit $ t \rightarrow \infty $ # # $ A_{[\hat{z}]}[t \rightarrow \infty] = \sum_{i = 1}^{\infty} \gamma^{i - 1} \hat{y}[i] \hat{x}[i]^{T} \prod_{j = i}^{N - i + 1} 
(\hat{x}[j] \hat{x}[j]^{T} + \gamma I)^{-1} $ # # does not depend on the initial choice of $A_{[\hat{z}]}[0]$ and converges for large $t$ import numpy as np import matplotlib.pyplot as plt def hypercomplex_conjugate(a): c = np.ones(a.shape) c[..., 1:] *= -1 return c * a def hypercomplex_multiply(a, b): if a.shape[-1] == 1: return a * b else: def cayley_dickson(p, q, r, s): return np.concatenate([ (hypercomplex_multiply( p, r) - hypercomplex_multiply( hypercomplex_conjugate(s), q)), (hypercomplex_multiply( s, p) + hypercomplex_multiply( q, hypercomplex_conjugate(r))), ], axis=(len(a.shape) - 1)) return cayley_dickson( a[..., :(a.shape[-1] // 2)], a[..., (a.shape[-1] // 2):], b[..., :(a.shape[-1] // 2)], b[..., (a.shape[-1] // 2):]) def hypercomplex_conjugate_gradient(a, da): return hypercomplex_conjugate(da) def hypercomplex_multiply_gradient(a, b, da, db): return (hypercomplex_multiply(da, b), hypercomplex_multiply(a, db)) def hypercomplex_basis_gradient(a): basis = np.zeros((a.shape[-1], a.shape[-1])) np.fill_diagonal(basis, 1) basis = basis.reshape((1, a.shape[-1], a.shape[-1])) return basis class HCX(object): def random(*kdims, hcx_size=1, mean=0, std=1): shape = [ kdims[i] if i < len(kdims) else 2**hcx_size for i in range(len(kdims) + 1)] return np.random.normal(mean, std, shape) def basis(x, dx=0, dir=1): if dir > 0: return x else: return hypercomplex_basis_gradient(x) def conj(x, dx=0, dir=1): if dir > 0: return hypercomplex_conjugate(x) else: return hypercomplex_conjugate_gradient(x, dx) def add(x, y, dx=0, dy=0, dir=1): if dir > 0: return x + y else: return dx, dy def sub(x, y, dx=0, dy=0, dir=1): if dir > 0: return x - y else: return dx, -dy def mul(x, y, dx=0, dy=0, dir=1): if dir > 0: return hypercomplex_multiply(x, y) else: return hypercomplex_multiply_gradient(x, y, dx, dy) def norm(x, dx=0, dir=1): if dir > 0: return np.sum( hypercomplex_multiply(HCX.conj(x), x), axis=(len(x.shape) - 1))**0.5 else: c = 0.5 / np.sum( hypercomplex_multiply(HCX.conj(x), 
x), axis=(x.shape[-1] - 1))**0.5 g = hypercomplex_multiply_gradient( HCX.conj(x), x, HCX.conj(x, dx, dir=-1), dx) r = HCX.conj(g[0]) + g[1] return c * r def inv(x, dx=0, dir=1): if dir > 0: return HCX.conj(x) / np.reshape(HCX.norm(x)**2, (-1, 1)) else: return (HCX.conj(x, dx, dir=-1) / HCX.norm(x)**2 - 2 * HCX.conj(x) / HCX.norm(x)**3 * HCX.norm(x, dx, dir=-1)) # + M = 2 # The hypercomplex size N = 1 # The vector elements T = 1e-20 # A convergenece threshold L = 0.05 # A hyperparameter to tune V = 100 # The number of validation steps D = 100 # The number of iterations before validating def validate(_a, _A): _loss = 0.0 for i in range(V): _b = HCX.random(N, 1, hcx_size=M).transpose(0, 2, 1) _c = HCX.mul( _a.transpose(0, 2, 1), _b.transpose(0, 2, 1)).transpose(0, 2, 1) _loss += np.sum(_c - np.matmul(_A, _b))**2 return _loss / V a = HCX.random(N, 1, hcx_size=M).transpose(0, 2, 1) A = np.zeros((N, 2**M, 2**M)) I = np.reshape(np.eye(2**M), (1, 2**M, 2**M)) data_points = [] loss = 1.0 iterations = 0 while loss > T: iterations += 1 b = HCX.random(N, 1, hcx_size=M).transpose(0, 2, 1) c = HCX.mul( a.transpose(0, 2, 1), b.transpose(0, 2, 1)).transpose(0, 2, 1) data_points += [A] A = np.matmul( (c * b.transpose(0, 2, 1)) + L * A, np.linalg.inv( (b * b.transpose(0, 2, 1)) + L * I)) if iterations % D == 0: loss = validate(a, A) print("Estimation Loss:", loss) print("Iterations until Convergence:", iterations) print("a:", a) print("A:", A) data_points = np.array(data_points).reshape(iterations, -1) for i in range(1, N * 2**(2*M)): plt.title("Parameter Space of Estimation") plt.xlabel("Parameter" + str([0, 0])) plt.ylabel("Parameter" + str([i % 2**M, i // 2**M])) plt.plot(data_points[:, 0], data_points[:, i], "ro-") plt.show() # - print(np.linalg.det(A)) # This matrix is full rank eigen_values, eigen_vectors = np.linalg.eig(A) print(eigen_values) # The eigenvalues have a particular structure print(eigen_vectors) # The eigenvectors have a particular structure # ## Developing a 
Neural Network Update Rule class TrabuccoNet: def __init__( self, *layers, f=(lambda x: x), f_inverse=(lambda x: x), use_b=False, alpha=0.01): self.f = f self.f_inverse = f_inverse self.weights = [] self.biases = [] self.use_b = use_b self.alpha = alpha for a, b in zip( layers[:-1], layers[1:]): layer = np.zeros((b, a)) np.fill_diagonal(layer, 1) self.weights += [ layer] self.biases += [ np.zeros((b, 1))] def forward( self, x): self.activations = [x] for w, b in zip( self.weights, self.biases): self.activations += [ self.f(np.matmul( w, self.activations[-1]) + b)] return self.activations[-1] def reverse( self, y): self.estimations = [y] for w, b, a in zip( reversed(self.weights), reversed(self.biases), reversed(self.activations[:-1])): self.estimations = [ np.matmul( np.linalg.inv( np.matmul(w.T, w) + self.alpha * np.eye(w.shape[1])), np.matmul( w.T, (self.f_inverse( self.estimations[0]) - b)) + self.alpha * a)] + self.estimations return self.estimations[0] def update( self, layer=-1): i = layer % len(self.weights) self.weights[i] = np.matmul( (np.matmul( (self.f_inverse(self.estimations[i + 1]) - self.biases[i]), self.activations[i].T) + self.alpha * self.weights[i]), np.linalg.inv( np.matmul( self.activations[i], self.activations[i].T) + self.alpha * np.eye(self.activations[i].shape[0]))) if self.use_b: self.biases[i] = self.f_inverse( self.estimations[i + 1]) - np.matmul( self.weights[i], self.activations[i]) # + M = 2 # The hypercomplex size T = 1e-6 # A convergenece threshold V = 100 # The number of validation steps D = 10 # The number of iterations before validating def validate(_a, _net): _loss = 0.0 for i in range(V): _b = HCX.random(1, hcx_size=M).T _c = HCX.mul( _a.T, _b.T).T _loss += np.sum(_c - _net.forward(_b))**2 return _loss / V def train(): net = TrabuccoNet( 2**M, 2**M, alpha=0.5, use_b=True) a = HCX.random(1, hcx_size=M).T loss = [] iterations = 0 while len(loss) == 0 or loss[-1] > T: iterations += 1 b = HCX.random(1, hcx_size=M).T c = HCX.mul( 
a.T, b.T).T net.forward(b) net.reverse(c) net.update(layer=(iterations // D)) if iterations % D == 0: loss += [validate(a, net)] return loss losses = [] max_length = 0 for i in range(100): losses += [train()] max_length = max(max_length, len(losses[-1])) for i in range(len(losses)): losses[i] += [0 for _ in range(max_length - len(losses[i]))] losses = np.array(losses) mean_loss, std_loss = np.mean(losses, axis=0), np.std(losses, axis=0) plt.title("Estimation Loss with Stochastic Training") plt.xlabel("Training Iteration") plt.ylabel("Mean Squared Error") plt.errorbar( (D * np.arange(len(mean_loss))), mean_loss, yerr=std_loss) plt.grid(True) plt.show() # + M = 2 Q = 0.1 S = 0.01 P = 100 T = 0.1 V = 100 D = 100 Z = 1000 dataset_center = np.random.normal(0, 1, (1, M)) cluster_one_center = dataset_center + np.random.normal(0, Q, (1, M)) cluster_two_center = dataset_center + np.random.normal(0, Q, (1, M)) cluster_one = (np.tile(cluster_one_center, (P, 1)) + np.random.normal(0, S, (P, M))).reshape((P, M, 1)) cluster_two = (np.tile(cluster_two_center, (P, 1)) + np.random.normal(0, S, (P, M))).reshape((P, M, 1)) collected_data = np.concatenate([ cluster_one, cluster_two], axis=0) collected_labels = np.concatenate([ np.tile(np.array([[[1.0, 0.0]]]), (P, 1, 1)), np.tile(np.array([[[0.0, 1.0]]]), (P, 1, 1))], axis=0) plt.title("Binary Clusters") plt.xlabel("Feature 0") plt.ylabel("Feature 1") plt.plot( collected_data[:P, 0, 0], collected_data[:P, 1, 0], "ro") plt.plot( collected_data[P:, 0, 0], collected_data[P:, 1, 0], "bo") plt.grid(True) plt.show() def validate(_net): _loss = 0.0 for i in range(2 * P): _loss += np.sum(_net.forward( collected_labels[i, :, 0, np.newaxis] - collected_data[i, :, 0, np.newaxis]))**2 return _loss / 2 / P def train(): net = TrabuccoNet( 2, 5, 1, alpha=0.05, use_b=True) loss = [] iterations = 0 while (len(loss) == 0 or loss[-1] > T) and iterations < Z: iterations += 1 for i in range(2 * P): net.forward(collected_data[i, :, 0, np.newaxis]) 
net.reverse(collected_labels[i, :, 0, np.newaxis]) net.update(layer=(iterations // D)) if iterations % D == 0: loss += [validate(net)] return loss losses = [] max_length = 0 for i in range(1): losses += [train()] max_length = max(max_length, len(losses[-1])) for i in range(len(losses)): losses[i] += [0 for _ in range(max_length - len(losses[i]))] losses = np.array(losses) mean_loss, std_loss = np.mean(losses, axis=0), np.std(losses, axis=0) plt.title("Estimation Loss with Stochastic Training") plt.xlabel("Training Iteration") plt.ylabel("Mean Squared Error") plt.errorbar( (D * np.arange(len(mean_loss))), mean_loss, yerr=std_loss) plt.grid(True) plt.show() # -
matrix_equivalents.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Tutorial : https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/ # it is a binary classification problem (onset of diabetes as 1 or not as 0). # # 1. Load Data. # 2. Define Keras Model. # 3. Compile Keras Model. # 4. Fit Keras Model. # 5. Evaluate Keras Model. # 6. Tie It All Together. # 7. Make Predictions # first neural network with keras tutorial from numpy import loadtxt from keras.models import Sequential from keras.layers import Dense # load the dataset dataset = loadtxt('pima-indians-diabetes.csv', delimiter=',') # split into input (X) and output (y) variables X = dataset[:,0:8] y = dataset[:,8] # Input Variables (X): # # 1. Number of times pregnant # 2. Plasma glucose concentration a 2 hours in an oral glucose tolerance test # 3. Diastolic blood pressure (mm Hg) # 4. Triceps skin fold thickness (mm) # 5. 2-Hour serum insulin (mu U/ml) # 6. Body mass index (weight in kg/(height in m)^2) # 7. Diabetes pedigree function # 8. Age (years) # # Output Variables (y): # # 1. Class variable (0 or 1) # The first thing to get right is to ensure the input layer has the right number of **input features**. This can be specified when creating the first layer with the input_dim argument and setting it to 8 for the 8 input variables. 
# +
# Define the keras model: a small fully-connected network for the 8-feature
# binary-classification problem (layers of 12 -> 8 -> 1 units).
# ReLU hidden activations; the sigmoid output yields a probability in [0, 1].
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# -

# Compile the keras model: binary cross-entropy matches the sigmoid output,
# Adam is an efficient general-purpose optimizer, and we track accuracy.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Fit the keras model on the dataset (150 passes over the data,
# mini-batches of 10 rows).
model.fit(X, y, epochs=150, batch_size=10)

# Evaluate the keras model.
# NOTE(review): this evaluates on the training data itself — the tutorial
# uses no held-out split, so this accuracy is optimistic.
_, accuracy = model.evaluate(X, y)
print('Accuracy: %.2f' % (accuracy*100))

# ### Make Predictions

# make probability predictions with the model
predictions = model.predict(X)
# round the probabilities to hard 0/1 class labels
rounded = [round(x[0]) for x in predictions]
# FIX: Sequential.predict_classes() was removed from Keras (TensorFlow 2.6+).
# For a single sigmoid output, the documented replacement is thresholding the
# predicted probabilities at 0.5.
predictions = (model.predict(X) > 0.5).astype(int)
# summarize the first 5 cases
for i in range(5):
    print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i], y[i]))

# The first rows of pima-indians-diabetes.csv, kept for reference.
# FIX: these were pasted as bare expressions, which Python silently evaluated
# as tuples at import time; they are data, not code, so they live in comments:
# 6,148,72,35,0,33.6,0.627,50,1
# 1,85,66,29,0,26.6,0.351,31,0
# 8,183,64,0,0,23.3,0.672,32,1
# 1,89,66,23,94,28.1,0.167,21,0
# 0,137,40,35,168,43.1,2.288,33,1
keras tutorials/keras_first_network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Topic Model Trends &mdash; Demo # # In this notebook we visualize a previously generated topic model. We use data exported from a Constellate search on the term `hyperparameter`. # # Set Up # ## Imports import pandas as pd import numpy as np from lib import tapi # ## Configuration tapi.list_dbs() data_prefix = 'jstor_hyperparameter_demo' db = tapi.Edition(data_prefix) # ## Import Topic Data # # We import our previously generated model. db.get_tables() db.TOPICS_NMF.topwords.to_list() # # Show Graphs # ## NMF topic_glosses = ['Bayesian models', 'French', 'MCMC', 'priors', 'economics', 'random effects', 'variable selection', 'empirical Bayes', 'env biology', 'genetics'] db.TOPICS_NMF['gloss'] = topic_glosses db.THETA_NMF['label'] = db.LABELS['doc_year'] db.TOPICS_NMF.style.bar() TRENDS_NMF = db.THETA_NMF.groupby('label').mean() TRENDS_NMF.columns = db.TOPICS_NMF.gloss TRENDS_NMF.style.background_gradient(axis=0) import matplotlib.pyplot as plt def show_trend(topic_id, kind='bar', h=3, w=12): gloss = db.TOPICS_NMF.loc[topic_id, 'gloss'] TRENDS_NMF[gloss].plot(kind=kind, figsize=(w, h), rot=45, title= gloss.upper() + '\n' + db.TOPICS_NMF.loc[topic_id].topwords); show_trend(0,'line') show_trend(2,'line') show_trend(3,'line') show_trend(5,'line') show_trend(6,'line') show_trend(7,'line') # + # TRENDS_NMF.plot(figsize=(12, 5)); # - import plotly_express as px px.line(TRENDS_NMF) px.bar(TRENDS_NMF) # # LDA db2 = tapi.Edition('jstor_hyperparameter') corpus = db2.get_corpus() X = corpus.doc_year.value_counts().to_frame('n').sort_index() X.index.name = 'doc_year' X.plot.bar(figsize=(15, 5), legend=False, rot=45); # # Why does the French topic trend in 1987? print('\n\n'.join(corpus[corpus.doc_year == 1987].doc_content.to_list()))
03-TopicTrendsDemo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import Libraries import numpy as np import pandas as pd import requests import math import xlsxwriter from scipy import stats from secrets import IEX_CLOUD_API_TOKEN # Import List of Stocks stocks = pd.read_csv('sp_500_stocks.csv') # Split List into groups for batch API calls # + def split(list, n): for i in range(0, len(list), n): yield list[i : i+n] groups = list(split(stocks['Ticker'], 100)) stock_symbols = [] for i in range(0, len(groups)): stock_symbols.append(','.join(groups[i])) # - # Create dataframe to store results # + data_columns = [ 'Ticker', 'Price', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'Momentum Score' ] momentum_dataframe = pd.DataFrame(columns = data_columns) # - # Store API data in dataframe, using placeholder "none" for uncalcalculated data # + for batch in stock_symbols: batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={batch}&token={IEX_CLOUD_API_TOKEN}' stock_data = requests.get(batch_api_call_url).json() for stock in batch.split(','): momentum_dataframe = momentum_dataframe.append( pd.Series([stock, stock_data[stock]['quote']['latestPrice'], 'none', stock_data[stock]['stats']['year1ChangePercent'], 'none', stock_data[stock]['stats']['month6ChangePercent'], 'none', stock_data[stock]['stats']['month3ChangePercent'], 'none', stock_data[stock]['stats']['month1ChangePercent'], 'none', 'none' ], index = data_columns), ignore_index = True) momentum_dataframe # - # Calculating Momentum Percentiles # + intervals = [ 'One-Year', 'Six-Month', 'Three-Month', 
'One-Month' ] for row in momentum_dataframe.index: for time_period in intervals: price_returns = f'{time_period} Price Return' if momentum_dataframe.loc[row, price_returns] == None: momentum_dataframe.loc[row, price_returns] = 0.0 for row in momentum_dataframe.index: for time_period in intervals: price_returns = f'{time_period} Price Return' percentile_col = f'{time_period} Return Percentile' momentum_dataframe.loc[row, percentile_col] = stats.percentileofscore(momentum_dataframe[price_returns], momentum_dataframe.loc[row, price_returns]) / 100 momentum_dataframe # - # Calculating Momentum Score as Mean of Return Percentiles # + from statistics import mean for row in momentum_dataframe.index: momentum_percentiles = [] for time_period in intervals: momentum_percentiles.append(momentum_dataframe.loc[row, f'{time_period} Return Percentile']) momentum_dataframe.loc[row, 'Momentum Score'] = mean(momentum_percentiles) momentum_dataframe # - # Sort stock tickers by Momentum Score and only keep the top 50 tickers sorted_dataframe = momentum_dataframe.sort_values(by = 'Momentum Score', ascending = False) sorted_dataframe = sorted_dataframe[:51] sorted_dataframe # Calculate number of shares to buy, $50,000 portfolio size will be used as reference. # Assuming equal distribution of entire portfolio across the 50 stocks. # + position_size = float(50000) / len(sorted_dataframe.index) for index, row in sorted_dataframe.iterrows(): sorted_dataframe.loc[index, 'Number of Shares to Buy'] = math.floor(position_size / sorted_dataframe['Price'][index]) sorted_dataframe # -
Momentum Strategy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.10.4 64-bit # language: python # name: python3 # --- from transformers import pipeline import pickle import os import pandas as pd ner = pipeline("ner",aggregation_strategy='simple') with open("/workspaces/Data-Science-Journey/Natural-Language-Processing/Transformers/Named Entity Recognition/input/ner_train.pkl") as f: train = pickle.load(f)
Natural-Language-Processing/Transformers/Named Entity Recognition/notebooks/ner.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 # language: python # name: python3 # --- # + import pandas as pd import os import pandas as pd import requests import sys import xarray as xr from datetime import datetime import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter from sklearn.metrics import mean_squared_error from math import sqrt # import dates as date begindate = '20190917' # data is avail. beginning 20180917 enddate = '20190927' # comid_stn = pd.read_csv('D:/Sujana/Project/csv/comid_stationname.csv') comid_stn = pd.read_csv('comid_stationname.csv') comid_all = comid_stn['COMID'] index = comid_stn['Index'] gauge_name_all = comid_stn['station'] rmse_df=pd.DataFrame() # for i in index: # comid = comid_all[i] # gauge_name = gauge_name_all[i] # # comid = 1479611 # # gauge_name = 'JAIT2' # nwm_discharge = pd.read_csv('D:/Sujana/Project/all_in_one/NWM_discharge_with_stage/short_range/'+str(comid)+'_with_stage.csv') # dd6_data = pd.read_csv('D:/Sujana/Project/all_in_one/DD6_stage_with_discharge/'+gauge_name+' with_discharge.csv') # nwm_discharge['Time']=pd.to_datetime(nwm_discharge['Time'],utc=True) # dd6_data['timestamp']=pd.to_datetime(dd6_data['timestamp'],utc=True) # # nwm_discharge = nwm_discharge.set_index('Time') # dd6_data = dd6_data.set_index('timestamp') # nwm_discharge = nwm_discharge.set_index('Time') # - def get_location(ref_data,req_data): # Inputs: # ref_data: "dataframe" from which we need to extact the matching index # req_data: "datetime" for which the index close to ref_data's datetime is required # Output # Int if unique, and slice if monotonic index = ref_data.index.get_loc(req_data,method='nearest') return index # + for i in index: comid = comid_all[i] gauge_name = gauge_name_all[i] nwm_discharge = 
pd.read_csv('D:/Sujana/Project/all_in_one/NWM_discharge_with_stage/short_range/'+str(comid)+'_with_stage.csv') dd6_data = pd.read_csv('D:/Sujana/Project/all_in_one/DD6_stage_with_discharge/'+gauge_name+' with_discharge.csv') nwm_discharge['Time']=pd.to_datetime(nwm_discharge['Time'],utc=True) dd6_data['timestamp']=pd.to_datetime(dd6_data['timestamp'],utc=True) # nwm_discharge = nwm_discharge.set_index('Time') dd6_data = dd6_data.set_index('timestamp') nwm_discharge = nwm_discharge.set_index('Time') # getting the list of indexes in nwm data that is closet to each dd6_data indx_list = [get_location(nwm_discharge,x) for x in dd6_data.index] # assigning the filtered indexes to the nwm_discharge nwm_discharge_filtered = nwm_discharge.iloc[indx_list] # Creating new dataframe for error calculation new_df = pd.DataFrame() new_df['Date'] = dd6_data.index column1 = dd6_data.reset_index()['dd6_stage_m'] column2 = nwm_discharge_filtered.reset_index()['interp_nwm_stage'] new_df['Observed - Model'] = (column1-column2) new_df.set_index('Date') # Calculating the root mean squared error rmse_df['comid'] = rmse_df(comid) rmse_df['gauge name'] = gauge_name rmse_df['rmse'] = sqrt(mean_squared_error(column1, column2)) # rmse.append(rms) # Plotting the error plt.figure(facecolor='white') plt.rc('font', size=14) fig, ax = plt.subplots(figsize=(10, 6)) # time = discharge_difference['Time'] # print (time) ax.plot(new_df['Date'], new_df['Observed - Model'],color='tab:blue', label='Observed-Model') ax.set(xlabel='Date', ylabel='Error [meters]', title=f'COMID {comid} {gauge_name} from ' \ f'{pd.to_datetime(begindate).strftime("%b %d %Y")} to ' \ f'{pd.to_datetime(enddate).strftime("%b %d %Y")} for short range') date_form = DateFormatter("%b %d") ax.xaxis.set_major_formatter(date_form) # ax.xaxis.set_major_locator(mdates.WeekdayLocator()) # ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=1, interval=1, tz=None) ax.legend() ax.grid(True) 
plt.savefig('D:/Sujana/Project/all_in_one/error/plot/'+str (comid)+' Station '+gauge_name+'_Stage_graph_shortrange.jpg') # - def get_location(ref_data,req_data): # Inputs: # ref_data: "dataframe" from which we need to extact the matching index # req_data: "datetime" for which the index close to ref_data's datetime is required # Output # Int if unique, and slice if monotonic index = ref_data.index.get_loc(req_data,method='nearest') return index # getting the list of indexes in nwm data that is closet to each dd6_data indx_list = [get_location(nwm_discharge,x) for x in dd6_data.index] # assigning the filtered indexes to the nwm_discharge nwm_discharge_filtered = nwm_discharge.iloc[indx_list] # difference dataframe new_df = pd.DataFrame() new_df['Date'] = dd6_data.index column1 = dd6_data.reset_index()['dd6_stage_m'] column2 = nwm_discharge_filtered.reset_index()['interp_nwm_stage'] new_df['Observed - Model'] = column1-column2 new_df new_df.set_index('Date') # + # Plotting error-time series plt.figure(facecolor='white') plt.rc('font', size=14) fig, ax = plt.subplots(figsize=(10, 6)) # time = discharge_difference['Time'] # print (time) ax.plot(new_df['Date'], new_df['Observed - Model'],color='tab:blue', label='Observed-Model') ax.set(xlabel='Date', ylabel='Error [meters]', title=f'COMID {comid} {gauge_name} from ' \ f'{pd.to_datetime(begindate).strftime("%b %d %Y")} to ' \ f'{pd.to_datetime(enddate).strftime("%b %d %Y")} for short range') date_form = DateFormatter("%b %d") ax.xaxis.set_major_formatter(date_form) # ax.xaxis.set_major_locator(mdates.WeekdayLocator()) # ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=1, interval=1, tz=None) ax.legend() ax.grid(True) plt.savefig('D:/Sujana/Project/all_in_one/error/plot/'+str (comid)+' Station '+gauge_name+'_Stage_graph_AnalysisAssim.jpg') # - from sklearn.metrics import mean_squared_error mean = mean_squared_error(column1,column2) # + new_df['nwm_time'] = pd.to_datetime(nwm_discharge_filtered.index) 
new_df['time_difference']=new_df['Date']-new_df['nwm_time'] # + new_df['nwm_time'] = nwm_discharge_filtered.index new_df['nwm_time'] = new_df['nwm_time'] # type(new_df['nwm_time']) # - date_1 = new_df['nwm_time'] # + from datetime import datetime date_1 = new_df['nwm_time'] date_2 = new_df['Date'] date_format_str = '%d/%m/%Y %H:%M:%S.%f' end = datetime.strptime(date_2, date_format_str) start = datetime.strptime(date_1, date_format_str) # Get interval between two timstamps as timedelta object diff = end - start # Get interval between two timstamps in hours diff_in_hours = diff.total_seconds() / 3600 print('Difference between two datetimes in hours:') print(diff_in_hours) # - new_df['date_dd6']=dd6_data.index dd6_data.reset_index()['dd6_stage_m'] dd6_data.index dd6_data.iloc[:,:] dd6_data.head(10) nwm_discharge_filtered.head(10) dd6_data[['dd6_stage_m']] dd6_data.head(10) num_discharge_filtered.head(10) # getting the list of indexes in nwm data that is closet to each dd6_data indx_list = [get_location(nwm_discharge,x) for x in dd6_data.index] # assigning the filtered indexes to the nwm_discharge num_discharge_filtered = nwm_discharge.iloc[indx_list] num_discharge_filtered.to_csv(r'D:/Sujana/Project/all_in_one/error/'+str(comid)+'_'+gauge_name+'.csv') num_discharge_combined = pd.read_csv('D:/Sujana/Project/all_in_one/error/'+str(comid)+'_'+gauge_name+'.csv') num_discharge_combined['dd6_stage_m']=dd6_data['dd6_stage_m'] num_discharge_combined['interp_dd6_discharge'] = dd6_data['interp_dd6_discharge'] # + b['new'] = dd6_data['value'] b.head(10) # - # #### Rough c = get_location(nwm_discharge,dd6_data.index[10]) print(c) print(nwm_discharge.iloc[c]) # nwm_discharge['Time'].head() dd6_data.index # + # dd6_data.index.get_loc(nwm_discharge['Time'], method='nearest') dd6_data.index.get_loc(nwm_discharge['Time'], method='nearest') # - df import numpy as np import matplotlib.pyplot as plt import pandas as pd m = np.sqrt(1/2) n = np.sqrt(2) b = np.matrix([[1,0,0,0,0,m], 
[0,1,0,0,0,m], [0,0,1,0,0,m], [0,0,0,1,0,m], [0,0,0,0,1,m],[0,0,0,0,1,-1],[0,0,0,0,0,0],[0,0,0,0,0,0]]) f = np.matrix([[0],[100],[100],[0],[-100*n],[0],[0],[0]]) force = np.dot(np.linalg.pinv(b),f) print(force) np.matrix(b) np.matrix([[0],[100],[100],[0],[-100*n],[0],[0],[0]]) np.matrix([[1,0,0,0,0,m], [0,1,0,0,0,m], [0,0,1,0,0,m], [0,0,0,1,0,m], [0,0,0,0,1,m],[0,0,0,0,1,-1],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]) import numpy as np import matplotlib.pyplot as plt import scipy.io import pandas as pd image = scipy.io.loadmat('radar.mat') image = image['A'] U,svd,V = np.linalg.svd(image) # print(svd) # plt.figure(facecolor='white') # plt.title('Singular Value Decomposition') # plt.plot(svd) # plt.savefig('D:/Sujana/CE397/hw7_8/qs8_6_2.jpg') # + k = [10,50,500] for i in k: U,svd,V = np.linalg.svd(image) NewImage = U[:,:i] @ np.diag(svd[:i]) @V[:i,:] NewImage[NewImage<0.01]=np.nan plt.imshow(NewImage) plt.savefig('NewLowRankImage_'+str(i)+'.jpg') # - np.diag(svd) svd
nwm_discharge_curve_fitting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="IHCZjF90M4dg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 322} outputId="1a454fc2-56e1-4145-ca2a-4cd183073250" executionInfo={"status": "ok", "timestamp": 1583433983521, "user_tz": -60, "elapsed": 12438, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} # !pip install --upgrade tables # !pip install eli5 # !pip install xgboost # + id="J81q-h61M_8z" colab_type="code" colab={} import pandas as pd import numpy as np from sklearn.dummy import DummyRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor import xgboost as xgb from sklearn.metrics import mean_absolute_error as mea from sklearn.model_selection import cross_val_score, KFold import eli5 from eli5.sklearn import PermutationImportance # + id="EGgd0OnmPKjL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a5157a00-9c9c-42e9-deac-99755bcab51a" executionInfo={"status": "ok", "timestamp": 1583433983528, "user_tz": -60, "elapsed": 12427, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} # cd /content/drive/My Drive/Colab Notebooks/Matrix_repo/m_2/Car-Price-Prediction/data # + id="EW_6XqB4PtGF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e70490d7-a5da-48e3-e5f3-9c81f618eb5c" executionInfo={"status": "ok", "timestamp": 1583438374091, "user_tz": -60, "elapsed": 2153, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": 
"09050433327895595390"}} df = pd.read_hdf('car.h5') df.shape # + id="wrJCfIo5PwJt" colab_type="code" colab={} # + id="qZJjyGrlANNV" colab_type="code" colab={} SUFFIX_CAT = '__cat' for feat in df.columns: if isinstance(df[feat][0], list):continue factorized_values = df[feat].factorize()[0] if SUFFIX_CAT in feat: df[feat] = factorized_values else: df[feat + SUFFIX_CAT] = factorized_values # + id="7rcZBw36Aa-f" colab_type="code" outputId="7ee77469-6e4c-443b-c7aa-53a4cdaee36f" executionInfo={"status": "ok", "timestamp": 1583438537010, "user_tz": -60, "elapsed": 1014, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} colab={"base_uri": "https://localhost:8080/", "height": 34} cat_feats = [x for x in df.columns if SUFFIX_CAT in x] cat_feats = [x for x in cat_feats if 'price' not in x] len(cat_feats) # + id="itIN9E5ykbni" colab_type="code" colab={} X= df[cat_feats].values y = df['price_value'].values # + id="Jb-selnm671z" colab_type="code" colab={} def run_model(model,feats): X= df[feats].values y = df['price_value'].values scores = cross_val_score(model, X,y, cv=3, scoring = 'neg_mean_absolute_error') return np.mean(scores), np.std(scores) # + [markdown] id="hKpoXy0gkfs0" colab_type="text" # decision tree # # + id="P8Iv9L0TQrr4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6941b60d-2057-44a7-ca3b-136e06a37004" executionInfo={"status": "ok", "timestamp": 1583438553383, "user_tz": -60, "elapsed": 4566, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} run_model(DecisionTreeRegressor(max_depth=5), cat_feats) # + [markdown] id="CxujndZymxQq" colab_type="text" # random forest # # + id="uX8yJOiHm7GK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} 
outputId="90f3c1e3-b2d8-4a79-cbe7-3a9a75beac03" executionInfo={"status": "ok", "timestamp": 1583438663231, "user_tz": -60, "elapsed": 106675, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0) run_model(model, cat_feats) # + [markdown] id="GRAItBYknx2c" colab_type="text" # XGBoost # + id="hyekGLA7n4md" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="75887941-55ab-4875-ada3-d10fa7c54736" executionInfo={"status": "ok", "timestamp": 1583434162768, "user_tz": -60, "elapsed": 191595, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} xgb_params={ 'max_depth': 5, 'n_estimators': 50, 'learning_rate': 0.1, 'seed':0 } run_model(xgb.XGBRegressor(**xgb_params),cat_feats) # + id="CgcKP0WYtkum" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 423} outputId="a0253446-0850-4a73-8736-2a197234f9cc" executionInfo={"status": "ok", "timestamp": 1583434474945, "user_tz": -60, "elapsed": 503759, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} m=xgb.XGBRegressor(max_depth=5, n_estimators=50,learning_rate=0.1, seed=0) m.fit(X,y) imp = PermutationImportance(m, random_state=0).fit(X,y) eli5.show_weights(m,feature_names=cat_feats) # + id="JG-pKkqZtlUy" colab_type="code" colab={} feats = ["param_faktura-vat__cat", "feature_kamera-cofania__cat", "feature_ล‚opatki-zmiany-biegรณw__cat", "param_napฤ™d__cat", "param_skrzynia-biegรณw__cat", "feature_asystent-pasa-ruchu__cat", "param_stan__cat", "feature_ล›wiatล‚a-led__cat", "feature_bluetooth__cat", "feature_regulowane-zawieszenie__cat", 
"feature_wspomaganie-kierownicy__cat", "feature_system-start-stop__cat", "feature_ล›wiatล‚a-do-jazdy-dziennej__cat", "feature_ล›wiatล‚a-xenonowe__cat", "feature_czujniki-parkowania-przednie__cat", "param_moc__cat", "param_rok-produkcji__cat", "param_pojemnoล›ฤ‡-skokowa__cat", "feature_asystent-parkowania__cat", "seller_name__cat"] # + id="knmzXrBfDSZW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="762cd9fa-ce99-4942-9514-8aa8909923a0" executionInfo={"status": "ok", "timestamp": 1583437910347, "user_tz": -60, "elapsed": 12677, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} run_model(xgb.XGBRegressor(**xgb_params), feats) # + [markdown] id="c00-yVlrw7iX" colab_type="text" # # + id="MK4Bvv-OD44C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="435f5e26-262a-4a04-f687-500cc04ed93a" executionInfo={"status": "ok", "timestamp": 1583437969871, "user_tz": -60, "elapsed": 12625, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} df['param_rok-produkcji']=df['param_rok-produkcji'].map(lambda x: -1 if str(x)== 'None' else int(x)) feats = ["param_faktura-vat__cat", "feature_kamera-cofania__cat", "feature_ล‚opatki-zmiany-biegรณw__cat", "param_napฤ™d__cat", "param_skrzynia-biegรณw__cat", "feature_asystent-pasa-ruchu__cat", "param_stan__cat", "feature_ล›wiatล‚a-led__cat", "feature_bluetooth__cat", "feature_regulowane-zawieszenie__cat", "feature_wspomaganie-kierownicy__cat", "feature_system-start-stop__cat", "feature_ล›wiatล‚a-do-jazdy-dziennej__cat", "feature_ล›wiatล‚a-xenonowe__cat", "feature_czujniki-parkowania-przednie__cat", "param_moc__cat", "param_rok-produkcji", "param_pojemnoล›ฤ‡-skokowa__cat", "feature_asystent-parkowania__cat", "seller_name__cat"] 
run_model(xgb.XGBRegressor(**xgb_params), feats) # + id="aojifLLZHCPm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="05f01a5a-c493-4659-d063-a4502a43cd05" executionInfo={"status": "ok", "timestamp": 1583438809950, "user_tz": -60, "elapsed": 12617, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split(' ')[0]) ) df['param_rok-produkcji']=df['param_rok-produkcji'].map(lambda x: -1 if str(x)== 'None' else int(x)) feats = ["param_faktura-vat__cat", "feature_kamera-cofania__cat", "feature_ล‚opatki-zmiany-biegรณw__cat", "param_napฤ™d__cat", "param_skrzynia-biegรณw__cat", "feature_asystent-pasa-ruchu__cat", "param_stan__cat", "feature_ล›wiatล‚a-led__cat", "feature_bluetooth__cat", "feature_regulowane-zawieszenie__cat", "feature_wspomaganie-kierownicy__cat", "feature_system-start-stop__cat", "feature_ล›wiatล‚a-do-jazdy-dziennej__cat", "feature_ล›wiatล‚a-xenonowe__cat", "feature_czujniki-parkowania-przednie__cat", "param_moc", "param_rok-produkcji", "param_pojemnoล›ฤ‡-skokowa__cat", "feature_asystent-parkowania__cat", "seller_name__cat"] run_model(xgb.XGBRegressor(**xgb_params), feats) # + id="qAPWGLqSMNQM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="bbafba70-a4ab-4613-b8bb-026cd8dd19a5" executionInfo={"status": "ok", "timestamp": 1583438826253, "user_tz": -60, "elapsed": 12415, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMrpAovMDScHJAllY6Jg9RXRStdCGDMRDFeXFYJA=s64", "userId": "09050433327895595390"}} df['param_pojemnoล›ฤ‡-skokowa']=df['param_pojemnoล›ฤ‡-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')) ) feats = ["param_faktura-vat__cat", "feature_kamera-cofania__cat", 
"feature_ล‚opatki-zmiany-biegรณw__cat", "param_napฤ™d__cat", "param_skrzynia-biegรณw__cat", "feature_asystent-pasa-ruchu__cat", "param_stan__cat", "feature_ล›wiatล‚a-led__cat", "feature_bluetooth__cat", "feature_regulowane-zawieszenie__cat", "feature_wspomaganie-kierownicy__cat", "feature_system-start-stop__cat", "feature_ล›wiatล‚a-do-jazdy-dziennej__cat", "feature_ล›wiatล‚a-xenonowe__cat", "feature_czujniki-parkowania-przednie__cat", "param_moc", "param_rok-produkcji", "param_pojemnoล›ฤ‡-skokowa", "feature_asystent-parkowania__cat", "seller_name__cat"] run_model(xgb.XGBRegressor(**xgb_params), feats) # + id="zMMhcuRuQ7HP" colab_type="code" colab={}
Models_comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # set the matplotlib backend so figures can be saved in the background import matplotlib # matplotlib.use("Agg") # import the necessary packages import time import pandas as pd import seaborn as sn import keras import os import cv2 import pickle import random import argparse import numpy as np from imutils import paths import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from keras.layers import Dense from keras.preprocessing.image import img_to_array from keras.models import Model from keras.optimizers import Adam from keras.optimizers import Nadam from keras.optimizers import RMSprop from keras.losses import logcosh, binary_crossentropy from keras.activations import relu, elu, sigmoid from keras.preprocessing.image import ImageDataGenerator # import matplotlib # matplotlib.use("Agg") # - # initialize the number of epochs to train for, initial learning rate, # batch size, and image dimensions EPOCHS = 300 INIT_LR = 1e-2 BS = 32 IMAGE_DIMS = (224, 224, 3) # + from imutils import paths imagePaths = sorted(list(paths.list_images("anger_classification"))) random.seed(42) random.shuffle(imagePaths) imagePaths # + from keras.preprocessing.image import img_to_array data = [] labels = [] # loop over the input images for imagePath in imagePaths: # load the image, pre-process it, and store it in the data list image = cv2.imread(imagePath) image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0])) image = img_to_array(image) data.append(image) # extract the class label from the image path and update the # labels list label = imagePath.split(os.path.sep)[-2] labels.append(label) # - # scale the raw pixel intensities to the range [0, 
1] data = np.array(data, dtype="float") / 255.0 labels = np.array(labels) print("[INFO] data matrix: {:.2f}MB".format( data.nbytes / (1024 * 1000.0))) # + from sklearn.preprocessing import LabelBinarizer # binarize the labels lb = LabelBinarizer() labels = lb.fit_transform(labels) # + from sklearn.model_selection import train_test_split # partition the data into training and testing splits using 80% of # the data for training and the remaining 20% for testing (trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.2, random_state=42) # - # construct the image generator for data augmentation aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode="nearest") # + from keras.models import Sequential from keras.layers.normalization import BatchNormalization from keras.layers.convolutional import Conv2D from keras.layers.convolutional import MaxPooling2D from keras.layers.core import Activation from keras.layers.core import Flatten from keras.layers.core import Dropout from keras.layers.core import Dense from keras import backend as K class SmallerVGGNet: @staticmethod def build(width, height, depth, classes): # initialize the model along with the input shape to be # "channels last" and the channels dimension itself model = Sequential() inputShape = (height, width, depth) chanDim = -1 # if we are using "channels first", update the input shape # and channels dimension if K.image_data_format() == "channels_first": inputShape = (depth, height, width) chanDim = 1 # CONV => RELU => POOL model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape)) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(MaxPooling2D(pool_size=(3, 3))) model.add(Dropout(0.25)) # (CONV => RELU) * 2 => POOL model.add(Conv2D(64, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) 
model.add(Conv2D(64, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) # (CONV => RELU) * 2 => POOL model.add(Conv2D(128, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(Conv2D(128, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) # (CONV => RELU) * 2 => POOL model.add(Conv2D(256, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(Conv2D(256, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(256, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(256, (3, 3), padding="same")) model.add(Activation("relu")) model.add(BatchNormalization(axis=chanDim)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) # first (and only) set of FC => RELU layers model.add(Flatten()) model.add(Dense(1024)) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Dropout(0.5)) # softmax classifier model.add(Dense(classes)) model.add(Activation("softmax")) # return the constructed network architecture return model # + import tensorflow as tf from keras.applications.vgg16 import VGG16 print("[INFO] compiling model...") model = VGG16() opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) model.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"]) print("[INFO] done compiling.") model.summary() # + import tensorflow as tf config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.3 
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config)) # + # train the network print("[INFO] training network...") H = model.fit_generator( aug.flow(trainX, trainY, batch_size=BS), validation_data=(testX, testY), steps_per_epoch=len(trainX) // BS, epochs=EPOCHS, verbose=1)#, class_weight={0:3, 1:1}) # save the model to disk print("[INFO] serializing network...") model.save("anger_imbalanced.model") # - plt.style.use("ggplot") # %matplotlib inline plt.figure() N = EPOCHS plt.plot(np.arange(0, N), H.history["loss"], label="train_loss") plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss") plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc") plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc") plt.title("Training Loss and Accuracy") plt.xlabel("Epoch #") plt.ylabel("Loss/Accuracy") plt.legend(loc="upper left") plt.show() plt.savefig("anger_class_weights_plot_imbalanced.png") # + from sklearn.datasets import make_circles from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import cohen_kappa_score from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix from keras.models import Sequential from keras.layers import Dense # predict probabilities for test set yhat_probs = model.predict(testX, verbose=0) # predict crisp classes for test set yhat_classes = np.argmax(yhat_probs, axis=1) # + from sklearn.metrics import classification_report, confusion_matrix import seaborn as sn classif_report = classification_report(testY, yhat_classes, target_names=['anger', 'non-anger']) print('Classification report: ') print('---------------------------------------------------------- ') print(classif_report) print('---------------------------------------------------------- ') # accuracy: (tp + tn) / (p + n) accuracy = accuracy_score(testY, yhat_classes) 
print('Accuracy score: %f' % accuracy) # precision tp / (tp + fp) precision = precision_score(testY, yhat_classes) print('Precision score: %f' % precision) # recall: tp / (tp + fn) recall = recall_score(testY, yhat_classes) print('Recall score: %f' % recall) # f1: 2 tp / (2 tp + fp + fn) f1 = f1_score(testY, yhat_classes) print('F1 score: %f' % f1) print('---------------------------------------------------------- ') # confusion matrix labels = ['anger', 'none'] matrix = confusion_matrix(testY, yhat_classes) print('Confusion matrix: ') print(matrix) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(matrix) plt.title('Confusion Matrix') fig.colorbar(cax) ax.set_xticklabels([''] + labels) ax.set_yticklabels([''] + labels) plt.xlabel('Predicted') plt.ylabel('True') plt.savefig('confusion_matrix_anger_imbalanced.png') plt.show() print(matrix)
CNN_experiments/russian_disinformation_imbalanced_cnn_anger-fullVGG16.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# ## This Jupyter Notebook contains basic functions for the implementation of algorithms in the paper [_The Development and Deployment of a Model for Hospital-level COVID-19 Associated Patient Demand Intervals from Consistent Estimators (DICE)_](https://arxiv.org/abs/2011.09377)

# Libraries
library(ggplot2)
library(dplyr)
library(hrbrthemes)
library(changepoint)
Sys.setenv(LANGUAGE = "en")
library(reshape2)
library(forecast)
library(gridExtra)
library(invgamma)

rm(list=ls())
#work_dir='...'
#setwd(work_dir)

# +
# Actual hospital-level hospitalizations
shc.data=read.csv('...')
# Actual county-level hospitalizations
scc.data=read.csv('...')
# county-level hospitalization forecasts
county.forecast=read.csv('...')

N_list = scc.data$Hospitalizations
A_list = shc.data$covid_AAU
B_list = shc.data$covid_ICU

# Pooled point estimates of the AAU (p) and ICU (q) thinning probabilities:
# the fraction of county hospitalizations that arrive at this hospital's
# acute/ICU units.
hat_p = sum(A_list)/sum(N_list)
hat_q = sum(B_list)/sum(N_list)
# -

# # Perfect Forecasts: Bootstrapping intervals

# Prediction intervals for hospital AAU (A) and ICU (B) demand when the county
# forecast is treated as exact (N_t ~ Poisson(F_t)).
#
# Args:
#   n      : number of historical days used for estimation
#   delta  : nominal two-sided level of the plug-in prediction interval
#   alpha  : level used for the bootstrap endpoint adjustment
#   b0     : number of bootstrap replications
#   hat_p,
#   hat_q  : point estimates of the AAU/ICU thinning probabilities
#   F_list : historical county-level forecasts (length n)
#   F_r    : county-level forecast for the target day r
#
# Returns a list: plug-in interval endpoints, bootstrap-calibrated endpoints,
# then the four vectors of per-replication plug-in endpoints.
bootstrap_perfect_forecast<-function(n,delta,alpha,b0,hat_p,hat_q,F_list,F_r){
    N_lambda=F_list
    A_lower_plugin_list=c()
    A_upper_plugin_list=c()
    B_lower_plugin_list=c()
    B_upper_plugin_list=c()
    for(b in 1:b0){
        A_star_s=c()
        B_star_s=c()
        N_star_s=c()
        # Resample county counts from the fitted Poisson model.
        N_star_s=rpois(n=length(N_lambda),N_lambda)
        # Thin each day's resampled count into (AAU, ICU, other) multinomially.
        for(j in 1:n){
            multi_result = sapply(N_star_s[j], rmultinom, n=1,
                                  prob=c(hat_p,hat_q,(1-hat_p-hat_q)))
            A_j = multi_result[1,]
            B_j = multi_result[2,]
            A_star_s=c(A_star_s,A_j)
            B_star_s=c(B_star_s,B_j)
        }
        # Re-estimated probabilities on the bootstrap sample.
        p_star = sum(A_star_s)/sum(N_star_s)
        q_star = sum(B_star_s)/sum(N_star_s)
        # Plug-in Poisson interval endpoints under the starred estimates.
        A_lower_plugin_b=qpois(delta/2,p_star*F_r)
        A_upper_plugin_b=qpois(1-delta/2,p_star*F_r)
        B_lower_plugin_b=qpois(delta/2,q_star*F_r)
        B_upper_plugin_b=qpois(1-delta/2,q_star*F_r)
        A_lower_plugin_list=c(A_lower_plugin_list,A_lower_plugin_b)
        A_upper_plugin_list=c(A_upper_plugin_list,A_upper_plugin_b)
        B_lower_plugin_list=c(B_lower_plugin_list,B_lower_plugin_b)
        B_upper_plugin_list=c(B_upper_plugin_list,B_upper_plugin_b)
    }
    # Plug-in endpoints under the original point estimates.
    A_lower_plugin=qpois(delta/2,hat_p*F_r)
    A_upper_plugin=qpois(1-delta/2,hat_p*F_r)
    B_lower_plugin=qpois(delta/2,hat_q*F_r)
    B_upper_plugin=qpois(1-delta/2,hat_q*F_r)
    # Bootstrap calibration: shift each endpoint by the appropriate quantile of
    # the bootstrap distribution of endpoint errors.
    z_al=ceiling(quantile(A_lower_plugin_list-A_lower_plugin,1-alpha))
    z_bl=ceiling(quantile(B_lower_plugin_list-B_lower_plugin,1-alpha))
    z_ar=floor(quantile(A_upper_plugin_list-A_upper_plugin,alpha))
    z_br=floor(quantile(B_upper_plugin_list-B_upper_plugin,alpha))
    A_lower_bootstrap=A_lower_plugin-z_al
    B_lower_bootstrap=B_lower_plugin-z_bl
    A_upper_bootstrap=A_upper_plugin-z_ar
    B_upper_bootstrap=B_upper_plugin-z_br
    return(list(c(A_lower_plugin,A_upper_plugin,B_lower_plugin,B_upper_plugin),
                c(A_lower_bootstrap,A_upper_bootstrap,B_lower_bootstrap,B_upper_bootstrap),
                A_lower_plugin_list,A_upper_plugin_list,B_lower_plugin_list,B_upper_plugin_list))
}

# # Unbiased Forecasts with Lognormal Errors

# Same construction as above, but the county counts follow
# N_t ~ Poisson(F_t * exp(Y_t)) with Y_t an AR(1) lognormal error process.
# "Unbiased" imposes E[exp(Y)] = 1, which ties mu to (rho, sigma2) via
# mu = -sigma2 / (2 * (1 + rho)); only (rho, sigma2) are estimated by matching
# the second and lag-1 cross moments.  m is the number of Monte-Carlo draws
# used to evaluate the predictive quantiles.
bootstrap_unbiased_forecast<-function(n,delta,alpha,b0,m,hat_p,hat_q,hat_mu,hat_sigma2,hat_rho,F_list,F_r){
    A_lower_plugin_list=c()
    A_upper_plugin_list=c()
    B_lower_plugin_list=c()
    B_upper_plugin_list=c()
    for(b in 1:b0){
        A_star_s=c()
        B_star_s=c()
        N_star_s=c()
        # Simulate the AR(1) error path, starting from its stationary law.
        Y=rep(0,n)
        Y[1]=rnorm(n=1,mean=hat_mu/(1-hat_rho),sd=sqrt(hat_sigma2/(1-hat_rho^2)))
        for(j in 2:n){
            Y[j]=hat_rho*Y[j-1]+rnorm(n=1,mean=hat_mu,sd=sqrt(hat_sigma2))
        }
        N_star_s=rpois(n=n,F_list*exp(Y))
        # Empirical moments used in the method-of-moments objective.
        fraction1 = mean((N_star_s^2-N_star_s)/F_list^2)
        fraction2 = mean(N_star_s[1:(length(N_star_s)-1)]*N_star_s[2:length(N_star_s)]
                         /(F_list[1:(length(F_list)-1)]*F_list[2:length(F_list)]))
        fn <- function(param){
            rho=param[1]
            sigma2=param[2]
            # Unbiasedness constraint: E[exp(Y_stationary)] = 1.
            mu=-sigma2/(2+2*rho)
            obj = 0
            obj = obj+(fraction1-exp(2*mu/(1-rho)+2*sigma2/(1-rho^2)))^2
            obj = obj+(fraction2-exp(2*mu/(1-rho)+sigma2/(1-rho)))^2
            # Soft penalties keeping rho in (-1, 1) and sigma2 >= 0.
            obj = obj+10000*max(0,(rho^2-1))+10000*max(0,(-sigma2))
            return(obj)
        }
        result = optim(rep(0,2),lower=c(-0.99,0),upper=c(0.99,1),fn,method="L-BFGS-B")
        hat_rho_star=result$par[1]
        hat_sigma2_star=result$par[2]
        # BUG FIX: the original used (2 + 2*hat_rho_star^2), which contradicts
        # the constraint mu = -sigma2/(2 + 2*rho) imposed inside fn() above.
        hat_mu_star=-hat_sigma2_star/(2+2*hat_rho_star)
        for(j in 1:n){
            multi_result = sapply(N_star_s[j], rmultinom, n=1,
                                  prob=c(hat_p,hat_q,(1-hat_p-hat_q)))
            A_j = multi_result[1,]
            B_j = multi_result[2,]
            A_star_s=c(A_star_s,A_j)
            B_star_s=c(B_star_s,B_j)
        }
        p_star = sum(A_star_s)/sum(N_star_s)
        q_star = sum(B_star_s)/sum(N_star_s)
        # Monte-Carlo draws from the predictive law under the starred estimates.
        A_r_star=rep(0,m)
        B_r_star=rep(0,m)
        for(i in 1:m){
            Yr=rnorm(n=1,mean=hat_mu_star/(1-hat_rho_star),
                     sd=sqrt(hat_sigma2_star/(1-hat_rho_star^2)))
            A_r_star[i]=rpois(n=1,lambda=p_star*F_r*exp(Yr))
            B_r_star[i]=rpois(n=1,lambda=q_star*F_r*exp(Yr))
        }
        A_lower_plugin_b=ceiling(quantile(A_r_star,delta/2))
        A_upper_plugin_b=floor(quantile(A_r_star,1-delta/2))
        B_lower_plugin_b=ceiling(quantile(B_r_star,delta/2))
        B_upper_plugin_b=floor(quantile(B_r_star,1-delta/2))
        A_lower_plugin_list=c(A_lower_plugin_list,A_lower_plugin_b)
        A_upper_plugin_list=c(A_upper_plugin_list,A_upper_plugin_b)
        B_lower_plugin_list=c(B_lower_plugin_list,B_lower_plugin_b)
        B_upper_plugin_list=c(B_upper_plugin_list,B_upper_plugin_b)
    }
    # Plug-in predictive quantiles under the original parameter estimates.
    A_r=rep(0,m)
    B_r=rep(0,m)
    for(i in 1:m){
        Yr=rnorm(n=1,mean=hat_mu/(1-hat_rho),
                 sd=sqrt(hat_sigma2/(1-hat_rho^2)))
        A_r[i]=rpois(n=1,lambda=hat_p*F_r*exp(Yr))
        B_r[i]=rpois(n=1,lambda=hat_q*F_r*exp(Yr))
    }
    A_lower_plugin=ceiling(quantile(A_r,delta/2))
    A_upper_plugin=floor(quantile(A_r,1-delta/2))
    B_lower_plugin=ceiling(quantile(B_r,delta/2))
    B_upper_plugin=floor(quantile(B_r,1-delta/2))
    # Bootstrap endpoint calibration, as in the perfect-forecast case.
    z_al=ceiling(quantile(A_lower_plugin_list-A_lower_plugin,1-alpha))
    z_bl=ceiling(quantile(B_lower_plugin_list-B_lower_plugin,1-alpha))
    z_ar=floor(quantile(A_upper_plugin_list-A_upper_plugin,alpha))
    z_br=floor(quantile(B_upper_plugin_list-B_upper_plugin,alpha))
    A_lower_bootstrap=A_lower_plugin-z_al
    B_lower_bootstrap=B_lower_plugin-z_bl
    A_upper_bootstrap=A_upper_plugin-z_ar
    B_upper_bootstrap=B_upper_plugin-z_br
    return(list(c(A_lower_plugin,A_upper_plugin,B_lower_plugin,B_upper_plugin),
                c(A_lower_bootstrap,A_upper_bootstrap,B_lower_bootstrap,B_upper_bootstrap),
                A_lower_plugin_list,A_upper_plugin_list,B_lower_plugin_list,B_upper_plugin_list))
}

# # Biased Forecasts with Lognormal Errors

# As above, but the forecast may be biased: mu is a free third parameter, so
# three empirical moments (first, second, lag-1 cross) are matched.
bootstrap_biased_forecast <- function(n,delta,alpha,b0,m,hat_p,hat_q,hat_mu,hat_sigma2,hat_rho,F_list,F_r){
    A_lower_plugin_list=c()
    A_upper_plugin_list=c()
    B_lower_plugin_list=c()
    B_upper_plugin_list=c()
    for(b in 1:b0){
        A_star_s=c()
        B_star_s=c()
        N_star_s=c()
        # Simulate the AR(1) error path, starting from its stationary law.
        Y=rep(0,n)
        Y[1]=rnorm(n=1,mean=hat_mu/(1-hat_rho),sd=sqrt(hat_sigma2/(1-hat_rho^2)))
        for(j in 2:n){
            Y[j]=hat_rho*Y[j-1]+rnorm(n=1,mean=hat_mu,sd=sqrt(hat_sigma2))
        }
        N_star_s=rpois(n=n,F_list*exp(Y))
        # Empirical first, second, and lag-1 cross moments.
        fraction1 = mean(N_star_s/F_list)
        fraction2 = mean((N_star_s^2-N_star_s)/F_list^2)
        fraction3 = mean(N_star_s[1:(length(N_star_s)-1)]*N_star_s[2:length(N_star_s)]
                         /(F_list[1:(length(F_list)-1)]*F_list[2:length(F_list)]))
        fn <- function(param){
            rho=param[1]
            sigma2=param[2]
            mu=param[3]
            obj = 0
            obj = obj+(fraction1-exp(mu/(1-rho)+0.5*sigma2/(1-rho^2)))^2
            obj = obj+(fraction2-exp(2*mu/(1-rho)+2*sigma2/(1-rho^2)))^2
            obj = obj+(fraction3-exp(2*mu/(1-rho)+sigma2/(1-rho)))^2
            # Soft penalties keeping rho in (-1, 1) and sigma2 >= 0.
            obj = obj+10000*max(0,(rho^2-1))+10000*max(0,(-sigma2))
            return(obj)
        }
        result = optim(rep(0,3),lower=c(-0.99,0.0001,-4),upper=c(0.99,4,4),fn,method="L-BFGS-B")
        hat_rho_star=result$par[1]
        hat_sigma2_star=result$par[2]
        hat_mu_star=result$par[3]
        for(j in 1:n){
            multi_result = sapply(N_star_s[j], rmultinom, n=1,
                                  prob=c(hat_p,hat_q,(1-hat_p-hat_q)))
            A_j = multi_result[1,]
            B_j = multi_result[2,]
            A_star_s=c(A_star_s,A_j)
            B_star_s=c(B_star_s,B_j)
        }
        p_star = sum(A_star_s)/sum(N_star_s)
        q_star = sum(B_star_s)/sum(N_star_s)
        # Monte-Carlo draws from the predictive law under the starred estimates.
        A_r_star=rep(0,m)
        B_r_star=rep(0,m)
        for(i in 1:m){
            Yr=rnorm(n=1,mean=hat_mu_star/(1-hat_rho_star),
                     sd=sqrt(hat_sigma2_star/(1-hat_rho_star^2)))
            A_r_star[i]=rpois(n=1,lambda=p_star*F_r*exp(Yr))
            B_r_star[i]=rpois(n=1,lambda=q_star*F_r*exp(Yr))
        }
        A_lower_plugin_b=ceiling(quantile(A_r_star,delta/2))
        A_upper_plugin_b=floor(quantile(A_r_star,1-delta/2))
        B_lower_plugin_b=ceiling(quantile(B_r_star,delta/2))
        B_upper_plugin_b=floor(quantile(B_r_star,1-delta/2))
        A_lower_plugin_list=c(A_lower_plugin_list,A_lower_plugin_b)
        A_upper_plugin_list=c(A_upper_plugin_list,A_upper_plugin_b)
        B_lower_plugin_list=c(B_lower_plugin_list,B_lower_plugin_b)
        B_upper_plugin_list=c(B_upper_plugin_list,B_upper_plugin_b)
    }
    # Plug-in predictive quantiles under the original parameter estimates.
    A_r=rep(0,m)
    B_r=rep(0,m)
    for(i in 1:m){
        Yr=rnorm(n=1,mean=hat_mu/(1-hat_rho),
                 sd=sqrt(hat_sigma2/(1-hat_rho^2)))
        A_r[i]=rpois(n=1,lambda=hat_p*F_r*exp(Yr))
        B_r[i]=rpois(n=1,lambda=hat_q*F_r*exp(Yr))
    }
    A_lower_plugin=ceiling(quantile(A_r,delta/2))
    A_upper_plugin=floor(quantile(A_r,1-delta/2))
    B_lower_plugin=ceiling(quantile(B_r,delta/2))
    B_upper_plugin=floor(quantile(B_r,1-delta/2))
    # Bootstrap endpoint calibration.
    z_al=ceiling(quantile(A_lower_plugin_list-A_lower_plugin,1-alpha))
    z_bl=ceiling(quantile(B_lower_plugin_list-B_lower_plugin,1-alpha))
    z_ar=floor(quantile(A_upper_plugin_list-A_upper_plugin,alpha))
    z_br=floor(quantile(B_upper_plugin_list-B_upper_plugin,alpha))
    A_lower_bootstrap=A_lower_plugin-z_al
    B_lower_bootstrap=B_lower_plugin-z_bl
    A_upper_bootstrap=A_upper_plugin-z_ar
    B_upper_bootstrap=B_upper_plugin-z_br
    return(list(c(A_lower_plugin,A_upper_plugin,B_lower_plugin,B_upper_plugin),
                c(A_lower_bootstrap,A_upper_bootstrap,B_lower_bootstrap,B_upper_bootstrap),
                A_lower_plugin_list,A_upper_plugin_list,B_lower_plugin_list,B_upper_plugin_list))
}
DICE_basic function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] run_control={"frozen": false, "read_only": false}
# # Library Imports

# + run_control={"frozen": false, "read_only": false}
# %matplotlib inline
import numpy
from numpy import arange
from matplotlib import pyplot
from pandas import read_csv
from pandas import set_option
# FIX: the pandas.tools package was removed from pandas; scatter_matrix now
# lives in pandas.plotting.
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error

# + [markdown] run_control={"frozen": false, "read_only": false}
# # Data Ingestion

# + run_control={"frozen": false, "read_only": false}
# Load the Boston housing dataset (whitespace-delimited, no header row).
filename = '/home/pybokeh/Dropbox/python/jupyter_notebooks/machine_learning/housing.csv'
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
         'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
dataset = read_csv(filename, delim_whitespace=True, names=names)

# + run_control={"frozen": false, "read_only": false}
dataset.shape

# + run_control={"frozen": false, "read_only": false}
dataset.dtypes

# + run_control={"frozen": false, "read_only": false}
dataset.head()

# + run_control={"frozen": false, "read_only": false}
# descriptions
# FIX: use the full option name; the abbreviated 'precision' form was
# deprecated and then removed from pandas.
set_option('display.precision', 1)
dataset.describe()

# + run_control={"frozen": false, "read_only": false}
# correlation
set_option('display.precision', 2)
dataset.corr(method='pearson')

# + run_control={"frozen": false, "read_only": false}
# visual representation of correlation matrix
fig = pyplot.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(dataset.corr(), vmin=-1, vmax=1, interpolation='none')
fig.colorbar(cax)
ticks = numpy.arange(0, 14, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names, rotation=270)
ax.set_yticklabels(names)
pyplot.show()
jupyter_notebooks/machine_learning/sklearn-regression-problem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tf-latest # language: python # name: tf-latest # --- # # T81-558: Applications of Deep Neural Networks # **Class 6: Preprocessing.** # * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) # * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # # Why is Preprocessing Necessary # # The feature vector, the input to a model (such as a neural network), must be completely numeric. Converting non-numeric data into numeric is one major component of preprocessing. It is also often important to preprocess numeric values. Scikit-learn provides a large number of preprocessing functions: # # * [Scikit-Learn Preprocessing](http://scikit-learn.org/stable/modules/preprocessing.html) # # However, this is just the beginning. The success of your neural network's predictions is often directly tied to the data representation. # # Preprocessing Functions # # The following functions will be used in conjunction with TensorFlow to help preprocess the data. Some of these were [covered previously](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class2_tensor_flow.ipynb), some are new. # # It is okay to just use them. For better understanding, try to see how they work. # # These functions allow you to build the feature vector for a neural network. Consider the following: # # * Predictors/Inputs # * Fill any missing inputs with the median for that column. Use **missing_median**. # * Encode textual/categorical values with **encode_text_dummy** or more creative means (see last part of this class session). # * Encode numeric values with **encode_numeric_zscore**, **encode_numeric_binary** or **encode_numeric_range**. 
# * Consider removing outliers: **remove_outliers**
# * Output
#     * Discard rows with missing outputs.
#     * Encode textual/categorical values with **encode_text_index**.
#     * Do not encode output numeric values.
#     * Consider removing outliers: **remove_outliers**
# * Produce final feature vectors (x) and expected output (y) with **to_xy**.
#
# # Complete Set of Preprocessing Functions

# +
import os
import shutil

import numpy as np
import pandas as pd

# NOTE(review): sklearn and matplotlib are imported lazily inside the only
# helpers that need them (encode_text_index, chart_regression) so the rest of
# these utilities can be used without those packages installed.


# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
    """One-hot encode df[name] into `name-<value>` columns and drop the original column."""
    dummies = pd.get_dummies(df[name])
    for x in dummies.columns:
        dummy_name = "{}-{}".format(name, x)
        df[dummy_name] = dummies[x]
    df.drop(name, axis=1, inplace=True)


# Encode text values to a single dummy variable. The new columns (which do not replace the
# old) will have a 1 at every location where the original column (name) matches each of
# the target_values. One column is added for each target value.
def encode_text_single_dummy(df, name, target_values):
    """Add a 0/1 `name-<tv>` column for each value in target_values; df[name] is kept."""
    for tv in target_values:
        l = list(df[name].astype(str))
        l = [1 if str(x) == str(tv) else 0 for x in l]
        name2 = "{}-{}".format(name, tv)
        df[name2] = l


# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
    """Replace df[name] with integer label codes; return the array of class labels."""
    from sklearn import preprocessing  # deferred: only this helper needs sklearn

    le = preprocessing.LabelEncoder()
    df[name] = le.fit_transform(df[name])
    return le.classes_


# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
    """Standardize df[name] in place; mean/sd default to the column's own statistics."""
    if mean is None:
        mean = df[name].mean()
    if sd is None:
        sd = df[name].std()
    df[name] = (df[name] - mean) / sd


# Convert all missing values in the specified column to the median
def missing_median(df, name):
    med = df[name].median()
    df[name] = df[name].fillna(med)


# Convert all missing values in the specified column to the default
def missing_default(df, name, default_value):
    df[name] = df[name].fillna(default_value)


# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    """Split df into (x, y) numpy arrays; y is int32 for integer targets, float32 otherwise."""
    result = [x for x in df.columns if x != target]

    # dtype of the target column decides classification vs regression encoding
    target_type = df[target].dtype

    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    # FIX: DataFrame.as_matrix() was removed from pandas; use to_numpy().
    if target_type in (np.int64, np.int32):
        # Classification
        return df[result].to_numpy().astype(np.float32), df[[target]].to_numpy().astype(np.int32)
    # Regression
    return df[result].to_numpy().astype(np.float32), df[[target]].to_numpy().astype(np.float32)


# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format a duration in seconds as H:MM:SS.ss."""
    h = int(sec_elapsed / (60 * 60))
    m = int((sec_elapsed % (60 * 60)) / 60)
    s = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(h, m, s)


# Regression chart, we will see more of this chart in the next class.
def chart_regression(pred, y):
    """Plot predictions against expected values, sorted by the expected value."""
    import matplotlib.pyplot as plt  # deferred: only this helper needs matplotlib

    # BUG FIX: the original built the frame from the global `y_test` instead
    # of the `y` parameter, so the chart silently ignored its argument.
    t = pd.DataFrame({'pred': pred, 'y': y.flatten()})
    t.sort_values(by=['y'], inplace=True)
    plt.plot(t['y'].tolist(), label='expected')
    plt.plot(t['pred'].tolist(), label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()


# Get a new directory to hold checkpoints from a neural network.  This allows
# the neural network to be loaded later.  If the erase param is set to true,
# the contents of the directory will be cleared.
def get_model_dir(name, erase):
    """Return ./dnn/<name>; erase=True clears previous contents. The directory always exists."""
    base_path = os.path.join(".", "dnn")
    model_dir = os.path.join(base_path, name)
    # BUG FIX: erase BEFORE (re)creating; the original created the directory
    # and then removed it, returning a path that no longer existed.
    if erase and len(model_dir) > 4 and os.path.isdir(model_dir):
        shutil.rmtree(model_dir, ignore_errors=True)  # be careful, this deletes everything below the specified path
    os.makedirs(model_dir, exist_ok=True)
    return model_dir


# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
    drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]
    df.drop(drop_rows, axis=0, inplace=True)


# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
                         data_low=None, data_high=None):
    """Linearly rescale df[name] from [data_low, data_high] onto [normalized_low, normalized_high]."""
    # BUG FIX: default data_low and data_high independently; the original only
    # computed data_high when data_low was also omitted, so passing data_low
    # alone crashed.
    if data_low is None:
        data_low = min(df[name])
    if data_high is None:
        data_high = max(df[name])
    df[name] = ((df[name] - data_low) / (data_high - data_low)) \
        * (normalized_high - normalized_low) + normalized_low
# -

# # Analyzing a Dataset
#
# The following script can be used to give a high level overview of how a dataset appears.

# +
ENCODING = 'utf-8'


def expand_categories(values):
    """Summarize a low-cardinality series as "[value:percent%,...]" ordered by frequency."""
    result = []
    s = values.value_counts()
    t = float(len(values))
    for v in s.index:
        result.append("{}:{}%".format(v, round(100 * (s[v] / t), 2)))
    return "[{}]".format(",".join(result))


def analyze(filename):
    """Print row count plus, per column, either the unique count (>100) or the value breakdown."""
    print()
    print("Analyzing: {}".format(filename))
    df = pd.read_csv(filename, encoding=ENCODING)
    cols = df.columns.values
    total = float(len(df))
    print("{} rows".format(int(total)))
    for col in cols:
        uniques = df[col].unique()
        unique_count = len(uniques)
        if unique_count > 100:
            print("** {}:{} ({}%)".format(col, unique_count, int(((unique_count) / total) * 100)))
        else:
            # (a redundant second expand_categories() call was removed here)
            print("** {}:{}".format(col, expand_categories(df[col])))
# -

# The analyze script can be run on the MPG dataset.
# +
import tensorflow.contrib.learn as skflow
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore

path = "./data/"

# Print a column-by-column summary of the MPG dataset.
filename_read = os.path.join(path, "auto-mpg.csv")
analyze(filename_read)
# -

# # Preprocessing Examples
#
# The above preprocessing functions can be used in a variety of ways.

# +
import tensorflow.contrib.learn as skflow
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore

path = "./data/"

filename_read = os.path.join(path, "auto-mpg.csv")
df = pd.read_csv(filename_read, na_values=['NA', '?'])

# create feature vector
missing_median(df, 'horsepower')
# FIX: pass axis as a keyword; the positional form df.drop('name', 1) was
# deprecated and then removed from pandas.
df.drop('name', axis=1, inplace=True)
encode_numeric_zscore(df, 'horsepower')
encode_numeric_zscore(df, 'weight')
encode_numeric_range(df, 'cylinders', 0, 1)
encode_numeric_range(df, 'displacement', 0, 1)
encode_numeric_zscore(df, 'acceleration')
#encode_numeric_binary(df,'mpg',20)
#df['origin'] = df['origin'].astype(str)
#encode_text_tfidf(df, 'origin')

# Drop outliers in MPG (the original comment said "horsepower", but the call
# below filters on the 'mpg' column).
print("Length before MPG outliers dropped: {}".format(len(df)))
remove_outliers(df, 'mpg', 2)
print("Length after MPG outliers dropped: {}".format(len(df)))

print(df)
# -

# # Feature Ranking
#
# Feature ranking is an important process where you determine which input columns (features) are the most important. I implemented several feature ranking algorithms for the following academic paper:
#
# <NAME>., <NAME>., & <NAME>. (May 2017). [Early stabilizing feature importance for TensorFlow deep neural networks](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/pdf/heaton_et_al_ijcnn_2017-pre.pdf). In *International Joint Conference on Neural Networks (IJCNN 2017)* (accepted for publication). IEEE.
#
# Two feature ranking algorithms are provided here (a total of 4 are in the paper):
#
# * **CorrelationCoefficientRank** - A simple statistical analysis of the correlation between each input field and the target.
# Does not require a trained neural network and does not consider interactions.
# * **InputPerturbationRank** - Uses a trained neural network and scrambles each input one-by-one. Neural network does not need to be retrained. Slower, but more accurate, than CorrelationCoefficientRank.
#
# Some of the code from this paper is provided here:

# +
# Feature ranking code
class Ranking(object):
    """Base class for feature-ranking strategies over a feature matrix x and target y."""

    def __init__(self, names):
        self.names = names  # column names, aligned with the columns of x

    def _normalize(self, x, y, impt):
        """Scale importances to sum to 1; return (importance, name, column) sorted descending."""
        impt = np.asarray(impt, dtype=float)
        impt = impt / sum(impt)
        impt = list(zip(impt, self.names, range(x.shape[1])))
        impt.sort(key=lambda item: -item[0])
        return impt


class CorrelationCoefficientRank(Ranking):
    """Rank features by |Pearson correlation| with the target.

    Needs no trained model; ignores feature interactions.
    """

    def __init__(self, names):
        super(CorrelationCoefficientRank, self).__init__(names)

    def rank(self, x, y, model=None):
        impt = []
        for i in range(x.shape[1]):
            c = np.corrcoef(x[:, i], y[:, 0])
            impt.append(abs(c[1, 0]))
        # CONSISTENCY FIX: reuse the shared _normalize() helper instead of
        # duplicating its normalize-and-sort logic inline (the original also
        # took abs() twice, which is redundant).
        return self._normalize(x, y, impt)


class InputPerturbationRank(Ranking):
    """Rank features by the error increase when each input column is shuffled."""

    def __init__(self, names):
        super(InputPerturbationRank, self).__init__(names)

    def _raw_rank(self, x, y, network):
        impt = np.zeros(x.shape[1])
        for i in range(x.shape[1]):
            hold = np.array(x[:, i])  # copy so the column can be restored below
            np.random.shuffle(x[:, i])

            # Handle both TensorFlow and SK-Learn models.
            if 'tensorflow' in str(type(network)).lower():
                pred = list(network.predict(x, as_iterable=True))
            else:
                pred = network.predict(x)

            # NOTE(review): this is the MSE, not the RMSE the name suggests;
            # the ranking is unaffected because the transform is monotonic.
            rmse = metrics.mean_squared_error(y, pred)
            impt[i] = rmse
            x[:, i] = hold
        return impt

    def rank(self, x, y, network):
        impt = self._raw_rank(x, y, network)
        return self._normalize(x, y, impt)
# -

# +
# Rank the MPG fields with a trained DNN regressor.  The cell body is guarded
# (and its heavy imports moved inside) so importing this module does not
# trigger training; inside a notebook the guard is always true and the cell
# runs exactly as before.
if __name__ == "__main__":
    import tensorflow.contrib.learn as learn
    import tensorflow as tf
    from sklearn.model_selection import train_test_split
    import pandas as pd
    import os

    path = "./data/"

    # Set the desired TensorFlow output level for this example
    tf.logging.set_verbosity(tf.logging.ERROR)

    filename_read = os.path.join(path, "auto-mpg.csv")
    df = pd.read_csv(filename_read, na_values=['NA', '?'])

    # create feature vector
    missing_median(df, 'horsepower')
    df.drop('name', axis=1, inplace=True)  # keyword axis: positional form removed from pandas
    encode_numeric_zscore(df, 'horsepower')
    encode_numeric_zscore(df, 'weight')
    encode_numeric_zscore(df, 'cylinders')
    encode_numeric_zscore(df, 'displacement')
    encode_numeric_zscore(df, 'acceleration')
    encode_text_dummy(df, 'origin')

    # Encode to a 2D matrix for training
    x, y = to_xy(df, 'mpg')

    # Split into train/test
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.20, random_state=42)

    # Get/clear a directory to store the neural network to
    model_dir = get_model_dir('mpg', True)

    # Create a deep neural network with 3 hidden layers of 50, 25, 10
    # NOTE(review): dimension=x.shape[0] is the row count; the feature-column
    # dimension is usually the feature count x.shape[1] — confirm against the
    # tf.contrib.learn API before changing.
    feature_columns = [tf.contrib.layers.real_valued_column("", dimension=x.shape[0])]
    regressor = learn.DNNRegressor(
        model_dir=model_dir,
        config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1),
        feature_columns=feature_columns,
        hidden_units=[50, 25, 10])

    # Early stopping
    validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        x_test,
        y_test,
        every_n_steps=500,
        early_stopping_metric="loss",
        early_stopping_metric_minimize=True,
        early_stopping_rounds=50)

    # Fit/train neural network
    regressor.fit(x_train, y_train, monitors=[validation_monitor], steps=10000)

    names = list(df.columns)
    names.remove('mpg')  # must remove target field MPG so that index aligns with x (which does not have mpg)

    ranker = InputPerturbationRank
    print()
    print("*** InputPerturbationRank ***")
    l1 = ranker(names).rank(x_test, y_test, regressor)
    for itm in l1:
        print(itm)
# -

# +
if __name__ == "__main__":
    ranker = CorrelationCoefficientRank
    print()
    print("*** CorrelationCoefficientRank ***")
    l1 = ranker(names).rank(x_test, y_test, regressor)
    for itm in l1:
        print(itm)
# -

# # Other Examples: Dealing with Addresses
#
# Addresses can be difficult to encode into a neural network. There are many different approaches, and you must consider how you can transform the address into something more meaningful. Map coordinates can be a good approach. [Latitude and longitude](https://en.wikipedia.org/wiki/Geographic_coordinate_system) can be a useful encoding. Thanks to the power of the Internet, it is relatively easy to transform an address into its latitude and longitude values. The following code determines the coordinates of [Washington University](https://wustl.edu/):

# +
if __name__ == "__main__":
    import requests

    # NOTE(review): modern Google geocoding requires an API key and a
    # URL-encoded address; confirm this unauthenticated call still works.
    address = "1 Brookings Dr, St. Louis, MO 63130"
    response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address=' + address)
    resp_json_payload = response.json()
    print(resp_json_payload['results'][0]['geometry']['location'])
# -

# If latitude and longitude are simply fed into the neural network as two features, they might not be overly helpful. These two values would allow your neural network to cluster locations on a map. Sometimes cluster locations on a map can be useful.
# Consider the percentage of the population that smokes in the USA by state:
#
# ![Smokers by State](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_6_smokers.png "Smokers by State")
#
# The above map shows that certain behaviors, like smoking, can be clustered by global region.
#
# However, often you will want to transform the coordinates into distances. It is reasonably easy to estimate the distance between any two points on Earth by using the [great circle distance](https://en.wikipedia.org/wiki/Great-circle_distance) between any two points on a sphere:
#
# The following code implements this formula:
#
# $\Delta\sigma=\arccos\bigl(\sin\phi_1\cdot\sin\phi_2+\cos\phi_1\cdot\cos\phi_2\cdot\cos(\Delta\lambda)\bigr)$
#
# $d = r \, \Delta\sigma$
#

# +
from math import sin, cos, sqrt, atan2, radians


# Distance function
def distance_lat_lng(lat1, lng1, lat2, lng2):
    """Great-circle distance in km between two (lat, lng) points given in degrees.

    Uses the haversine formulation (sin^2 of half-angles + atan2), which is
    numerically stable for small separations, unlike the arccos form above.
    """
    R = 6373.0  # approximate radius of earth in km

    # degrees to radians (lat/lon are in degrees)
    lat1 = radians(lat1)
    lng1 = radians(lng1)
    lat2 = radians(lat2)
    lng2 = radians(lng2)

    dlng = lng2 - lng1
    dlat = lat2 - lat1

    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlng / 2) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return R * c


# Find lat lon for address
def lookup_lat_lng(address):
    """Geocode `address` via the Google Maps API; returns (lat, lng), or (0, 0) on failure.

    NOTE(review): modern Google geocoding requires an API key; confirm the
    unauthenticated endpoint still responds before relying on this.
    """
    import requests  # deferred so the pure distance helper works without requests installed

    response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address=' + address)
    json = response.json()
    if len(json['results']) == 0:
        print("Can't find: {}".format(address))
        return 0, 0
    map = json['results'][0]['geometry']['location']
    return map['lat'], map['lng']


# Demo: distance between two campuses.  Guarded so importing this module does
# not fire network requests; inside a notebook the guard is always true.
if __name__ == "__main__":
    address1 = "1 Brookings Dr, St. Louis, MO 63130"
    address2 = "3301 College Ave, Fort Lauderdale, FL 33314"

    lat1, lng1 = lookup_lat_lng(address1)
    lat2, lng2 = lookup_lat_lng(address2)

    print("Distance, St. Louis, MO to Ft. Lauderdale, FL: {} km".format(
        distance_lat_lng(lat1, lng1, lat2, lng2)))
# -

# Distances can be useful to encode addresses as.
# You must consider what distance might be useful for your dataset. Consider:
#
# * Distance to major metropolitan area
# * Distance to competitor
# * Distance to distribution center
# * Distance to retail outlet
#
# The following code calculates the distance between 10 universities and washu:

# +
# Encoding other universities by their distance to Washington University
schools = [
    ["Princeton University, Princeton, NJ 08544", 'Princeton'],
    ["Massachusetts Hall, Cambridge, MA 02138", 'Harvard'],
    ["5801 S Ellis Ave, Chicago, IL 60637", 'University of Chicago'],
    ["Yale, New Haven, CT 06520", 'Yale'],
    ["116th St & Broadway, New York, NY 10027", 'Columbia University'],
    ["450 Serra Mall, Stanford, CA 94305", 'Stanford'],
    ["77 Massachusetts Ave, Cambridge, MA 02139", 'MIT'],
    ["Duke University, Durham, NC 27708", 'Duke University'],
    ["University of Pennsylvania, Philadelphia, PA 19104", 'University of Pennsylvania'],
    ["Johns Hopkins University, Baltimore, MD 21218", '<NAME>']
]

# Geocode the reference campus once, then measure every school against it.
wustl_lat, wustl_lng = lookup_lat_lng("1 Brookings Dr, St. Louis, MO 63130")

for school_address, school_name in schools:
    school_lat, school_lng = lookup_lat_lng(school_address)
    separation_km = distance_lat_lng(wustl_lat, wustl_lng, school_lat, school_lng)
    print("School '{}', distance to wustl is: {}".format(school_name, separation_km))
# -

# # Other Examples: Bag of Words
#
# The Bag of Words algorithm is a common means of encoding strings. (Harris, 1954) Each input represents the count of one particular word. The entire input vector would contain one value for each unique word. Consider the following strings.
#
# ```
# Of Mice and Men
# Three Blind Mice
# Blind Man's Bluff
# Mice and More Mice
# ```
#
# We have the following unique words. This is our "dictionary."
#
# ```
# Input 0 : and
# Input 1 : blind
# Input 2 : bluff
# Input 3 : man's
# Input 4 : men
# Input 5 : mice
# Input 6 : more
# Input 7 : of
# Input 8 : three
# ```
#
# The four lines above would be encoded as follows.
#
# ```
# Of Mice and Men [ 0 4 5 7 ]
# Three Blind Mice [ 1 5 8 ]
# Blind Man's Bluff [ 1 2 3 ]
# Mice and More Mice [ 0 5 6 ]
# ```
#
# Of course we have to fill in the missing words with zero, so we end up with
# the following.
#
# * Of Mice and Men [ 1 , 0 , 0 , 0 , 1 , 1 , 0 , 1 , 0 ]
# * Three Blind Mice [ 0 , 1 , 0 , 0 , 0 , 1 , 0 , 0 , 1 ]
# * Blind Man's Bluff [ 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 ]
# * Mice and More Mice [ 1 , 0 , 0 , 0 , 0 , 2 , 1 , 0 , 0 ]
#
# Notice that we now have a consistent vector length of nine. Nine is the total
# number of words in our "dictionary". Each component number in the vector is
# an index into our dictionary of available words. At each vector component is
# stored a count of the number of words for that dictionary entry. Each string
# will usually contain only a small subset of the dictionary. As a result, most of
# the vector values will be zero.
#
# As you can see, one of the most difficult aspects of machine learning programming
# is translating your problem into a fixed-length array of floating point
# numbers. The following section shows how to translate several examples.
#
#
# * [CountVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html)

# +
from sklearn.feature_extraction.text import CountVectorizer

# Small corpus demo: learn the vocabulary, then encode each document as counts.
corpus = [
    'This is the first document.',
    'This is the second second document.',
    'And the third one.',
    'Is this the first document?']

vectorizer = CountVectorizer(min_df=1)
vectorizer.fit(corpus)
print("Mapping")
print(vectorizer.vocabulary_)
print()
print("Encoded")
x = vectorizer.transform(corpus)
print(x.toarray())

# +
from sklearn.feature_extraction.text import CountVectorizer

path = "./data/"

filename_read = os.path.join(path, "auto-mpg.csv")
df = pd.read_csv(filename_read, na_values=['NA', '?'])

corpus = df['name']
vectorizer = CountVectorizer(min_df=1)
vectorizer.fit(corpus)
print("Mapping")
print(vectorizer.vocabulary_)
print()
print("Encoded")
x = vectorizer.transform(corpus).toarray()
print(x)
print(len(vectorizer.vocabulary_))

# Reverse lookup: bag_cols[i] must be the word for matrix column i.
# BUG FIX: vocabulary_ maps word -> column index, and its iteration order is
# NOT column order; the original enumerate() filled bag_cols in dict order,
# mislabeling every feature in the ranking below.
bag_cols = [None] * len(vectorizer.vocabulary_)
for word, col in vectorizer.vocabulary_.items():
    bag_cols[col] = word

# +
# FIX: Series.as_matrix() was removed from pandas; use to_numpy().
y = df['mpg'].to_numpy()

# Get/clear a directory to store the neural network to
model_dir = get_model_dir('mpg_fe', True)

# Create a deep neural network with 3 hidden layers of 50, 25, 10
# NOTE(review): dimension=x.shape[0] is the row count; the feature-column
# dimension is usually the feature count x.shape[1] — confirm against the
# tf.contrib.learn API before changing.
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=x.shape[0])]
regressor = learn.DNNRegressor(
    model_dir=model_dir,
    feature_columns=feature_columns,
    hidden_units=[50, 25, 10])
regressor.fit(x, y, steps=5000)

# Rank features
ranker = InputPerturbationRank
print()
print("*** Feature Ranking ***")
l1 = ranker(bag_cols).rank(x, y, regressor)
for itm in l1:
    print(itm)
# -

# # Other Examples: Time Series
# Time series data will need to be encoded for a regular feedforward neural network. In a few classes we will see how to use a recurrent neural network to find patterns over time. For now, we will encode the series into input neurons.
#
# Financial forecasting is a very popular form of temporal algorithm. A temporal algorithm is one that accepts input for values that range over time. If the algorithm supports short term memory (internal state) then ranges over time are supported automatically. If your algorithm does not have an internal state then you should use an input window and a prediction window. Most algorithms do not have an internal state. To see how to use these windows, consider if you would like the algorithm to predict the stock market. You begin with the closing price for a stock over several days:
#
# ```
# Day 1:  $45
# Day 2:  $47
# Day 3:  $48
# Day 4:  $40
# Day 5:  $41
# Day 6:  $43
# Day 7:  $45
# Day 8:  $57
# Day 9:  $50
# Day 10: $41
# ```
#
# The first step is to normalize the data. This is necessary whether your algorithm has internal state or not. To normalize, we want to change each number into the percent movement from the previous day. For example, day 2 would become 0.04, because there is a 4% difference between $45 and $47. Once you perform this calculation for every day, the data set will look like the following:
#
# ```
# Day 2:   0.04
# Day 3:   0.02
# Day 4:  -0.16
# Day 5:   0.02
# Day 6:   0.04
# Day 7:   0.04
# Day 8:   0.04
# Day 9:  -0.12
# Day 10: -0.18
# ```
#
# In order to create an algorithm that will predict the next day's values, we need to think about how to encode this data to be presented to the algorithm. The encoding depends on whether the algorithm has an internal state. The internal state allows the algorithm to use the last few values inputted to help establish trends.
#
# Many machine learning algorithms have no internal state. If this is the case, then you will typically use a sliding window algorithm to encode the data. To do this, we use the last three prices to predict the next one. The inputs would be the last three-day prices, and the output would be the fourth day.
# The above data could be organized in the following way to provide training data.
#
# These cases specified the ideal output for the given inputs:
#
# ```
# [ 0.04,  0.02, -0.16] ->  0.02
# [ 0.02, -0.16,  0.02] ->  0.04
# [-0.16,  0.02,  0.04] ->  0.04
# [ 0.02,  0.04,  0.04] ->  0.26
# [ 0.04,  0.04,  0.26] -> -0.12
# [ 0.04,  0.26, -0.12] -> -0.18
# ```
#
# The above encoding would require that the algorithm have three inputs and one output.

# +
import numpy as np


def normalize_price_change(history):
    """Turn an absolute price series into day-over-day fractional changes.

    The first price has no predecessor, so the result is one element
    shorter than the input.
    """
    changes = []
    prev = None
    for price in history:
        if prev is not None:
            changes.append((price - prev) / float(prev))
        prev = price
    return changes


def encode_timeseries_window(source, lag_size, lead_size):
    """Encode a sequence into sliding input/output windows.

    :param source: The sequence of observations to be encoded.
    :param lag_size: How many consecutive observations form one input row.
    :param lead_size: How many following observations form one output row.
    :return: A tuple (x, y) of inputs and expected outputs for training.
    """
    window = lag_size + lead_size
    xs = []
    ys = []
    for start in range(len(source) - window + 1):
        xs.append([source[start + offset] for offset in range(lag_size)])
        ys.append([source[start + lag_size + offset] for offset in range(lead_size)])
    return xs, ys


price_history = [45, 47, 48, 40, 41, 43, 45, 57, 50, 41]

norm_price_history = normalize_price_change(price_history)
print("Normalized price history:")
print(norm_price_history)
print()

print("Rounded normalized price history:")
norm_price_history = np.round(norm_price_history, 2)
print(norm_price_history)
print()

print("Time Boxed(time series encoded):")
x, y = encode_timeseries_window(norm_price_history, 3, 1)
for inputs, target in zip(x, y):
    print("{} -> {}".format(np.round(inputs, 2), np.round(target, 2)))
# -
t81_558_class6_preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Layering Nested Operations # # We start by loading the necessary libraries and resetting the computational graph. import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import os from tensorflow.python.framework import ops ops.reset_default_graph() # ### Create a graph session sess = tf.Session() # ### Create the Tensors, Constants, and Placeholders # # We start by creating an array to feed in to a placeholder (note the agreements on the dimensions). We then declare some graph constants to use in the operations. # Create data to feed in my_array = np.array([[1., 3., 5., 7., 9.], [-2., 0., 2., 4., 6.], [-6., -3., 0., 3., 6.]]) # Duplicate the array for having two inputs x_vals = np.array([my_array, my_array + 1]) # Declare the placeholder x_data = tf.placeholder(tf.float32, shape=(3, 5)) # Declare constants for operations m1 = tf.constant([[1.],[0.],[-1.],[2.],[4.]]) m2 = tf.constant([[2.]]) a1 = tf.constant([[10.]]) # ### Declare Operations # # We start with matrix multiplication (A[3x5] * m1[5x1]) = prod1[3x1] # 1st Operation Layer = Multiplication prod1 = tf.matmul(x_data, m1) # Second operation is multiplication of prod1[3x1] by m2[1x1], which results in prod2[3x1] # 2nd Operation Layer = Multiplication prod2 = tf.matmul(prod1, m2) # The third operation is matrix addition of prod2[3x1] to a1[1x1], This makes use of TensorFlow's broadcasting. 
# 3rd Operation Layer = Addition add1 = tf.add(prod2, a1) # ### Evaluate and Print Output for x_val in x_vals: print(sess.run(add1, feed_dict={x_data: x_val})) # ### Create and Format Tensorboard outputs for viewing # + merged = tf.summary.merge_all(key='summaries') if not os.path.exists('tensorboard_logs/'): os.makedirs('tensorboard_logs/') my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph) # - # ![layering_nested_operations](https://github.com/nfmcclure/tensorflow_cookbook/raw/master/02_TensorFlow_Way/images/02_Multiple_Operations.png)
02_TensorFlow_Way/02_Layering_Nested_Operations/02_layering_nested_operations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Baseball pitcher WAR calculation using Statcast data
# by <NAME> (13/2/2019)

# Here is an example on how to calculate Wins Over Replacement (WAR) using Statcast data.
#
# [Baseball Reference](https://www.baseball-reference.com/about/war_explained.shtml) and [Fangraphs](https://library.fangraphs.com/misc/war/) both provide a detailed discussion and calculation steps for WAR. It's recommended to read both sites before getting back to this calculation.
#
# To retrieve Statcast data from [Baseballsavant](https://baseballsavant.mlb.com/), python package [pybaseball](https://github.com/jldbc/pybaseball) is used. It can be installed via
#
# ```
# pip install pybaseball
# ```
#
# Sample script of downloading the Statcast database to your computer and storing it in SQL format is as follows:

# +
import pybaseball

# Pull the 2018 regular season one month at a time — presumably to keep each
# Baseballsavant request small; confirm against pybaseball's rate limits.
data_April = pybaseball.statcast(start_dt='2018-03-29', end_dt='2018-04-30')
data_May = pybaseball.statcast(start_dt='2018-05-01', end_dt='2018-06-01')
data_June = pybaseball.statcast(start_dt='2018-06-02', end_dt='2018-06-30')
data_July = pybaseball.statcast(start_dt='2018-07-01', end_dt='2018-08-01')
data_August = pybaseball.statcast(start_dt='2018-08-02', end_dt='2018-08-30')
data_Septemeber = pybaseball.statcast(start_dt='2018-08-31', end_dt='2018-10-01')
# -

# The following calculation assumes that Statcast data is put in a table named 'statcast_year', e.g. data corresponding to year 2018 is
#
# ```
# statcast_2018
# ```
#

# +
import psycopg2 #postgreSQL
#import sqlite3 #SQLite
#import pymysql #MySQL
import sqlalchemy

# NOTE(review): replace dbname/username/password (and the 'dialect' URL below)
# with real connection details before running.
conn = psycopg2.connect("dbname='dbname' user='username' host='localhost' password='password'") #postgreSQL
#conn = sqlite3.connect('dbname.db') #SQLite
#conn = pymysql.connect (host='127.0.0.1',user='username',passwd = "password" ,db = 'dbname') #MySQL

cur = conn.cursor()
engine = sqlalchemy.create_engine('dialect://username:dialect@localhost/dbname', echo=False)
# -

# Write each month into statcast_2018: the first write replaces any existing
# table, subsequent writes append.
data_April.to_sql('statcast_2018', con=engine, if_exists='replace')
data_May.to_sql('statcast_2018', con=engine, if_exists='append')
data_June.to_sql('statcast_2018', con=engine, if_exists='append')
data_July.to_sql('statcast_2018', con=engine, if_exists='append')
data_August.to_sql('statcast_2018', con=engine, if_exists='append')
data_Septemeber.to_sql('statcast_2018', con=engine, if_exists='append')

import pandas as pd
import numpy as np

# The first thing we do here is to calculate the number of innings pitched by each pitcher. We now collect events that contribute to 1, 2 or 3 outs and sum together.
#
# One inning is defined by three outs made, so it would be a weighted sum of the number of events. Also we would like to calculate the number of games and the number of starts in those games as well.
query = ''' SELECT pitcher,COUNT(DISTINCT(game_pk)) AS game_count FROM statcast_2018 GROUP BY pitcher,game_pk ''' statcast_game_count = pd.read_sql(query,engine) query = ''' SELECT a1.pitcher,COUNT(DISTINCT(game_pk)) AS game_start FROM statcast_2018 a1 LEFT OUTER JOIN ( SELECT pitcher,MIN(at_bat_number) AS min_at_bat FROM statcast_2018 WHERE inning = 1 and outs_when_up = 0 AND pitch_number = 1 GROUP BY pitcher,game_pk ) a2 ON a1.pitcher=a2.pitcher AND a1.at_bat_number = a2.min_at_bat WHERE inning = 1 and outs_when_up = 0 AND pitch_number = 1 GROUP BY a1.pitcher,game_pk ''' statcast_game_start = pd.read_sql(query,engine) query = ''' SELECT pitcher,outs_when_up,game_pk,COUNT(pitcher) AS count_one FROM statcast_2018 WHERE pitcher IS NOT NULL AND events NOT LIKE '%%double_play%%' AND events NOT LIKE '%%triple_play%%' AND (events LIKE '%%out%%' OR events LIKE '%%caught%%' OR events LIKE '%%sac%%' OR events LIKE '%%choice%%') GROUP BY pitcher,outs_when_up,inning,game_pk ''' statcast_all_inning_one = pd.read_sql(query,engine) query = ''' SELECT pitcher,outs_when_up,game_pk,COUNT(pitcher) AS count_two FROM statcast_2018 WHERE pitcher IS NOT NULL AND events LIKE '%%double_play%%' AND events NOT LIKE '%%triple_play%%' GROUP BY pitcher,outs_when_up,inning,game_pk ''' statcast_all_inning_two = pd.read_sql(query,engine) query = ''' SELECT pitcher,outs_when_up,game_pk,COUNT(pitcher) AS count_three FROM statcast_2018 WHERE pitcher IS NOT NULL AND events LIKE '%%triple_play%%' GROUP BY pitcher,outs_when_up,inning,game_pk ''' statcast_all_inning_three = pd.read_sql(query,engine) # + pitcher_one_out = statcast_all_inning_one.groupby(['pitcher']).count()['count_one'] pitcher_two_out = statcast_all_inning_two.groupby(['pitcher']).count()['count_two'] pitcher_three_out = statcast_all_inning_three.groupby(['pitcher']).count()['count_three'] pitcher_game_count = statcast_game_count.groupby(['pitcher']).count()['game_count'] pitcher_game_start = 
statcast_game_start.groupby(['pitcher']).count()['game_start'] pitcher_inning_df = pd.concat([pitcher_one_out,pitcher_two_out,pitcher_three_out,pitcher_game_count,pitcher_game_start],axis=1).fillna(0) pitcher_inning_df['IP'] = (pitcher_inning_df['count_one'] + 2*pitcher_inning_df['count_two'] + 3*pitcher_inning_df['count_three'])/3 # - pitcher_inning_df = pitcher_inning_df[['game_count','game_start','IP']] pitcher_inning_df.head() # The next thing we do is to collect expected weighted On Base Average(xwoba) data from Statcast database combining data with walks and strikeout. # # The standard wOBA formula is as follow: # # $$wOBA = \frac{0.7 * BB + 0.9 * 1B + 1.25 * 2B + 1.6 * 3B + 2 * HR} {PA}$$ # # What Statcast does is that the system track the exit velocity and exit angle of eached batted ball and assign a expected wOBA value on it. # # [Breakdown of estimated wOBA on Baseballsavant](https://baseballsavant.mlb.com/statcast_hit_probability) query = ''' SELECT pitcher,estimated_woba_using_speedangle,woba_value,woba_denom,at_bat_number,game_pk,home_team,away_team, CASE WHEN inning_topbot = 'Top' THEN home_team ELSE away_team END AS pitch_team, CASE WHEN estimated_woba_using_speedangle IS NULL THEN woba_value ELSE estimated_woba_using_speedangle END AS xwoba FROM statcast_2018 WHERE woba_denom = 1 AND (events != 'intentional_walk' AND events != 'catcher_interf') ''' statcast_pitcher_pitch_woba = pd.read_sql(query,engine) # Batting performance in baseball are dependent on the stadium the team play due to altitude, dimensions or other factors. # # It is commonly referred as 'park factor'. The formula is simply: # # $$Park Factor = \frac{xwOBA\, at\, home\, + opponent\, xwOBA\, at\, home} # {xwOBA\, at\, away\, + opponent\, xwOBA\, at\, away} * 100$$ # # To eliminate sample size effect, three years worth of data is used. 
# + #Select one year data only # query = ''' # SELECT home_team,away_team, # CASE WHEN estimated_woba_using_speedangle IS NULL THEN woba_value # ELSE estimated_woba_using_speedangle # END AS xwoba # FROM statcast_2018 # WHERE woba_denom = 1 AND (events != 'intentional_walk' AND events != 'catcher_interf') # ''' # statcast_park_factor = pd.read_sql(query,engine) # + query = ''' SELECT home_team,away_team, CASE WHEN estimated_woba_using_speedangle IS NULL THEN woba_value ELSE estimated_woba_using_speedangle END AS xwoba FROM statcast_2016 WHERE woba_denom = 1 AND (events != 'intentional_walk' AND events != 'catcher_interf') UNION ALL SELECT home_team,away_team, CASE WHEN estimated_woba_using_speedangle IS NULL THEN woba_value ELSE estimated_woba_using_speedangle END AS xwoba FROM statcast_2017 WHERE woba_denom = 1 AND (events != 'intentional_walk' AND events != 'catcher_interf') UNION ALL SELECT home_team,away_team, CASE WHEN estimated_woba_using_speedangle IS NULL THEN woba_value ELSE estimated_woba_using_speedangle END AS xwoba FROM statcast_2018 WHERE woba_denom = 1 AND (events != 'intentional_walk' AND events != 'catcher_interf') ''' statcast_park_factor = pd.read_sql(query,engine) # - home_xwoba = statcast_park_factor['xwoba'].groupby(statcast_pitcher_pitch_woba['home_team']).mean() away_xwoba = statcast_park_factor['xwoba'].groupby(statcast_pitcher_pitch_woba['away_team']).mean() park_factor = home_xwoba/away_xwoba*100 # Also, American League in MLB adopted Designated Hitter (DH) rule which pitcher can be replaced by player when at bat, and thus AL as a better batting stat overall. # # We would like to consider the effect as well by adjusting to league average, or weighted average by Plate Appearance if the player has switch league during the season. 
# + AL_team = ['BAL','BOS','CLE','CWS','DET','HOU','KC','LAA','MIN','NYY','OAK','SEA','TB','TEX','TOR'] NL_team = ['ARI','ATL','CHC','CIN','COL','LAD','MIA','MIL','NYM','PHI','PIT','SD','SF','STL','WSH'] AL_average = np.average(statcast_pitcher_pitch_woba[statcast_pitcher_pitch_woba['pitch_team'].isin(AL_team)]['xwoba']) NL_average = np.average(statcast_pitcher_pitch_woba[statcast_pitcher_pitch_woba['pitch_team'].isin(NL_team)]['xwoba']) AL_average_df = pd.DataFrame({'pitch_team':AL_team,'league_avg': AL_average, 'AL_team':1}) NL_average_df = pd.DataFrame({'pitch_team':NL_team,'league_avg': NL_average, 'AL_team':0}) statcast_pitcher_pitch_woba = statcast_pitcher_pitch_woba.merge(pd.DataFrame({'park_factor': park_factor}),on='home_team') statcast_pitcher_pitch_woba = statcast_pitcher_pitch_woba.merge(pd.concat([AL_average_df,NL_average_df]),on='pitch_team') # - statcast_pitcher_pitch_woba.head() # Then we will group xwOBA value of each pitcheres and calculate Runs Above Average (RAA) using the following formula: # # $$ RAA = \frac{((wOBA*PF/100-lgAvg)}{wOBA Scale} * PA$$ # # [The calculation of wOBA scale is described in detail by Fangraphs here](https://library.fangraphs.com/principles/linear-weights/) To simplified the calculation, we assume wOBA scale = 1.2 here. 
statcast_pitcher_xwoba = statcast_pitcher_pitch_woba.groupby(['pitcher'])['xwoba'].mean() pitcher_park_factor = statcast_pitcher_pitch_woba.groupby(['pitcher'])['park_factor'].mean() pitcher_league_avg = statcast_pitcher_pitch_woba.groupby(['pitcher'])['league_avg'].mean() pitcher_PA_count = statcast_pitcher_pitch_woba.groupby(['pitcher'])['league_avg'].count() pitcher_AL_prop = statcast_pitcher_pitch_woba.groupby(['pitcher'])['AL_team'].mean() pitcher_raa = -(statcast_pitcher_xwoba * pitcher_park_factor/100 - pitcher_league_avg )/1.2 * pitcher_PA_count pitcher_run_out = pd.DataFrame({'RAA':pitcher_raa,'PF':pitcher_park_factor,'league_avg':pitcher_league_avg,'AL_prop':pitcher_AL_prop}) pitcher_run_df = pitcher_inning_df[['IP','game_count', 'game_start']].merge(pitcher_run_out,on='pitcher') pitcher_run_df['RAA_per_game'] = pitcher_run_df['RAA'] / pitcher_run_df['game_count'] # To convert runs to wins, since pitcher quality will directly affact the scoring enviornment, each pitchers are assigned with a specific runs to wins ratio. 
More details on [Baseball-reference.](https://www.baseball-reference.com/about/war_explained_runs_to_wins.shtml) # # The calculation steps are as follow: # # 1) Calculate league average runs per out: query = ''' SELECT MAX(post_bat_score) AS run, CASE WHEN inning_topbot = 'Top' THEN home_team ELSE away_team END AS pitch_team FROM statcast_2018 WHERE post_bat_score IS NOT NULL GROUP BY game_pk,pitch_team ''' statcast_season_run = pd.read_sql(query,engine) AL_run_per_out = statcast_season_run[statcast_season_run.pitch_team.isin(AL_team)]['run'].sum()/162/len(AL_team)/26.8 NL_run_per_out = statcast_season_run[statcast_season_run.pitch_team.isin(NL_team)]['run'].sum()/162/len(NL_team)/26.8 # 2) Calculate pitcher specific Pythagorean Win component by: # # $$x = (53.6 * league\, average\, runs\, per\, out - pitcherRAA)^.285$$ pitcher_pythcom = (53.6 * (pitcher_run_df['AL_prop'] *AL_run_per_out + (1 - pitcher_run_df['AL_prop']) * NL_run_per_out) - pitcher_run_df['RAA_per_game'])** 0.285 # 3) Calculate pitcher W-L%: # # $$Win\% = \frac{4.14^x}{4.14^x + (4.14-pitcherRAAperGame)^x}$$ pithcher_pywin = 4.14**pitcher_pythcom / (4.14**pitcher_pythcom+ (4.14-pitcher_run_df['RAA_per_game'])**pitcher_pythcom) # And Win Above Average (WAA) per game = $Win\% - 0.5$ pitcher_run_df['WAA_per_game'] = (pithcher_pywin - 0.5) # We have finally arrived to calculate replacement level of pitcher. Replacement level is perhaps the most difficult concept for the whole calculation. Fangraphs has two great article on its concept: # # [The Beginnerโ€™s Guide to Replacement Level](https://library.fangraphs.com/the-beginners-guide-to-replacement-level/) # [The Recent Examples of a Replacement Level Player](https://blogs.fangraphs.com/the-recent-examples-of-a-replacement-level-player/) # # In modern baseball, reliever will throw in much fewer innings than starter and generally has a better performance. 
# To account for this effect, the replacement level is set as:
#
# $$Replacement\, level = (0.03*\frac{1-Game\,Start}{Game\, played}+0.12*\frac{Game\, start}{Game\, played})*IP/9$$
#
# Also, the concept of 'opener' become much more popular in MLB where the starting pitcher pitch much less followed by a 'headliner' who functions like a traditional pitcher. [Inspired by baseball-reference](https://www.sports-reference.com/blog/2019/03/2019-war-update/), we would like to treat openers like relievers and headliners like starters.
#
# Here opener is defined as:
#
# * At most 2 innings pitched (6 outs), or at most 9 batters faced
#
# and headliner is defined as:
#
# 1. At least 4 innings pitched (12 outs), or at least 18 batters faced
#
# 2. Pitch in first 3 innings or enter the game in 4th inning
#
# It counts only if an opener and a headliner both play in the same game.
#
# Then we would deduct number of games as opener from games started and add the number games entered as headliner
#
# $$Adjusted\,game\,start = Game\,Start - Game\,Opener + Game\,Reliever$$

# Batters faced per pitcher per game (used by the opener/headliner definitions).
query = '''
SELECT pitcher,game_pk,COUNT(DISTINCT(at_bat_number)) AS AB_count,inning_topbot
FROM statcast_2018
GROUP BY pitcher,game_pk,inning_topbot
'''
statcast_AB_count = pd.read_sql(query,engine)

# Pitchers whose first pitch of a game came in innings 1-3 (or the start of the
# 4th with no outs) — the "enter early" half of the headliner definition.
query = '''
SELECT pitcher,game_pk,COUNT(DISTINCT(game_pk)) AS early_enter
FROM statcast_2018
WHERE (inning = 1 OR inning = 2 OR inning = 3 OR(inning = 4 AND outs_when_up = 0)) AND pitch_number = 1
GROUP BY pitcher,game_pk
'''
statcast_game_enter = pd.read_sql(query,engine)

# +
# Innings pitched per pitcher per game: 1-out, 2-out (double play) and 3-out
# (triple play) events contribute 1/3, 2/3 and 1 inning respectively.
statcast_game_inning = pd.merge(
    (statcast_all_inning_one.groupby(['pitcher','game_pk']).count()/3)['outs_when_up'].reset_index(),
    (statcast_all_inning_two.groupby(['pitcher','game_pk']).count()*2/3)['outs_when_up'].reset_index(),
    on=['pitcher','game_pk'], how='outer')
statcast_game_inning = statcast_game_inning.merge(
    (statcast_all_inning_three.groupby(['pitcher','game_pk']).count())['outs_when_up'].reset_index(),
    how='outer').fillna(0)
statcast_game_inning['game_IP'] = (statcast_game_inning['outs_when_up_x']
                                   + statcast_game_inning['outs_when_up_y']
                                   + statcast_game_inning['outs_when_up'])
statcast_game_inning = statcast_game_inning.merge(statcast_game_start, how='outer').fillna(0)
statcast_game_inning = statcast_game_inning.merge(statcast_AB_count)
statcast_game_inning = statcast_game_inning.merge(statcast_game_enter, how='outer').fillna(0)

# A headliner entered early without being credited with the start and pitched a
# starter-sized outing; an opener got the start but pitched a reliever-sized one.
headliner_df = statcast_game_inning[(statcast_game_inning.early_enter == 1) &
                                    (statcast_game_inning.game_start == 0) &
                                    ((statcast_game_inning.game_IP >= 4) |
                                     (statcast_game_inning.ab_count >= 18))]
opener_df = statcast_game_inning[(statcast_game_inning.game_start == 1) &
                                 ((statcast_game_inning.game_IP <= 2) |
                                  (statcast_game_inning.ab_count <= 9))]

# Only games where an opener and a headliner appear on the same side count.
merged_df = pd.merge(headliner_df, opener_df, on=['game_pk','inning_topbot'], how='inner')

headliner_count = merged_df.groupby('pitcher_x').count()['game_pk'].to_frame().reset_index()
opener_count = merged_df.groupby('pitcher_y').count()['game_pk'].to_frame().reset_index()

# BUG FIX: the per-pitcher counts were computed but never attached to
# pitcher_run_df, so the 'opener_count'/'headliner_count' columns used below
# did not exist (KeyError at runtime). Rename and left-merge them in,
# defaulting to 0 for pitchers never involved in an opener/headliner game.
headliner_count = headliner_count.rename(columns={'pitcher_x': 'pitcher', 'game_pk': 'headliner_count'})
opener_count = opener_count.rename(columns={'pitcher_y': 'pitcher', 'game_pk': 'opener_count'})
pitcher_run_df = pitcher_run_df.merge(headliner_count, on='pitcher', how='left')
pitcher_run_df = pitcher_run_df.merge(opener_count, on='pitcher', how='left')
pitcher_run_df['headliner_count'] = pitcher_run_df['headliner_count'].fillna(0)
pitcher_run_df['opener_count'] = pitcher_run_df['opener_count'].fillna(0)
# -

# BUG FIX: both terms now use the same adjusted start count
# (game_start - opener_count + headliner_count); the reliever (0.03) term
# previously had the opener/headliner signs flipped, contradicting the
# Adjusted game start formula stated above.
adjusted_start = (pitcher_run_df['game_start']
                  - pitcher_run_df['opener_count']
                  + pitcher_run_df['headliner_count'])
start_share = adjusted_start / pitcher_run_df['game_count']
pitcher_run_df['replacement_level'] = (0.03 * (1 - start_share) + 0.12 * start_share) * pitcher_run_df['IP'] / 9

# (unadjusted) WAR is simply $WAA\, per\, game * Game\, played + replacement\, level$

pitcher_run_df['unadj_WAR'] = (pitcher_run_df['WAA_per_game'] * pitcher_run_df['game_count']
                               + pitcher_run_df['replacement_level'])

# We would like to reward pitchers who pitch in high leverage situations too, i.e. the game score is close, late inning or more players are on base, since the pitcher would affect the chance of winning more. [Baseball reference has a long discussion on how leverage is calculated](https://www.baseball-reference.com/about/wpa.shtml).
#
# Again, to simplify the calculation, the leverage index table is copied from [The book: playing the percentages in baseball](http://www.insidethebook.com/li.shtml).
#
# To adjust for leverage, the formula is $$unadjWAR * (1+ (gmLI/2)),$$
#
# where gmLI is the leverage when the pitcher enters the game.

# Leverage table: rows indexed by (inning, top/bottom, outs, runner
# configuration), columns by score differential.
leverage_array = np.array(pd.read_csv('baseball_leverage.csv'))

# +
# Game state at each pitcher's first pitch of every game (top and bottom
# halves queried separately, then combined).
query = '''
SELECT pitcher,at_bat_number,outs_when_up,inning,game_pk,home_team,on_1b,on_2b,on_3b,bat_score,fld_score,inning_topbot
FROM statcast_2018
WHERE (pitch_number,at_bat_number,outs_when_up,inning,game_pk) IN (
SELECT min(pitch_number),min(at_bat_number),min(outs_when_up),min(inning),game_pk
FROM statcast_2018
GROUP BY pitcher,game_pk) AND inning_topbot = 'Top'
'''
statcast_game_leverage_top = pd.read_sql(query,engine)

query = '''
SELECT pitcher,at_bat_number,outs_when_up,inning,game_pk,home_team,on_1b,on_2b,on_3b,bat_score,fld_score,inning_topbot
FROM statcast_2018
WHERE (pitch_number,at_bat_number,outs_when_up,inning,game_pk) IN (
SELECT min(pitch_number),min(at_bat_number),min(outs_when_up),min(inning),game_pk
FROM statcast_2018
GROUP BY pitcher,game_pk) AND inning_topbot = 'Bot'
'''
statcast_game_leverage_bot = pd.read_sql(query,engine)

statcast_game_leverage = pd.concat([statcast_game_leverage_top,statcast_game_leverage_bot]).drop_duplicates().reset_index(drop=True)
# -

# +
gm_leverage = []

for i, inning in enumerate(statcast_game_leverage['inning']):
    # Score differential from the batting team's perspective.
    score_diff = statcast_game_leverage['bat_score'][i] - statcast_game_leverage['fld_score'][i]

    # Runner configuration offset into the leverage table:
    # 0 empty, 1 first, 2 second, 3 third, 4 first+second, 5 first+third,
    # 6 second+third, 7 bases loaded (on_Xb is NULL when the base is empty).
    if(pd.isna(statcast_game_leverage['on_1b'][i]) and pd.isna(statcast_game_leverage['on_2b'][i]) and pd.isna(statcast_game_leverage['on_3b'][i])):
        runner_offset = 0
    elif(pd.isna(statcast_game_leverage['on_2b'][i]) and pd.isna(statcast_game_leverage['on_3b'][i])):
        runner_offset = 1
    elif(pd.isna(statcast_game_leverage['on_1b'][i]) and pd.isna(statcast_game_leverage['on_3b'][i])):
        runner_offset = 2
    elif(pd.isna(statcast_game_leverage['on_1b'][i]) and pd.isna(statcast_game_leverage['on_2b'][i])):
        runner_offset = 3
    elif(pd.isna(statcast_game_leverage['on_3b'][i])):
        runner_offset = 4
    elif(pd.isna(statcast_game_leverage['on_2b'][i])):
        runner_offset = 5
    elif(pd.isna(statcast_game_leverage['on_1b'][i])):
        runner_offset = 6
    else:
        runner_offset = 7

    if(statcast_game_leverage['inning_topbot'][i] == 'Top'):
        top_bot = 0
    else:
        top_bot = 1

    # Row: 48 states per inning (2 halves x 3 out counts x 8 runner configs),
    # with extra innings capped at the 9th; column: score differential clamped
    # to [-4, 4], shifted by +5 (column 0 presumably holds labels — confirm
    # against baseball_leverage.csv).
    gm_leverage.append(leverage_array[int(48 * (min(statcast_game_leverage['inning'][i],9)-1)
                                          + 24 * top_bot
                                          + 8 * statcast_game_leverage['outs_when_up'][i]
                                          + runner_offset)][int(min(max(score_diff,-4),4))+5])
# -

statcast_game_leverage['gmLI'] = gm_leverage

pitcher_run_df = pitcher_run_df.merge(pd.DataFrame(statcast_game_leverage.groupby(['pitcher']).mean()[['gmLI']]),on='pitcher')

# Both Fangraphs and baseball-reference had set the replacement level to .294, which means a team consisting of only replacement level players is going to win 29.4% of the time, or about 48 games.
#
# The total number of wins above replacement for a 162-game, 30-team league is $(0.5-0.294)*30*162 = 1000$ games. Note that due to playoff tiebreakers or other factors teams may not play exactly 162 games in a season.
#
# Since pitcher and batter perform independently, it is better to award wins separately to the two groups. Here 43% of wins are awarded to pitchers, same as the Fangraphs calculation

# BUG FIX: the leverage adjustment stated above is unadjWAR * (1 + gmLI/2);
# the previous code computed unadjWAR * (1 + gmLI)/2 because of missing
# parentheses, which halves the WAR of an average-leverage (gmLI = 0) pitcher.
total_war_leverage = np.sum(pitcher_run_df['unadj_WAR'] * (1 + pitcher_run_df['gmLI']/2))
total_war_leverage

# Now we add the difference between unadjusted WAR and total WAR to each pitcher per inning pitched.

query = '''
SELECT COUNT(DISTINCT(game_pk))
FROM statcast_2018 game_pk
'''
total_game = pd.read_sql(query,engine)['count'][0]

# Same parenthesization fix applied to the per-pitcher leverage term here.
pitcher_run_df['pitch_WAR'] = (total_game*2 * (0.5-0.294) * 0.43 - total_war_leverage )/ \
                              np.sum(pitcher_run_df['IP']) * \
                              pitcher_run_df['IP'] + pitcher_run_df['unadj_WAR'] * (1 + pitcher_run_df['gmLI']/2)

# Pitcher is also at bat in NL park so we need to consider their contribution at bat too.
# Here the replacement level of pitcher batting is the same as the league average xwoba for pitchers as well.

# xwOBA for every plate appearance, keyed by the *batter* id (aliased to
# 'pitcher' so it can be matched against the pitching tables).
query = '''
SELECT batter AS pitcher,woba_denom,at_bat_number,game_pk,away_team,
CASE WHEN estimated_woba_using_speedangle IS NULL THEN woba_value
ELSE estimated_woba_using_speedangle
END AS xwoba
FROM statcast_2018
WHERE woba_denom = 1
'''
statcast_pitcher_bat_woba = pd.read_sql(query,engine)

# Here we remove positional players who pitch: after joining against each
# pitcher's first pitch of the game, keep only plate appearances that came
# after that first pitch (at_bat_number_x > at_bat_number_y).
statcast_pitcher_bat_woba = statcast_pitcher_bat_woba.merge(statcast_game_leverage,on=['pitcher','game_pk'])
statcast_pitcher_bat_woba = statcast_pitcher_bat_woba[statcast_pitcher_bat_woba.at_bat_number_x > statcast_pitcher_bat_woba.at_bat_number_y].reset_index(drop=True)

# Per-pitcher batting xwOBA and plate-appearance counts.
pitcher_xwoba_bat = statcast_pitcher_bat_woba['xwoba'].groupby(statcast_pitcher_bat_woba['pitcher']).mean()
pitcher_pa_count = statcast_pitcher_bat_woba['xwoba'].groupby(statcast_pitcher_bat_woba['pitcher']).count()

# Run per win formula for batters used here is developed by <NAME>:
#
# $$9*(MLB Runs Scored / MLB Innings Pitched)*1.5 + 3$$

batter_run_per_win = 9*np.sum(statcast_season_run['run'])/np.sum(pitcher_inning_df['IP'])*1.5+3

# Batting wins: xwOBA above the pitcher-batting average, converted to runs
# (wOBA scale 1.2, as assumed earlier) and then to wins.
pitcher_bat_win = (pitcher_xwoba_bat - np.mean(statcast_pitcher_bat_woba['xwoba']))/1.2*pitcher_pa_count / batter_run_per_win

pitcher_run_df = pitcher_run_df.merge(pd.DataFrame({'bat_win':pitcher_bat_win}),on='pitcher')

# By combining bat wins we can have the total WAR of the pitcher

pitcher_run_df['total_WAR'] = (pitcher_run_df['pitch_WAR'] + pitcher_run_df['bat_win']).round(2)

# To convert player IDs from MLBAM to pitcher names, [Baseball Databank](https://github.com/chadwickbureau/baseballdatabank) data is used.

# +
# url="https://raw.githubusercontent.com/chadwickbureau/register/master/data/people.csv"
# player_table=pd.read_csv(url)
player_table=pd.read_csv('people.csv')
# -

pitcher_run_df = pitcher_run_df.merge(player_table[['key_mlbam','name_first','name_last']],left_on='pitcher',right_on='key_mlbam')
pitcher_run_df['player_full_name'] = pitcher_run_df['name_first'] + ' ' + pitcher_run_df['name_last']

pitcher_run_df.head()

# Final leaderboard: best total WAR first.
pitcher_run_df[['player_full_name', 'total_WAR']].sort_values(by=['total_WAR'],ascending=False).reset_index(drop=True)
Statcast_pitcher_WAR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Ddfvitdtnq5y" # # Cambridge IGCSE # Paper 2 Problem-solving and Programming \ # 0478/21\ # May/June2022 # # # + [markdown] id="kDnCHblztRc6" # ## Scenario # + [markdown] id="j31zcZcSplgq" # # In preparation for the examination candidates should attempt the following practical tasks by writing and testing a program or programs. # # ๅœจๅค‡่€ƒๆ—ถ๏ผŒ่€ƒ็”Ÿๅบ”่ฏฅๅฐ่ฏ•ไปฅไธ‹ๅฎž้™…็ผ–ๅ†™ๅ’Œๆต‹่ฏ•ไธ€ไธชๆˆ–ๅคšไธช็จ‹ๅบใ€‚ # # Friends of Seaview Pier is an organisation devoted to the restoration and upkeep of a pier in the town. A pier is a wooden structure that provides a walkway over the sea. The pier requires regular maintenance and the friends of the pier need to raise money for this purpose. # # ๆตทๆ™ฏ็ ๅคดไน‹ๅ‹ๆ˜ฏไธ€ไธช่‡ดๅŠ›ไบŽไฟฎๅคๅ’Œ็ปดๆŠค้•‡ไธŠ็ ๅคด็š„็ป„็ป‡ใ€‚็ ๅคดๆ˜ฏไธ€็งๆœจๅˆถ็ป“ๆž„๏ผŒๆไพ›ไบ†ไธ€ไธชๆตทไธŠ็š„้€š้“ใ€‚็ ๅคด้œ€่ฆๅฎšๆœŸ็ปดๆŠค๏ผŒ็ ๅคด็š„ๆœ‹ๅ‹้œ€่ฆไธบๆญค็ญน้›†่ต„้‡‘ใ€‚ # # Members of Friends of Seaview Pier each pay $75 per year, as a contribution to the pierโ€™s running costs. This entitles them to free admission to the pier throughout the year. They can also volunteer to help run the pier, by working at the pier entrance gate, working in the gift shop, or painting and decorating. \ # ๆตทๆ™ฏ็ ๅคดไน‹ๅ‹็š„ไผšๅ‘˜ๆฏไบบๆฏๅนดๆ”ฏไป˜75็พŽๅ…ƒ๏ผŒไฝœไธบ็ ๅคด่ฟ่ฅๆˆๆœฌ็š„ไธ€้ƒจๅˆ†ใ€‚่ฟ™ไฝฟๅพ—ไป–ไปฌๅ…จๅนด้ƒฝๅฏไปฅๅ…่ดน่ฟ›ๅ…ฅ็ ๅคดใ€‚ไป–ไปฌไนŸๅฏไปฅ่‡ชๆ„ฟๅธฎๅŠฉ็ฎก็†็ ๅคด๏ผŒๅœจ็ ๅคดๅ…ฅๅฃๅค„ๅทฅไฝœ๏ผŒๅœจ็คผๅ“ๅบ—ๅทฅไฝœ๏ผŒๆˆ–ๆฒนๆผ†ๅ’Œ่ฃ…้ฅฐใ€‚ # # To provide additional income, the pierโ€™s wooden planks can be sponsored. 
A brass plaque, which contains a short message of the sponsorโ€™s choice, is fitted to a plank on the pier, for a donation of $200.\ # # ไธบไบ†ๆไพ›้ขๅค–็š„ๆ”ถๅ…ฅ๏ผŒๅฏไปฅ่ตžๅŠฉ็ ๅคด็š„ๆœจๆฟใ€‚็ ๅคดไธŠ็š„ไธ€ๅ—ๆœจๆฟไธŠๆŒ‚็€ไธ€ๅ—้“œๅŒพ๏ผŒไธŠ้ขๅ†™็€่ตžๅŠฉๅ•†็š„ไฟกๆฏ๏ผŒๆ็Œฎ200็พŽๅ…ƒๅณๅฏใ€‚ # # Write and test a program or programs for the Friends of Seaview Pier: # # Your program or programs must include appropriate prompts for the entry of data. Data must be validated on entry. # All outputs, including error messages, need to be set out clearly and understandably. # All variables, constants and other identifiers must have meaningful names. # # ไธบๆตทๆ™ฏ็ ๅคดไน‹ๅ‹็ผ–ๅ†™ๅ’Œๆต‹่ฏ•ไธ€ไธชๆˆ–ๅคšไธช็จ‹ๅบ: # # ๆ‚จ็š„็จ‹ๅบๆˆ–็จ‹ๅบๅฟ…้กปๅŒ…ๅซ่พ“ๅ…ฅๆ•ฐๆฎ็š„้€‚ๅฝ“ๆ็คบใ€‚ๆ•ฐๆฎๅฟ…้กปๅœจ่พ“ๅ…ฅๆ—ถ่ฟ›่กŒ้ชŒ่ฏใ€‚ # ๆ‰€ๆœ‰่พ“ๅ‡บ๏ผŒๅŒ…ๆ‹ฌ้”™่ฏฏไฟกๆฏ๏ผŒ้ƒฝๅฟ…้กปๆธ…ๆฅšๅ’Œๅฏ็†่งฃๅœฐๅˆ—ๅ‡บใ€‚ # ๆ‰€ๆœ‰ๅ˜้‡ใ€ๅธธ้‡ๅ’Œๅ…ถไป–ๆ ‡่ฏ†็ฌฆๅฟ…้กปๅ…ทๆœ‰ๆœ‰ๆ„ไน‰็š„ๅ็งฐใ€‚ # # You will need to complete these three tasks. Each task must be fully tested. # # ไฝ ้œ€่ฆๅฎŒๆˆ่ฟ™ไธ‰ไธชไปปๅŠกใ€‚ๆฏ้กนไปปๅŠก้ƒฝๅฟ…้กป็ป่ฟ‡ๅ……ๅˆ†็š„ๆต‹่ฏ• # + [markdown] id="sRKlGuNBtXYP" # ## Task1 # + [markdown] id="ZKUz2eQhrnTG" # # Task1 - Becoming a member of Friends of Seaview Pier # # Set up a system to enable people to become members of Friends of Seaview Pier and for each new member enter: # - their first name and last name # - whether or not they wish to work as a volunteer # - if they choose to volunteer, identify the area from: # - the pier entrance gate # - the gift shop # - painting and decorating # - the date of joining # - whether or not they have paid the $75 fee. # # All of this information needs to be stored using suitable data structures. 
# # ไปปๅŠก1:ๆˆไธบๆตทๆ™ฏ็ ๅคดไน‹ๅ‹็š„ไผšๅ‘˜ # ๅปบ็ซ‹ไธ€ไธช็ณป็ปŸ๏ผŒ่ฎฉไบบไปฌๆˆไธบๆตทๆ™ฏ็ ๅคดไน‹ๅ‹็š„ไผšๅ‘˜๏ผŒๆฏไฝๆ–ฐไผšๅ‘˜้œ€่ฟ›ๅ…ฅ: # - ไป–ไปฌ็š„ๅง“ๅ’Œๅ # - ไธ็ฎกไป–ไปฌๆ˜ฏๅฆๆ„ฟๆ„ๅšๅฟ—ๆ„ฟ่€… # - ๅฆ‚ๆžœไป–ไปฌ้€‰ๆ‹ฉๅšๅฟ—ๆ„ฟ่€…๏ผŒ่ฏทไปŽไปฅไธ‹ๅ‡ ไธชๆ–น้ข็กฎๅฎš: # - ็ ๅคดๅ…ฅๅฃๅคง้—จ # - ็คผๅ“ๅบ— # - ็ฒ‰ๅˆทๅ’Œ่ฃ…ๆฝข # - ๅŠ ๅ…ฅๆ—ฅๆœŸ # - ไป–ไปฌๆ˜ฏๅฆๆ”ฏไป˜ไบ†75็พŽๅ…ƒ็š„่ดน็”จใ€‚ # # ๆ‰€ๆœ‰่ฟ™ไบ›ไฟกๆฏ้ƒฝ้œ€่ฆไฝฟ็”จๅˆ้€‚็š„ๆ•ฐๆฎ็ป“ๆž„่ฟ›่กŒๅญ˜ๅ‚จใ€‚ # + [markdown] id="sa6h7NPWv_Ge" # ## Task1 # ------- # + [markdown] id="HRH77khTwrGG" # ## Task1 - Becoming a member of Friends of <NAME> # + [markdown] id="ETdoDGSZJpx4" # ### Task1 - Identifier table # # # | Identifier | Data type |Description | # |:------------|:-----------|:----------------------------| # | FirstName | STRING |Store the first name of members| # |LastName|STRING|Store the last name of members| # |isVolunteer|BOOLEAN|Store whether wish to be volunteer| # |tpVolunteer|INTEGER|store the type of volunteer selected by members| # |Date|DATE|store the date of joining (YYYY/MM/DD)| # |isPaid| BOOLEAN| Store whether paid fee| # # # # # + [markdown] id="FdyOWERjuwp2" # ### Task1 - Flowchart # + colab={"base_uri": "https://localhost:8080/", "height": 453} id="bJP2OCQKtgR0" outputId="cb78f0cc-e38b-4638-ad70-385361774011" from IPython.display import IFrame IFrame (width="768", height="432", src="https://miro.com/app/live-embed/uXjVOWwhYP0=/?moveToViewport=-743,-594,1465,1551", frameBorder="0", scrolling="no") # + [markdown] id="94uVyYYb2-m8" # ### *Task1 - Pseudocode* # + [markdown] id="za7CNzqOjfO_" # # # ``` # OUTPUT "Please enter first name: " # INPUT FirstName # OUTPUT "Please enter last name:" # INPUT LastName # OUTPUT "Do you want to be a volunteer? (TRUE/FALSE)" # INPUT isVolunteer # # IF isVolunteer = TRUE THEN # REPEAT # OUTPUT "which type of volunteer do you want to be ? 
# Task 1 - register one new member of Friends of Seaview Pier.
# Collects: first/last name, volunteer choice (and, if volunteering, the
# volunteer area 1-3), the joining date, and whether the $75 fee was paid.

# appropriate prompt
FirstName = input('Please enter \nFirst name: ')
LastName = input('Last name: ')

# Type check - Boolean: keep asking until a recognisable yes/no answer arrives.
# Bug fix: the "no" branch compared the raw input (e.g. "N") against the
# lowercase list ['n', 'no'], so uppercase "N"/"NO" was rejected as invalid;
# lowercase the input on both branches.
while True:
    isVolunteer = input('Are you want to be a volunteer? (Y/N): ')
    Pos = ['y', 'yes']
    Neg = ['n', 'no']
    if isVolunteer.lower() in Pos:
        isVolunteer = True
        break
    elif isVolunteer.lower() in Neg:  # fixed: was `isVolunteer in Neg`
        isVolunteer = False
        break
    else:
        print('Invalid input !!!')

if isVolunteer:
    # Range check - the chosen volunteer area must be an integer from 1 to 3.
    while True:
        try:
            tpVolunter = int(input("There are 3 type of volunteer: \n 1: working at the pier entrance gate.\n 2:working in the gift shop.\n 3:painting and decorating\n"))
            if 1 <= tpVolunter <= 3:
                break
            else:
                print('Invalid input, Please input 1 or 2 or 3')
        except ValueError:
            print('Invalid input, Please input an integer 1 or 2 or 3')

# TODO: validate the date format with datetime.datetime.strptime
# (see the date-checking example in the Task 2 section).
joinDate = input("When will you join us (YYYY/MM/DD)？")

# Type check - Boolean: same yes/no validation (and the same lowercase fix).
while True:
    isPaid = input('have you paid membership fee? (Y/N):')
    Pos = ['y', 'yes']
    Neg = ['n', 'no']
    if isPaid.lower() in Pos:
        isPaid = True
        break
    elif isPaid.lower() in Neg:  # fixed: was `isPaid in Neg`
        isPaid = False
        break
    else:
        print('Invalid input !!!')
# |FirstNames| ARRAY of STRING| Store the first name of members|
# Date checking example: validate a date string against an expected format.
# Bug fix: the format string was "%Y-%m-d" - the day directive was missing
# its "%", so even a correctly formatted "YYYY-MM-DD" string failed the
# check. Also renamed `format` so the builtin format() is not shadowed.
import datetime

date_string = '12-25-2018'
date_format = "%Y-%m-%d"
try:
    datetime.datetime.strptime(date_string, date_format)
    print("This is the correct date string format.")
except ValueError:
    print("This is the incorrect date string format. It should be YYYY-MM-DD")


# Task 2 - store each registered member in parallel lists so the required
# categories (volunteers, each volunteer area, expired, unpaid) can be
# listed later. This example registers one member; wrap it in a while loop
# to register many.
# %reset
FirstNames = []    # STRING
LastNames = []     # STRING
isVolunteers = []  # BOOLEAN
isPierVols = []    # BOOLEAN
isGifVols = []     # BOOLEAN
isDecVols = []     # BOOLEAN
isExpireds = []    # BOOLEAN
isPaids = []       # BOOLEAN

# appropriate prompt
FirstName = input('Please enter \nFirst name: ')
LastName = input('Last name: ')

# Type check - Boolean.
# Bug fix: the "no" branch compared the raw input against the lowercase list
# ['n', 'no'], so "N"/"NO" were rejected as invalid; lowercase both branches.
while True:
    isVolunteer = input('Are you want to be a volunteer? (Y/N): ')
    Pos = ['y', 'yes']
    Neg = ['n', 'no']
    if isVolunteer.lower() in Pos:
        isVolunteer = True
        break
    elif isVolunteer.lower() in Neg:
        isVolunteer = False
        break
    else:
        print('Invalid input !!!')

if isVolunteer:
    # Range check - the volunteer area must be an integer 1, 2 or 3.
    while True:
        try:
            tpVolunter = int(input("There are 3 type of volunteer: \n 1: working at the pier entrance gate.\n 2:working in the gift shop.\n 3:painting and decorating\n"))
            if 1 <= tpVolunter <= 3:
                break
            else:
                print('Invalid input, Please input 1 or 2 or 3')
        except ValueError:
            print('Invalid input, Please input an integer 1 or 2 or 3')

# TODO: validate with datetime.strptime as in the example above.
joinDate = input("When will you join us (YYYY/MM/DD)？")

# Type check - Boolean (same lowercase fix as above).
while True:
    isPaid = input('have you paid membership fee? (Y/N): ')
    Pos = ['y', 'yes']
    Neg = ['n', 'no']
    if isPaid.lower() in Pos:
        isPaid = True
        break
    elif isPaid.lower() in Neg:
        isPaid = False
        break
    else:
        print('Invalid input !!!')

### Task 2 - append this member to the parallel lists.
FirstNames.append(FirstName)
LastNames.append(LastName)
isVolunteers.append(isVolunteer)

isPierVols.append(False)
isGifVols.append(False)
isDecVols.append(False)

# Bug fix: tpVolunter is only defined when the member chose to volunteer.
# The original read it unconditionally, which raised NameError for
# non-volunteers and (via the `else`) marked every non-volunteer as a
# painting/decorating volunteer.
if isVolunteer:
    if tpVolunter == 1:
        isPierVols[-1] = True
    elif tpVolunter == 2:
        isGifVols[-1] = True
    else:
        isDecVols[-1] = True

# Membership counts as current if joined on/after 2022/01/01 ("this year");
# earlier joiners are treated as expired.
if joinDate >= "2022/01/01":  # this year
    isExpireds.append(False)
else:
    isExpireds.append(True)

isPaids.append(isPaid)

# display the contents of each array
print(FirstNames)
print(LastNames)
print(isVolunteers)
print(isPierVols)
print(isGifVols)
print(isDecVols)
print(isExpireds)
print(isPaids)
# Task 3 - sponsor a wooden plank.
# Stores sponsor names and their plaque messages in separate lists, letting
# the sponsor review and re-enter the message until they confirm it.

Sponsors = []   # sponsor full names
messages = []   # plaque message for each sponsor, same order as Sponsors

FirstName = input('Please enter your first name: ')
LastName = input('Please enter your last name: ')

# Yes/no validation loop for the sponsorship decision.
while True:
    enter = input('do you want to be a sponsor? (y/n) ')
    if enter.lower() in ['y', 'yes']:
        isSponsor = True
        break
    elif enter.lower() in ['n', 'no']:
        isSponsor = False
        break
    else:
        print('Invalid input. please enter y, yes,n,no.')

if isSponsor:
    # Keep asking for the plaque message until the sponsor confirms it.
    while True:
        message = input('what would you like to write on your brass plaque? ')
        print('the message you just enter is : ', message)
        # Confirmation is itself validated as a yes/no answer.
        while True:
            checked = input('are you sure (y) or want to change (n)?')
            if checked.lower() in ['y', 'yes']:
                checked = True
                break
            elif checked.lower() in ['n', 'no']:
                checked = False
                break
            else:
                print('Invalid input. please enter y, yes,n,no.')
        if checked:
            break  # message confirmed; `else: continue` was redundant

    # Bug fix: the original stored FirstName+LastName with no separator
    # (e.g. "JohnSmith"); insert a space between the two names.
    Sponsors.append(FirstName + ' ' + LastName)
    messages.append(message)
Pre-release/0478_Pre_2022_MJ_21.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div align="center"><img width="50%" src="https://raw.githubusercontent.com/jupyter/jupyter/master/docs/source/_static/_images/jupyter.png"></div> # # # Jupyter Notebook # # This notebook was adapted from https://github.com/oesteban/biss2016 and is originally based on https://github.com/jvns/pandas-cookbook. # # [Jupyter Notebook](http://jupyter.org/) started as a web application, based on [IPython](https://ipython.org/) that can run Python code directly in the webbrowser. Now, Jupyter Notebook can handle over 40 programming languages and is *the* interactive, open source web application to run any scientific code. # # You might also want to try a new Jupyter environment [JupyterLab](https://github.com/jupyterlab/jupyterlab). # ## How to run a cell # # First, we need to explain how to run cells. Try to run the cell below! # + import pandas as pd print("Hi! This is a cell. Click on it and press the โ–ถ button above to run it") # - # You can also run a cell with `Ctrl+Enter` or `Shift+Enter`. Experiment a bit with that. # ## Tab Completion # One of the most useful things about Jupyter Notebook is its tab completion. # # Try this: click just after `read_csv(` in the cell below and press `Shift+Tab` 4 times, slowly. Note that if you're using JupyterLab you don't have an additional help box option. pd.read_csv( # After the first time, you should see this: # # ![](../static/images/jupyter_tab-once.png) # # After the second time: # ![](../static/images/jupyter_tab-twice.png) # # After the fourth time, a big help box should pop up at the bottom of the screen, with the full documentation for the `read_csv` function: # ![](../static/images/jupyter_tab-4-times.png) # # I find this amazingly useful. 
def print_10_nums():
    """Print the integers 0 through 9, one per line."""
    # A single print call with a newline separator emits exactly the same
    # output as looping and printing each number individually.
    print(*range(10), sep="\n")
# # Some useful magic functions are: # # Magic Name | Effect # ---------- | ------------------------------------------------------------- # # %env | Get, set, or list environment variables # # %pdb | Control the automatic calling of the pdb interactive debugger # # %pylab | Load numpy and matplotlib to work interactively # # %%debug | Activates debugging mode in cell # # %%html | Render the cell as a block of HTML # # %%latex | Render the cell as a block of latex # # %%sh | %%sh script magic # # %%time | Time execution of a Python statement or expression # # You can run `%magic` to get a list of magic functions or `%quickref` for a reference sheet. # Example 1: Let's see how long a specific command takes with `%time` or `%%time`: # %time result = sum([x for x in range(10**6)]) # Example 2: Let's use `%%latex` to render a block of latex # + language="latex" # $$F(k) = \int_{-\infty}^{\infty} f(x) e^{2\pi i k} \mathrm{d} x$$ # -
notebooks/introduction_jupyter-notebook.ipynb
# Twitter direct-message chatbot: replies to incoming DMs using a
# ChatterBot instance trained on the English corpus.
# NOTE(review): this is Python 2 code (the kernelspec is python2 and it
# mixes `print(...)` with `print "..."` statements); tweepy's userstream()
# API used below is long deprecated - confirm the tweepy version before
# reusing.
from chatterbot import ChatBot
import tweepy
#from keys import * #import our user keys

# do twitter auth stuff
auth = tweepy.OAuthHandler('','') #(c_key, c_secret)
auth.set_access_token("","") #(token, token_secret)
api = tweepy.API(auth) # Get our API object

#Set up our chat bot:
chatbot = ChatBot(
    '<NAME>',
    trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
)

# Train based on the english corpus
chatbot.train("chatterbot.corpus.english")


class StdOutListener(tweepy.StreamListener):
    """Class that handles tweepy events. E.g: on_connect, on_disconnect, on_status, on_direct_message, etc."""

    def on_connect( self ):
        # Announce the bot going online with a status tweet; failures are
        # reported but do not stop the stream.
        print("Connection to twitter established!!")
        self.me = api.me()
        try:
            api.update_status(None, 'Chat bot online!')
        except tweepy.error.TweepError as e:
            print("Error sending bot online tweet.")
            print("Message: %s" %(e))

    def on_disconnect( self, notice ):
        # Mirror of on_connect: tweet that the bot went offline.
        print("Connection to twitter lost!! : ", notice)
        try:
            api.update_status(None, 'Chat bot bot now offline.')
        except tweepy.error.TweepError as e:
            print("Error sending bot offline tweet.")
            print("Message: %s" %(e))

    def on_status( self, status ):
        # Log every timeline status seen by the stream.
        print(status.user.name+": \""+status.text+"\"")
        return True

    def on_direct_message(self, status):
        # Reply to DMs from anyone except the bot itself (guards against
        # replying to our own outgoing messages, which would loop forever).
        print("Direct message received.")
        try:
            if status.direct_message['sender_screen_name'] != self.me.screen_name:
                print(status.direct_message['sender_screen_name']+": \""+status.direct_message['text']+"\"")
                # Ask the chatbot for a response to the DM text and send it back.
                response = chatbot.get_response(status.direct_message['text'])
                print "chat bot response: %s" %(response)
                api.send_direct_message(user_id =status.direct_message['sender_id'], text =response)
                return True
        except BaseException as e:
            # Broad catch keeps the stream alive on any per-message failure.
            print("Failed on_direct_message()", str(e))

    def on_error( self, status ):
        # Log the error code and tweet that the bot is going offline.
        print(status)
        try:
            api.update_status(None, 'Chat bot encountered an error... Now offline.')
        except tweepy.error.TweepError as e:
            print("Error sending bot offline-error tweet.")
            print("Message: %s" %(e))


def main():
    # Start the (deprecated) user stream and run until interrupted;
    # Ctrl+C triggers a graceful offline tweet.
    try:
        me = api.me()
        print "Starting userstream for %s ( %s )" %(me.name, me.screen_name)
        stream = tweepy.Stream(auth, StdOutListener())
        stream.userstream()
    except KeyboardInterrupt:
        print("Shutting down the twitter chatbot...")
        api.update_status(None, 'Chat bot bot now offline.')
        print('goodbye!')


if __name__ == '__main__':
    main ()
chatbottwitterdirectmessage.ipynb
# Speech-command classification on MFCC features: a dense baseline model
# followed by an LSTM model, with training tracked via Weights & Biases.
# NOTE(review): save_data_to_array() and get_train_test() come from the
# local `preprocess` module (star import) - their exact output shapes are
# assumed, not shown here.
from preprocess import *
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, LSTM
from keras.utils import to_categorical
import wandb
from wandb.keras import WandbCallback
import matplotlib.pyplot as plt

wandb.init()
config = wandb.config
# max_len: number of MFCC frames kept per clip; buckets: number of MFCC
# coefficients (n_mfcc).
config.max_len = 11
config.buckets = 20

# Save data to array file first
save_data_to_array(max_len=config.max_len, n_mfcc=config.buckets)

labels=["bed", "happy", "cat"]

# # Loading train set and test set
X_train, X_test, y_train, y_test = get_train_test()

# # Feature dimension
channels = 1
config.epochs = 50
config.batch_size = 100

num_classes = 3

# Reshape to (samples, buckets, max_len, 1) image-like tensors so a sample
# can be displayed (and fed to conv-style models).
X_train = X_train.reshape(X_train.shape[0], config.buckets, config.max_len, channels)
X_test = X_test.reshape(X_test.shape[0], config.buckets, config.max_len, channels)

# Visual sanity check: show one MFCC "image" and its label.
plt.imshow(X_train[100, :, :, 0])
print(y_train[100])

# One-hot encode the integer labels for categorical cross-entropy.
y_train_hot = to_categorical(y_train)
y_test_hot = to_categorical(y_test)

# Drop the channel axis again: the dense baseline flattens 2-D samples.
X_train = X_train.reshape(X_train.shape[0], config.buckets, config.max_len)
X_test = X_test.reshape(X_test.shape[0], config.buckets, config.max_len)

# Baseline: flatten the MFCC matrix into a single softmax layer.
model = Sequential()
model.add(Flatten(input_shape=(config.buckets, config.max_len)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])

wandb.init()
model.fit(X_train, y_train_hot, epochs=config.epochs, validation_data=(X_test, y_test_hot), callbacks=[WandbCallback(data_type="image", labels=labels)])

# build model
# NOTE(review): this LSTM declares a 3-D per-sample input_shape
# (buckets, max_len, channels) but X_train was just reshaped to 2-D
# samples, and the intermediate Dense(1, 'sigmoid') collapses features
# before the softmax - both look unintended; verify before training.
model = Sequential()
model.add(LSTM(16, input_shape=(config.buckets, config.max_len, channels), activation="sigmoid"))
model.add(Dense(1, activation='sigmoid'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])

wandb.init()
model.fit(X_train, y_train_hot, epochs=config.epochs, validation_data=(X_test, y_test_hot), callbacks=[WandbCallback(data_type="image", labels=labels)])
audio_spectogram_cnn.ipynb
from genomics_data_index.api.viewer.TreeStyler import HighlightStyle
from typing import List


def create_highlight_style(colors: List[str]) -> HighlightStyle:
    """Build a HighlightStyle that uses each given colour for both the
    foreground and the background of a highlight set, with black used for
    samples of unknown status.

    NOTE(review): this calls the private HighlightStyle._create_highlights
    helper - confirm it is still available when upgrading
    genomics_data_index.
    """
    fg_colors = colors
    # Reuse the same colour list for foreground and background.
    bg_colors = fg_colors
    # Unknown samples are rendered black-on-black.
    unknown_fg_color = 'black'
    unknown_bg_color = 'black'
    return HighlightStyle._create_highlights(fg_colors=fg_colors, bg_colors=bg_colors, unknown_bg_color=unknown_bg_color, unknown_fg_color=unknown_fg_color)
evaluations/visualization/visualization-trees.ipynb
# Load-identification notebook: visualise equipment measurements, resample
# them, then classify the equipment class with a KNN model and plot a
# percentage confusion matrix.
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix

import sklearn.metrics as metrics


def plot_cm(y_true, y_pred, figsize=(10, 10)):
    """Plot a confusion-matrix heatmap annotated with per-row percentages.

    Diagonal cells show "percent\\ncorrect/row-total", non-zero off-diagonal
    cells show "percent\\ncount", and zero off-diagonal cells are left blank.
    """
    cm = metrics.confusion_matrix(y_true, y_pred, labels=np.unique(y_true))
    cm_sum = np.sum(cm, axis=1, keepdims=True)
    cm_perc = cm / cm_sum.astype(float) * 100
    annot = np.empty_like(cm).astype(str)
    nrows, ncols = cm.shape
    for i in range(nrows):
        for j in range(ncols):
            c = cm[i, j]
            p = cm_perc[i, j]
            if i == j:
                s = cm_sum[i]
                annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
            elif c == 0:
                annot[i, j] = ''
            else:
                annot[i, j] = '%.1f%%\n%d' % (p, c)
    cm = pd.DataFrame(cm, index=np.unique(y_true), columns=np.unique(y_true))
    cm.index.name = 'Actual'
    cm.columns.name = 'Predicted'
    fig, ax = plt.subplots(figsize=figsize)
    sns.heatmap(cm, cmap="YlGnBu", annot=annot, fmt='', ax=ax, linewidths=.5)


# Import data (exported from PostgreSQL) and show pairwise scatter plots
# coloured by equipment class.
data = pd.read_csv("../datasets/Labsmart EquipmentsData.csv", header=0)
g = sns.PairGrid(data, hue='class')
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)

## Set the timestamp as index
data['time'] = pd.to_datetime(data['time'])
data = data.set_index(data.time)

data.describe()

## Resample into 5-second windows, aggregating each electrical measurement.
resampled = data.resample('5S').agg({'class': ['mean', 'count'],
                                     'voltage': 'mean',
                                     'current': ['mean', 'var'],
                                     'activepower': 'mean',
                                     'reactivepower': 'mean',
                                     'powerfactor': 'mean',
                                     'fundamentalpower': 'mean',
                                     'hamonicpower': ['mean', 'var']})
resampled.columns = resampled.columns.map('_'.join)
# Keep only windows built from more than 3 raw samples, then drop the count.
resampled = resampled.query('class_count > 3')
resampled = resampled.drop('class_count', axis=1)
resampled.head()

# Min-max normalization.
# NOTE(review): `normalized` is never used below - the classifier trains on
# the raw `data`; confirm whether the resampled/normalized features were
# meant to be used instead.
normalized = (resampled - resampled.min()) / (resampled.max() - resampled.min())
normalized

# Bug fix: the original notebook evaluated `y.shape` at this point, before
# `y` is defined below, which raises a NameError; the stray statement was
# removed.

# Separate features and target, then train the model on a 50/50 split.
X = data.drop(['time', 'class'], axis=1)
y = data['class']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.50)
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(X_train, y_train)

# Predict and analyse.
y_pred = classifier.predict(X_test)
plot_cm(y_test, y_pred)
print(classification_report(y_test, y_pred))
artificial_intelligence/02 - Load identification (classification)/equipments KNN.ipynb
# Spark programming assignment: sample the "washing" sensor table and
# produce the data needed for a histogram, a box plot and a run chart of
# the temperature readings.
# NOTE(review): `spark` (a SparkSession) is assumed to be provided by the
# notebook environment - it is not created here.
# %matplotlib inline
import matplotlib.pyplot as plt
from config import PARQUET_FILE

# Load the sensor data and register it as a SQL-queryable view.
df = spark.read.parquet(PARQUET_FILE)
df.createOrReplaceTempView("washing")

# spark.sql("""
# SELECT * #
# FROM washing #
# LIMIT 10
# """).show()


def getSample():
    """Return a ~10% random sample of the full DataFrame (without replacement)."""
    #TODO Please enter your code here, you are not required to use the template code below
    #some reference: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame
    #https://spark.apache.org/docs/latest/api/sql/
    return df.sample(False, 0.1)


def getListForHistogramAndBoxPlot():
    """Return all non-null temperature readings as a Python list.

    Raises an Exception if the collected result is not a list.
    """
    #TODO Please enter your code here, you are not required to use the template code below
    #some reference: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame
    #https://spark.apache.org/docs/latest/api/sql/
    result = spark.sql("""
    SELECT temperature
    FROM washing
    WHERE temperature is not null
    """)
    result_arr = result.rdd.map(lambda row: row.temperature).collect()
    if not type(result_arr)==list:
        raise Exception('return type not a list')
    return result_arr


# should return a tuple containing the two lists for timestamp and temperature
# please make sure you take only 10% of the data by sampling
# please also ensure that you sample in a way that the timestamp samples and temperature samples correspond (=> call sample on an object still containing both dimensions)
def getListsForRunChart():
    """Return (timestamps, temperatures) lists from a 10% sample.

    Sampling happens on rows that still contain both columns, so the two
    returned lists stay aligned element-for-element.
    """
    #TODO Please enter your code here, you are not required to use the template code below
    #some reference: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame
    #https://spark.apache.org/docs/latest/api/sql/
    double_tuple_rdd = spark.sql("""
    SELECT ts , temperature
    FROM washing
    WHERE temperature is not null
    ORDER BY ts asc
    """).sample(False,0.1).rdd.map(lambda row : (row.ts, row.temperature))
    result_array_ts = double_tuple_rdd.map(lambda ts_temperature: ts_temperature[0]).collect()
    result_array_temperature = double_tuple_rdd.map(lambda ts_temperature: ts_temperature[1]).collect()
    return (result_array_ts, result_array_temperature)


# Histogram and box plot of the temperature distribution.
plt.hist(getListForHistogramAndBoxPlot())
plt.show()

plt.boxplot(getListForHistogramAndBoxPlot())
plt.show()

# Run chart: temperature over time.
lists = getListsForRunChart()
plt.plot(lists[0],lists[1])
plt.xlabel("time")
plt.ylabel("temperature")
plt.show()

from rklib import submitAll

# Submit the assignment results to the Coursera grader.
# NOTE(review): email/token are placeholders and must be filled in before
# submitting.
import json

key = "S5PNoSHNEeisnA6YLL5C0g"
email = "<EMAIL>"
token = "<PASSWORD>"

parts_data = {}
parts_data["iLdHs"] = json.dumps(str(type(getListForHistogramAndBoxPlot())))
parts_data["xucEM"] = json.dumps(len(getListForHistogramAndBoxPlot()))
parts_data["IyH7U"] = json.dumps(str(type(getListsForRunChart())))
parts_data["MsMHO"] = json.dumps(len(getListsForRunChart()[0]))

submitAll(email, token, key, parts_data)
notebook-samples/spark/adv-data-sci-IBM/course-1/week-4/wk-4-programming-assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Background # This notebook includes an example excercise about the effect of pseudorapidity to the resolution of the CMS detector. The excercise consists of a theory part and a practical part. # # The effect of the pseudorapidity $\eta$ to the resolution of the CMS detector # In this excercise the CMS (Compact Muon Solenoid) detector and the concept of pseudorapidity is introduced. With the real data collected by CMS detector the effect of the pseudorapidity to the resolution of the CMS detector is observed. # ### CMS detector # At CERN particles are accelerated and collided with the LHC (Large Hadron Collider) particle accelerator. With the CMS detector the new particles created in these collisions can be observed and measured. There is the opened CMS detector in the picture below. # # <img src="../Images/CMS.jpg" alt="Image of the CMS detector" style="height: 400px"> # # (Picture: Domenico Salvagnin, https://commons.wikimedia.org/wiki/File:CMS@CERN.jpg) # <br> # <br> # <br> # ### Pseudorapidity # In experimental particle physics pseudorapidity $\eta$ is a spatial coordinate used to describe the angle between a particle and the particle beam. Pseudorapidity is determined by the equation # # $$ \eta \equiv -\ln(\tan(\frac{\theta}{2})), $$ # # where $\theta$ is the angle of a particle relative to the particle beam. # # Pseudorapidity thus describes the angle between a detected particle and the particle beam. In the image below the particle beam would go horizontally from left to right. So with the large values of $\eta$ a particle created in the collision would differ just a little from the direction of the beam. With the small values of $\eta$ the deflection is bigger. 
# With the CMS detector, for example, the momenta of particles can be measured. Pseudorapidity $\eta$ affects the resolution of the momentum measurement.
Invariant mass is a mathematical concept, not a physical mass. Invariant mass is determined by the equation # # $$ M = \sqrt{(E_1 + E_2)^2-(\vec{p_1} + \vec{p_2})^2}. $$ # # In the equation $E_1$ and $E_2$ are the energies of the muons and $\vec{p_1}$ and $\vec{p_2}$ the momenta of the muons. # # If the muon pair comes from the decay of Z-boson, the invariant mass calculated to that muon pair equals the physical mass of Z-boson (91.1876 GeV, [Particle Data Group](http://pdg.lbl.gov/2012/listings/rpp2012-list-z-boson.pdf)). If the two muons originate from some other process (there are lots of different processes in the particle collisions) then the invariant mass calculated to them is something else. # # Let's observe the invariant masses calculated from different events by plotting a histogram of them. The histogram shows that in how many events the value of the invariant mass has been in the certain value range. With the histogram one can see how close to the Z-boson mass (91.1876 GeV) the different invariant mass values will be. # <br> # <br> # <br> # [1] CMS collaboration (2016). DoubleMu primary dataset in AOD format from RunA of 2011 (/DoubleMu/Run2011A-12Oct2013-v1/AOD). CERN Open Data Portal. DOI: [10.7483/OPENDATA.CMS.RZ34.QR6N](http://doi.org/10.7483/OPENDATA.CMS.RZ34.QR6N). # ### 1) Selecting the events # <img src="../Images/Pseudorapidity_plot.png" alt="Image of pseudorapidity values" style="height: 200px"> # # (Image: Wikimedia user Mets501, Own work, CC BY-SA 3.0, https://commons.wikimedia.org/w/index.php?curid=20392149) # # First we will select from all the events into two groups the events where the pseudorapidity of the two muons have been relatively large (e.g. $\eta$ > 1.52) and relatively small (e.g. $\eta$ < 0.45). The selection is made with the code below. We want about the same amount of events to both groups so that the comparison could be done. # # Perform the selection by running the code below. 
You can run the code by clicking the code cell active and then pressing _Ctrl_ + _Enter_. The #-separated lines in the code are comments that won't affect the running of the code.

# +
# Import the needed modules. Pandas is for the data-analysis, numpy for scientific calculation
# and matplotlib.pyplot for making plots. Modules are named as pd, np and plt.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Create a new DataFrame structure from the file "Zmumu_Run2011A_masses.csv"
# (one row per dimuon event; includes columns eta1, eta2 and the invariant mass M).
dataset = pd.read_csv('../Data/Zmumu_Run2011A_masses.csv')

# Set the conditions to large and small etas. These can be changed, but it has to be taken
# care that about the same amount of events are selected in both groups.
cond1 = 1.52
cond2 = 0.45

# Create two DataFrames. Select to "large_etas" events where the pseudorapidities
# of the both muons are larger than "cond1". Select to "small_etas" events where
# the pseudorapidities of the both muons are smaller than "cond2".
# np.absolute is used so that both signs of eta (the two ends of the detector)
# are treated the same.
large_etas = dataset[(np.absolute(dataset.eta1) > cond1) & (np.absolute(dataset.eta2) > cond1)]
small_etas = dataset[(np.absolute(dataset.eta1) < cond2) & (np.absolute(dataset.eta2) < cond2)]

# Print two empty lines for better design.
print('\n' * 2)

print('The amount of all events = %d' % len(dataset))
print('The amount of events where the pseudorapidity of both muons has been large = %d' %len(large_etas))
print('The amount of events where the pseudorapidity of both muons has been small = %d' %len(small_etas))
# -

# ### 2) Creating the histograms
# Next we will create the separate histograms of the invariant masses for the events with the large pseudorapidities and with the small pseudorapidities. With the histograms we can compare these two situations.

# ### Histogram for the large $\eta$ events
# Let's start with the events where the pseudorapidity of both of the muons has been large. Run the code by clicking the code cell active and then pressing _Ctrl_ + _Enter_.
# + # Save the invariant masses to variable "inv_mass1". inv_mass1 = large_etas['M'] # Jupyter Notebook uses "magic functions". With this function it is possible to plot # the histogram straight to notebook. % matplotlib inline # Create the histogram from data in variable "inv_mass1". Set bins and range. plt.hist(inv_mass1, bins=120, range=(60,120)) # Set y-axis range from 0 to 60. axes = plt.gca() axes.set_ylim([0,60]) # Name the axises and give a title. plt.xlabel('Invariant mass [GeV]') plt.ylabel('Number of events per bin') plt.title('Histogram of invariant masses for the events where the\n pseudorapidity of both of the muons has been large\n') plt.show() # - # ### Histogram for the small $\eta$ events # Analogously than above, let's plot the histogram of the invariant masses for the events where the pseudorapidity of both of the muons has been small. # + # Save the invariant masses to variable "inv_mass2". inv_mass2 = small_etas['M'] # Jupyter Notebook uses "magic functions". With this function it is possible to plot # the histogram straight to notebook. % matplotlib inline # Create the histogram from data in variable "inv_mass1". Set bins and range. plt.hist(inv_mass2, bins=120, range=(60,120)) # Set y-axis range from 0 to 60. axes = plt.gca() axes.set_ylim([0,60]) # Name the axises and give a title. plt.xlabel('Invariant mass [GeV]') plt.ylabel('Number of events per bin') plt.title('Histogram of invariant masses for the events where the\n pseudorapidity of both of the muons has been small\n') plt.show() # - # ### 3) Exercise # Now we have created from the real CMS data the two histograms of the invariant masses. 
With the help of the histograms and the theory part of the notebook, think about the following questions:
#
# __In which way can you see the effect of the pseudorapidity on the measurement resolution of the CMS detector?__
#
# __Do your results agree with what the theory predicts?__
#
# After answering the questions you can try to change the conditions for the large and small pseudorapidities in the first code cell. The conditions are named _cond1_ and _cond2_. Make sure you choose the conditions in a way that there will be nearly the same number of events in both of the groups.
#
# After the changes, run the code again. How do the changes affect the number of events? And how do they affect the histograms?
Exercises-with-open-data/Pseudorapidity-resolution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Notebook: compare natural-image selectivity of LCA models (at several
# overcompleteness levels) against matched linear (identity-activation) models
# that share the same learned weights.
import os
import sys
import matplotlib.pyplot as plt
import itertools

# Make the repository root importable (assumes this notebook lives two
# directories below the repo root — TODO confirm for your checkout).
ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
if ROOT_DIR not in sys.path:
    sys.path.append(ROOT_DIR)

import numpy as np
import proplot as plot
import tensorflow as tf
import pandas as pd

import DeepSparseCoding.tf1x.analysis.analysis_picker as ap
import DeepSparseCoding.tf1x.data.data_selector as ds
import DeepSparseCoding.tf1x.params.param_picker as pp
import DeepSparseCoding.tf1x.models.model_picker as mp
import DeepSparseCoding.utils.plot_functions as pf

# Fix the RNG so patch extraction / example selection is reproducible.
rand_seed = 123
rand_state = np.random.RandomState(rand_seed)

# Named color palette used consistently across all figures below.
color_vals = dict(zip(["blk", "lt_green", "md_green", "dk_green", "lt_blue", "md_blue", "dk_blue",
    "lt_red", "md_red", "dk_red"],
    ["#000000", "#A9DFBF", "#196F3D", "#27AE60", "#AED6F1", "#3498DB", "#21618C",
    "#F5B7B1", "#E74C3C", "#943126"]))
# -

# ### Load DeepSparseCoding analyzer

class params(object):
    # Analysis configuration object consumed by the analyzer and data pipeline.
    # All switches below are plain attributes read elsewhere in the codebase.
    def __init__(self):
        #self.device = "/cpu"
        self.device = "/gpu:0"
        self.analysis_dataset = "test"
        self.save_info = "analysis_" + self.analysis_dataset
        self.overwrite_analysis_log = False
        # Disable every optional analysis pass; this notebook only needs the
        # pre-computed artifacts loaded via load_analysis() below.
        self.do_class_adversaries = False
        self.do_run_analysis = False
        self.do_evals = False
        self.do_basis_analysis = False
        self.do_inference = False
        self.do_atas = False
        self.do_recon_adversaries = False
        self.do_neuron_visualization = False
        self.do_full_recon = False
        self.do_orientation_analysis = False
        self.do_group_recons = False
        # Dataset / preprocessing configuration (van Hateren natural images,
        # FT-whitened, 16x16 vectorized patches).
        self.data_dir = os.path.join(ROOT_DIR, 'Datasets')
        self.data_type = 'vanhateren'
        self.vectorize_data = True
        self.rescale_data = False
        self.standardize_data = False
        self.contrast_normalize = False
        self.whiten_data = True
        self.whiten_method = "FT"
        self.whiten_batch_size = 2
        self.extract_patches = True
        self.num_patches = 1e5
        self.patch_edge_size = 16
        self.overlapping_patches = True
        self.randomize_patches = True
        self.patch_variance_threshold = 0.0
        self.lpf_data = False # whitening automatically includes lpf
        self.lpf_cutoff = 0.7
        self.batch_size = 100
        self.rand_seed = rand_seed
        self.rand_state = rand_state

# +
analysis_params = params()
#analysis_params.projects_dir = os.path.expanduser("~")+"/Redwood/JOV_Paper/Projects_New/"
analysis_params.projects_dir = os.path.expanduser("~")+"/Work/Projects/"

# Three LCA models at increasing overcompleteness (512/1024/2560 neurons for
# 256-pixel patches -> 2x/4x/10x).
model_names = ['lca_512_vh', 'lca_1024_vh', 'lca_2560_vh']#, 'sae_768_vh', 'rica_768_vh']
model_types = ['LCA', 'LCA', 'LCA']#, 'SAE', 'ICA']
model_labels = ['2x', '4x', '10x']#, 'Sparse Autoencoder', 'Linear Autoencoder']

# Build one analyzer per model and load its previously-saved analysis state.
analyzers = []
for model_type, model_name, model_label in zip(model_types, model_names, model_labels):
    analysis_params.model_name = model_name
    analysis_params.version = '0.0'
    analysis_params.model_dir = analysis_params.projects_dir+analysis_params.model_name
    model_log_file = (analysis_params.model_dir+"/logfiles/"+analysis_params.model_name
        +"_v"+analysis_params.version+".log")
    analysis_params.model_type = model_type
    analyzer = ap.get_analyzer(analysis_params.model_type)
    analysis_params.save_info = "analysis_selectivity"
    analyzer.setup(analysis_params)
    analyzer.model_label = model_label
    analyzer.model_type = model_type
    analyzer.setup_model(analyzer.model_params)
    analyzer.load_analysis(save_info="analysis_train_kurakin_targeted")
    # Per-model results accumulated throughout this notebook and saved at the end.
    analyzer.nat_selectivity = {}
    analyzers.append(analyzer)
# -

# ### Load data, weights, and activations

data = ds.get_data(analysis_params)
data = analyzers[0].model.preprocess_dataset(data, analysis_params)
data = analyzers[0].model.reshape_dataset(data, analysis_params)

num_imgs = int(analysis_params.num_patches)
#num_imgs = int(analysis_params.batch_size)

# Show a handful of consecutive example patches from the training set.
num_imgs_test = 6
img_idx = np.random.randint(num_imgs-num_imgs_test)
fig, axs = plot.subplots(ncols=num_imgs_test)
for inc_img in range(num_imgs_test):
    im = axs[inc_img].imshow(data['train'].images[img_idx+inc_img,...].reshape(16, 16), cmap='greys_r')
axs.format(suptitle=f'DSC van hateren example images')
pf.clear_axes(axs)
plot.show()

# Pull the learned dictionary (weight matrix) out of each LCA model's graph.
weights = []
for analyzer in analyzers:
    if analyzer.model_type == 'LCA':
        print(f'Loading {analyzer.analysis_params.cp_loc} from {analyzer.model_label}')
        weights.append(np.squeeze(analyzer.eval_analysis(data['train'].images[0,...][None,...],
            ['lca/weights/w:0'], analyzer.analysis_params.save_info)['lca/weights/w:0']))
#weights = [np.squeeze(analyzer.eval_analysis(data['train'].images[0,...][None,...], ['lca/weights/w:0'], analyzer.analysis_params.save_info)['lca/weights/w:0']) for analyzer in analyzers if analyzer.model_type=='LCA']

# Plot a few randomly chosen basis functions per model.
num_plots_per_model = 6
fig, axs = plot.subplots(ncols=num_plots_per_model, nrows=len(analyzers))
for analyzer_idx, analyzer in enumerate(analyzers):
    ax_row = analyzer_idx
    weight_indices = np.random.randint(0, analyzer.model_params.num_neurons, num_plots_per_model)
    for ax_col, weight_idx in enumerate(weight_indices):
        im = axs[ax_row, ax_col].imshow(weights[analyzer_idx][:, weight_idx].reshape(16, 16), cmap='greys_r')
    axs[ax_row, 0].format(title=f'{analyzer.model_label} overcomplete')
axs.format(suptitle=f'Model weights')
pf.clear_axes(axs)
plot.show()

# Sanity-check the precomputed basis-function statistics (fit envelopes).
indices = np.random.randint(low=0, high=analyzers[0].model.params.num_neurons, size=3)
fig, axs = plot.subplots(ncols=3, nrows=2)
for fig_idx, neuron_idx in enumerate(indices):
    axs[0, fig_idx].imshow(analyzers[0].bf_stats['basis_functions'][neuron_idx], cmap='greys_r')
    axs[0, fig_idx].format(title=f'Diameter = {analyzers[0].bf_stats["diameters"][neuron_idx]:.2f}')
    axs[1, fig_idx].imshow(analyzers[0].bf_stats['envelopes'][neuron_idx], cmap='greys_r')
pf.clear_axes(axs)
plot.show()

# Nonlinear (LCA) codes for the first num_imgs training patches, per model.
lca_activations = [np.squeeze(analyzer.compute_activations(data['train'].images[0:num_imgs,...],
    activation_operation=analyzer.model.get_encodings)) for analyzer in analyzers]

def compute_lambda_activations(images, model, weights,
    batch_size=None, activation_operation=None):
    """
    Computes the output code for a set of images.
    Outputs:
        evaluated activation_operation on the input images
    Inputs:
        images [np.ndarray] of shape (num_imgs, num_img_pixels)
        model [lambda model] model whose graph is evaluated; its
            weight_placeholder is fed with `weights`
        weights [np.ndarray] weight values fed into model.weight_placeholder
        batch_size [int] how many inputs to use in a batch
        activation_operation [tf operation] that produces the output activation
            if None then it defaults to `model.get_encodings()`
    """
    if activation_operation is None:
        activation_operation = model.get_encodings
    images_shape = list(images.shape)
    num_images = images_shape[0]
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.compat.v1.Session(config=config, graph=model.graph) as sess:
        if batch_size is not None and batch_size < num_images:
            # Batched path: the assert guarantees the ceil below is exact.
            assert num_images % batch_size == 0, (
                "batch_size=%g must divide evenly into num_images=%g"%(batch_size, num_images))
            num_batches = int(np.ceil(num_images / batch_size))
            batch_image_shape = [batch_size] + images_shape[1:]
            sess.run(model.init_op, {model.input_placeholder:np.zeros(batch_image_shape)})
            activations = []
            for batch_idx in range(num_batches):
                im_batch_start_idx = int(batch_idx * batch_size)
                im_batch_end_idx = int(np.min([im_batch_start_idx + batch_size, num_images]))
                batch_images = images[im_batch_start_idx:im_batch_end_idx, ...]
                feed_dict = model.get_feed_dict(batch_images, is_test=True)
                feed_dict[model.weight_placeholder] = weights
                outputs = sess.run(activation_operation(), feed_dict)
                # .copy() so the per-batch result is not aliased across iterations.
                activations.append(outputs.copy())
            # Collapse [num_batches, batch_size, num_outputs] -> [num_images, num_outputs].
            activations = np.stack(activations, axis=0)
            num_batches, batch_size, num_outputs = activations.shape
            activations = activations.reshape((num_batches*batch_size, num_outputs))
        else:
            # Single-shot path: feed every image at once.
            feed_dict = model.get_feed_dict(images, is_test=True)
            feed_dict[model.weight_placeholder] = weights
            sess.run(model.init_op, feed_dict)
            activations = sess.run(activation_operation(), feed_dict)
    return activations

lamb_activation = lambda x : tf.identity(x) # linear

# Configure a "lambda" (pass-through) model so the LCA dictionaries can be
# evaluated with a purely linear encoder for comparison.
lambda_params = pp.get_params("lambda")
lambda_params.set_data_params("vanhateren")
lambda_params.batch_size = analysis_params.batch_size
lambda_params.data_shape = [analysis_params.patch_edge_size**2] # assumes vector inputs (i.e. not convoultional)
lambda_params.activation_function = lamb_activation

# Linear codes: one lambda model per LCA dictionary, fed the same patches.
num_neurons_list = [analyzer.model_params.num_neurons for analyzer in analyzers]
linear_activations = []
for num_neurons, lca_weights in zip(num_neurons_list, weights):
    lambda_params.num_neurons = num_neurons
    lambda_model = mp.get_model("lambda")
    lambda_model.setup(lambda_params)
    lambda_activations = compute_lambda_activations(
        data['train'].images[0:num_imgs, ...],
        lambda_model,
        lca_weights,
        batch_size=lambda_params.batch_size
    )
    linear_activations.append(lambda_activations)

# +
def mask_then_normalize(vector, mask, mask_threshold):
    """
    ensure input is a vector, and then divide it by its l2 norm.
    Parameters:
        mask [np.ndarray] mask to zero out vector values
            with shape [vector_rows, vector_cols] or [vector_length,]
        vector [np.ndarray] vector with shape [vector_rows, vector_cols] or [vector_length,].
        mask_threshold [float] fraction of the mask's max below which mask
            entries are zeroed (the mask is binarized around this threshold)
    Outputs:
        vector [np.ndarray] masked vector with shape [vector_length,] and l2-norm = 1
    """
    # flatten() returns copies, so the caller's arrays are not modified in place.
    mask = mask.flatten()
    vector = vector.flatten()
    assert mask.size == vector.size, (
        f'mask size = {mask.size} must equal vector size = {vector.size}')
    # Binarize the mask: scale to [0, 1], zero below threshold, one elsewhere.
    mask /= mask.max()
    mask[mask<mask_threshold] = 0
    mask[mask>0] = 1
    vector = np.multiply(mask, vector)
    # NOTE(review): if the masked vector is all zeros this divides by 0 and
    # yields NaNs — presumably the masks always overlap the data; confirm.
    vector = vector / np.linalg.norm(vector)
    return vector

def angle_between_vectors(vec_a, vec_b):
    """
    Returns the cosine angle between two vectors
    Parameters:
        vec_a [np.ndarray] l2 normalized vector with shape [vector_length, 1]
        vec_b [np.ndarray] l2 normalized vector with shape [vector_length, 1]
    Outputs:
        angle [float] angle between the two vectors, in degrees
    """
    inner_products = np.dot(vec_a.T, vec_b)
    # Clip to the valid domain of arccos to guard against float round-off.
    inner_products = np.clip(inner_products, -1.0, 1.0)
    angle = np.arccos(inner_products) * (180 / np.pi)
    return angle

def one_to_many_angles(vec_a, vec_list_b):
    """
    Returns cosine angle from one vector to a list of vectors
    Parameters:
        vec_a [np.ndarray] l2 normalized vector with shape [vector_length,]
        vec_list_b [list of np.ndarray] list of l2 normalized vectors with shape [num_vectors][vector_length,]
    Outputs:
        angles [list of floats] angle between vec_a and each of the vectors in vec_list_b, in degrees
    """
    angles = []
    for vec_b in vec_list_b:
        angles.append(angle_between_vectors(vec_a, vec_b))
    return angles

def masked_weight_to_image_angles(weight, mask, image_list, mask_threshold):
    """
    Returns the angles (in degrees) between a masked, l2-normalized weight
    vector and each masked, l2-normalized image in image_list.
    Parameters:
        weight [np.ndarray] weight vector (flattened internally)
        mask [np.ndarray] envelope mask applied to both weight and images
        image_list [list of np.ndarray] images, each the same size as weight
        mask_threshold [float] binarization threshold passed to mask_then_normalize
    Outputs:
        angles [list of floats] one angle per image, in degrees
    """
    # NOTE(review): num_images is computed but never used.
    num_images = len(image_list)
    vec0 = mask_then_normalize(weight, mask, mask_threshold)
    vec1_list = []
    for image in image_list:
        assert image.size == vec0.size, (
            f'Each image size = {image.size} must equal the weight size = {vec0.size}')
        vec1_list.append(mask_then_normalize(image, mask, mask_threshold))
    angles = one_to_many_angles(vec0, vec1_list)
    return angles

def interesting_image_indices(activations, portion_of_max):
    """
    For each model and each neuron, find the indices of images that strongly
    drive that neuron.
    Parameters:
        activations [list of np.ndarray] per-model activation matrices of
            shape [num_images, num_neurons]
        portion_of_max [float] an image is "interesting" for a neuron when its
            activation exceeds portion_of_max times that neuron's maximum
    Outputs:
        indices [list of list of np.nonzero outputs] indexed as
            [model][neuron] -> tuple of index arrays
    """
    indices = []
    for activity in activations:
        sub_index_list = []
        for neuron_idx in range(activity.shape[1]):
            threshold = activity[:, neuron_idx].max()*portion_of_max
            sub_index_list.append(np.nonzero((activity[:, neuron_idx] > threshold)))
        indices.append(sub_index_list)
    return indices
# -

# "Interesting" (strongly driving) image sets for linear vs LCA codes.
portion_of_max = 0.5
linear_interesting_indices = interesting_image_indices(linear_activations, portion_of_max)
lca_interesting_indices = interesting_image_indices(lca_activations, portion_of_max)

# Per-model summary statistics: how many interesting images per neuron,
# for the nonlinear (nl = LCA) and linear (l) codes.
loop_zip = zip(lca_interesting_indices, linear_interesting_indices, analyzers, model_labels)
for nl_ind, l_ind, analyzer, label in loop_zip:
    num_nl_ind = [neuron_ind[0].size for neuron_ind in nl_ind]
    avg_num_nl_ind = np.mean(num_nl_ind)
    std_num_nl_ind = np.std(num_nl_ind)
    print(f'model {analyzer.model_type}_{analyzer.model_label} had an average of {avg_num_nl_ind:.1f} interesting images')
    num_l_ind = [neuron_ind[0].size for neuron_ind in l_ind]
    avg_num_l_ind = np.mean(num_l_ind)
    std_num_l_ind = np.std(num_l_ind)
    print(f'model linear_{label} had an average of {avg_num_l_ind:.1f} interesting images')
    analyzer.nat_selectivity['num_interesting_img_nl'] = num_nl_ind
    analyzer.nat_selectivity['num_interesting_img_l'] = num_l_ind
    analyzer.nat_selectivity['num_interesting_img_nl_std'] = std_num_nl_ind
    analyzer.nat_selectivity['num_interesting_img_l_std'] = std_num_l_ind
    analyzer.nat_selectivity['num_interesting_img_nl_mean'] = avg_num_nl_ind
    analyzer.nat_selectivity['num_interesting_img_l_mean'] = avg_num_l_ind
    analyzer.nat_selectivity['oc_label'] = label

# +
# Grouped bar chart of mean interesting-image counts (LCA vs Linear),
# with standard-deviation error bars placed at each bar's center.
num_interesting_means = np.stack([np.array([analyzer.nat_selectivity['num_interesting_img_nl_mean'],
    analyzer.nat_selectivity['num_interesting_img_l_mean']]) for analyzer in analyzers], axis=0)
df = pd.DataFrame(
    num_interesting_means,
    index=pd.Index(model_labels, name='Overcompleteness'),
    columns=['LCA', 'Linear']
)
fig, ax = plot.subplots(nrows=1, aspect=2, axwidth=4.8, share=0, hratios=(3))
obj = ax.bar(
    df,
    cycle=[color_vals['md_red'], color_vals['md_green']],
    edgecolor='black',
)
num_interesting_stds = np.stack([np.array([analyzer.nat_selectivity['num_interesting_img_nl_std'],
    analyzer.nat_selectivity['num_interesting_img_l_std']]) for analyzer in analyzers], axis=0)
# Recover bar centers from the patch geometry so the error bars line up.
half_bar_width = np.abs(obj[1].patches[0].xy[0] - obj[0].patches[0].xy[0])/2
lca_bar_locs = [patch.xy[0]+half_bar_width for patch in obj[0].patches]
lin_bar_locs = [patch.xy[0]+half_bar_width for patch in obj[1].patches]
ax.errorbar(lca_bar_locs, num_interesting_means[:,0] , yerr=num_interesting_stds[:,0], color='k', fmt='.')
ax.errorbar(lin_bar_locs, num_interesting_means[:,1] , yerr=num_interesting_stds[:,1], color='k', fmt='.')
ax.legend(obj, frameon=False)
# NOTE(review): 'intersting' typo in the figure title is preserved here on
# purpose (runtime string); fix upstream if desired.
ax.format(
    xlocator=1, xminorlocator=0.5, ytickminor=False,
    ylim=[0, np.max(num_interesting_means)+np.max(num_interesting_stds)],
    suptitle='Average number of intersting images'
)
# -

print([analyzer.model_params.num_steps for analyzer in analyzers])

# +
# For every neuron of every model, compute the angle between its (envelope-
# masked) weight vector and each of its interesting images, for both codes.
mask_threshold = 0.5
vect_size = analysis_params.patch_edge_size**2
image_list = [data['train'].images[idx, ...].reshape((vect_size, 1)) for idx in range(analysis_params.batch_size)]
weight_list = [[weight_matrix[:, idx].reshape((vect_size, 1)) for idx in range(weight_matrix.shape[1])] for weight_matrix in weights]
mask_list = [[envelope.reshape((vect_size, 1)) for envelope in analyzer.bf_stats['envelopes']] for analyzer in analyzers]
for model_index, analyzer in enumerate(analyzers):
    lca_weight_angles = []
    linear_weight_angles = []
    for weight_index in range(analyzers[model_index].model.params.num_neurons):
        model_weight = weight_list[model_index][weight_index]
        model_mask = mask_list[model_index][weight_index]
        lca_images = [data['train'].images[idx, ...].flatten()
            for idx in lca_interesting_indices[model_index][weight_index][0]]
        angles = masked_weight_to_image_angles(model_weight, model_mask, lca_images, mask_threshold)
        lca_weight_angles.append(angles)
        linear_images = [data['train'].images[idx, ...].flatten()
            for idx in linear_interesting_indices[model_index][weight_index][0]]
        angles = masked_weight_to_image_angles(model_weight, model_mask, linear_images, mask_threshold)
        linear_weight_angles.append(angles)
    analyzer.nat_selectivity['lca_angles'] = lca_weight_angles
    analyzer.nat_selectivity['linear_angles'] = linear_weight_angles
# -

lca_angles = [analyzer.nat_selectivity['lca_angles'] for analyzer in analyzers]
linear_angles = [analyzer.nat_selectivity['linear_angles'] for analyzer in analyzers]

# Per-neuron angle histograms for a few randomly chosen neurons per model.
num_plots_per_model = 3
nbins=20
fig, axs = plot.subplots(ncols=len(analyzers), nrows=num_plots_per_model, sharey=False)
max_vals = []
for model_idx in range(len(analyzers)):
    weight_indices = np.random.randint(0, analyzers[model_idx].model_params.num_neurons, num_plots_per_model)
    for row_idx, weight_idx in enumerate(weight_indices):
        indiv_lin_angles = linear_angles[model_idx][weight_idx]
        indiv_lca_angles = lca_angles[model_idx][weight_idx]
        axs[model_idx, row_idx].hist(indiv_lin_angles, bins=nbins, color=color_vals['md_green'], alpha=0.5, label='Linear')
        axs[model_idx, row_idx].hist(indiv_lca_angles, bins=nbins, color=color_vals['md_red'], alpha=0.5, label='LCA')
        max_vals.append(np.max([np.max(indiv_lin_angles), np.max(indiv_lca_angles)]))
        axs[model_idx, row_idx].format(title=f'Neuron {weight_idx}; {model_labels[model_idx]} Overcompleteness')
axs[0,0].legend(loc='ur', ncols=1, frameon=False)
axs.format(suptitle='Exciting image angles per neuron', xlabel='Image-to-weight angle', ylabel='Number of images', xlim=[0, 90])
plot.show()

# Reduce the per-neuron angle lists to means/variances; neurons with no
# interesting images are marked with the sentinel value -1.
for analyzer in analyzers:
    lca_model_means = []
    lca_model_vars = []
    lin_model_means = []
    lin_model_vars = []
    for weight_idx in range(analyzer.model_params.num_neurons):
        indiv_lca_angles = analyzer.nat_selectivity['lca_angles'][weight_idx]
        if len(indiv_lca_angles) > 0:
            lca_model_means.append(np.mean(indiv_lca_angles))
            lca_model_vars.append(np.var(indiv_lca_angles))
        else:
            lca_model_means.append(-1)
            lca_model_vars.append(-1)
        indiv_lin_angles = analyzer.nat_selectivity['linear_angles'][weight_idx]
        if len(indiv_lin_angles) > 0:
            lin_model_means.append(np.mean(indiv_lin_angles))
            lin_model_vars.append(np.var(indiv_lin_angles))
        else:
            lin_model_means.append(-1)
            lin_model_vars.append(-1)
    analyzer.nat_selectivity['lca_means'] = lca_model_means
    analyzer.nat_selectivity['lca_vars'] = lca_model_vars
    analyzer.nat_selectivity['lin_means'] = lin_model_means
    analyzer.nat_selectivity['lin_vars'] = lin_model_vars

# Distribution of per-neuron mean angles per model (sentinel -1 entries,
# i.e. neurons without interesting images, are filtered out by mean>0).
fig, axs = plot.subplots(ncols=len(analyzers), nrows=1, sharey=False)
for ax, analyzer in zip(axs, analyzers):
    lin_data = [mean for mean in analyzer.nat_selectivity['lin_means'] if mean>0]
    non_lin_data = [mean for mean in analyzer.nat_selectivity['lca_means'] if mean>0]
    h1 = ax.hist(lin_data, bins=nbins, color=color_vals['md_green'], alpha=0.5, label='Linear')
    h2 = ax.hist(non_lin_data, bins=nbins, color=color_vals['md_red'], alpha=0.5, label='LCA')
    oc = analyzer.nat_selectivity['oc_label']
    ax.format(title=f'{oc} Overcompleteness')
axs[0,0].legend(loc='ul', frameon=False, ncols=1)
axs[0,0].format(ylabel='Number of images')
axs.format(
    suptitle='Exciting image angles',
    xlabel='Mean image-to-weight angle',
    xlim=[0, 90]
)
plot.show()

# Persist every per-model result dict for downstream figure generation.
for analyzer in analyzers:
    np.savez(analyzer.analysis_out_dir+'savefiles/natural_image_selectivity.npz',
        data=analyzer.nat_selectivity)
notebooks/DSC_unit_atk_selectivity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial 04: Visualizing Experiment Results # This tutorial describes the process of visualizing the results of Flow experiments, and of replaying them. # # **Note:** This tutorial is only relevant if you use SUMO as a simulator. We currently do not support policy replay nor data collection when using Aimsun. The only exception is for reward plotting, which is independent on whether you have used SUMO or Aimsun during training. # ## 1. Visualization components # The visualization of simulation results breaks down into three main components: # # - **reward plotting**: Visualization of the reward function is an essential step in evaluating the effectiveness and training progress of RL agents. # # - **policy replay**: Flow includes tools for visualizing trained policies using SUMO's GUI. This enables more granular analysis of policies beyond their accrued reward, which in turn allows users to tweak actions, observations and rewards in order to produce some desired behavior. The visualizers also generate plots of observations and a plot of the reward function over the course of the rollout. # # - **data collection and analysis**: Any Flow experiment can output its simulation data to a CSV file, `emission.csv`, containing the contents of SUMO's built-in `emission.xml` files. This file contains various data such as the speed, position, time, fuel consumption and many other metrics for every vehicle in the network and at each time step of the simulation. Once you have generated the `emission.csv` file, you can open it and read the data it contains using Python's [csv library](https://docs.python.org/3/library/csv.html) (or using Excel). # Visualization is different depending on which reinforcement learning library you are using, if any. 
Accordingly, the rest of this tutorial explains how to plot rewards, replay policies and collect data when using either no RL library, RLlib or rllab. # **Contents:** # # [How to visualize using SUMO without training](#2.1---Using-SUMO-without-training) # # [How to visualize using SUMO with RLlib](#2.2---Using-SUMO-with-RLlib) # # [How to visualize using SUMO with rllab](#2.3---Using-SUMO-with-rllab) # # [**_Example: visualize data on a ring trained using RLlib_**](#2.4---Example:-Visualize-data-on-a-ring-trained-using-RLlib) # # ## 2. How to visualize # ### 2.1 - Using SUMO without training # # _In this case, since there is no training, there is no reward to plot and no policy to replay._ # #### Data collection and analysis # # SUMO-only experiments can generate emission CSV files seamlessly: # # First, you have to tell SUMO to generate the `emission.xml` files. You can do that by specifying `emission_path` in the simulation parameters (class `SumoParams`), which is the path where the emission files will be generated. For instance: # + from flow.core.params import SumoParams sumo_params = SumoParams(sim_step=0.1, render=True, emission_path='data') # - # Then, you have to tell Flow to convert these XML emission files into CSV files. To do that, pass in `convert_to_csv=True` to the `run` method of your experiment object. For instance: # # ```python # exp.run(1, 1500, convert_to_csv=True) # ``` # When running experiments, Flow will now automatically create CSV files next to the SUMO-generated XML files. # ### 2.2 - Using SUMO with RLlib # #### Reward plotting # # RLlib supports reward visualization over the period of the training using the `tensorboard` command. It takes one command-line parameter, `--logdir`, which is an RLlib result directory. By default, it would be located within an experiment directory inside your `~/ray_results` directory. 
#
# An example call would look like:
#
# `tensorboard --logdir ~/ray_results/experiment_dir/result/directory`
#
# You can also run `tensorboard --logdir ~/ray_results` if you want to select more than just one experiment.
#
# If you do not wish to use `tensorboard`, another way is to use our `flow/visualize/plot_ray_results.py` tool. It takes as arguments:
#
# - the path to the `progress.csv` file located inside your experiment results directory (`~/ray_results/...`),
# - the name(s) of the column(s) you wish to plot (reward or other things).
#
# An example call would look like:
#
# `flow/visualize/plot_ray_results.py ~/ray_results/experiment_dir/result/progress.csv training/return-average training/return-min`
#
# If you do not know what the names of the columns are, run the command without specifying any column:
#
# `flow/visualize/plot_ray_results.py ~/ray_results/experiment_dir/result/progress.csv`
#
# and the list of all available columns will be displayed to you.

# #### Policy replay
#
# The tool to replay a policy trained using RLlib is located at `flow/visualize/visualizer_rllib.py`. It takes as arguments, first, the path to the experiment results (by default located within `~/ray_results`), and second, the number of the checkpoint you wish to visualize (which corresponds to the folder `checkpoint_<number>` inside the experiment results directory).
#
# An example call would look like this:
#
# `python flow/visualize/visualizer_rllib.py ~/ray_results/experiment_dir/result/directory 1`
#
# There are other optional parameters which you can learn about by running `visualizer_rllib.py --help`.

# #### Data collection and analysis
#
# Simulation data can be generated the same way as it is done [without training](#2.1---Using-SUMO-without-training).
#
# If you need to generate simulation data after the training, you can run a policy replay as mentioned above, and add the `--gen_emission` parameter.
# # An example call would look like: # # `python flow/visualize/visualizer_rllib.py ~/ray_results/experiment_dir/result/directory 1 --gen_emission` # ### 2.4 - Example: Visualize data on a ring trained using RLlib # !pwd # make sure you are in the flow/tutorials folder # The folder `flow/tutorials/data/trained_ring` contains the data generated in `ray_results` after training an agent on a ring scenario for 200 iterations using RLlib (the experiment can be found in `flow/examples/rllib/stabilizing_the_ring.py`). # # Let's first have a look at what's available in the `progress.csv` file: # !python ../flow/visualize/plot_ray_results.py data/trained_ring/progress.csv # This gives us a list of everything that we can plot. Let's plot the reward and its boundaries: # %matplotlib notebook # if this doesn't display anything, try with "%matplotlib inline" instead # %run ../flow/visualize/plot_ray_results.py data/trained_ring/progress.csv \ # episode_reward_mean episode_reward_min episode_reward_max # We can see that the policy had already converged by the iteration 50. # # Now let's see what this policy looks like. Run the following script, then click on the green arrow to run the simulation (you may have to click several times). # !python ../flow/visualize/visualizer_rllib.py data/trained_ring 200 --horizon 2000 # The RL agent is properly stabilizing the ring! # # Indeed, without an RL agent, the vehicles start forming stop-and-go waves which significantly slows down the traffic, as you can see in this simulation: # !python ../examples/sumo/sugiyama.py # In the trained ring folder, there is a checkpoint generated every 20 iterations. Try to run the second previous command but replace 200 by 20. On the reward plot, you can see that the reward is already quite high at iteration 20, but hasn't converged yet, so the agent will perform a little less well than at iteration 200. # That's it for this example! Feel free to play around with the other scripts in `flow/visualize`. 
# Run them with the `--help` parameter and it should tell you how to use them. Also, if you need the emission file for the trained ring, you can obtain it by running the following command:

# !python ../flow/visualize/visualizer_rllib.py data/trained_ring 200 --horizon 2000 --gen_emission

# The path where the emission file is generated will be printed at the end of the simulation.
tutorials/tutorial04_visualize.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## <small>
# Copyright (c) 2017-21 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# </small>
#
# # Deep Learning: A Visual Approach
# ## by <NAME>, https://glassner.com
# ### Order: https://nostarch.com/deep-learning-visual-approach
# ### GitHub: https://github.com/blueberrymusic
# ------
#
# ### What's in this notebook
#
# This notebook is provided as a "behind-the-scenes" look at code used to make some of the figures in this chapter. It is cleaned up a bit from the original code that I hacked together, and is only lightly commented. I wrote the code to be easy to interpret and understand, even for those who are new to Python. I tried never to be clever or even more efficient at the cost of being harder to understand.
# The code is in Python3, using the versions of libraries as of April 2021.
#
# This notebook may contain additional code to create models and images not in the book. That material is included here to demonstrate additional techniques.
#
# Note that I've included the output cells in this saved notebook, but Jupyter doesn't save the variables or data that were used to generate them. To recreate any cell's output, evaluate all the cells from the start up to that cell. A convenient way to experiment is to first choose "Restart & Run All" from the Kernel menu, so that everything's been defined and is up to date. Then you can experiment using the variables, data, functions, and other stuff defined in this notebook.

# ## Chapter 16: CNNs - Notebook 1

import numpy as np
import matplotlib.pyplot as plt

# +
# Make a File_Helper for saving and loading files.
save_files = False

import os
import sys
import inspect

current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(current_dir))  # path to parent dir
from DLBasics_Utilities import File_Helper
file_helper = File_Helper(save_files)

# +
# Make a random binary image: uniform noise thresholded at 0.5.
# Note the inverted mapping: values above .5 become 0, the rest become 1.
np.random.seed(42)
img_size = 20
img = np.random.uniform(size=(img_size, img_size))
img = np.where(img > .5, 0, 1)

# +
# Apply an element to the current image img
def get_results(element):
    """Correlate the 3x3 `element` over the global `img` (no padding).

    Returns a tuple (result, binary_result) of (img_size, img_size) arrays:
    `result` holds the raw weighted sums, and `binary_result` is 1.0 exactly
    where the sum equals 3 (a perfect match of the element). The one-pixel
    border stays zero because the 3x3 window must fit inside the image.
    """
    result = np.zeros((img_size, img_size))
    binary_result = np.zeros((img_size, img_size))
    for y in range(1, img_size - 1):
        for x in range(1, img_size - 1):
            # 'acc' accumulates the 3x3 weighted sum centered on (y, x).
            # (Renamed from 'sum' to avoid shadowing the builtin.)
            acc = 0
            for ey in range(3):
                for ex in range(3):
                    acc += img[y + ey - 1][x + ex - 1] * element[ey][ex]
            result[y][x] = acc
            # NOTE(review): exact equality on a float accumulator; safe here
            # because img pixels and element weights are small exact integers.
            binary_result[y][x] = acc == 3
    return (result, binary_result)

# +
# Show the five pictures for applying an element to an image
def show_five(img, element, result, binary_result, filename):
    """Plot (a) the element with a grid, (b) the image, (c) the raw sums,
    (d) the binary matches, and (e) the image with matched neighborhoods
    highlighted; save the figure via file_helper and show it."""
    plt.subplot(1, 5, 1)
    plt.imshow(element, interpolation='nearest', vmin=-1, vmax=1, cmap='autumn')
    # Draw a 3x3 grid over the element (the .49 keeps that line visible).
    plt.plot([.5, .5], [-.5, 2.5], color='black')
    plt.plot([1.5, 1.5], [-.5, 2.5], color='black')
    plt.plot([-.5, 2.5], [.49, .49], color='black')
    plt.plot([-.5, 2.5], [1.5, 1.5], color='black')
    plt.xticks([], [])
    plt.yticks([], [])
    plt.title('(a)')
    plt.subplot(1, 5, 2)
    plt.imshow(img, interpolation='nearest', cmap='gray')
    plt.xticks([], [])
    plt.yticks([], [])
    plt.title('(b)')
    plt.subplot(1, 5, 3)
    plt.imshow(result, interpolation='nearest', vmin=-6, vmax=3, cmap='cool')
    plt.xticks([], [])
    plt.yticks([], [])
    plt.title('(c)')
    plt.subplot(1, 5, 4)
    plt.imshow(binary_result, interpolation='nearest', cmap='gray')
    plt.xticks([], [])
    plt.yticks([], [])
    plt.title('(d)')
    plt.subplot(1, 5, 5)
    # Dim the whole image, then restore full contrast for every pixel within
    # one step (8-neighborhood) of a perfect match.
    mask_result = np.zeros(img.shape)
    for y in range(0, img_size):
        for x in range(0, img_size):
            mask_result[y][x] = .45 + (.1 * img[y][x])
    for y in range(0, img_size):
        for x in range(0, img_size):
            for ty in range(-1, 2):
                for tx in range(-1, 2):
                    if (y + ty >= 0) and (y + ty < img_size) and (x + tx >= 0) and (x + tx < img_size):
                        if binary_result[y + ty][x + tx] > .5:
                            mask_result[y][x] = img[y][x]
    plt.imshow(mask_result, interpolation='nearest', cmap='gray')
    plt.xticks([], [])
    plt.yticks([], [])
    plt.title('(e)')
    plt.tight_layout()
    file_helper.save_figure(filename)
    plt.show()

# +
# Find a vertical white stripe up the center
element1 = [[-1, 1, -1], [-1, 1, -1], [-1, 1, -1]]
(result, binary_result) = get_results(element1)
show_five(img, element1, result, binary_result, 'element1')

# +
# Find a diagonal white stripe from NW to SE
element2 = [[1, -1, -1], [-1, 1, -1], [-1, -1, 1]]
(result, binary_result) = get_results(element2)
show_five(img, element2, result, binary_result, 'element2')
# -
Notebooks/Chapter16-CNNs/Chapter16-CNNs-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np

# Print numpy floats with three decimal places.
np.set_printoptions(formatter={'float_kind': lambda x: "{0:0.3f}".format(x)})

import matplotlib.pyplot as plt
# %matplotlib inline
# -

# Fix the RNG seed so every run reproduces the same random sample.
seed = 777
np.random.seed(seed)

mu, sigma = 10, 1  # mean and standard deviation
train_data = np.random.normal(mu, sigma, (100, 2))

from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler, RobustScaler

# Fit each scaler on the training sample; printing the fitted scaler
# echoes its configuration, exactly as in an interactive session.
minmax_scaler = MinMaxScaler()
print(minmax_scaler.fit(train_data))
train_data_minMaxScaled = minmax_scaler.transform(train_data)

maxabs_scaler = MaxAbsScaler()
print(maxabs_scaler.fit(train_data))
train_data_maxAbsScaled = maxabs_scaler.transform(train_data)

standard_scaler = StandardScaler()
print(standard_scaler.fit(train_data))
train_data_standardScaled = standard_scaler.transform(train_data)

robust_scaler = RobustScaler()
print(robust_scaler.fit(train_data))
train_data_robustScaled = robust_scaler.transform(train_data)

# +
# Scatter the raw sample (red) next to each scaled view (blue).
panels = [
    ('train_data', train_data, 'r'),
    ('train_data_minMaxScaled', train_data_minMaxScaled, 'b'),
    ('train_data_maxAbsScaled', train_data_maxAbsScaled, 'b'),
    ('train_data_standardScaled', train_data_standardScaled, 'b'),
    ('train_data_robustScaled', train_data_robustScaled, 'b'),
]

fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(19, 4))
for axis, (title, points, colour) in zip(ax, panels):
    axis.scatter(points[:, 0], points[:, 1], s=2, color=colour)
    axis.grid(True)
    axis.set_title(title)
# -
AI_Class/000/์ •๊ทœ๋ถ„ํฌ.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <style>div.container { width: 100% }</style> # <img style="float:left; vertical-align:text-bottom;" height="65" width="172" src="../assets/holoviz-logo-unstacked.svg" /> # <div style="float:right; vertical-align:text-bottom;"><h2>SciPy 2019 Tutorial Index</h2></div> # <div class="alert alert-warning" role="alert"> <strong>NOTE:</strong> This material is out of date. Check out the <a href="https://github.com/pyviz/holoviz/tree/scipy19">scipy19 tag</a> to access the materials included in the tutorial. For the latest version of the tutorial, visit <a href="https://holoviz.org/tutorial">holoviz.org</a>. # </div> # ## Welcome to the HoloViz tutorial from SciPy 2019! # # This tutorial will take you through all of the steps involved in exploring data of many different types and sizes, building simple and complex figures, working with billions of data points, adding interactive behavior, widgets and controls, and deploying full dashboards and applications. # # We'll be using a wide range of open-source Python libraries, but focusing on the tools we help maintain as part of the HoloViz project: # [Panel](https://panel.pyviz.org), # [hvPlot](https://hvplot.pyviz.org), # [HoloViews](http://holoviews.org), # [GeoViews](http://geoviews.org), # [Datashader](http://datashader.org), # [Param](http://param.pyviz.org), and # [Colorcet](http://colorcet.pyviz.org). # # <img width="800" src="../assets/pn_hp_hv_gv_ds_pa_cs.png"/> # # These tools were previously part of [PyViz.org](http://pyviz.org), but have been pulled out into [HoloViz.org](http://holoviz.org) to allow PyViz to be fully neutral and general. 
# # The HoloViz tools have been carefully designed to work together with each other and with the SciPy ecosystem to address a very wide range of data-analysis and visualization tasks, making it simple to discover, understand, and communicate the important properties of your data. # # <img align="center" src="../assets/earthquakes.png"></img> # # This notebook serves as the homepage of the tutorial, including a table of contents letting you launch each tutorial section. # ## Index and Schedule # # - **Introduction and setup** # * &nbsp;&nbsp;**5 min** &nbsp;[Setup](./00_Setup.ipynb): Setting up the environment and data files. # * **20 min** &nbsp;[Overview](./01_Overview.ipynb): Overview of the HoloViz tools, philosophy, and approach. # # - **Building dashboards using Panel** # * **15 min** &nbsp;[Building_Panels](./02_Building_Panels.ipynb): How to make apps and dashboards from Python objects. # * &nbsp;&nbsp;**5 min** &nbsp;[*Exercise 1*](./exercises/Building_a_Dashboard.ipynb#Exercise-1): Using a mix of visualizable types, create a panel and serve it. # * **10 min** &nbsp;[Interlinked Panels](./03_Interlinked_Panels.ipynb): Customizing linkages between widgets and displayable objects. # * &nbsp;&nbsp;**5 min** &nbsp;[*Exercise 2*](./exercises/Building_a_Dashboard.ipynb#Exercise-2): Add widgets to control your dashboard. # * **10 min** &nbsp;*Break* # # # - **The `.plot` API: a data-centric approach to visualization** # * **30 min** &nbsp;[Basic Plotting](./04_Basic_Plotting.ipynb): Quick introduction to the `.plot` interface. # * **10 min** &nbsp;[Composing Plots](./05_Composing_Plots.ipynb): Overlaying and laying out `.hvplot` outputs to show relationships. # * **10 min** &nbsp;[*Exercise 3*](./exercises/Plotting.ipynb#Exercise-3): Add some `.plot` or `.hvplot` visualizations to your dashboard. 
# * **10 min** &nbsp;*Break* # # # - **Custom interactivity** # * **25 min** &nbsp;[Interlinked Plots](./06_Interlinked_Plots.ipynb): Connecting HoloViews "streams" to customize behavior. # * **10 min** &nbsp;[*Exercise 4*](./exercises/Plotting.ipynb#Exercise-4): Add a linked visualization with HoloViews. # # # - **Working with large datasets** # * **20 min** &nbsp;[Large Data](./07_Large_Data.ipynb): Using Datashader to pre-render data in Python # * **10 min** &nbsp;*Break* # # # - **Building advanced dashboards** # * **15 min** &nbsp;[Advanced Dashboards](./08_Advanced_Dashboards.ipynb): Using Panel to create an advanced dashboard with linked plots and streams. # * **30 min** &nbsp;[*Exercise 5*](./exercises/Advanced_Dashboarding.ipynb): Build a new dashboard using everything you've learned so far. # ## Related links # # You will find extensive support material on the websites for each package. You may find these links particularly useful during the tutorial: # # * [hvPlot user guide](https://hvplot.pyviz.org/user_guide): Guide to the plots available via `.hvplot()` # * [HoloViews reference gallery](http://holoviews.org/reference/index.html): Visual reference of all HoloViews elements and containers, along with some other components # * [Panel reference gallery](http://panel.pyviz.org/reference/index.html): Visual reference of all panes, layouts and widgets.
examples/tutorial/scipy19.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from ray.tune.web_server import TuneClient

# Connect to a running Tune server and list every trial as (id, status).
manager = TuneClient(tune_address="localhost:4321")

x = manager.get_all_trials()
[(y["id"], y["status"]) for y in x["trials"]]
# -

# Ask the server to stop the ten most recently listed trials.
for y in x["trials"][-10:]:
    manager.stop_trial(y["id"])

# +
from ray.tune.variant_generator import generate_trials
import yaml

# safe_load: a plain yaml.load(f) without a Loader is deprecated since
# PyYAML 5.1 (and a TypeError in PyYAML 6); safe_load also refuses to
# construct arbitrary Python objects from the file.
with open("../rllib/tuned_examples/hyperband-cartpole.yaml") as f:
    d = yaml.safe_load(f)
# -

# The tuned-examples file contains a single experiment; take its
# (name, spec) pair without materializing the whole items list.
name, spec = next(iter(d.items()))
manager.add_trial(name, spec)
python/ray/tune/TuneClient.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: pyiron-dev
#     language: python
#     name: pyiron-dev
# ---

# # Introduction to pyiron workflows

# In this notebook, you will learn about the different pyiron objects which act as building blocks of a complete workflow. Here, we use a simulation workflow based on LAMMPS, but the features introduced here, as will be shown, are generic to other simulation/analysis tools.

# As a first step we import the libraries [numpy](http://www.numpy.org/) for data analysis and [matplotlib](https://matplotlib.org/) for visualization.

import numpy as np
import matplotlib.pyplot as plt

# To import pyiron simply use:

from pyiron_atomistics import Project

# The `Project` object introduced below is central in pyiron. It allows to name the project as well as to derive all other objects such as structures, jobs etc. without having to import them. Thus, by code completion *Tab* the respective commands can be found easily.

# We now create a pyiron `Project` named `first_steps`.

pr = Project(path='first_steps')

# The project name also applies for the directory that is created for the project.

# ## Perform a LAMMPS MD simulation

# Having created an instance of the pyiron `Project` we now perform a [LAMMPS](http://lammps.sandia.gov/) molecular dynamics simulation.

# For this basic simulation example we construct an fcc Al crystal in a cubic supercell (`cubic=True`).

basis = pr.create.structure.ase.bulk('Al', cubic=True)

# Here the structure factory uses the [ASE bulk module](https://wiki.fysik.dtu.dk/ase/ase/build/build.html). The structure can be modified - here we extend the original cell to a 3x3x3 supercell (`repeat([3, 3, 3])`). Finally, we plot the structure using [NGlview](http://nglviewer.org/nglview/latest/api.html).

# + jupyter={"outputs_hidden": true} tags=[]
supercell_3x3x3 = basis.repeat([3, 3, 3])
supercell_3x3x3.plot3d()
# -

# The project object allows to create various simulation job types. Here, we create a LAMMPS job.

job = pr.create.job.Lammps(job_name='Al_T800K')

# Further, we specify a Molecular Dynamics simulation at $T=800$ K using the supercell structure created above.

job.structure = supercell_3x3x3
job.calc_md(temperature=800, pressure=0, n_ionic_steps=10000)

# To see all available interatomic potentials which are compatible with the structure (for our example they must contain Al) and the job type (here LAMMPS) we call `job.list_potentials()`.

# + jupyter={"outputs_hidden": true} tags=[]
job.list_potentials()
# -

# From the above let us select the first potential in the list.

pot = job.list_potentials()[0]
print('Selected potential: ', pot)
job.potential = pot

# To run the LAMMPS simulation (locally) we now simply use:

# +
#job.server.core=2

# +
#job.server.queue=
# -

job.run()

pr.job_table()

# ## Analyze the calculation

# After the simulation has finished the information about the job can be accessed through the Project object.

job = pr['Al_T800K']
job

# Printing the job object (note that in Jupyter we don't have to call a print statement if the variable/object is in the last line). The output lists the variables (nodes) and the directories (groups). To get a list of all variables stored in the generic output we type:

job['output/generic']

# An animated 3d plot of the MD trajectories is created by:

job.animate_structure()

# To analyze the temperature evolution we plot it as a function of the MD step.

temperatures = job['output/generic/temperature']
steps = job['output/generic/steps']
plt.plot(steps, temperatures)
plt.xlabel('MD step')
plt.ylabel('Temperature [K]');

# Select the atoms in a thin slab around z=0 and show their x-y positions.
pos = job['output/generic/positions']
x, y, z = [pos[:, :, i] for i in range(3)]
sel = np.abs(z) < 0.1
fig, axs = plt.subplots(1, 1)
# Raw strings: '\A' is an invalid escape sequence in a plain string literal.
axs.set_xlabel(r'x [$\AA$]')
axs.set_ylabel(r'y [$\AA$]')
axs.scatter(x[sel], y[sel])
axs.set_aspect('equal', 'box');

# ## pyiron GUI

# With the pyiron GUI, one can explore the result of the simulations in a simplified manner.

GUI = pr.gui()

# ## Perform a series of jobs

# We would now like to create a series of jobs to perform the same simulation but with different temperatures.
# The temperature must be in the range 200 K < T < 1200 K, with an increment of 200 K.

# + jupyter={"outputs_hidden": true} tags=[]
for temperature in np.arange(200, 1200, 200):
    # Use the same job-creation API as above (pr.create.job.Lammps) instead of
    # the legacy pr.create_job(pr.job_type.Lammps, ...) form, for consistency.
    job = pr.create.job.Lammps(job_name=f'Al_T{int(temperature)}K')
    job.structure = supercell_3x3x3
    job.potential = pot
    job.calc_md(temperature=temperature, pressure=0, n_ionic_steps=10000)
    job.run()
# -

# To inspect the list of jobs in our current project we type (note that the existing job from the previous exercise at $T=800$ K has been recognized and not run again):

pr

# We can now iterate over the jobs and extract volume and mean temperature.

vol_lst, temp_lst = [], []
for job in pr.iter_jobs(convert_to_object=False):
    volumes = job['output/generic/volume']
    temperatures = job['output/generic/temperature']
    # NOTE(review): [:-20] drops the *last* 20 MD steps; if the intent is to
    # average only the equilibrated tail, [-20:] may have been meant — confirm.
    temp_lst.append(np.mean(temperatures[:-20]))
    vol_lst.append(np.mean(volumes[:-20]))

# Then we can use the extracted information to plot the thermal expansion, calculated within the $NPT$ ensemble. For plotting the temperature values in ascending order the volume list is mapped to the sorted temperature list.

plt.figure()
# Reorder the volumes so they line up with the temperatures sorted ascending.
order = np.argsort(temp_lst)
vol_lst[:] = [vol_lst[i] for i in order]
plt.plot(sorted(temp_lst), vol_lst, linestyle='-', marker='o')
plt.title('Thermal expansion')
plt.xlabel('Temperature [K]')
plt.ylabel(r'Volume [$\AA^3$]');

# ## Sharing your project

# You can share the completed project at its current status with others. The sharing is done via the `pack()`/`unpack()` functions of the Project object.

# Before packing the project, let's take a look at the pyiron `job_table`.

pr.job_table()

# To export the project:

pr.pack(csv_file_name='export.csv', destination_path='lammps_project')

# Unpack and use in a new project

pr = Project("imported_proj")
pr.unpack(csv_file_name='export.csv', origin_path='lammps_project')
pr.job_table()
1_1_intro_pyiron_building_blocks/1_1_intro.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # SMA Percent Band
#
# Trend-following rules around a simple moving average with a percent band:
#
# 1. If the SPY closes above its upper band, buy.
# 2. If the SPY closes below its lower band, sell your long position.

# +
# use future imports for python 3.x forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import

# other imports
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from talib.abstract import *

# project imports
import pinkfish as pf
import strategy

# format price data to two decimal places in displayed DataFrames
pd.options.display.float_format = '{:0.2f}'.format

# %matplotlib inline
# -

# set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
   or %matplotlib inline

   %matplotlib notebook: will lead to interactive plots embedded within
   the notebook, you can zoom and resize the figure

   %matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)

# Some global data: pick the instrument to backtest (uncomment one symbol).
symbol = '^GSPC'
#symbol = 'SPY'
#symbol = 'DIA'
#symbol = 'QQQ'
#symbol = 'IWM'
#symbol = 'TLT'
#symbol = 'GLD'
#symbol = 'AAPL'
#symbol = 'BBRY'
#symbol = 'GDX'
capital = 10000
start = datetime.datetime(1900, 1, 1)
#start = datetime.datetime(2000, 1, 1)
#end = datetime.datetime(2010, 12, 1)
end = datetime.datetime.now()

# Include dividends? (If yes, set to True)
use_adj = True

# Define high low trade periods: SMA lookback and percent band width.

# +
sma_period = 200
percent_band = 3.5

#sma_period = 225
#percent_band = 3.0
# -

# Run Strategy

s = strategy.Strategy(symbol, capital, start, end, use_adj, sma_period, percent_band)
s.run()

# Retrieve log DataFrames: raw log, trade log, and daily balance.

# NOTE(review): `s.stats = s.stats()` rebinds the attribute over the method of
# the same name, so s.stats() cannot be called again afterwards — confirm this
# is the intended pinkfish idiom.
s.rlog, s.tlog, s.dbal = s.get_logs()
s.stats = s.stats()

s.tlog.tail(100)

s.dbal.tail()

# Generate strategy stats - display all available stats

pf.print_full(s.stats)

# Run Benchmark, Retrieve benchmark logs, and Generate benchmark stats

benchmark = pf.Benchmark(symbol, capital, s._start, s._end, s._use_adj)
benchmark.run()
benchmark.tlog, benchmark.dbal = benchmark.get_logs()
benchmark.stats = benchmark.stats()

# Plot Equity Curves: Strategy vs Benchmark

pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)

# Plot Trades

pf.plot_trades(s.dbal, benchmark=benchmark.dbal)

print('trading period: {} to {}'.format(s.stats['start'], s.stats['end']))

df = pf.summary5(s.stats, benchmark.stats)
df

# Bar Graph: Strategy vs Benchmark

# +
metrics = ('annual_return_rate',
           'max_closed_out_drawdown',
           'drawdown_annualized_return',
           'drawdown_recovery',
           'best_month',
           'worst_month',
           'sharpe_ratio',
           'sortino_ratio',
           'monthly_std')

df = pf.plot_bar_graph(s.stats, benchmark.stats, *metrics)
df
# -

# Print the raw transaction log (widen the display so no rows are elided).

pd.set_option('display.max_rows', len(s.rlog))
s.rlog
examples/sma-percent-band/strategy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys import numpy as np import time import matplotlib.pyplot as plt from scipy import interpolate sys.path.append(r'C:\Chuji\Code_and_Data\MyCode') import Circuit_Simulator import PulseGenerator as PG from toolfunc import * from toolfunc.adia_analysis import * from toolfunc.pulse_filter import * import scipy.optimize as sci_op from toolfunc import DE # + raw_config = Circuit_Simulator.RawConfig(qubit_num=3,dimension=3,circuit_type=1,initial_state='ground',sampling_rate=1e9) raw_config.load_default_value(modulation=True,decoherence=False,use_capacitance=False) flux_pulse = np.linspace(0/7.5,4/7.5,400) freq_array = 8.5e9-flux_pulse * (8.5e9 - 1e9) raw_config.setValue('Q1 f01_max',6.0e9) raw_config.setValue('Q2 f01_max',8.0e9) raw_config.setValue('Q3 f01_max',5.4e9) raw_config.setValue('Q1 f01_min',1e9) raw_config.setValue('Q2 f01_min',1e9) raw_config.setValue('Q3 f01_min',1e9) raw_config.setValue('Q1 Ec',0.25e9) raw_config.setValue('Q2 Ec',0.30e9) raw_config.setValue('Q3 Ec',0.25e9) raw_config.setValue('r12',0.012) raw_config.setValue('r23',0.012) raw_config.setValue('r13',0.0008) raw_config.setValue('Q2 Voltage period',-1) raw_config.setValue('Q2 Voltage operating point',0.00) raw_config.setValue('Q2 Flux',flux_pulse) simu_config = Circuit_Simulator.read_config(raw_config.get_dict()) Simulator = Circuit_Simulator.Simulator(simu_config) Simulator.show_pulse() # + Simulator.performsimulation(solver_type=2,resample_factor=1,eigen_cloest_to_bare=False ,sort_by_maximum_overlap=True,gap=12e6) fig = plt.figure(figsize=[6.4,6]) ax = fig.add_subplot(111) eigen_trace = Simulator.EigenResult.get_Ener_gap_trace('101-100-001+000') ax.plot(freq_array[0:400],-eigen_trace[0:400]/1e6) ax.set_yscale('log') # - # %matplotlib qt # + # raw_config.setValue('Q1 
# f01_max',6.0e9)   <- continuation of the commented-out line split above
# raw_config.setValue('Q2 f01_max',7.87e9)
# raw_config.setValue('Q3 f01_max',5.4e9)
# raw_config.setValue('Q1 f01_min',1e9)
# raw_config.setValue('Q2 f01_min',1e9)
# raw_config.setValue('Q3 f01_min',1e9)
# raw_config.setValue('Q1 Ec',0.25e9)
# raw_config.setValue('Q2 Ec',0.3e9)
# raw_config.setValue('Q3 Ec',0.25e9)
# raw_config.setValue('r12',0.018)
# raw_config.setValue('r23',0.018)
# raw_config.setValue('r13',0.0015)

# raw_config.setValue('Q1 f01_max',6.0e9)
# raw_config.setValue('Q2 f01_max',8.3e9)
# raw_config.setValue('Q3 f01_max',5.4e9)
# raw_config.setValue('Q1 f01_min',1e9)
# raw_config.setValue('Q2 f01_min',1e9)
# raw_config.setValue('Q3 f01_min',1e9)
# raw_config.setValue('Q1 Ec',0.25e9)
# raw_config.setValue('Q2 Ec',0.30e9)
# raw_config.setValue('Q3 Ec',0.25e9)
# raw_config.setValue('r12',0.03)
# raw_config.setValue('r23',0.03)
# raw_config.setValue('r13',0.0036)

# raw_config.setValue('Q1 f01_max',6.0e9)
# raw_config.setValue('Q2 f01_max',8.21e9)
# raw_config.setValue('Q3 f01_max',5.4e9)
# raw_config.setValue('Q1 f01_min',1e9)
# raw_config.setValue('Q2 f01_min',1e9)
# raw_config.setValue('Q3 f01_min',1e9)
# raw_config.setValue('Q1 Ec',0.25e9)
# raw_config.setValue('Q2 Ec',0.30e9)
# raw_config.setValue('Q3 Ec',0.25e9)
# raw_config.setValue('r12',0.009)
# raw_config.setValue('r23',0.009)
# raw_config.setValue('r13',0.0004)
# -

# %matplotlib inline

# +
def _adiabatic_cz_cost(pulse_params, args, cplr_idle_freq, r_qc, r_qq):
    """Shared implementation behind the cost_func_distor_* family.

    Builds an adiabatic CZ flux pulse (Gaussian low-pass filtered, plus a
    reflection distortion), runs the three-transmon unitary simulation, and
    returns 1 - CZ gate fidelity so optimizers can minimize it.

    pulse_params   : Fourier coefficients (Lcoeff) of the adiabatic pulse.
    args           : tuple (gate_time, SRATE, f_term, factor_r, T_reflec).
    cplr_idle_freq : coupler (Q2) idle/maximum frequency in Hz.
    r_qc           : qubit-coupler coupling ratio (r12 = r23 in the config).
    r_qq           : direct qubit-qubit coupling ratio (r13 in the config).

    The three public wrappers below only differ in these last three values;
    everything else (qubit frequencies, anharmonicities, filters, target
    gate) is identical, so the former three near-duplicate function bodies
    were folded into this single helper.
    """
    gate_time, SRATE, f_term, factor_r, T_reflec = args
    lamb1 = pulse_params
    # Pulse padding: 8 ns margin plus four reflection delays.
    total_len = gate_time + 8e-9 + 4 * T_reflec

    Seq = PG.Sequence(total_len=total_len, sample_rate=SRATE, complex_trace=False)
    Seq.clear_pulse(tips_on=False)
    Seq.add_pulse('Adiabatic', t0=gate_time/2 + 10e-9/2, width=gate_time, plateau=0e-9,
                  frequency=0, F_Terms=f_term, Lcoeff=np.array(lamb1), Q1_freq=6.0e9,
                  CPLR_idle_freq=cplr_idle_freq, Q2_freq=5.4e9, constant_coupling=False,
                  r1c=r_qc, r2c=r_qc, r12=r_qq, anhar_CPLR=-300e6,
                  anhar_Q1=-250e6, anhar_Q2=-250e6, negative_amplitude=False,
                  dfdV=cplr_idle_freq - 1e9, gap_threshold=8e6,
                  freqpoints=301, pulsepoints=601)
    Seq.add_filter('Gauss Low Pass', 300e6)
    Seq.add_filter('Reflection', *(factor_r, T_reflec))
    flux_pulse = Seq.get_sequence()

    raw_config = Circuit_Simulator.RawConfig(qubit_num=3, dimension=3, circuit_type=1,
                                             initial_state='-Z+Z+Z', sampling_rate=SRATE)
    raw_config.load_default_value(modulation=True, decoherence=False, use_capacitance=False)
    raw_config.setValue('Q1 f01_max', 6.0e9)
    raw_config.setValue('Q2 f01_max', cplr_idle_freq)
    raw_config.setValue('Q3 f01_max', 5.4e9)
    raw_config.setValue('Q1 f01_min', 1e9)
    raw_config.setValue('Q2 f01_min', 1e9)
    raw_config.setValue('Q3 f01_min', 1e9)
    raw_config.setValue('Q1 Ec', 0.25e9)
    raw_config.setValue('Q2 Ec', 0.3e9)
    raw_config.setValue('Q3 Ec', 0.25e9)
    raw_config.setValue('r12', r_qc)
    raw_config.setValue('r23', r_qc)
    raw_config.setValue('r13', r_qq)
    raw_config.setValue('Q2 Voltage period', -1)
    raw_config.setValue('Q2 Voltage operating point', 0)
    raw_config.setValue('Q2 Flux', flux_pulse)

    simu_config = Circuit_Simulator.read_config(raw_config.get_dict())
    Simulator = Circuit_Simulator.Simulator(simu_config)
    Simulator.performsimulation(solver_type=1)
    Simulator.UnitaryResult.get_U(-1)
    Simulator.UnitaryResult.get_subspace_operator(['000', '001', '100', '101'])
    Simulator.UnitaryResult.set_Target_gate('CZ')
    Simulator.UnitaryResult.remove_single_qubit_gate()
    Simulator.UnitaryResult.get_Gate_Fidelity()
    return 1 - Simulator.UnitaryResult.Gate_Fidelity


def cost_func_distor_12(pulse_params, *args):
    """CZ gate error for the r=0.012 device (coupler idling at 7.95 GHz)."""
    return _adiabatic_cz_cost(pulse_params, args,
                              cplr_idle_freq=7.95e9, r_qc=0.012, r_qq=0.0008)


def cost_func_distor_30(pulse_params, *args):
    """CZ gate error for the r=0.03 device (coupler idling at 8.3 GHz)."""
    return _adiabatic_cz_cost(pulse_params, args,
                              cplr_idle_freq=8.3e9, r_qc=0.03, r_qq=0.0036)


def cost_func_distor_18(pulse_params, *args):
    """CZ gate error for the r=0.018 device (coupler idling at 7.87 GHz)."""
    return _adiabatic_cz_cost(pulse_params, args,
                              cplr_idle_freq=7.87e9, r_qc=0.018, r_qq=0.0015)
# -

# + Sweep the reflection amplitude and re-optimize the pulse at each point.
SRATE = 6e9
gate_time = 30e-9
f_terms = 1
T_r = 5e-9
factor_r_arr = np.linspace(-0.1, 0.1, 21)
gate_fidelity_one = np.zeros([len(factor_r_arr)])
gate_params_one = np.zeros([len(factor_r_arr)])
raw_initial_seeds = np.array([0.8])
jj = 0
for factor_r in factor_r_arr: time_start = time.time() DATA = sci_op.minimize(cost_func_distor_30,raw_initial_seeds,args=(gate_time,SRATE,f_terms,factor_r,T_r), method='Nelder-Mead', options={'disp': True,'ftol':1e-4,'xtol':1e-4,'maxiter':30}) gate_fidelity_one[jj] = DATA.fun gate_params_one[jj] = DATA.x print('fidelity',DATA.fun) print(time.time()-time_start) np.savetxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\error_r_ic30.txt',gate_params_one ) np.savetxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\param_r_ic30.txt',gate_fidelity_one ) jj+=1 # + SRATE=6e9 gate_time=60e-9 f_terms=1 T_r=5e-9 factor_r_arr = np.linspace(-0.1,0.1,21)[9:] gate_fidelity_one = np.zeros([len(factor_r_arr)]) gate_params_one = np.zeros([len(factor_r_arr)]) raw_initial_seeds=np.array([0.97]) jj = 0 for factor_r in factor_r_arr: time_start = time.time() DATA = sci_op.minimize(cost_func_distor_09,raw_initial_seeds,args=(gate_time,SRATE,f_terms,factor_r,T_r), method='Nelder-Mead', options={'disp': True,'ftol':1e-4,'xtol':1e-4,'maxiter':30}) gate_fidelity_one[jj] = DATA.fun gate_params_one[jj] = DATA.x raw_initial_seeds = DATA.x print('fidelity',DATA.fun) print(time.time()-time_start) np.savetxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\param_60ns_r_ic09.txt',gate_params_one ) np.savetxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\error_60ns_r_ic09.txt',gate_fidelity_one ) jj+=1 # + params = array([0.8846875 , 0.90784143, 0.92051929, 0.92869969, 0.93749694, 0.94848324, 0.95839414, 0.96597519, 0.97172954, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]) error = array([0.01201456, 0.05103845, 0.10193811, 0.131405 , 0.12629882, 0.09646129, 0.05970911, 0.02951301, 0.01071783, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. 
, 0. ]) # - np.linspace(-0.1,0.1,21)[9:] # Error_two_5ns = np.loadtxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\error_two_distor_5ns.txt') # Param_two_5ns = np.loadtxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\param_two_distor_5ns.txt') Error_one_5ns = np.loadtxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\error_one_distor_5ns.txt') Error_30_5ns = np.loadtxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\error_r_ic30.txt') Error_09_5ns = np.loadtxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\error_r_ic09.txt') Error_60ns_09_5ns = np.loadtxt(r'C:\Chuji\Latex_Papers\Mypapers\ZZ_coupling_20210205\fig_zz\Robustness\robustness_versus_coupling_strength\error_60ns_r_ic09.txt') # + import matplotlib.ticker as mtick fig = plt.figure(figsize=[7.6,4.8]) ax = fig.add_axes([0.2,0.10,0.75,0.8]) # ax_inset = fig.add_axes([0.35,0.36,0.4,0.5]) ax.plot(np.linspace(-0.1,0.1,41),Error_one_5ns*1,label='n=1',linewidth=3) ax.plot(np.linspace(-0.1,0.1,21),Error_30_5ns*1,label='n=1',linewidth=3) # ax.plot(np.linspace(-0.1,0.1,21),Error_09_5ns*1,label='n=1',linewidth=3) ax.plot(np.linspace(-0.1,0.1,21),Error_60ns_09_5ns*1,label='n=1',linewidth=3) # ax.plot(np.linspace(-0.1,0.1,41),Error_two_5ns*1,label='n=2',linewidth=3) ax.set_xlim([-0.105,0.105]) # ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e')) ax.tick_params(axis='x',which='both',bottom=True,top=True,direction='in' ) ax.tick_params(axis='y',which='both',left=True,right=True,direction='in' ) ax.tick_params(labelsize=16) plt.tick_params(labelsize=16) ax.set_yscale('log') # - # %matplotlib qt # + gate_time=60e-9 SRATE=10e9 f_term=2 factor_r=-0.06 T_reflex=5e-9 factor1=1.0 factorc=1.0 factor2=1.0 lamb1 = [0.8,-0.1] total_len = gate_time + 8e-9+4*T_reflex 
Seq=PG.Sequence(total_len=total_len,sample_rate=SRATE,complex_trace=False) Seq.clear_pulse(tips_on=False) Seq.add_pulse('Adiabatic',t0=gate_time/2+10e-9/2,width=gate_time,plateau=0e-9,frequency=0,F_Terms=f_term,Lcoeff=np.array(lamb1),Q1_freq=6.0e9, CPLR_idle_freq=(7.87e9-6e9)*factorc+6e9,Q2_freq=6e9+(5.4e9-6e9)*factor2,constant_coupling=False,r1c=0.018*factor1,r2c=0.018*factor2,r12=0.0015*factorc,anhar_CPLR=-300e6*factorc, anhar_Q1=-250e6*factor1,anhar_Q2=-250e6*factor2,negative_amplitude=False,dfdV=(7.87e9-6e9)*factorc+6e9-1e9,gap_threshold=8e6,freqpoints=301,pulsepoints=601) Seq.add_filter('Gauss Low Pass',300e6) Seq.add_filter('Reflection',*(factor_r,T_reflex)) flux_pulse=Seq.get_sequence() plt.plot(flux_pulse) # - # ## compare to coupler-free structrue # + raw_config = Circuit_Simulator.RawConfig(qubit_num=2,dimension=3,circuit_type=1,initial_state='ground',sampling_rate=1e9) raw_config.load_default_value(modulation=True,decoherence=False,use_capacitance=False) flux_pulse = np.linspace(0/7.5,4/7.5,400) freq_array = 8.5e9-flux_pulse * (8.5e9 - 1e9) raw_config.setValue('Q1 f01_max',6.0e9) raw_config.setValue('Q2 f01_max',8.0e9) raw_config.setValue('Q1 f01_min',1e9) raw_config.setValue('Q2 f01_min',1e9) raw_config.setValue('Q1 Ec',0.25e9) raw_config.setValue('Q2 Ec',0.30e9) raw_config.setValue('r12',0.008) raw_config.setValue('Q2 Voltage period',-1) raw_config.setValue('Q2 Voltage operating point',0.00) raw_config.setValue('Q2 Flux',flux_pulse) simu_config = Circuit_Simulator.read_config(raw_config.get_dict()) Simulator = Circuit_Simulator.Simulator(simu_config) Simulator.show_pulse() # + Simulator.performsimulation(solver_type=2,resample_factor=1,eigen_cloest_to_bare=False ,sort_by_maximum_overlap=True,gap=10e6) fig = plt.figure(figsize=[6.4,6]) ax = fig.add_subplot(111) eigen_trace = Simulator.EigenResult.get_Ener_gap_trace('11-10-01+00') ax.plot(freq_array[0:400],-eigen_trace[0:400]/1e6) ax.set_yscale('log') # - # %matplotlib inline # + 
# Demo cell: build and plot a 'Slepian' flux pulse with a low-pass filter and a
# reflection distortion applied (PG is a project-local pulse-generator module —
# exact pulse semantics not verifiable from here).
gate_time,SRATE,f_term,factor_r,T_reflec=(40e-9,10e9,1,0,2e-9)
lamb1 = 0.8
# Total trace length: gate + 8 ns padding + 4 reflection periods of headroom.
total_len = gate_time + 8e-9+4*T_reflec
Seq=PG.Sequence(total_len=total_len,sample_rate=SRATE,complex_trace=False)
Seq.clear_pulse(tips_on=False)
Seq.add_pulse('Slepian',t0=gate_time/2+10e-9/2,width=gate_time,plateau=0e-9,frequency=0,F_Terms=f_term,Lcoeff=np.array(lamb1),
              Coupling=20e6,Offset=1e9,dfdV=5e9,negative_amplitude=None)
Seq.add_filter('Gauss Low Pass',300e6)
# Reflection filter takes (amplitude factor, delay) as positional args.
Seq.add_filter('Reflection',*(factor_r,T_reflec))
flux_pulse=Seq.get_sequence()
plt.plot(flux_pulse)
# -

# +
def cost_func_distor_18(pulse_params,*args):
    """Cost function (gate error = 1 - fidelity) for a CZ gate with an
    'Adiabatic' coupler pulse at CPLR idle frequency 7.87 GHz and coupling
    ratios r1c=r2c=0.018, under a reflection distortion.

    Parameters
    ----------
    pulse_params :
        Lcoeff coefficients for the Adiabatic pulse (passed to np.array).
    *args :
        (gate_time, SRATE, f_term, factor_r, T_reflec) — gate length,
        sampling rate, number of Fourier terms, reflection amplitude factor
        and reflection delay.

    Returns
    -------
    float
        1 - gate fidelity (so an optimizer can minimize it).
    """
    gate_time,SRATE,f_term,factor_r,T_reflec=args
    lamb1 = pulse_params
    # Same padding convention as the demo cell above.
    total_len = gate_time + 8e-9+4*T_reflec
    Seq=PG.Sequence(total_len=total_len,sample_rate=SRATE,complex_trace=False)
    Seq.clear_pulse(tips_on=False)
    # NOTE(review): the qubit/coupler frequencies and coupling ratios here must
    # match the raw_config values set below — they are duplicated by hand.
    Seq.add_pulse('Adiabatic',t0=gate_time/2+10e-9/2,width=gate_time,plateau=0e-9,frequency=0,F_Terms=f_term,Lcoeff=np.array(lamb1),Q1_freq=6.0e9,
                  CPLR_idle_freq=7.87e9,Q2_freq=5.4e9,constant_coupling=False,r1c=0.018,r2c=0.018,r12=0.0015,anhar_CPLR=-300e6,
                  anhar_Q1=-250e6,anhar_Q2=-250e6,negative_amplitude=False,dfdV=7.87e9-1e9,gap_threshold=8e6,freqpoints=301,pulsepoints=601)
    # Apply the same distortion chain as the hardware model: low-pass + reflection.
    Seq.add_filter('Gauss Low Pass',300e6)
    Seq.add_filter('Reflection',*(factor_r,T_reflec))
    flux_pulse=Seq.get_sequence()

    # Three-transmon configuration: Q1 (6.0 GHz) - coupler Q2 (7.87 GHz) - Q3 (5.4 GHz).
    raw_config = Circuit_Simulator.RawConfig(qubit_num=3,dimension=3,circuit_type=1,initial_state='-Z+Z+Z',sampling_rate=SRATE)
    raw_config.load_default_value(modulation=True,decoherence=False,use_capacitance=False)
    raw_config.setValue('Q1 f01_max',6.0e9)
    raw_config.setValue('Q2 f01_max',7.87e9)
    raw_config.setValue('Q3 f01_max',5.4e9)
    raw_config.setValue('Q1 f01_min',1e9)
    raw_config.setValue('Q2 f01_min',1e9)
    raw_config.setValue('Q3 f01_min',1e9)
    raw_config.setValue('Q1 Ec',0.25e9)
    raw_config.setValue('Q2 Ec',0.3e9)
    raw_config.setValue('Q3 Ec',0.25e9)
    # Coupling ratios: qubit-coupler (r12, r23) and direct qubit-qubit (r13).
    raw_config.setValue('r12',0.018)
    raw_config.setValue('r23',0.018)
    raw_config.setValue('r13',0.0015)
    raw_config.setValue('Q2 Voltage period',-1)
    raw_config.setValue('Q2 Voltage operating point',0)
    # Drive the coupler with the distorted flux pulse built above.
    raw_config.setValue('Q2 Flux',flux_pulse)
    simu_config = Circuit_Simulator.read_config(raw_config.get_dict())
    Simulator = Circuit_Simulator.Simulator(simu_config)
    Simulator.performsimulation(solver_type=1)
    # Extract the final-time unitary restricted to the computational subspace
    # of Q1/Q3, then score it against an ideal CZ with single-qubit phases removed.
    Simulator.UnitaryResult.get_U(-1)
    Simulator.UnitaryResult.get_subspace_operator(['000','001','100','101'])
    Simulator.UnitaryResult.set_Target_gate('CZ')
    Simulator.UnitaryResult.remove_single_qubit_gate()
    Simulator.UnitaryResult.get_Gate_Fidelity()
    fidelity = Simulator.UnitaryResult.Gate_Fidelity
    return 1 - fidelity
# -
examples/code for 'Coupler-assisted C-phase gate'/Robustness/robustness to distortion/.ipynb_checkpoints/Robustness versus coupling strength (Tg=30ns)-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
import imp
import yaml
import csv
import pandas as pd
import re
from rf import *

# Project-local YAML-spec reader, loaded by path (script lives next to it).
modl = imp.load_source('read_model_yaml', 'read_model_yaml.py')

# Import argument
inp_yaml = "model/spec/SS/SS_RF_1.yaml"
#inp_yaml = sys.argv[1]

# +
# Open test and train sets
df_test = pd.read_csv("data/output/model_clean_data/test.tar.gz",
                      compression='gzip', index_col=None)
df_train = pd.read_csv("data/output/model_clean_data/train.tar.gz",
                       compression='gzip', index_col=None)

# Define test/training set (np comes from `from rf import *` — TODO confirm)
X_test = np.array(df_test.drop(['labels'], axis=1))
Y_test = np.array(df_test[['labels']])[:, 0]
X_train = np.array(df_train.drop(['labels'], axis=1))
Y_train = np.array(df_train[['labels']])[:, 0]


# +
def write(filename, results, labels):
    """
    Write results into csv file.

    Parameters
    ----------
    filename : string
        filename to output the result
    results : list or numpy array
        results of some simulation
    labels : list
        labels for the results, i.e. names of parameters and metrics
    """
    # TODO: labels are currently unused; they should become a header row.
    results.tofile(filename, sep=',')


def run_model(inp_yaml, X_train, Y_train, X_test, Y_test):
    """Run the model described by a YAML spec file and write metrics to csv.

    Parameters
    ----------
    inp_yaml : string
        Path to a model-spec YAML file of the form
        ``model/spec/<folder>/<name>.yaml``; the output csv is written to
        ``data/output/<folder>/<name>.csv``.
    X_train, Y_train, X_test, Y_test :
        Train/test features and labels as numpy arrays.

    Returns
    -------
    None — results are written to the derived output csv path.
    """
    # Define output file name based on input.
    # BUGFIX: derive folder/file name from the *inp_yaml* argument instead of
    # the hard-coded "model/spec/SS/SS_RF_1.yaml" — previously every spec
    # passed to this function overwrote SS_RF_1's output file.
    folder_name = re.split("/", inp_yaml)[2]
    file_name = re.split("/", inp_yaml)[3][:-5]  # strip the ".yaml" suffix
    output = 'data/output/' + folder_name + '/' + file_name + '.csv'

    yaml_params = modl.read_model_yaml(inp_yaml)

    # Only random-forest specs are supported; other model_type values are a no-op.
    if yaml_params["model_type"] == "RF":
        n_estimators = yaml_params["parameters"]["n_estimators"]
        criterion = yaml_params["parameters"]["criterion"]
        max_features = yaml_params["parameters"]["max_features"]
        max_depth = yaml_params["parameters"]["max_depth"]
        n_jobs = yaml_params["parameters"]["n_jobs"]

        # Define labels of output
        labels = ["logloss", "miss_err", "prec", "recall", "f1",
                  "n_estimators", "criterion", "max_features", "max_depth"]

        # Run many simulations in parallel using as many cores as necessary
        if yaml_params["simulations"]:
            # Run simulation
            result = rf_simulation(X_train=X_train,
                                   Y_train=Y_train,
                                   X_test=X_test,
                                   Y_test=Y_test,
                                   n_estimators=n_estimators,
                                   criterion=criterion,
                                   max_features=max_features,
                                   max_depth=max_depth)
            # Write into csv
            write(output, result, labels)

        # Run a single simulation
        else:
            labels.append("n_jobs")
            # Run simulation
            result = rf(X_train=X_train,
                        Y_train=Y_train,
                        X_test=X_test,
                        Y_test=Y_test,
                        n_estimators=n_estimators,
                        criterion=criterion,
                        max_features=max_features,
                        max_depth=max_depth)
            result = np.array([result])
            # Write into csv
            write(output, result, labels)
# -

# Run the model
run_model(inp_yaml, X_train, Y_train, X_test, Y_test)
lobpredictrst/.ipynb_checkpoints/run_model-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python3 # name: python3 # --- # # Tests for PDF Pipline # # # Contents: # - \hyperlink{This is a test file for pdfs (and a Generalization)](pdf.ipynb) # - \hyperlink{Notation](pdf.ipynb#notation) # - \hyperlink{Model Ingredients and Assumptions (test case for brackets)](pdf.ipynb#model-ingredients-and-assumptions-test-case-for-brackets) # - \hyperlink{Dynamic Interpretation](pdf.ipynb#dynamic-interpretation) # - \hyperlink{References](references.ipynb)
tests/pdf/ipynb/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Update Operations # We can't really update an element of a set - either we remove one or add one - but replacement would not make sense, much like "replacing" a key in a dictionary (we can replace a value, just not a key, and sets are basically like value-less dictionaries). # Let's first consider how we can create new sets from other sets: # * intersection # * union # * difference # * symetric difference # For each of these cases, we can create new sets as follows: s1 = {1, 2, 3} s2 = {2, 3, 4} print(s1, id(s1)) s1 = s1 & s2 print(s1, id(s1)) # As you can see, we calculated the intersection of `s1` and `s2` and set `s1` to the result - but this means we ended up with a new object for `s1`. # # We may want to **mutate** `s1` instead. # And the samew goes for the other operations mentioned above. # # Python provides us a way to do this using both methods and equivalent operators: # * union updates: `s1.update(s2)` or `s1 |= s2` # * intersection updates: `s1.intersection_update(s2)` or `s1 &= s2` # * difference updates: `s1.difference_update(s2)` or `s1 -= s2` # * symm. diff. updates: `s1.symmetric_difference_update(s2)` or `s1 ^= s2` # All these operations **mutate** the original set. # #### Union Updates s1 = {1, 2, 3} s2 = {4, 5, 6} print(id(s1)) s1 |= s2 print(s1, id(s1)) s1 = {1, 2, 3} s2 = {4, 5, 6} print(id(s1)) s1.update(s2) print(s1, id(s1)) # #### Intersection Updates s1 = {1, 2, 3} s2 = {2, 3, 4} print(id(s1)) s1 &= s2 print(s1, id(s1)) s1 = {1, 2, 3} s2 = {2, 3, 4} print(id(s1)) s1.intersection_update(s2) print(s1, id(s1)) # #### Difference Updates s1 = {1, 2, 3, 4} s2 = {2, 3} print(id(s1)) s1 -= s2 print(s1, id(s1)) s1 = {1, 2, 3, 4} s2 = {2, 3} print(id(s1)) s1.difference_update(s2) print(s1, id(s1)) # Be careful with this one. 
These two expressions are **NOT** equivalent (this is because difference operations are not associative): s1 = {1, 2, 3, 4} s2 = {2, 3} s3 = {3, 4} result = s1 - (s2 - s3) print(result) s1 -= s2 - s3 print(s1) s1 = {1, 2, 3, 4} s2 = {2, 3} s3 = {3, 4} result = (s1 - s2) - s3 print(result) s1.difference_update(s2, s3) print(s1) # #### Symmetric Difference Update s1 = {1, 2, 3, 4, 5} s2 = {4, 5, 6, 7} s1 ^ s2 s1 = {1, 2, 3, 4, 5} s2 = {4, 5, 6, 7} print(id(s1)) s1 ^= s2 print(s1, id(s1)) s1 = {1, 2, 3, 4, 5} s2 = {4, 5, 6, 7} print(id(s1)) s1.symmetric_difference_update(s2) print(s1, id(s1)) # #### Why the methods as well as the operators? # The methods are actually a bit more flexible than the operators. # What happens when we want to update a set from it's union with multiple other sets? # We can certainly do it this way: s1 = {1, 2, 3} s2 = {3, 4, 5} s3 = {5, 6, 7} print(id(s1)) s1 |= s2 | s3 print(s1, id(s1)) # So this works quite well, but we **have** to use sets. # # Using the method we do not have that restriction, we can actually use iterables (they must contain hashable elements) and Python will implicitly convert them to sets: s1 = {1, 2, 3} s1.update([3, 4, 5], (6, 7, 8), 'abc') print(s1) # Of course we can achieve the same thing using the operators, it just requires a little more typing: s1 = {1, 2, 3} s1 |= set([3, 4, 5]) | set((6, 7, 8)) | set('abc') print(s1) # #### Where might this be useful? # You're hopefully seeing a parallel between these set mutation operations and list mutation operations such as `append` and `extend`. # # So the usefullness of mutating a set is no different than the usefullness of mutating a list. # # There might be a reason you want to maintain the same object reference - maybe you are writing a function that needs to mutate some set that was passed as an argument. 
# ##### Example 1 # Suppose you are writing a function that needs to return all the words found in multiple strings, but with certain words removed (like `'the'`, `'and'`, etc). # # You could take this approach: def combine(string, target): target.update(string.split(' ')) def cleanup(combined): words = {'the', 'and', 'a', 'or', 'is', 'of'} combined -= words result = set() combine('lumberjacks sleep all night', result) combine('the mistry of silly walks', result) combine('this parrot is a late parrot', result) cleanup(result) print(result) # ##### Example 2 # You may find the above example a little contrived, so let's see another example which might actually prove more practical. # # Suppose we have a program that fetches data from some API, database, whatever - and it retrieves a paged list of city names. We want our program to keep fetching data from the source until the source is exhausted, and filter out any cities we are not interested in from our final result. # To simulate the data source, let's do this: def gen_read_data(): yield ['Paris', 'Beijing', 'New York', 'London', 'Madrid', 'Mumbai'] yield ['Hyderabad', 'New York', 'Milan', 'Phoenix', 'Berlin', 'Cairo'] yield ['Stockholm', 'Cairo', 'Paris', 'Barcelona', 'San Francisco'] # And we can use this generator this way: data = gen_read_data() next(data) next(data) next(data) next(data) # Next we're going to create a filter that will look at the data just received, removing any cities that match one we want to ignore: def filter_incoming(*cities, data_set): data_set.difference_update(cities) result = set() data = gen_read_data() for page in data: result.update(page) filter_incoming('Paris', 'London', data_set=result) print(result)
dd_1/Part 3/Section 05 - Sets/04 - Update Operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook is used to create the function which converts a simulation to the ASPA. # + # %matplotlib inline import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt import matplotlib import math import copy # copy.deepcopy(dict_variable) to actually copy a dict without problems import keijzer_exogan as ke from tqdm import tqdm from sklearn.preprocessing import MinMaxScaler # %config InlineBackend.print_figure_kwargs={'facecolor' : "w"} # Make sure the axis background of plots is white, this is usefull for the black theme in JupyterLab # Initialize default seaborn layout sns.set_palette(sns.hls_palette(8, l=.3, s=.8)) sns.set(style='ticks') # - # # Load chunk # X[0] is a dict from regular chunk # X[0][0] is a dict from .npy selection # + dir_ = '/datb/16011015/ExoGAN_data//' X = ke.load_all_to_array(dir_,'chunck_0.pkgz') # - df = pd.read_csv('wavelengths_and_indices.csv', header=None, skiprows=[0], usecols=[1]) # load wavelengths df.columns = ['x'] df = df.loc[df['x'] <= 16] # select only wavelengths <= 16 len(df) df.head() # ## Dict to ASPA function def ASPA_v4(x, wavelengths, max_wavelength=16): """ x: dict max_wavelength: max wavelength in micron to decode to aspa returns: 1x32x32 ndarray """ #print('='*100) x = copy.deepcopy(x) wavelengths = wavelengths.copy() spectrum = x['data']['spectrum'] #print('Original spectrum length: ', len(spectrum)) spectrum = np.expand_dims(spectrum, axis=1) # change shape from (515,) to (515,1) params = x['param'] for param in params: #print('Param: ', param) if 'mixratio' in param: params[param] = np.log10(params[param]) # transform mixratio's to dex because they are generated on logarithmic scale """ Normalize params """ # Min max values from training set, in the same order 
as params above: planet mass, temp, .... co mixratio. min_values = [1.518400e+27, 1.000000e+03, -8, 5.592880e+07, -8, -8, -8] max_values = [3.796000e+27, 2.000000e+03, -1, 1.048665e+08, -1, -1, -1] for i,param in enumerate(params): params[param] = scale_param(params[param], min_values[i], max_values[i]) #print('%s: %s' % (param, params[param])) #print('-'*5) """ Select bins """ data = np.concatenate([wavelengths,spectrum], axis=1) #print('Original data length: ', len(data)) data = pd.DataFrame(data) data.columns = ['x', 'y'] # x is wavelength, y is (R_p / R_s)^2 data = data.loc[data['x'] <= 16] # select only wavelengths <= 16 # Could loop this, but right now this is more visual # H2O bins bin1 = data[data.x <= 0.44] bin2 = data[(data.x > 0.44) & (data.x <= 0.495)] bin3 = data[(data.x > 0.495) & (data.x <= 0.535)] bin4 = data[(data.x > 0.535) & (data.x <= 0.58)] bin5 = data[(data.x > 0.58) & (data.x <= 0.635)] bin6 = data[(data.x > 0.635) & (data.x <= 0.71)] bin7 = data[(data.x > 0.71) & (data.x <= 0.79)] bin8 = data[(data.x > 0.79) & (data.x <= 0.9)] bin9 = data[(data.x > 0.9) & (data.x <= 1.08)] bin10 = data[(data.x > 1.08) & (data.x <= 1.3)] bin11 = data[(data.x > 1.3) & (data.x <= 1.7)] bin12 = data[(data.x > 1.7) & (data.x <= 2.35)] # Manually chosen bins bin13 = data[(data.x > 2.35) & (data.x <= 4)] bin14 = data[(data.x > 4) & (data.x <= 6)] bin15 = data[(data.x > 6) & (data.x <= 10)] bin16 = data[(data.x > 10) & (data.x <= 14)] bin17 = data[data.x > 14] bins = [bin17, bin16, bin15, bin14, bin13, bin12, bin11, bin10, bin9, bin8, bin7, bin6, bin5, bin4, bin3, bin2, bin1] #print('Total bins length: ', len(np.concatenate(bins))) #for i,b in enumerate(bins): # print('Bin , shape: ',(len(bins)-i), b.values.shape) """ Normalize bins """ scalers = [MinMaxScaler(feature_range=(-1,1)).fit(b) for b in bins] # list of 8 scalers for the 8 bins mins = [ b.iloc[:,1].min() for b in bins] # .iloc[:,1] selects the R/R (y) only maxs = [ b.iloc[:,1].max() for b in bins] stds = [ 
b.iloc[:,1].std() for b in bins] #print(min(mins), max(maxs)) bins_scaled = [] for i,b in enumerate(bins): bins_scaled.append(scalers[i].transform(b)) spectrum_scaled = np.concatenate(bins_scaled, axis=0) spectrum_scaled = spectrum_scaled[:,1] #print('spectrum scaled shape: ', spectrum_scaled.shape) """ Create the ASPA """ """Spectrum""" aspa = np.zeros((32,32)) row_length = 25 # amount of pixels used per row n_rows = math.ceil(len(spectrum_scaled) / row_length) # amount of rows the spectrum needs in the aspa, so for 415 data points, 415/32=12.96 -> 13 rows #print('Using %s rows' % n_rows) for i in range(n_rows): # for i in start = i*row_length stop = start+row_length spec = spectrum_scaled[start:stop] if len(spec) != row_length: n_missing_points = row_length-len(spec) spec = np.append(spec, [0 for _ in range(n_missing_points)]) # for last row, if length != 32, fill remaining with 0's #print('Filled row with %s points' % n_missing_points) aspa[i, :row_length] = spec """ExoGAN params""" for i,param in enumerate(params): aspa[:16, 25+i:26+i] = params[param] """min max std values for spectrum bins""" for i in range(len(mins)): min_ = scale_param(mins[i], 0.00648 , 0.02877) max_ = scale_param(maxs[i], 0.00648 , 0.02877) #std_ = scale_param(stds[i], 9e-6, 2e-4) #min_ = mins[i] #max_ = maxs[i] aspa[16:17, i*2:i*2+2] = min_ aspa[17:18, i*2:i*2+2] = max_ """Fill unused space with noice""" for i in range(14): noise = np.random.rand(32) # random noise betweem 0 and 1 for each row aspa[18+i:19+i*1, :] = noise return aspa arr = np.array([i for i in range(25)]) len(arr[:16]) def scale_param(X, X_min, X_max): """ Formule source: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html In this case 1 is max, 0 is min """ std = (X-X_min)/ (X_max - X_min) return std*(1 - -1)+-1 wavelengths = ke.load_wavelengths() # + np.random.shuffle(X) plt.figure(figsize=(15,15)) for i in tqdm(range(4*4)): image = ASPA_v4(X[i], wavelengths) plt.subplot(4, 4, i+1) 
plt.imshow(image, cmap='gray', vmin=-1.2, vmax=1.2) plt.tight_layout() # - # ## Inverse transform i = 0 dict_ = X[i] # ### Real values # + aspa = ASPA_v4(dict_, wavelengths) plt.imshow(image, cmap='gray', vmin=-1.2, vmax=1.2) # - params_original = copy.deepcopy(dict_['param']) params_original np.log(2.1544346900318865e-06) np.exp(-13.047982193632924) ke.plot_trans(wavelengths, dict_['data']['spectrum']) plt.plot(wavelengths, dict_['data']['spectrum']) plt.xlim(right=16) # # ---------------- Decoding Params from ASPA ------------ def inverse_scale(X_, X_min, X_max): """ X_ is scaled X X is unscaled X_ """ X = X_ * (X_max - X_min) + X_min return X """The values of the image parts from the aspa (still encoded)""" # Grab the values spectrum = aspa[:16, :25].flatten() params = [aspa[:16, 25+i:26+i].mean() for i in range(7)] # + """Decode to arrays""" # min max values for params used to decode min_values = [1.518400e+27, 1.000000e+03, -8, 5.592880e+07, -8, -8, -8] max_values = [3.796000e+27, 2.000000e+03, -1, 1.048665e+08, -1, -1, -1] # Initialize dict to be used for the param values params_dict = { 'planet_mass': 0, 'temp_profile': 0, 'ch4_mixratio': 0, 'planet_radius': 0, 'h2o_mixratio': 0, 'co2_mixratio': 0, 'co_mixratio': 0 } # - # ### Decode params # + for i,param in enumerate(params_dict): # put aspa values in dict params_dict[param] = params[i] # inverse scale these values params_dict[param] = inverse_scale(params[i], min_values[i], max_values[i]) # scale mixratios from log back to linear if 'mixratio' in param: params_dict[param] = np.exp(params_dict[param]) print(param, params_dict[param]) params_dict # - # # ----- Functionize param decoding ----- def decode_params_from_aspa(aspa): """The values of the image parts from the aspa (still encoded)""" # Grab the values spectrum = aspa[:16, :25].flatten() print("spectrum length: ", len(spectrum)) params = [aspa[:16, 25+i:26+i].mean() for i in range(7)] """Decode to arrays""" # min max values for params used to decode 
min_values = [1.518400e+27, 1.000000e+03, -8, 5.592880e+07, -8, -8, -8] max_values = [3.796000e+27, 2.000000e+03, -1, 1.048665e+08, -1, -1, -1] # Initialize dict to be used for the param values params_dict = { 'planet_mass': 0, 'temp_profile': 0, 'ch4_mixratio': 0, 'planet_radius': 0, 'h2o_mixratio': 0, 'co2_mixratio': 0, 'co_mixratio': 0 } for i,param in enumerate(params_dict): # put aspa values in dict params_dict[param] = params[i] # inverse scale these values params_dict[param] = inverse_scale(params[i], min_values[i], max_values[i]) # scale mixratios from log back to linear if 'mixratio' in param: params_dict[param] = np.exp(params_dict[param]) #print(param, params_dict[param]) return params_dict decode_params_from_aspa(aspa) # # ---------------- Decoding Spectrum from ASPA ------------ # ### Decode min max values for bins # + mins_ = [aspa[16:17, i*4:i*4+4].mean() for i in range(8)] maxs_ = [aspa[18:19, i*4:i*4+4].mean() for i in range(8)] """min max std values for spectrum bins""" mins = [] # globally decoded values maxs = [] for i in range(8): mins.append(inverse_scale(mins_[i], 0.005, 0.03)) maxs.append(inverse_scale(maxs_[i], 0.005, 0.03)) mins # - # ## Decode bins # + """Select bins""" df = ke.load_wavelengths() df.columns = ['x'] df = df.loc[df['x'] <= 16] # select only wavelengths <= 16 (max wavelength ASPA has been encoded with) df['y'] = spectrum # Could loop this, but right now this is more visual bin1 = df[df.x <= 0.8] bin2 = df[(df.x > 0.8) & (df.x <= 1.3)] # select data between 2 and 4 micron bin3 = df[(df.x > 1.3) & (df.x <= 2)] bin4 = df[(df.x > 2) & (df.x <= 4)] bin5 = df[(df.x > 4) & (df.x <= 6)] bin6 = df[(df.x > 6) & (df.x <= 10)] bin7 = df[(df.x > 10) & (df.x <= 14)] bin8 = df[df.x > 14] bins = [bin8, bin7, bin6, bin5, bin4, bin3, bin2, bin1] # - # ## Scaled spectrum bins lens = [] for b in bins: lens.append(len(b)) plt.plot(b.x, b.y) # ## Scaled bins concatenated # + y = np.concatenate([b.y for b in bins]) plt.plot(y) # - """Inverse scale 
bins""" for i,b in enumerate(bins): b.y = inverse_scale(b.y, mins[i], maxs[i]) # ## Decoded bins # + for b in bins: plt.plot(b.x, b.y, 'x-') plt.xlim((0, 16)) # - # ## Original spectrum # + plt.plot(wavelengths, dict_['data']['spectrum'], 'x-') plt.xlim((0, 16)) np.array(wavelengths).shape # - # ## Decoded spectrum concatenated x = np.concatenate([b.x for b in bins]) y = np.concatenate([b.y for b in bins]) x.shape, y.shape plt.plot(x,y, 'x-') # ### Original spectrum # Note that this doesn't have max wavelength set to 16 ke.plot_trans(wavelengths, dict_['data']['spectrum']) # ### Decoded spectrum ke.plot_trans(x,y) # # ----- Functionize spectrum decoding ----- def decode_spectrum_from_aspa(aspa, max_wavelength=16): """ Returns x: wavelength in micron, y: R/R It's currently hard coded to work with """ mins_ = [aspa[16:17, i*4:i*4+4].mean() for i in range(8)] maxs_ = [aspa[18:19, i*4:i*4+4].mean() for i in range(8)] """min max std values for spectrum bins""" mins = [] # globally decoded values maxs = [] for i in range(8): mins.append(inverse_scale(mins_[i], 0.005, 0.03)) maxs.append(inverse_scale(maxs_[i], 0.005, 0.03)) """Select bins""" df = ke.load_wavelengths() df.columns = ['x'] df = df.loc[df['x'] <= max_wavelength] # select only wavelengths <= 16 (max wavelength ASPA has been encoded with) df['y'] = spectrum # Could loop this, but right now this is more visual bin1 = df[df.x <= 0.8] bin2 = df[(df.x > 0.8) & (df.x <= 1.3)] # select data between 2 and 4 micron bin3 = df[(df.x > 1.3) & (df.x <= 2)] bin4 = df[(df.x > 2) & (df.x <= 4)] bin5 = df[(df.x > 4) & (df.x <= 6)] bin6 = df[(df.x > 6) & (df.x <= 10)] bin7 = df[(df.x > 10) & (df.x <= 14)] bin8 = df[df.x > 14] bins = [bin8, bin7, bin6, bin5, bin4, bin3, bin2, bin1] """Inverse scale bins""" for i,b in enumerate(bins): b.y = inverse_scale(b.y, mins[i], maxs[i]) x = np.concatenate([b.x for b in bins]) y = np.concatenate([b.y for b in bins]) return x, y
notebooks/1.3 ASPA v4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# ConText-style trigger-rule sentence generation.
#
# The original notebook repeated the same boilerplate once per rule:
# re-import nltk (including the never-used `demo_grammar`), build a CFG
# string, then generate and print every sentence.  Here the imports and
# the generate/print loop are hoisted into a single helper, and each
# rule keeps only its grammar.  The printed output (content and order)
# is unchanged.
from nltk import CFG
from nltk.parse.generate import generate


def print_generated(grammar_str, n=1000):
    """Parse ``grammar_str`` as a CFG and print up to ``n`` sentences.

    Each generated sentence is printed as its space-joined terminals,
    exactly as the original per-cell loops did.

    :param grammar_str: newline-separated CFG productions accepted by
        ``CFG.fromstring``; terminals carry the trigger-rule metadata
        (direction|kind|label|distance|group) as their last token.
    :param n: maximum number of sentences to generate (default 1000,
        matching the original cells).
    """
    for sentence in generate(CFG.fromstring(grammar_str), n=n):
        print(' '.join(sentence))


# Grammar strings in original notebook order.  Comments give the source
# item number, French term and rule group where the notebook recorded
# them; terminal lists are reproduced byte-for-byte (duplicates and all).
GRAMMARS = [
    # (no item number) "\> N unit ... de" — forward historical trigger, Group[2]
    """
S -> Noun4 Adposition5 Trigger_Rule
Noun4 -> "\\> 0 -mois" | "\\> 0-mois" | "\\> 0 ans" | "\\> 0 ans" | "\\> 0 mois" | "\\> 0 mois" | "\\> 0 année" | "\\> 0 ans" | "\\> 1 semaine" | "\\> 1 semaine" | "\\> 1 semaine" | "\\> 1 semaines" | "\\> 13 jours" | "\\> 13-jours" | "\\> 13 jour" | "\\> 13 jours" | "\\> 13 - jour" | "\\> 13 - jours"
Adposition5 -> "de"
Trigger_Rule -> "|forward|trigger|historical|30|Group[2]"
""",
    # (no item number) "Il y a \> N unit" — backward historical trigger, Group[4]
    """
S -> Noun4 Trigger_Rule
Noun4 -> "Il y a \\> 0 mois" | "Il y a \\> 0 mois" | "Il y a \\> 0 an" | "Il y a \\> 0 ans" | "Il y a \\> 0 ans" | "Il y a \\> 1 semaine" | "Il y a \\> 1 semaine"
Trigger_Rule -> "|backward|trigger|historical|30|Group[4]"
""",
    # Item 19 — ": non" (': no'), Group[23]
    """
S -> Adverb2 Trigger_Rule
Adverb2 -> ": non" | ": neg"
Trigger_Rule -> "|backward|trigger|negated|10|Group[23]"
""",
    # Item 20 — "\w+ non" ('\w+ no'), Group[24]
    """
S -> Adjective1 Other4 Trigger_Rule
Adjective1 -> "\\w+"
Other4 -> "non"
Trigger_Rule -> "|forward|trigger|negated|10|Group[24]"
""",
    # Item 21 — "absence de" ('absence of'), Group[26]
    """
S -> Noun1 Adposition2 Trigger_Rule
Noun1 -> "absence" | "défaut" | "distraction" | "oubli" | "privation" | "omission" | "éclipse" | "carence" | "faute" | "disparition"
Adposition2 -> "de" | "d'un" | "d'une"
Trigger_Rule -> "|forward|trigger|negated|10|Group[26]"
""",
    # Item 23 — "suffisant pour écarter" ('sufficient to rule out'), Group[30]
    """
S -> Verb1 Adposition2 Verb3 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Verb3 -> "écarter" | "détourner" | "dévier" | "séparer" | "rejeter" | "chasser" | "bannir" | "exiler" | "éliminer" | "proscrire"
Trigger_Rule -> "|forward|trigger|negated|10|Group[30]"
""",
    # Item 24 — "suffisant pour l'exclure" ('sufficient to rule her out for'), Group[30]
    """
S -> Verb1 Adposition2 Determiner3 Noun4 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Determiner3 -> "l'"
Noun4 -> "exclure" | "expulser" | "écarter" | "renvoyer" | "repousser" | "rejeter" | "chasser" | "retrancher" | "excepter" | "bannir"
Trigger_Rule -> "|forward|trigger|negated|10|Group[30]"
""",
    # Item 25 — "suffisant pour l'éliminer" ('sufficient to rule him/her out'), Group[30]
    """
S -> Verb1 Adposition2 Pronoun3 Verb4 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Pronoun3 -> "l'"
Verb4 -> "éliminer" | "proscrire" | "repousser" | "rejeter" | "chasser" | "évincer" | "exclure" | "retrancher" | "recaler" | "évacuer"
Trigger_Rule -> "|forward|trigger|negated|10|Group[30]"
""",
    # Item 26 — "suffisant pour exclure" ('sufficient to rule out against/for'), Group[30]
    """
S -> Verb1 Adposition2 Noun3 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Noun3 -> "exclure" | "expulser" | "écarter" | "renvoyer" | "repousser" | "rejeter" | "chasser" | "retrancher" | "excepter" | "bannir"
Trigger_Rule -> "|forward|trigger|negated|10|Group[30]"
""",
    # Item 27 — "suffisant pour l'éliminer contre" ('sufficient to rule him/her out against'), Group[30]
    """
S -> Verb1 Adposition2 Pronoun3 Verb4 Adposition5 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Pronoun3 -> "l'"
Verb4 -> "éliminer" | "proscrire" | "repousser" | "rejeter" | "chasser" | "évincer" | "exclure" | "retrancher" | "recaler" | "évacuer"
Adposition5 -> "contre"
Trigger_Rule -> "|forward|trigger|negated|10|Group[30]"
""",
    # Item 28 — "adéquate pour exclure le patient" ('adequate to rule the patient out'), Group[42]
    """
S -> Adjective1 Adposition2 Noun3 Determiner4 Noun5 Trigger_Rule
Adjective1 -> "adéquate"
Adposition2 -> "pour"
Noun3 -> "exclure" | "expulser" | "écarter" | "renvoyer" | "repousser" | "rejeter" | "chasser" | "retrancher" | "excepter" | "bannir"
Determiner4 -> "le"
Noun5 -> "patient" | "persévérant" | "client" | "infatigable" | "constant" | "calme" | "imperturbable" | "impassible" | "résigné" | "courageux"
Trigger_Rule -> "|forward|trigger|negated|10|Group[42]"
""",
    # Item 29 — "suffisant pour écarter le patient" ('sufficient to rule the patient out'), Group[42]
    """
S -> Verb1 Adposition2 Verb3 Determiner4 Noun5 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Verb3 -> "écarter" | "détourner" | "dévier" | "séparer" | "rejeter" | "chasser" | "bannir" | "exiler" | "éliminer" | "proscrire"
Determiner4 -> "le"
Noun5 -> "patient" | "persévérant" | "client" | "infatigable" | "constant" | "calme" | "imperturbable" | "impassible" | "résigné" | "courageux"
Trigger_Rule -> "|forward|trigger|negated|10|Group[42]"
""",
    # Item 30 — "suffisant pour écarter le patient contre" ('... out against'), Group[42]
    """
S -> Verb1 Adposition2 Verb3 Determiner4 Noun5 Adposition6 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Verb3 -> "écarter" | "détourner" | "dévier" | "séparer" | "rejeter" | "chasser" | "bannir" | "exiler" | "éliminer" | "proscrire"
Determiner4 -> "le"
Noun5 -> "patient" | "persévérant" | "client" | "infatigable" | "constant" | "calme" | "imperturbable" | "impassible" | "résigné" | "courageux"
Adposition6 -> "contre"
Trigger_Rule -> "|forward|trigger|negated|10|Group[42]"
""",
    # Item 31 — "suffisant pour exclure le patient pour" ('... out for'), Group[42]
    """
S -> Verb1 Adposition2 Noun3 Determiner4 Noun5 Adposition6 Trigger_Rule
Verb1 -> "suffisant" | "arrogant" | "prétentieux" | "passable" | "avantageux" | "fat" | "présomptueux" | "fier" | "convenable" | "pédant"
Adposition2 -> "pour"
Noun3 -> "exclure" | "expulser" | "écarter" | "renvoyer" | "repousser" | "rejeter" | "chasser" | "retrancher" | "excepter" | "bannir"
Determiner4 -> "le"
Noun5 -> "patient" | "persévérant" | "client" | "infatigable" | "constant" | "calme" | "imperturbable" | "impassible" | "résigné" | "courageux"
Adposition6 -> "pour"
Trigger_Rule -> "|forward|trigger|negated|10|Group[42]"
""",
    # Item 32 — "après" ('after'), Group[47]
    """
S -> Adposition1 Trigger_Rule
Adposition1 -> "après"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[47]"
""",
    # Item 33 — "bien que" ('although', 'though'), Group[48]
    """
S -> Adverb1 Subordinating_conjunction2 Trigger_Rule
Adverb1 -> "bien"
Subordinating_conjunction2 -> "que"
Trigger_Rule -> "|forward|termination|negated|10|Group[48]"
""",
    # Item 34 — "Presente avec" ('presented with'), Groups[51, 49]
    """
S -> Noun1 Adposition2 Trigger_Rule
Noun1 -> "Presente"
Adposition2 -> "avec"
Trigger_Rule -> "|forward|termination|historical|10|Group[51, 49]" |"|forward|termination|negated|10|Group[51, 49]"
""",
    # Item 35 — "présente avec" ('presents with'), Groups[51, 49]
    """
S -> Adjective1 Adposition2 Trigger_Rule
Adjective1 -> "présente"
Adposition2 -> "avec"
Trigger_Rule -> "|forward|termination|historical|10|Group[51, 49]" |"|forward|termination|negated|10|Group[51, 49]"
""",
    # Item 36 — "et \w+ \w+ show" ('and \w+ \w+ show'), Group[53]
    """
S -> Coordinating_conjunction1 Adjective2 Noun3 Adposition4 Adjective5 Noun6 Adposition7 Noun8 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adjective2 -> "\"
Noun3 -> "w"
Adposition4 -> "+"
Adjective5 -> "\"
Noun6 -> "w"
Adposition7 -> "+"
Noun8 -> "show"
Trigger_Rule -> "|forward|termination|negated|10|Group[53]"
""",
    # Item 37 — "et \w+ spectacles" ('and \w+ shows'), Group[53]
    """
S -> Coordinating_conjunction1 Adjective2 Adposition3 Determiner4 Noun5 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adjective2 -> "\"
Adposition3 -> "w"
Determiner4 -> "+"
Noun5 -> "spectacles" | "féerie" | "représentation" | "séance" | "ballet" | "tableau" | "numéro" | "comédie" | "panorama" | "cirque"
Trigger_Rule -> "|forward|termination|negated|10|Group[53]"
""",
    # Item 38 — "et \w+ \w+ affichés" ('and \w+ \w+ shown'), Group[53]
    """
S -> Coordinating_conjunction1 Adjective2 Noun3 Adposition4 Adjective5 Noun6 Adposition7 Noun8 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adjective2 -> "\"
Noun3 -> "w"
Adposition4 -> "+"
Adjective5 -> "\"
Noun6 -> "w"
Adposition7 -> "+"
Noun8 -> "affichés" | "déployer" | "déclarer" | "exposer" | "étaler" | "manifester" | "annoncer" | "montrer" | "marquer" | "témoigner"
Trigger_Rule -> "|forward|termination|negated|10|Group[53]"
""",
    # Item 39 — "et \w+ show" ('and \w+ show'), Group[53]
    """
S -> Coordinating_conjunction1 Adjective2 Noun3 Adposition4 Noun5 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adjective2 -> "\"
Noun3 -> "w"
Adposition4 -> "+"
Noun5 -> "show"
Trigger_Rule -> "|forward|termination|negated|10|Group[53]"
""",
    # Item 40 — "et \w+ affiché" ('and \w+ shown'), Group[53]
    """
S -> Coordinating_conjunction1 Adjective2 Noun3 Adposition4 Verb5 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adjective2 -> "\"
Noun3 -> "w"
Adposition4 -> "+"
Verb5 -> "affiché" | "public" | "étalé" | "exposé" | "publié" | "présenté" | "placard" | "pancarte" | "proclamation" | "marquer"
Trigger_Rule -> "|forward|termination|negated|10|Group[53]"
""",
    # Item 41 — "et \w+ \w+ spectacles" ('and \w+ \w+ shows'), Group[53]
    """
S -> Coordinating_conjunction1 Adjective2 Noun3 Adposition4 Adjective5 Noun6 Determiner7 Noun8 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adjective2 -> "\"
Noun3 -> "w"
Adposition4 -> "+"
Adjective5 -> "\"
Noun6 -> "w"
Determiner7 -> "+"
Noun8 -> "spectacles" | "féerie" | "représentation" | "séance" | "ballet" | "tableau" | "numéro" | "comédie" | "panorama" | "cirque"
Trigger_Rule -> "|forward|termination|negated|10|Group[53]"
""",
    # Item 42 — "et a fait" ('and did'), Group[59]
    """
S -> Coordinating_conjunction1 Auxiliary2 Verb3 Trigger_Rule
Coordinating_conjunction1 -> "et"
Auxiliary2 -> "a"
Verb3 -> "fait"
Trigger_Rule -> "|forward|termination|negated|10|Group[59]"
""",
    # Item 43 — "et il avait" ('and he had'), Group[60]
    """
S -> Coordinating_conjunction1 Pronoun2 Auxiliary3 Trigger_Rule
Coordinating_conjunction1 -> "et"
Pronoun2 -> "il"
Auxiliary3 -> "avait"
Trigger_Rule -> "|forward|termination|negated|10|Group[60]"
""",
    # Item 44 — "et il a été noté pour avoir" ('and he was noted to have'), Group[61]
    """
S -> Coordinating_conjunction1 Pronoun2 Auxiliary3 Auxiliary4 Verb5 Adposition6 Verb7 Trigger_Rule
Coordinating_conjunction1 -> "et"
Pronoun2 -> "il"
Auxiliary3 -> "a"
Auxiliary4 -> "été"
Verb5 -> "noté" | "fait" | "consigné" | "bordereau" | "apostille" | "constaté" | "réflexion" | "aperçu" | "notice" | "notule"
Adposition6 -> "pour"
Verb7 -> "avoir"
Trigger_Rule -> "|forward|termination|negated|10|Group[61]"
""",
    # Item 45 — "et il était" ('and he was'), Group[62]
    """
S -> Coordinating_conjunction1 Pronoun2 Auxiliary3 Trigger_Rule
Coordinating_conjunction1 -> "et"
Pronoun2 -> "il"
Auxiliary3 -> "était"
Trigger_Rule -> "|forward|termination|negated|10|Group[62]"
""",
    # Item 46 — "et noté d'avoir" ('and noted to have'), Group[63]
    """
S -> Coordinating_conjunction1 Verb2 Adposition3 Verb4 Trigger_Rule
Coordinating_conjunction1 -> "et"
Verb2 -> "noté" | "fait" | "consigné" | "bordereau" | "apostille" | "constaté" | "réflexion" | "aperçu" | "notice" | "notule"
Adposition3 -> "d'"
Verb4 -> "avoir"
Trigger_Rule -> "|forward|termination|negated|10|Group[63]"
""",
    # Item 47 — "et elle avait" ('and she had'), Group[64]
    """
S -> Coordinating_conjunction1 Pronoun2 Auxiliary3 Trigger_Rule
Coordinating_conjunction1 -> "et"
Pronoun2 -> "elle"
Auxiliary3 -> "avait"
Trigger_Rule -> "|forward|termination|negated|10|Group[64]"
""",
    # Item 48 — "et elle a été notée pour avoir" ('and she was noted to have'), Group[65]
    """
S -> Coordinating_conjunction1 Pronoun2 Auxiliary3 Auxiliary4 Verb5 Adposition6 Verb7 Trigger_Rule
Coordinating_conjunction1 -> "et"
Pronoun2 -> "elle"
Auxiliary3 -> "a"
Auxiliary4 -> "été"
Verb5 -> "notée" | "transcrire" | "marquer" | "inscrire" | "relever" | "observer" | "mentionner" | "consigner" | "enregistrer" | "écrire"
Adposition6 -> "pour"
Verb7 -> "avoir"
Trigger_Rule -> "|forward|termination|negated|10|Group[65]"
""",
    # Item 49 — "et elle était" ('and she was'), Group[66]
    """
S -> Coordinating_conjunction1 Pronoun2 Auxiliary3 Trigger_Rule
Coordinating_conjunction1 -> "et"
Pronoun2 -> "elle"
Auxiliary3 -> "était"
Trigger_Rule -> "|forward|termination|negated|10|Group[66]"
""",
    # Item 50 — "et le patient avait" ('and the patient had'), Group[67]
    """
S -> Coordinating_conjunction1 Determiner2 Noun3 Auxiliary4 Trigger_Rule
Coordinating_conjunction1 -> "et"
Determiner2 -> "le"
Noun3 -> "patient" | "persévérant" | "client" | "infatigable" | "constant" | "calme" | "imperturbable" | "impassible" | "résigné" | "courageux"
Auxiliary4 -> "avait"
Trigger_Rule -> "|forward|termination|negated|10|Group[67]"
""",
    # Item 51 — "et le patient était" ('and the patient was'), Group[68]
    """
S -> Coordinating_conjunction1 Determiner2 Noun3 Auxiliary4 Trigger_Rule
Coordinating_conjunction1 -> "et"
Determiner2 -> "le"
Noun3 -> "patient" | "persévérant" | "client" | "infatigable" | "constant" | "calme" | "imperturbable" | "impassible" | "résigné" | "courageux"
Auxiliary4 -> "était"
Trigger_Rule -> "|forward|termination|negated|10|Group[68]"
""",
    # Item 52 — "et avec seulement" ('and with only'), Group[69]
    """
S -> Coordinating_conjunction1 Adposition2 Adverb3 Trigger_Rule
Coordinating_conjunction1 -> "et"
Adposition2 -> "avec"
Adverb3 -> "seulement"
Trigger_Rule -> "|forward|termination|negated|10|Group[69]"
""",
    # Item 53 — "tout autre" ('any other'), Group[71]
    """
S -> Adverb1 Adjective2 Trigger_Rule
Adverb1 -> "tout"
Adjective2 -> "autre"
Trigger_Rule -> "|forward|trigger|negated|10|Group[71]"
""",
    # Item 54 — "#tout" ('#any'), Group[72] (note distance 13)
    """
S -> Noun1 Adjective2 Trigger_Rule
Noun1 -> "#"
Adjective2 -> "tout"
Trigger_Rule -> "|forward|trigger|negated|13|Group[72]"
""",
    # Item 55 — "Excepté" ('apart from'), Group[73]
    """
S -> Adposition1 Trigger_Rule
Adposition1 -> "Excepté"
Trigger_Rule -> "|forward|termination|negated|10|Group[73]"
""",
    # Item 56 — "semble" ('appears to'), Group[74]
    """
S -> Adjective1 Trigger_Rule
Adjective1 -> "semble"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[74]"
""",
    # Item 57 — "sont négatifs" ('are negative'), Group[75]
    """
S -> Auxiliary1 Adjective2 Trigger_Rule
Auxiliary1 -> "sont"
Adjective2 -> "négatifs"
Trigger_Rule -> "|backward|trigger|negated|10|Group[75]"
""",
    # Item 58 — "ne sont plus" ('are no longer'), Group[77]
    """
S -> Adverb1 Auxiliary2 Adverb3 Trigger_Rule
Adverb1 -> "ne"
Auxiliary2 -> "sont"
Adverb3 -> "plus"
Trigger_Rule -> "|backward|trigger|negated|10|Group[77]"
""",
    # Item 59 — "sont exclus" ('are ruled out'), Group[79]
    """
S -> Auxiliary1 Adjective2 Trigger_Rule
Auxiliary1 -> "sont"
Adjective2 -> "exclus"
Trigger_Rule -> "|backward|trigger|negated|10|Group[79]"
""",
    # Item 60 — "sont arrêtés" ('are stopped'), Group[81]
    """
S -> Auxiliary1 Verb2 Trigger_Rule
Auxiliary1 -> "sont"
Verb2 -> "arrêtés" | "bloquer" | "suspendre" | "paralyser" | "endiguer" | "terminer" | "enrayer" | "saisir" | "étouffer" | "prendre"
Trigger_Rule -> "|backward|trigger|negated|10|Group[81]"
""",
    # Item 61 — "comme origine de" ('as the/an origin of'), Group[84]
    """
S -> Adposition1 Noun2 Adposition3 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "origine" | "racine" | "source" | "filiation" | "naissance" | "descendance" | "extraction" | "provenance" | "cause" | "famille"
Adposition3 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 62 — "comme une étiologie de" ('as a/an etiology of'), Group[84]
    """
S -> Adposition1 Determiner2 Noun3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Determiner2 -> "une"
Noun3 -> "étiologie" | "étiopathie" | "causalité" | "étiologie"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 63 — "comme source de" ('as a/an/the source for/of'), Group[84]
    """
S -> Adposition1 Noun2 Adposition3 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "source" | "puits" | "racine" | "provenance" | "germe" | "principe" | "fontaine" | "foyer" | "ferment" | "étymologie"
Adposition3 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 64 — "comme cause de" ('as a/the/an cause for/of'), Group[84]
    """
S -> Adposition1 Noun2 Adposition3 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "cause" | "motif" | "raison" | "ferment" | "procès" | "sujet" | "prétexte" | "moteur" | "fondement" | "germe"
Adposition3 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 65 — "comme une cause pour" ('as an cause for'), Group[84]
    """
S -> Adposition1 Determiner2 Noun3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Determiner2 -> "une"
Noun3 -> "cause" | "motif" | "raison" | "ferment" | "procès" | "sujet" | "prétexte" | "moteur" | "fondement" | "germe"
Adposition4 -> "pour"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 66 — "comme pour" ('as a for'), Group[84]
    """
S -> Adposition1 Adposition2 Trigger_Rule
Adposition1 -> "comme"
Adposition2 -> "pour"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 67 — "comme l'étiologie de" ('as the etiology for/of'), Group[84]
    """
S -> Adposition1 Determiner2 Noun3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Determiner2 -> "l'"
Noun3 -> "étiologie" | "étiopathie" | "causalité" | "étiologie"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 68 — "comme origine pour" ('as the/an origin for'), Group[84]
    """
S -> Adposition1 Noun2 Adposition3 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "origine" | "racine" | "source" | "filiation" | "naissance" | "descendance" | "extraction" | "provenance" | "cause" | "famille"
Adposition3 -> "pour"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 69 — "comme raison de" ('as a/the/an reason of/for'), Group[84]
    """
S -> Adposition1 Noun2 Adposition3 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "raison" | "cause" | "modération" | "intelligence" | "entendement" | "discernement" | "compréhension" | "prétexte" | "sagesse" | "lucidité"
Adposition3 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 70 — "comme une étiologie pour" ('as a/an etiology for'), Group[84]
    """
S -> Adposition1 Determiner2 Noun3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Determiner2 -> "une"
Noun3 -> "étiologie" | "étiopathie" | "causalité" | "étiologie"
Adposition4 -> "pour"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 71 — "comme une raison de" ('as a/an reason for'), Group[84]
    """
S -> Adposition1 Determiner2 Noun3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Determiner2 -> "une"
Noun3 -> "raison" | "cause" | "modération" | "intelligence" | "entendement" | "discernement" | "compréhension" | "prétexte" | "sagesse" | "lucidité"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[84]"
""",
    # Item 72 — "comme origine secondaire de" ('as a/an/the secondary origin of'), Group[90]
    """
S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "origine" | "racine" | "source" | "filiation" | "naissance" | "descendance" | "extraction" | "provenance" | "cause" | "famille"
Adjective3 -> "secondaire"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[90]"
""",
    # Item 73 — "comme raison secondaire de" ('as a/an/the secondary reason for/of'), Group[90]
    """
S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "raison" | "cause" | "modération" | "intelligence" | "entendement" | "discernement" | "compréhension" | "prétexte" | "sagesse" | "lucidité"
Adjective3 -> "secondaire"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[90]"
""",
    # Item 74 — "comme origine secondaire pour" ('as a/an/the secondary origin for'), Group[90]
    """
S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "origine" | "racine" | "source" | "filiation" | "naissance" | "descendance" | "extraction" | "provenance" | "cause" | "famille"
Adjective3 -> "secondaire"
Adposition4 -> "pour"
Trigger_Rule -> "|forward|termination|negated|10|Group[90]"
""",
    # Item 75 — "comme étiologie secondaire de" ('as a/an/the secondary etiology of'), Group[90]
    """
S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "étiologie" | "étiopathie" | "causalité" | "étiologie"
Adjective3 -> "secondaire"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[90]"
""",
    # Item 76 — "comme étiologie secondaire pour" ('as a/an/the secondary etiology for'), Group[90]
    """
S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule
Adposition1 -> "comme"
Noun2 -> "étiologie" | "étiopathie" | "causalité" | "étiologie"
Adjective3 -> "secondaire"
Adposition4 -> "pour"
Trigger_Rule -> "|forward|termination|negated|10|Group[90]"
""",
]

# Generate and print every grammar's sentences in the original order.
for _grammar in GRAMMARS:
    print_generated(_grammar)

# +
# Imports for the next cell, which continues beyond this section of the
# file (kept verbatim so that continuation is unaffected).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
# Item Number : 77 # French Term : comme source secondaire de # English Term(s) : ['as a secondary source of', 'as an secondary source of', 'as the secondary source of'] # Index(es) : [154, 164, 174] # Grouping(s) : [90] cfg_grammar= """ S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule Adposition1 -> "comme" Noun2 -> "source" | "puits" | "racine" | "provenance" | "germe" | "principe" | "fontaine" | "foyer" | "ferment" | "รฉtymologie" Adjective3 -> "secondaire" Adposition4 -> "de" Trigger_Rule -> "|forward|termination|negated|10|Group[90]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 78 # French Term : comme source secondaire pour # English Term(s) : ['as a secondary source for', 'as an secondary source for', 'as the secondary source for'] # Index(es) : [153, 163, 173] # Grouping(s) : [90] cfg_grammar= """ S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule Adposition1 -> "comme" Noun2 -> "source" | "puits" | "racine" | "provenance" | "germe" | "principe" | "fontaine" | "foyer" | "ferment" | "รฉtymologie" Adjective3 -> "secondaire" Adposition4 -> "pour" Trigger_Rule -> "|forward|termination|negated|10|Group[90]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 79 # French Term : comme cause secondaire de # English Term(s) : ['as a secondary cause for', 'as a secondary cause of', 'as an secondary cause for', 'as an secondary cause of', 'as the secondary cause for', 'as the secondary cause of'] # Index(es) : [145, 146, 155, 156, 165, 166] # Grouping(s) : [90] cfg_grammar= """ S -> Adposition1 Noun2 Adjective3 Adposition4 Trigger_Rule Adposition1 -> "comme" Noun2 -> "cause" | "motif" | "raison" | "ferment" | "procรจs" | "sujet" | "prรฉtexte" | "moteur" | "fondement" | 
"germe" Adjective3 -> "secondaire" Adposition4 -> "de" Trigger_Rule -> "|forward|termination|negated|10|Group[90]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 80 # French Term : comme a # English Term(s) : ['as has'] # Index(es) : [175] # Grouping(s) : [122] cfg_grammar= """ S -> Adposition1 Verb2 Trigger_Rule Adposition1 -> "comme" Verb2 -> "a" Trigger_Rule -> "|forward|termination|negated|10|Group[122]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 81 # French Term : comme requis # English Term(s) : ['as needed'] # Index(es) : [176] # Grouping(s) : [123] cfg_grammar= """ S -> Adposition1 Verb2 Trigger_Rule Adposition1 -> "comme" Verb2 -> "requis" | "nรฉcessaire" | "prescrit" | "rรฉclamรฉ" | "obligรฉ" | "demandรฉ" | "rรฉquisitionnรฉ" | "compรฉtent" | "imposรฉ" | "mobilisรฉ" Trigger_Rule -> "|forward|trigger|conditional|30|Group[123]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 82 # French Term : ainsi que tout # English Term(s) : ['as well as any'] # Index(es) : [177] # Grouping(s) : [144] cfg_grammar= """ S -> Adverb1 Subordinating_conjunction2 Pronoun3 Trigger_Rule Adverb1 -> "ainsi" Subordinating_conjunction2 -> "que" Pronoun3 -> "tout" Trigger_Rule -> "|forward|trigger|negated|10|Group[144]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 83 # French Term : ร  part # English Term(s) : ['aside from'] # Index(es) : [179] # Grouping(s) : [146] cfg_grammar= """ S -> Adposition1 Noun2 Trigger_Rule 
Adposition1 -> "ร " Noun2 -> "part" | "partie" | "portion" | "morceau" | "ration" | "fraction" | "partage" | "apport" | "division" | "lot" Trigger_Rule -> "|forward|termination|negated|10|Group[146]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 84 # French Term : comme # English Term(s) : ['as'] # Index(es) : [180] # Grouping(s) : [147] cfg_grammar= """ S -> Adposition1 Trigger_Rule Adposition1 -> "comme" Trigger_Rule -> "|both|pseudo|uncertain|30|Group[147]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 85 # French Term : en ce moment # English Term(s) : ['at this time'] # Index(es) : [182] # Grouping(s) : [148] cfg_grammar= """ S -> Adposition1 Determiner2 Noun3 Trigger_Rule Adposition1 -> "en" Determiner2 -> "ce" Noun3 -> "moment" | "temps" | "instant" | "durรฉe" | "รฉpoque" | "point" | "saison" | "minute" | "seconde" | "stage" Trigger_Rule -> "|both|pseudo|uncertain|30|Group[148]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 86 # French Term : ร  ce moment-lร  # English Term(s) : ['at that time'] # Index(es) : [181] # Grouping(s) : [148] cfg_grammar= """ S -> Adposition1 Determiner2 Noun3 Punctuation4 Adverb5 Trigger_Rule Adposition1 -> "ร " Determiner2 -> "ce" Noun3 -> "moment" | "temps" | "instant" | "durรฉe" | "รฉpoque" | "point" | "saison" | "minute" | "seconde" | "stage" Punctuation4 -> "-" Adverb5 -> "lร " Trigger_Rule -> "|both|pseudo|uncertain|30|Group[148]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # 
Item Number : 87 # French Term : tentรฉ # English Term(s) : ['attempted'] # Index(es) : [183, 184] # Grouping(s) : [150, 151] cfg_grammar= """ S -> Noun1 Trigger_Rule Noun1 -> "tentรฉ" | "hasardรฉ" | "chapiteau" | "affriolรฉ" | "essayรฉ" | "entraรฎnรฉ" | "wigwam" | "velum" | "allรฉchรฉ" | "guitoune" Trigger_Rule -> "|both|termination|historical|30|Group[150, 151]" |"|both|termination|nonpatient|30|Group[150, 151]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 88 # French Term : tentative # English Term(s) : ['attempt'] # Index(es) : [185, 186] # Grouping(s) : [152, 153] cfg_grammar= """ S -> Noun1 Trigger_Rule Noun1 -> "tentative" | "expรฉrience" | "effort" | "dรฉmarche" | "essai" | "expรฉrimentation" | "recherche" | "avance" | "vellรฉitรฉ" | "candidature" Trigger_Rule -> "|both|termination|historical|30|Group[152, 153]" |"|both|termination|nonpatient|30|Group[152, 153]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 89 # French Term : la tante # English Term(s) : ["aunt's"] # Index(es) : [187] # Grouping(s) : [154] cfg_grammar= """ S -> Determiner1 Noun2 Trigger_Rule Determiner1 -> "la" Noun2 -> "tante" | "tantine" | "parent" | "tapette" | "pรฉdรฉ" | "amitat" | "homosexuel" | "pรฉdale" | "tantouse" | "pรฉdรฉraste" Trigger_Rule -> "|forward|trigger|nonpatient|30|Group[154]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 90 # French Term : tante # English Term(s) : ['aunt'] # Index(es) : [188] # Grouping(s) : [154] cfg_grammar= """ S -> Adjective1 Trigger_Rule Adjective1 -> "tante" Trigger_Rule -> "|forward|trigger|nonpatient|30|Group[154]" """ 
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 91 # French Term : รชtre exclu pour # English Term(s) : ['be ruled out for'] # Index(es) : [190] # Grouping(s) : [156] cfg_grammar= """ S -> Auxiliary1 Verb2 Adposition3 Trigger_Rule Auxiliary1 -> "รชtre" Verb2 -> "exclu" | "exclusivement" | "expulsรฉ" | "paria" | "renvoyรฉ" | "prohibรฉ" | "refusรฉ" | "chassรฉ" | "รฉconduit" | "inenvisageable" Adposition3 -> "pour" Trigger_Rule -> "|forward|trigger|uncertain|30|Group[156]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 92 # French Term : รชtre exclu # English Term(s) : ['be ruled out', 'being ruled out'] # Index(es) : [189, 191] # Grouping(s) : [157] cfg_grammar= """ S -> Auxiliary1 Verb2 Trigger_Rule Auxiliary1 -> "รชtre" Verb2 -> "exclu" | "exclusivement" | "expulsรฉ" | "paria" | "renvoyรฉ" | "prohibรฉ" | "refusรฉ" | "chassรฉ" | "รฉconduit" | "inenvisageable" Trigger_Rule -> "|backward|trigger|uncertain|30|Group[157]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 93 # French Term : รชtre arrรชtรฉ # English Term(s) : ['be stopped'] # Index(es) : [192, 193] # Grouping(s) : [158, 159] cfg_grammar= """ S -> Auxiliary1 Verb2 Trigger_Rule Auxiliary1 -> "รชtre" Verb2 -> "arrรชtรฉ" | "bloquer" | "suspendre" | "paralyser" | "endiguer" | "terminer" | "enrayer" | "saisir" | "รฉtouffer" | "prendre" Trigger_Rule -> "|backward|trigger|negated|10|Group[158, 159]" |"|forward|termination|negated|10|Group[158, 159]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from 
nltk import CFG # Item Number : 94 # French Term : parce que # English Term(s) : ['because'] # Index(es) : [194] # Grouping(s) : [160] cfg_grammar= """ S -> Subordinating_conjunction1 Subordinating_conjunction2 Trigger_Rule Subordinating_conjunction1 -> "parce" Subordinating_conjunction2 -> "que" Trigger_Rule -> "|both|termination|conditional|30|Group[160]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 95 # French Term : au-delร  # English Term(s) : ['beyond'] # Index(es) : [195] # Grouping(s) : [162] cfg_grammar= """ S -> Adverb1 Trigger_Rule Adverb1 -> "au-delร " Trigger_Rule -> "|forward|termination|negated|10|Group[162]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 96 # French Term : biopsie de # English Term(s) : ['biopsy of'] # Index(es) : [196] # Grouping(s) : [163] cfg_grammar= """ S -> Noun1 Adposition2 Trigger_Rule Noun1 -> "biopsie" | "biopsie" Adposition2 -> "de" Trigger_Rule -> "|forward|termination|negated|10|Group[163]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 97 # French Term : limite # English Term(s) : ['borderline'] # Index(es) : [197] # Grouping(s) : [164] cfg_grammar= """ S -> Noun1 Trigger_Rule Noun1 -> "limite" | "borne" | "frontiรจre" | "lisiรจre" | "terme" | "contour" | "frein" | "extrรฉmitรฉ" | "barriรจre" | "dรฉlimitation" Trigger_Rule -> "|forward|trigger|uncertain|30|Group[164]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 98 # French Term : frรจres # English Term(s) : 
["brother's", 'brothers'] # Index(es) : [198, 199] # Grouping(s) : [165] cfg_grammar= """ S -> Noun1 Trigger_Rule Noun1 -> "frรจres" | "frangin" | "semblable" | "frรฉrot" | "germain" | "compagnon" | "confrรจre" | "religieux" | "cousin" | "frater" Trigger_Rule -> "|forward|trigger|nonpatient|30|Group[165]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 99 # French Term : frรจre # English Term(s) : ['brother'] # Index(es) : [200] # Grouping(s) : [167] cfg_grammar= """ S -> Noun1 Trigger_Rule Noun1 -> "frรจre" | "frangin" | "semblable" | "frรฉrot" | "germain" | "compagnon" | "confrรจre" | "religieux" | "cousin" | "frater" Trigger_Rule -> "|forward|trigger|nonpatient|30|Group[167]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence)) # + from nltk.parse.generate import generate, demo_grammar from nltk import CFG # Item Number : 100 # French Term : mais # English Term(s) : ['but'] # Index(es) : [201, 202] # Grouping(s) : [168, 169] cfg_grammar= """ S -> Coordinating_conjunction1 Trigger_Rule Coordinating_conjunction1 -> "mais" Trigger_Rule -> "|forward|termination|negated|10|Group[168, 169]" |"|backward|termination|negated|10|Group[168, 169]" """ for sentence in generate(CFG.fromstring(cfg_grammar), n=1000): print(' '.join(sentence))
notebooks/Validation_Notebook_0_100-Working.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Cross-Validation and scoring methods

# In the previous sections and notebooks, we split our dataset into two parts, a
# training set and a test set. We used the training set to fit our model, and we
# used the test set to evaluate its generalization performance -- how well it
# performs on new, unseen data.
#
# <img src="figures/train_test_split.svg" width="100%">
#
# However, often (labeled) data is precious, and this approach lets us only use
# ~ 3/4 of our data for training. On the other hand, we will only ever try to
# apply our model to 1/4 of our data for testing.
# A common way to use more of the data to build a model, but also get a more
# robust estimate of the generalization performance, is cross-validation.
# In cross-validation, the data is split repeatedly into a training and
# non-overlapping test-sets, with a separate model built for every pair. The
# test-set scores are then aggregated for a more robust estimate.
#
# The most common way to do cross-validation is k-fold cross-validation, in which
# the data is first split into k (often 5 or 10) equal-sized folds, and then for
# each iteration, one of the k folds is used as test data, and the rest as
# training data:

# <img src="figures/cross_validation.svg" width="100%">
#
# This way, each data point will be in the test-set exactly once, and we can use
# all but a k'th of the data for training.

# Let us apply this technique to evaluate the KNeighborsClassifier algorithm on
# the Iris dataset:

# +
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()
X, y = iris.data, iris.target

classifier = KNeighborsClassifier()
# -

# The labels in iris are sorted, which means that if we split the data as
# illustrated above, the first fold will only have the label 0 in it, while the
# last one will only have the label 2:

y

# To avoid this problem in evaluation, we first shuffle our data:

# +
import numpy as np

rng = np.random.RandomState(0)

permutation = rng.permutation(len(X))
X, y = X[permutation], y[permutation]
print(y)
# -

# Now implementing cross-validation is easy:

k = 5
n_samples = len(X)
fold_size = n_samples // k

scores = []
masks = []
for fold in range(k):
    # generate a boolean mask for the test set in this fold
    test_mask = np.zeros(n_samples, dtype=bool)
    test_mask[fold * fold_size : (fold + 1) * fold_size] = True
    # store the mask for visualization
    masks.append(test_mask)
    # create training and test sets using this mask
    X_test, y_test = X[test_mask], y[test_mask]
    X_train, y_train = X[~test_mask], y[~test_mask]
    # fit the classifier
    classifier.fit(X_train, y_train)
    # compute the score and record it
    scores.append(classifier.score(X_test, y_test))

# Let's check that our test mask does the right thing:

# +
import matplotlib.pyplot as plt
# %matplotlib inline

_, ax = plt.subplots()
ax.matshow(masks, cmap='gray_r');
# -

# And now let's look at the scores we computed:

print(scores)
print(np.mean(scores))

# As you can see, there is a rather wide spectrum of scores from 90% correct to
# 100% correct. If we only did a single split, we might have gotten either answer.

# As cross-validation is such a common pattern in machine learning, there are
# functions to do the above for you with much more flexibility and less code.
# The ``sklearn.model_selection`` module has all functions related to cross
# validation. The easiest function is ``cross_val_score``, which takes an
# estimator and a dataset, and will do all of the splitting for you:

from sklearn.model_selection import cross_val_score

scores = cross_val_score(classifier, X, y, cv=3)
print(f'Scores on each CV fold: {scores}')
print(f'Mean score: {np.mean(scores):0.3f}')

# Here we asked for three folds via the ``cv`` argument; you can change the
# number of folds the same way:

cross_val_score(classifier, X, y, cv=5)

# There are also helper objects in the cross-validation module that will generate
# indices for you for all kinds of different cross-validation methods, including
# k-fold:

from sklearn.model_selection import KFold, StratifiedKFold, ShuffleSplit

# By default, cross_val_score will use ``StratifiedKFold`` for classification,
# which ensures that the class proportions in the dataset are reflected in each
# fold. If you have a binary classification dataset with 90% of data points
# belonging to class 0, that would mean that in each fold, 90% of datapoints
# would belong to class 0.
# If you would just use KFold cross-validation, it is likely that you would
# generate a split that only contains class 0.
# It is generally a good idea to use ``StratifiedKFold`` whenever you do
# classification.
#
# ``StratifiedKFold`` would also remove our need to shuffle ``iris``.
# Let's see what kinds of folds it generates on the unshuffled iris dataset.
# Each cross-validation class is a generator of sets of training and test
# indices:

cv = StratifiedKFold(n_splits=5)
for train, test in cv.split(iris.data, iris.target):
    print(test)

# As you can see, there are a couple of samples from the beginning, then from the
# middle, and then from the end, in each of the folds.
# This way, the class ratios are preserved. Let's visualize the split:

def plot_cv(cv, features, labels):
    """Show each fold's test-set membership as one row of a binary image."""
    masks = []
    for train, test in cv.split(features, labels):
        mask = np.zeros(len(labels), dtype=bool)
        mask[test] = 1
        masks.append(mask)
    _, ax = plt.subplots()
    ax.matshow(masks, cmap='gray_r')

plot_cv(StratifiedKFold(n_splits=5), iris.data, iris.target)

# For comparison, again the standard KFold, that ignores the labels:

plot_cv(KFold(n_splits=5), iris.data, iris.target)

# Keep in mind that increasing the number of folds will give you a larger
# training dataset, but will lead to more repetitions, and therefore a slower
# evaluation:

plot_cv(KFold(n_splits=10), iris.data, iris.target)

# Another helpful cross-validation generator is ``ShuffleSplit``. This generator
# simply splits off a random portion of the data repeatedly. This allows the user
# to specify the number of repetitions and the training set size independently:

plot_cv(ShuffleSplit(n_splits=5, test_size=.2), iris.data, iris.target)

# If you want a more robust estimate, you can just increase the number of splits:

plot_cv(ShuffleSplit(n_splits=20, test_size=.2), iris.data, iris.target)

# You can use all of these cross-validation generators with the
# `cross_val_score` method:

cv = ShuffleSplit(n_splits=5, test_size=.2)
cross_val_score(classifier, X, y, cv=cv)

# <div class="alert alert-success">
# <b>EXERCISE</b>:
# <ul>
# <li>
# Perform three-fold cross-validation using the ``KFold`` class on the iris
# dataset without shuffling the data. Can you explain the result?
# </li>
# </ul>
# </div>

# +
# # %load solutions/13_cross_validation.py
notebooks/13.Cross_Validation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 35
#     language: python
#     name: python35
# ---

# ## Script order
#
# 1. **Y2019M06D17_RH_Ingest_MAPSPAM_EE_V01**
#    Ingest MAPSPAM 2010 data into Earth Engine.
# 1. **Y2019M06D24_RH_Aggregate_Crops_Mapspam_EE_V01**
#    Create all-crops aggregates for selected variables.
# 1. **Y2019M09D04_RH_Ingest_MAPSPAM_GBQ_V01**
#    Ingest MAPSPAM 2010 data into Google BigQuery.
# 1. **Y2019M10D29_RH_MAPSPAM_AQ30_V01**
#    Join Aqueduct 30 data to MAPSPAM and upload to Google BigQuery.
scripts/readme.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/cahyaekapermana/learn-machine-learning/blob/main/SKLearnLinearRegression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="OJNZqQS27vfH" import numpy as np #buat data jumlah kamar bedrooms = np.array([1,1,2,2,3,4,4,5,5,5]) #data harga rumah. asumsi dalam dollar house_price = np.array([15000, 18000, 27000, 34000, 50000, 68000, 65000, 81000,85000, 90000]) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="rjoCZVJo78Xb" outputId="f6207123-3616-4b51-8c8c-bc20b0da1d28" # menampilkan scatter plot dari dataset import matplotlib.pyplot as plt # %matplotlib inline plt.scatter(bedrooms, house_price) # + colab={"base_uri": "https://localhost:8080/"} id="7fbkVQPW8IsL" outputId="363dd546-f0a6-4888-9d5e-3f4e21b10660" from sklearn.linear_model import LinearRegression # latih model dengan Linear Regression.fit() bedrooms = bedrooms.reshape(-1, 1) linreg = LinearRegression() linreg.fit(bedrooms, house_price) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="91yZUIZQ8NFn" outputId="23027448-f32a-48c2-a6cb-48ea01e4f761" # menampilkan plot hubungan antara jumlah kamar dengan harga rumah plt.scatter(bedrooms, house_price) plt.plot(bedrooms, linreg.predict(bedrooms)) # + id="1RDTlN-l8Qum"
SKLearnLinearRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import os import pandas as pd # + import resnet_ssd_face import glob dataset = "cnn" names = glob.glob("cnn*/*/*.jpg") names.sort() resnet_ssd_face.processDatabase(dataset, names) # - # # cnn dataset ใฎๆคœๅ‡บๅ‡ฆ็†ๅพŒใฎใƒ‡ใƒผใ‚ฟ่งฃๆž # + import pandas as pd df = pd.read_csv("log_cnn.csv") df["num"].hist(bins=11) print df.groupby("num").count() print df.groupby("num").count()/float(df.shape[0]) #print df.groupby("angles", "num").count() # - print df.groupby("angles").count()
resnetSSD/resnet_cnn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# +
# Computes the temperature delta between the 1970-2000 and 2020-2040
# periods using data from the ACCESS model.
import os

import xarray as xr

# +
modelo = "Access"
path_d = os.getcwd() + "/datos/" + modelo + "/"
path_r = os.getcwd() + "/resultados/" + modelo + "/"

# [historical file, projection file, hist tag, projection tag, delta tag]
name = [
    "tas_Amon_ACCESS1-0_historical_r1i1p1_185001-200512.nc",
    "tas_Amon_ACCESS1-0_rcp85_r1i1p1_200601-210012.nc",
    "hist",
    "proy",
    "delta",
]

# Create the results folder if it does not exist yet.
if not os.path.exists(path_r):
    os.mkdir(path_r)

# Start and end years of each climatology (inclusive), read from the
# "periodos" file: first the historical pair, then the projection pair.
with open(os.getcwd() + "/resultados/periodos", "r") as f:
    yr_i = [f.readline()[:-1]]
    yr_f = [f.readline()[:-1]]
    yr_i.append(f.readline()[:-1])
    yr_f.append(f.readline()[:-1])

ds = []
# Auxiliary variables that are not needed for the climatology
# ("vars" renamed so the builtin is not shadowed).
drop_vars = ["height", "time_bnds", "lat_bnds", "lon_bnds"]
# -

# Open the historical file first, then the projection.
for i in range(0, 2):
    ds.append(xr.load_dataset(path_d + name[i]).drop(drop_vars))
    # Select the desired period.
    ds[i] = ds[i].sel(time=slice(yr_i[i], yr_f[i]))
    # Monthly-mean climatology.
    ds[i] = ds[i].groupby("time.month").mean()
    # Shift longitude values into the (-180, 180) range.
    ds[i]["lon_ajus"] = xr.where(
        ds[i]["lon"] > 180, ds[i]["lon"] - 360, ds[i]["lon"])
    # Make the adjusted longitude the main longitude dimension and
    # reorder the data along it.
    ds[i] = (ds[i]
             .swap_dims(lon="lon_ajus")
             .sel(lon_ajus=sorted(ds[i].lon_ajus))
             .drop("lon"))
    ds[i] = ds[i].rename(lon_ajus="lon")
    # Save the climatology as netCDF.
    ds[i].to_netcdf(
        path_r + "Access_clim_" + name[i + 2] + "_"
        + str(yr_i[i]) + "_" + str(yr_f[i]) + "_monthly.nc"
    )

# +
i = 2

# The delta is the projected climatology minus the historical one.
ds.append(ds[1] - ds[0])

# Shift longitude values into the (-180, 180) range.
ds[i]["lon_ajus"] = xr.where(
    ds[i]["lon"] > 180, ds[i]["lon"] - 360, ds[i]["lon"])

# Make the adjusted longitude the main longitude dimension and reorder
# the data along it.
ds[i] = (ds[i]
         .swap_dims(lon="lon_ajus")
         .sel(lon_ajus=sorted(ds[i].lon_ajus))
         .drop("lon"))
ds[i] = ds[i].rename(lon_ajus="lon")

# Save the delta as netCDF.
ds[i].to_netcdf(
    path_r + modelo + "_" + name[i + 2] + "_"
    + str(yr_i[0]) + "_" + str(yr_f[0]) + "_"
    + str(yr_i[1]) + "_" + str(yr_f[1]) + "_monthly.nc"
)
code/Jupyter/Access_delta.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# In this notebook, we implement the Deutsch-Jozsa algorithm using Qiskit.
# Recall that the algorithm is figuring out whether a given binary function
# $$
# f \colon \{0,1\}^n \rightarrow \{0,1\}
# $$
# is balanced or constant. So let us first choose a reasonably simple function f
# and implement the corresponding oracle. Basically the easiest choice is
# probably $n = 2$ and
# $$
# f(x_0, x_1) = x_0 \oplus x_1
# $$
# What is the oracle $U_f$? The oracle is specified by
# $$
# U_f |x \rangle |y \rangle = |x \rangle |y \oplus f(x) \rangle
# $$
# So the third qubit is inverted if f is equal to one and left unchanged
# otherwise. Using the definition of f, we see that the third qubit is inverted
# if the first two qubits are different and left unchanged if they are equal.

# This is easily implemented using two CNOT gates

from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.tools.visualization import matplotlib_circuit_drawer as drawer
from qiskit.tools.visualization import plot_histogram
from qiskit import Aer
from qiskit import compile, execute
from qiskit import IBMQ
import numpy as np
# %matplotlib inline

my_style = {'cregbundle': True}

# +
#####################################
# Create the oracle. We assume that #
# our quantum register has three    #
# qubits.                           #
#####################################
def createOracleBalanced(q, c):
    """Oracle for the balanced f(x0, x1) = x0 XOR x1: flips q[2] iff q[0] != q[1]."""
    circuit = QuantumCircuit(q, c)
    circuit.cx(q[0], q[2])
    circuit.cx(q[1], q[2])
    circuit.barrier(q)
    return circuit


#####################################
# Create a constant oracle          #
#####################################
def createOracleConstant(q, c):
    """Oracle for a constant f: always flips q[2], regardless of q[0], q[1]."""
    circuit = QuantumCircuit(q, c)
    circuit.x(q[2])
    circuit.barrier(q)
    return circuit
# -

# # Try it out
#
q = QuantumRegister(3, "q")
c = ClassicalRegister(3, "c")
circuit = createOracleBalanced(q, c)
drawer(circuit, style=my_style)

backend = Aer.get_backend('unitary_simulator')
job = execute(circuit, backend)

# # Print this as truth table
#
# (the unitary is loop-invariant, so fetch it once)
out = job.result().get_unitary()
for i in range(2**2):
    print("|", format(i, '03b'), "> --->> |",
          format(np.argmax(out.T[i]), '03b'), ">")

# Now we get to the actual Deutsch-Jozsa algorithm. We use the version of the
# algorithm described in https://arxiv.org/abs/quant-ph/9708016. Here, the first
# step is to create the usual superposition state
# $$
# |\psi \rangle = \frac{1}{2} \sum_x |x \rangle
# $$
# We know how to do this - we start with the fiducial state and apply a Hadamard
# gate to every qubit.

# +
def createInitialState(circuit):
    """Put the two data qubits into an equal superposition.

    NOTE(review): reads the module-level register ``q`` defined below before
    the first call -- confirm before reusing these helpers elsewhere.
    """
    circuit.h(q[0])
    circuit.h(q[1])
    circuit.barrier(q)


q = QuantumRegister(3, "q")
c = ClassicalRegister(3, "c")
circuit = QuantumCircuit(q, c)
createInitialState(circuit)
drawer(circuit, style=my_style)
# -

# Next, we add an ancilla qubit that we initialize to one and apply the Hadamard
# to this ancilla qubit. The result will be the state
# $$
# |\psi \rangle = \frac{1}{2\sqrt{2}} \sum_x |x , 0 \rangle - |x, 1 \rangle
# $$

# +
def addAncilla(circuit):
    """Prepare the ancilla q[2] in the state H|1> (uses module-level ``q``)."""
    circuit.x(q[2])
    circuit.h(q[2])
    circuit.barrier(q)


addAncilla(circuit)
drawer(circuit, style=my_style)
# -

# Next we add $U_f$ to the circuit. This will result in the state
# $$
# \frac{1}{\sqrt{2^n}} (\sum_x (-1)^{f(x)} |x\rangle) \otimes H|1\rangle
# $$

circuit = circuit + (createOracleBalanced(q, c))
circuit.barrier(q)
drawer(circuit, style=my_style)

# Next, we uncompute the ancillary qubit - we apply another Hadamard followed by
# a Pauli X gate. This will give us the state
# $$
# \frac{1}{\sqrt{2^n}} (\sum_x (-1)^{f(x)} |x\rangle) \otimes |0\rangle
# $$

# +
def uncomputeAncilla(circuit):
    """Undo the ancilla preparation, returning q[2] to |0> (uses module-level ``q``)."""
    circuit.h(q[2])
    circuit.x(q[2])
    circuit.barrier(q)


uncomputeAncilla(circuit)
drawer(circuit, style=my_style)
# -

# Finally, to return to the standard basis, we apply Hadamard gates on the first
# two qubits. We then measure the result.

# +
def addMeasurement(circuit):
    """Rotate the data qubits back to the computational basis and measure them."""
    circuit.h(q[0])
    circuit.h(q[1])
    circuit.barrier(q)
    circuit.measure(q[0], c[0])
    circuit.measure(q[1], c[1])
    circuit.barrier(q)


addMeasurement(circuit)
drawer(circuit, style=my_style)
# -

# # Now we are ready to execute the full circuit
#
IBMQ.load_accounts()
backend = IBMQ.get_backend('ibmqx4')
# backend = IBMQ.get_backend('ibmq_16_melbourne')
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
# backend = Aer.get_backend("qasm_simulator")
print("Status of backend: ", backend.status())

# # Now we compile
#
from qiskit import compile

qobj = compile(circuit, backend=backend, shots=1024)

# # Display compiled QASM. Note that this actually depends on the backend!
#
compiled_qasm = qobj.as_dict()['experiments'][0]['header']['compiled_circuit_qasm']
print("Compiled qasm:\n", compiled_qasm)

# # and submit as a job
#
job = backend.run(qobj)

# +
import time


def waitForJob(job, interval=60):
    """Poll *job* until it reaches a terminal state, printing progress.

    Replaces the polling loop that was duplicated for both runs below.
    """
    lapse = 0
    # Wait three seconds once; this should be good enough for the simulator
    # in most cases.
    time.sleep(3)
    while job.status().name not in ('DONE', 'CANCELLED', 'ERROR'):
        print('Status @ {} seconds'.format(interval * lapse))
        print(job.status())
        print("Position in queue: ", job.queue_position())
        time.sleep(interval)
        lapse += 1
    print(job.status())


waitForJob(job)
# -

result = job.result()
counts = result.get_counts()
counts

# # Plot this
#
plot_histogram(counts)

# Finally let us interpret the result. We know that there are two cases. If the
# function is balanced, the probability to measure 00 is zero. If the function
# is not balanced, it will be one.

# +
def interpretCounts(counts):
    """Deutsch-Jozsa readout: '000' (almost) never measured => f is balanced."""
    zeroMeasurements = counts.get('000', 0)
    # threshold of 100 out of 1024 shots tolerates hardware noise
    if zeroMeasurements < 100:
        print("Function is balanced")
    else:
        print("Function is constant")


interpretCounts(counts)
# -

# Now we repeat the entire exercise with a constant function.

q = QuantumRegister(3, "q")
c = ClassicalRegister(3, "c")
circuit = QuantumCircuit(q, c)
createInitialState(circuit)
addAncilla(circuit)
circuit = circuit + (createOracleConstant(q, c))
uncomputeAncilla(circuit)
addMeasurement(circuit)
drawer(circuit, style=my_style)

qobj = compile(circuit, backend=backend, shots=1024)
job = backend.run(qobj)

waitForJob(job)

result = job.result()
counts = result.get_counts()
counts

interpretCounts(counts)

plot_histogram(counts)
Qiskit/QiskitDeutschJozsa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- Magic for inline plots # %matplotlib inline # I like to use pandas module for exploring and cleaning data. # I will also use pyplot for genearting multiple plots, and seaborn for correlation heatmap. Since I will also run PCA, I will import it form sklearn. Numpy will be used in PCA part. import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np from sklearn.decomposition import PCA # Lets read data files provided with this problem. train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv") sampleSubmission = pd.read_csv("../input/sampleSubmission.csv") # Usually, the train dataset have extra collumns which are not present in test dataset. Lets identify them. set(train.columns) - set(test.columns) # Hmmm. What is interesting, it seems that train dataset has three extra collumns. Usually we are expecting one extra collumn. Lets try to guess which collum will be most interesting for us. **Fun fact**: since this is Kaggle competition, there should be clear description of data we should predict and precise description of datasets for train and test. I'm just pretending that my Internet connection is broken and I have only those three files ;) set(sampleSubmission.columns) - set(test.columns) # Ok, so it seems that there are no **casual, count, registered** columns in train dataset, but we are practically interested only in **count** column. pd.tools.plotting.scatter_matrix(train[["casual", "count", "registered"]], figsize=(11, 8)) # Based on above plots, it seems that **registered** column is related to **count** column. And what's about **casual** column? It also seems to be related. Maybe we should chcekc if **casual** and **registered** sums to **count**? 
(train["count"] == train["registered"] + train["casual"]).value_counts() # We have total 10886 equal sums, which should correspond to number of rows in our train dataset. numericalColumns = train.describe() numericalColumns # As we can see above, columns **registered** and **casual** sum into value located in **count** column. print(set(train.columns) - set(numericalColumns)) train["datetime"].head() # Apart of typical numerical columns, there is one column interpreted as "object". I can assume that this should be datetime column. Lets convert it and see basic description. train["datetime"] = pd.to_datetime(train["datetime"]) train["datetime"].describe() # Since we have datetime column now, maybe we should break it apart on different parts and train["dayofweek"] = train["datetime"].dt.dayofweek #The day of the week with Monday=0, Sunday=6 train["year"] = train["datetime"].dt.year train["month"] = train["datetime"].dt.month train["day"] = train["datetime"].dt.day train["hour"] = train["datetime"].dt.hour train["minute"] = train["datetime"].dt.minute train["second"] = train["datetime"].dt.second # Lets describe newly created columns. train[["dayofweek", "year", "month", "day", "hour", "minute", "second"]].describe() # Ok, so as we expected, **dayoftheweek** contains vales from 0 to 6. **year** contains to years, **month** has 12 values. **hour** has values from 0 to 23. Interesting is that **day** column has values from 1 to 19, which is suspicious. **minute** and **second** columns have only one value. That hint us, that resolution of data poins is limited to hour. We should prepare the same analysis for test datafile. 
test["datetime"] = pd.to_datetime(test["datetime"]) test["dayofweek"] = test["datetime"].dt.dayofweek #The day of the week with Monday=0, Sunday=6 test["year"] = test["datetime"].dt.year test["month"] = test["datetime"].dt.month test["day"] = test["datetime"].dt.day test["hour"] = test["datetime"].dt.hour test["minute"] = test["datetime"].dt.minute test["second"] = test["datetime"].dt.second test.describe() # After quick eye examination we can catch, that column **day** in test dataset starts from 20 and ends to 31. This fact gives us information that we probably want to predict **count** value in last 11 or 12 days in month. And this conclusion is correct accoriding to competition description. We can also see, that **minute** and **second** column don't have other values than *0* so we can discard them from further analysis. train = train.drop(["minute", "second"], axis = 1) test = test.drop(["minute", "second"], axis = 1) # Ok, it may be good time for some histograms. train.hist(figsize=(11, 8)) # Lets write function which will help us with plotting values form features grouped by hours and run it over all features which have no more than 10 unique values. def plotByFeature (df, feature): values_number = df[feature].unique().size if values_number <= 10: print("Plot for feature {0}:".format(feature)) for value in df[feature].unique(): df[df[feature] == value].groupby("hour")["count"].sum().plot(label = value, legend = True, grid = True, title = feature) plt.show() else: print("Feature {0} has too many unique values, skipping!".format(feature)) for feature in train.columns: plotByFeature(train, feature) # Based on above plots, I can make two assumptions: **workingday** plots totally different plots, so bike usage might be completly different on weekends than on workdays. It also seems that **wather** may affect bike borrowing. # Lest also plot correlations. 
train.corr().sort_values(by="count")["count"].plot(kind = 'bar', title = "Correlation against count") sns.heatmap(train.corr()) # We can also run principal component analysis on numerical features to see how much of them gives us information about variation of our data set. train_pca = train.drop(['datetime', 'registered', 'casual', 'count'], 1) # I will run PCA for various number of target components and see which number will give us reasonable information. pca = PCA().fit(train_pca) explained = np.cumsum(pca.explained_variance_ratio_) explained # It looks like first 6 components after PCA explains over 99% of data variance. We can also plot it. plt.plot(explained) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); # List of modules and Python versions used to build this notebook. # Print python packages... from pip.operations import freeze x = freeze.freeze() for p in x: print(p) # ... and Python version import sys print(sys.version)
workspace/Exploratory Data Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import katdal import time # # KAT Archive # MeerKAT observation files are available through the MeerKAT archive at # https://archive.sarao.ac.za/ # The MeerKAT archive is access restricted, requiring registration and login. # # Please refer to the [Archive Interface User Guide](https://archive.sarao.ac.za/statics/Archive_Interface_User_Guide.pdf) for detail on SARAO archive interaction. # ## Reading a MeerKAT observation file # The enormous sizes of MeerKAT's observation data makes it difficult to distribute data, as well as process large chunks of data using old scripts. # MeerKAT archive provides multiple data access/download options, as well as easy access fully compatible with the MeerKAT software package `katdal`. # **Important note to the reader** # *To ensure secure interaction with your data using the `katdal` package* # # * To open an observation file in `katdal` a token is required. # The archive interface makes obtaining the required `katdal` tokens easy. # # * When hovering the mouse pointer of the KATDAL button associated with any observation of interest, the following hint message is displayed: # `Copy katdal open command to clipboard` # Indicating that clicking the KATDAL button with automatically copy the required token to your clipboard for direct use. # # * It is very important to note that the filename (`.rdb` file URL) **must** always be accompanied by a token to access the observational data in the archive. # If a token is expired or not provided, `katdal.open()` command will experience *authentication* or *timed out* errors. # # ### Using tokens # # To obtain a `katdal` token for remote access when processing data: # * Simply click on the KATDAL icon associated with the observation of interest. 
# * The entire `katdal.open()` command that is needed will be copied onto the clipboard of you local system. # * **Immediately** after requesting the token by clicking on the icon, paste the command in a python script or notebook cell to start working. # # ``` # https://archive-gw-1.kat.ac.za/1557528200/1557528200_sdp_l0.full.rdb?token=<KEY> # ``` # # **Note: all tokens have expiry dates.** # Once the token has expired the user will get an archive access error when trying to open the file with the `katdal` command. # This can be remedied by simply updating the token, following the same steps above, and replacing the `katdal.open()` command with the new instruction set containing a valid token. # This katdal object will access the data directly from the archive at the CHPC over the internet. stime = time.time() data=katdal.open('https://archive-gw-1.kat.ac.za/1557528200/1557528200_sdp_l0.full.rdb', s3_endpoint_url='https://archive-gw-1.kat.ac.za', token='<KEY>') print('time to read file = {} s'.format(time.time() - stime)) print('(dumps x channels x baselines) = {}'.format(data.shape)) print(data.vis.dataset) # **Alternatively**: # The user can constructed a url-token string using the `.rdb` file URL and token # Since the filename/URL location will never change, only the updated token needs to be inserted stime = time.time() data=katdal.open('https://archive-gw-1.kat.ac.za/1557528200/1557528200_sdp_l0.full.rdb?token=<KEY>') print('time to read file = {} s'.format(time.time() - stime)) print('(dumps x channels x baselines) = {}'.format(data.shape)) print(data.vis.dataset)
archive/Accessing MeerKAT observation data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:opencv-env] *
#     language: python
#     name: conda-env-opencv-env-py
# ---

# Brain-tumor MRI classification: a small CNN trained on four tumor classes
# with Keras, then evaluated with a confusion matrix and classification report.

# +
import tensorflow as tf
import tensorflow.keras
from keras.models import load_model,Sequential,Input
from gensim.models import Word2Vec
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D,Dense,Flatten,MaxPooling2D,BatchNormalization,Dropout
from keras.models import Sequential
import os
import numpy as np
from sklearn.metrics import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import classification_report
# -

os.chdir(r"C:\Users\HP\data\Brain_tumor_classification")

# +
# BUG FIX: the model object was never created before the .add() calls below,
# which raised a NameError on the very first layer.
model = Sequential()
model.add(Conv2D(filters=32, activation='relu', padding='same', use_bias=True, kernel_size=(5, 5), input_shape=(200, 200, 3)))
model.add(MaxPooling2D(padding='valid', pool_size=(2, 2)))
model.add(Conv2D(filters=64, activation='relu', padding='valid', use_bias=True, kernel_size=(5, 5)))
model.add(MaxPooling2D(padding='valid', pool_size=(2, 2)))
model.add(Conv2D(filters=64, activation='relu', padding='valid', use_bias=True, kernel_size=(5, 5)))
model.add(MaxPooling2D(padding='valid', pool_size=(2, 2)))
model.add(Conv2D(filters=128, activation='relu', padding='same', use_bias=True, kernel_size=(5, 5)))
model.add(MaxPooling2D(padding='valid', pool_size=(2, 2)))
model.add(Conv2D(filters=256, activation='relu', padding='same', use_bias=True, kernel_size=(5, 5)))
model.add(MaxPooling2D(padding='valid', pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(4, activation='softmax'))  # four tumor classes
# -

model.summary()

# Shared augmentation pipeline; 20% of 'Training' is held out for validation.
generator = ImageDataGenerator(rescale=1/255, horizontal_flip=True, vertical_flip=True, rotation_range=45, zoom_range=0.25, validation_split=0.2)


def load_training_data():
    """Return (training, validation) directory iterators over 'Training'."""
    training_data = generator.flow_from_directory('Training', color_mode='rgb', class_mode='categorical', target_size=(200, 200), subset='training')
    validation_data = generator.flow_from_directory('Training', color_mode='rgb', class_mode='categorical', target_size=(200, 200), subset='validation')
    return (training_data, validation_data)


def load_testing_data():
    """Return a directory iterator over 'Testing'.

    BUG FIX: shuffle=False so that prediction order matches testing.labels
    below; with the default shuffling the confusion matrix compared
    predictions against the wrong labels.
    """
    testing_data = generator.flow_from_directory('Testing', color_mode='rgb', class_mode='categorical', target_size=(200, 200), shuffle=False)
    return (testing_data)


train_data, val_data = load_training_data()

# +
testing = load_testing_data()
# -

# BUG FIX: the bare name `keras` was never bound (only `tensorflow.keras`
# was imported), so `keras.losses...` raised a NameError; use tf.keras.
loss = tf.keras.losses.CategoricalCrossentropy()
model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])

history = model.fit(train_data, epochs=10, validation_data=val_data, verbose=1)

# BUG FIX: predict AFTER training (the original called predict_classes on the
# untrained, uncompiled model).  Model.predict_classes() was removed from
# modern Keras, so take the argmax over the softmax output instead.
pred_data = np.argmax(model.predict(testing, verbose=1), axis=1)

# +
# Collect the per-epoch loss and accuracy curves from the training history.
model_loss = history.history['loss']
model_accuracy = history.history['accuracy']
val_loss = history.history['val_loss']
val_accuracy = history.history['val_accuracy']

# +
import matplotlib.pyplot as plt

# Derive the x-axis from the history length instead of hard-coding 10 epochs.
epochs = range(1, len(model_loss) + 1)

plt.plot(epochs, model_loss, c='black')
plt.plot(epochs, val_loss, c='red')
plt.xlabel('Number of epochs')
plt.ylabel('loss')
plt.title('Loss')
plt.show()

# BUG FIX: the second curve originally re-plotted the training accuracy
# instead of the validation accuracy, and the y-axis was labelled 'loss'.
plt.plot(epochs, model_accuracy, c='black')
plt.plot(epochs, val_accuracy, c='red')
plt.xlabel('Number of epochs')
plt.ylabel('accuracy')
plt.title('Accuracy')
plt.show()
# -

class_names = ["glioma_tumor", "meningioma_tumor", "no_tumor", "pituitary_tumour"]

model.save('MRIac29.h5')

labels = testing.labels
pred = np.argmax(model.predict(testing), axis=1)

# BUG FIX: sklearn's confusion_matrix expects (y_true, y_pred) in that order;
# the original passed (pred, labels), transposing the matrix.
matrix = confusion_matrix(labels, pred)
plot_confusion_matrix(matrix, class_names=class_names)

# Mapping from class index to label name (kept for reference).
lables = {0: "glioma_tumor", 1: "meningioma_tumor", 2: "no_tumor", 3: "pituitary_tumour"}

print(classification_report(labels, pred))
BRAIN CANCER MRI CLASSIFICATION.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import datetime battles = pd.read_csv( 'revolutionary_war.csv', parse_dates=['Start Date'], usecols=['Start Date']).squeeze('columns') # breakdown of battles by weekday battles.dropna().apply(lambda dt: dt.strftime('%A')).value_counts() nfl = pd.read_csv( 'nfl.csv', index_col=['Name'], parse_dates=['Birthday'], ) nfl # number of players on each team nfl['Team'].value_counts() # 5 highest-paid players nfl.sort_values('Salary', ascending=False).head(5) # sort by team name and salary (descending) nfl.sort_values(['Team', 'Salary'], ascending=[True, False]) # oldest player on the New York Jets nfl[nfl['Team'] == 'New York Jets'].sort_values('Birthday').head(1) # oldest player on the New York Yets, by changing index from Name to Team nfl.reset_index().set_index('Team').loc['New York Jets'].sort_values('Birthday').head(1) netflix = pd.read_csv( 'netflix.csv', parse_dates=['date_added'], dtype={'type': 'category'}, ) netflix # all item with title Limitless netflix[netflix['title'] == 'Limitless'] # all movies directed by <NAME> netflix[(netflix['director'] == '<NAME>') & (netflix['type'] == 'Movie')] # all items added on July 31 2019 or directed by <NAME> netflix[(netflix['date_added'] == '2019-07-31') | (netflix['director'] == '<NAME>')] # all items directed by <NAME>, <NAME>, or <NAME> netflix[netflix['director'].isin(['<NAME>', '<NAME>', '<NAME>'])] # May 2019 item sorted by date may_items = netflix['date_added'].between('2019-05-1', '2019-06-01', inclusive='left') netflix[may_items].sort_values('date_added') # only items where directory has a value netflix.dropna(subset=['director']) # the number of dates where only one item was added netflix.drop_duplicates(subset=['date_added'], keep=False) # the number of dates where 
only one item was added, using value_counts() vc = netflix['date_added'].value_counts() vc[vc == 1] cars = pd.read_csv( 'used_cars.csv', dtype={'Fuel': 'category', 'Transmission': 'category'}, ) cars.info() # sum of all prices by fuel type cars.pivot_table(index='Fuel', values=['Price'], aggfunc='sum') # number of cars made by each manufacturer broken down by transmission cars.pivot_table( index='Manufacturer', columns='Transmission', values='Price', aggfunc='count' ) # average prices for each year and fuel type, broken down by transmission avg_prices = cars.pivot_table( index=['Year', 'Fuel'], columns=['Transmission'], values='Price', aggfunc='mean', fill_value=0, ) avg_prices # move column index into row index avg_prices.stack() min_wages = pd.read_csv('minimum_wage.csv') # min_wages.info() min_wages # convert from wide data to narrow data min_wages.melt( id_vars='State', var_name='Year', value_vars=[str(n) for n in range(2010, 2018)], value_name='Min wage', ) cereals = pd.read_csv( 'cereals.csv', index_col='Name', dtype={'Type': 'category'}, ) cereals.info() # cereals grouped by manufacturer manufacturers = cereals.groupby('Manufacturer') # number of manufacturers len(manufacturers) # number of cereals for each manufacturer manufacturers.size() # rows for Nabisco manufacturers.get_group('Nabisco') # average calories, fiber, and sugars for each manufacturer manufacturers.mean() # maximum sugars value for each manufacturer manufacturers['Sugars'].max() # minimum fiber value for each manufacturer manufacturers['Fiber'].min() # cereal with the lowest sugars value for each manufacturer manufacturers.apply(lambda df: df.nsmallest(1, 'Sugars'))
exercises.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] id="d3d944c4-e18b-473f-abbd-0f344ab1e64e"
# ## Scan Function
#
# -

# ## Install Package

# !pip install --upgrade jax==0.2.17 jaxlib==0.1.71+cuda111 -f https://storage.googleapis.com/jax-releases/jax_releases.html

# [IMPORTANT] After the package installation completes, restart the runtime!

# ## Import Packages

# + id="4a43c112-74d3-476e-a944-4acc423a0c2d"
import jax
import jax.numpy as jnp
from jax.lax import scan

# + [markdown] id="30248dd3-4fae-49e0-a99c-577f5e3ec07d"
# ## Define Data

# + colab={"base_uri": "https://localhost:8080/"} id="6d1fafcc-527f-42e4-a75d-0d9f9f4630ca" outputId="a94dfe86-0f49-49b4-e5e9-76a2266279da"
vec_in = jnp.arange(10)
vec_in

# + [markdown] id="45f0ce91-7797-4be2-a3d5-c67837d5a3ba"
# ## Example-1
# Emit each input element doubled, as-is, without using the carry.

# + id="0abfd639-a5e8-477c-90ee-fb9c246ced3c"
def transition_fn(carry, x_in):
    # One scan step: ignore the carry and emit twice the input element.
    x_out = 2 * x_in
    return carry, x_out

# + id="003c7f59-c208-4d04-9b69-df4fddec86a2"
carry_init = None  # no state is threaded through this scan
carry_ends, vec_out = scan(transition_fn, carry_init, vec_in)

# + colab={"base_uri": "https://localhost:8080/"} id="e6daaa11-2bb4-4759-9773-9402fa98c068" outputId="a6fcff29-0c91-4397-c020-306cd5cde76b"
vec_out

# + id="ac21ab6f-1b2c-47ce-bc22-87cbfac901d4"
carry_ends

# + [markdown] id="84ce223a-84ad-4c56-a7b0-c12b745cd8b1"
# ## Example-2
# Accumulate the input values into the carry (the per-step output is the
# carry value), i.e. a cumulative sum.

# + id="b62350bf-97ff-4895-9f46-a86e259ea4a8"
def transition_fn(carry, x_in):
    # One scan step: add the input to the running total and emit that total.
    carry = carry + x_in
    x_out = carry
    return carry, x_out

# + id="6a0dcf3c-7d59-4376-b7b4-37f9c2af51b0"
carry_init = 0  # running total starts at zero
carry_ends, vec_out = scan(transition_fn, carry_init, vec_in)

# + colab={"base_uri": "https://localhost:8080/"} id="dadf0833-47a4-48ea-b4a0-1237952caf7d" outputId="fc112361-4b01-4893-9529-07b6f1d4ca2a"
vec_out

# + colab={"base_uri": "https://localhost:8080/"} id="977f8b83-37cd-49e1-aad5-025d15c1dd1c" outputId="d6799053-8eed-494a-bea0-c27cf10e7275"
carry_ends

# + [markdown] id="84978846-3a3b-4cf5-bd8c-e81fcc0aba86"
# ## Example-3
# Thread two pieces of state through the carry (the per-step output is the
# carry value).

# + id="2b48ebe7-b574-4fea-93b3-44e52e8c0111"
def transition_fn(carry, x_in):
    # One scan step over a 2-tuple carry: state0 accumulates x, state1 accumulates 2x.
    state0, state1 = carry
    state0 = state0 + x_in
    state1 = state1 + 2 * x_in
    carry = (state0, state1)
    x_out = carry
    return carry, x_out

# + id="9c7cb52a-5f1d-4a9c-b88f-a23407cf5350"
carry_init = (0, 0)  # both accumulators start at zero
carry_ends, vec_out = scan(transition_fn, carry_init, vec_in)

# + colab={"base_uri": "https://localhost:8080/"} id="7992c30b-52a1-4816-baa0-c701e4a68359" outputId="a5d2df11-11e8-4cb0-969f-0b1cffcaadf1"
vec_out

# + colab={"base_uri": "https://localhost:8080/"} id="b4c2ce6d-2925-470f-9666-c55ce7058cbb" outputId="c4d8eca3-b926-4ef1-a084-dbb3496dd62d"
carry_ends
# -
notebooks/01_laxscan_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def valid_path(maze, i, j, m, n):
    """Return True when (i, j) is still on the m x n board and the cell is open."""
    return i != m and j != n and maze[i][j] != 1


def rat_maze(maze, i, j, m, n, arr):
    """Depth-first search from (i, j) toward (m-1, n-1), moving only down or right.

    Cells on the current path are marked with 1 in `arr`; dead ends are
    unmarked again on backtrack.  Returns True once the bottom-right cell
    has been marked.
    """
    if arr[-1][-1] == 1:  # destination already reached on this path
        return True
    if not valid_path(maze, i, j, m, n):
        return False
    arr[i][j] = 1  # tentatively claim this cell
    for next_i, next_j in ((i + 1, j), (i, j + 1)):  # try down first, then right
        if rat_maze(maze, next_i, next_j, m, n, arr):
            return True
    arr[i][j] = 0  # dead end: release the cell
    return False


maze = [[0, 1, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 0, 1, 0, 0],
        [1, 0, 0, 1, 0]]

rows, cols = len(maze), len(maze[0])
arr = [[0] * cols for _ in range(rows)]
rat_maze(maze, 0, 0, rows, cols, arr)
for row in arr:
    print(row)
# -
flarow/Backtracking/Rat Maze Problem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Load the iris sample dataset into a pandas DataFrame and demonstrate
# boolean-mask filtering on the target column.

import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from sklearn.datasets import load_iris

iris_data = load_iris()

# Quick look at the raw Bunch object.
iris_data.feature_names
iris_data.data
iris_data.target

# Build a DataFrame from the feature matrix and attach the target labels.
iris_dataframe = DataFrame(iris_data.data, columns=iris_data.feature_names)
iris_dataframe.head()

iris_dataframe['Target'] = iris_data.target
iris_dataframe.head()

# Rows belonging to class 0 or class 1 only.
iris_dataframe[(iris_dataframe['Target']==0)|(iris_dataframe['Target']==1)]


# +
def custom(df, l1):
    """Return the rows of `df` whose 'Target' value is NOT in the list `l1`."""
    return df[~df['Target'].isin(l1)]

custom(iris_dataframe, [0, 1])
# -

# BUG FIX: the notebook originally ended with a stray `all(l1)` expression,
# which raised a NameError -- `l1` only exists as a parameter inside custom().
My_Panda_Notebooks/Iris_Data_to_Dataframe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Use scikit-learn to recognize hand-written digits with `ibm-watson-machine-learning` # # This notebook contains steps and code to demonstrate how to persist and deploy locally trained scikit-learn model in Watson Machine Learning Service. This notebook contains steps and code to work with [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository. This notebook introduces commands for getting model and training data, persisting model, deploying model, scoring it, updating the model and redeploying it. # # Some familiarity with Python is helpful. This notebook uses Python 3.7 with the ibm-watson-machine-learning package. # ## Learning goals # # The learning goals of this notebook are: # # - Train sklearn model. # - Persist trained model in Watson Machine Learning repository. # - Deploy model for online scoring using client library. # - Score sample records using client library. # # # ## Contents # # This notebook contains the following parts: # # 1. [Setup](#setup) # 2. [Explore data and create scikit-learn model](#train) # 3. [Persist externally created scikit model](#upload) # 4. [Deploy and score](#deploy) # 5. [Clean up](#cleanup) # 6. [Summary and next steps](#summary) # <a id="setup"></a> # ## 1. Set up the environment # # Before you use the sample code in this notebook, you must perform the following setup tasks: # # - Contact with your Cloud Pack for Data administrator and ask him for your account credentials # ### Connection to WML # # Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `password`. 
username = 'PASTE YOUR USERNAME HERE' password = '<PASSWORD>' url = 'PASTE THE PLATFORM URL HERE' wml_credentials = { "username": username, "password": password, "url": url, "instance_id": 'openshift', "version": '3.5' } # ### Install and import the `ibm-watson-machine-learning` package # **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>. # !pip install -U ibm-watson-machine-learning # + from ibm_watson_machine_learning import APIClient client = APIClient(wml_credentials) # - # ### Working with spaces # # First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one. # # - Click New Deployment Space # - Create an empty space # - Go to space `Settings` tab # - Copy `space_id` and paste it below # # **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb). # # **Action**: Assign space ID below space_id = 'PASTE YOUR SPACE ID HERE' # You can use `list` method to print all existing spaces. client.spaces.list(limit=10) # To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using. client.set.default_space(space_id) # + [markdown] pycharm={"name": "#%% md\n"} # <a id="train"></a> # ## 2. Explore data and create scikit-learn model # In this section, you will prepare and train handwritten digits model using scikit-learn library. # - # ### 2.1 Explore data # As a first step, you will load the data from scikit-learn sample datasets and perform a basic exploration. 
# + pycharm={"is_executing": false, "name": "#%%\n"} import sklearn from sklearn import datasets digits = datasets.load_digits() # - # Loaded toy dataset consists of 8x8 pixels images of hand-written digits. # # Let's display first digit data and label using **data** and **target**. # + pycharm={"is_executing": false, "name": "#%%\n"} print(digits.data[0].reshape((8, 8))) # - digits.target[0] # In next step, you will count data examples. samples_count = len(digits.images) print("Number of samples: " + str(samples_count)) # ### 2.2. Create a scikit-learn model # **Prepare data** # In this step, you'll split your data into three datasets: # - train # - test # - score # + train_data = digits.data[: int(0.7*samples_count)] train_labels = digits.target[: int(0.7*samples_count)] test_data = digits.data[int(0.7*samples_count): int(0.9*samples_count)] test_labels = digits.target[int(0.7*samples_count): int(0.9*samples_count)] score_data = digits.data[int(0.9*samples_count): ] print("Number of training records: " + str(len(train_data))) print("Number of testing records : " + str(len(test_data))) print("Number of scoring records : " + str(len(score_data))) # - # **Create pipeline** # Next, you'll create scikit-learn pipeline. # In ths step, you will import scikit-learn machine learning packages that will be needed in next cells. from sklearn.pipeline import Pipeline from sklearn import preprocessing from sklearn import svm, metrics # Standardize features by removing the mean and scaling to unit variance. scaler = preprocessing.StandardScaler() # Next, define estimators you want to use for classification. Support Vector Machines (SVM) with radial basis function as kernel is used in the following example. clf = svm.SVC(kernel='rbf') # Let's build the pipeline now. This pipeline consists of transformer and an estimator. 
pipeline = Pipeline([('scaler', scaler), ('svc', clf)])

# **Train model**

# Now, you can train your SVM model by using the previously defined **pipeline** and **train data**.

model = pipeline.fit(train_data, train_labels)

# **Evaluate model**

# You can check your **model quality** now. To evaluate the model, use **test data**.

# +
predicted = model.predict(test_data)

print("Evaluation report: \n\n%s" % metrics.classification_report(test_labels, predicted))
# -

# You can tune your model now to achieve better accuracy. For simplicity of this example tuning section is omitted.

# <a id="upload"></a>
# ## 3. Persist locally created scikit-learn model

# In this section, you will learn how to store your model in Watson Machine Learning repository by using the IBM Watson Machine Learning SDK.

# ### 3.1: Publish model

# #### Publish model in Watson Machine Learning repository on Cloud.

# Define model name, author name and email.

# NOTE(review): `client` (the Watson Machine Learning APIClient) is created in an
# earlier cell that is not part of this excerpt — confirm it is authenticated
# before running the cells below.

# + pycharm={"is_executing": false, "name": "#%%\n"}
# Look up the software specification the model will run under when deployed.
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.7")

# + pycharm={"is_executing": false, "name": "#%%\n"}
metadata = {
    client.repository.ModelMetaNames.NAME: 'Scikit model',
    client.repository.ModelMetaNames.TYPE: 'scikit-learn_0.23',
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}

# Upload the trained pipeline (plus training data, so WML can derive the schema).
published_model = client.repository.store_model(
    model=model,
    meta_props=metadata,
    training_data=train_data,
    training_target=train_labels)
# -

# ### 3.2: Get model details

# + pycharm={"is_executing": false, "name": "#%%\n"}
import json

published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
# -

# ### 3.3 Get all models

# + pycharm={"is_executing": false, "name": "#%%\n"}
models_details = client.repository.list_models()
# -

# <a id="deploy"></a>
# ## 4. Deploy and score

# In this section you will learn how to create online scoring and to score a new data record by using the IBM Watson Machine Learning SDK.

# ### 4.1: Create model deployment

# #### Create online deployment for published model

# + pycharm={"is_executing": false, "name": "#%%\n"}
metadata = {
    client.deployments.ConfigurationMetaNames.NAME: "Deployment of scikit model",
    client.deployments.ConfigurationMetaNames.ONLINE: {}
}

created_deployment = client.deployments.create(published_model_uid, meta_props=metadata)
# -

# **Note**: Here we use deployment url saved in published_model object. In next section, we show how to retrieve deployment url from Watson Machine Learning instance.

deployment_uid = client.deployments.get_uid(created_deployment)

# Now you can print an online scoring endpoint.

# + pycharm={"is_executing": false, "name": "#%%\n"}
scoring_endpoint = client.deployments.get_scoring_href(created_deployment)
print(scoring_endpoint)
# -

# You can also list existing deployments.

client.deployments.list()

# ### 4.2: Get deployment details

# + pycharm={"is_executing": false, "name": "#%%\n"}
client.deployments.get_details(deployment_uid)
# -

# <a id="score"></a>
# ### 4.3: Score

# You can use the following method to do test scoring request against deployed model.

# **Action**: Prepare scoring payload with records to score.

# Two held-out records from the "score" split prepared earlier.
score_0 = list(score_data[0])
score_1 = list(score_data[1])

# + pycharm={"is_executing": false, "name": "#%%\n"}
scoring_payload = {"input_data": [{"values": [score_0, score_1]}]}
# -

# Use ``client.deployments.score()`` method to run scoring.

# + pycharm={"is_executing": false, "name": "#%%\n"}
predictions = client.deployments.score(deployment_uid, scoring_payload)

# + pycharm={"is_executing": false, "name": "#%%\n"}
print(json.dumps(predictions, indent=2))
# -

# <a id="cleanup"></a>
# ## 5. Clean up

# If you want to clean up all created assets:
# - experiments
# - trainings
# - pipelines
# - model definitions
# - models
# - functions
# - deployments
#
# please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).

# <a id="summary"></a>
# ## 6. Summary and next steps

# You successfully completed this notebook! You learned how to use scikit-learn machine learning as well as Watson Machine Learning for model creation and deployment.
#
# Check out our [Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html) for more samples, tutorials, documentation, how-tos, and blog posts.

# ### Authors
#
# **<NAME>**, Software Engineer

# Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
cpd3.5/notebooks/python_sdk/deployments/scikit-learn/Use scikit-learn to recognize hand-written digits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/bxck75/piss-ant-pix2pix/blob/master/modeltransferv2_apes_imshow.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="mcGsrSyVEVLc" colab_type="code" colab={}
# Environment bootstrap: wipe the Colab sample data, clone and install the
# helper package, then instantiate its Core object (hlp is the helper facade
# used by all later cells).
# remove defaults
# !rm -r sample_data
# Clone the repo
# !git clone https://github.com/bxck75/Python_Helpers.git
# Change dir
# %cd /content/Python_Helpers
# install
# !python setup.py install
from IPython.display import clear_output
import main
import os
import sys
import IPython
import Helpers
P=Helpers.core.Core()
hlp=P.H
clear_output()

# + id="ZwAVOi1WEPPY" colab_type="code" colab={}
# Install the pix2pix repository into /content/installed_repos via the helper.
# get repo
# %cd /content/
hlp.repolist= hlp.repo_collection
repos_sorted = hlp.repo_collection.repos_sorted
A1=['bxck75/piss-ant-pix2pix']
hlp.Me(['inst_reps',A1,'/content/installed_repos',False,True])
clear_output()

# + id="CS20whuQLlWe" colab_type="code" colab={}
# Dataset selection: exactly one of the three blocks below should be active.
# Each pair of IDs is a Google Drive file id for the image zip and for the
# matching pretrained-model zip. NOTE(review): hard-coded Drive ids will break
# silently if the files are moved or permissions change.
# get images
# %cd /content/
'''insects'''
# images_set_name='insect'
# images_set_code='1fbEUcBIvLwjlVFrUiW2g8KtRTvFSsW9D'
# images_model_code='1eFR5ZHfp5PzPEnFv_v3xxE9T0tKPJTS4'
'''garbagepailkids'''
# images_set_name='garbagepailkids'
# images_set_code='1nb8yr9TIJaiB1GFN9S4mD5GfTNvEQpW-'
# images_model_code='1tp8Gm1fMuSQoLEEXQ_C1Dlw1N4wwFxKb'
'''apes'''
images_set_name='apes'
images_set_code='1PUEpLgRfKKDvaMAVtKBVr-Z0ThQvOGWv'
images_model_code='1vLmCOaActAAZ0bHdrv0YgP-rCnYHX0O9'
'''fetch and unzip'''
Helpers.GdriveD.GdriveD(images_set_code, '/content/'+images_set_name+'.zip')
# !unzip /content/{images_set_name}.zip
clear_output()

# + id="eW2829u8K3oC" colab_type="code" colab={}
# Build the paired A/B training set expected by pix2pix from the raw images.
# make dataset
# %cd /content/installed_repos/piss-ant-pix2pix
# !bash image_folder_make_set_train_colab.sh /content/{images_set_name} 1 prep
clear_output()

# + id="Vky0_HEvLT3p" colab_type="code" colab={}
# (Disabled) single-iteration bootstrap of the fetch/train/upload cycle — the
# active 10x loop below supersedes it.
# '''
# startup loop 1x
#
# 1 get metric from gdrive
# 2 train 5 epochs
# 3 dump metrics to gdrive with the same file id
# '''
# for i in range(1):
#     # fetch metrics
#     %cd /content/
#     # Helpers.GdriveD.GdriveD(images_model_code, '/content/'+images_set_name+'_model_in.zip')
#     # !unzip /content/{images_set_name}_model_in.zip -d metrics/
#     # !rm -r /content/metrics/options.json
#     # train epochs
#     %cd /content/installed_repos/piss-ant-pix2pix
#     # !python pix2pix.py --output_dir /content/metrics --progress_freq 10 --save_freq 50 --summary_freq 50 --display_freq 200 --max_epochs 5 --mode train --input_dir /content/{images_set_name}images/_combined/train --which_direction 'BtoA'
#     # dump metrics
#     %cd /content/
#     Helpers.Core()
#     obj=Helpers.core.Core()
#     folder_of_model='metrics'
#     result=obj.H.zip(images_set_name+'model',obj.Gdrive_root+'/models',folder_of_model).ZipUp
#     zip_hash=result.split('(id) ')[1]
#     print(zip_hash)
#     # delete old crap
#     # !rm -r /content/metrics
#     # !rm -r /content/{images_set_name}model.zip

# + [markdown] id="w9whUNUbJxLw" colab_type="text"
# start values
# progress  epoch 1  step 10  image/sec 0.7  remaining 122m
# dloss_GAN 0.11989411
# gloss_GAN 0.060013566
# gloss_L1 0.043864187

# + id="D_YjfSe8JOfy" colab_type="code" colab={}
# Main training loop: 10 rounds of (download checkpoint from Drive -> train 5
# epochs -> prune old images/models/event files -> zip checkpoint back to Drive).
import sys
import os
import dlib
import glob
# %matplotlib inline
from matplotlib import pyplot as plt
import cv2
'''
loop 10x
1 get metric from gdrive
2 train 5 epochs
3 dump metrics to gdrive with the same file id
'''
for i in range(10):
    ''' fetch metrics'''
    # %cd /content/
    Helpers.GdriveD.GdriveD(images_model_code, '/content/'+images_set_name+'_model_in.zip')
    # !unzip /content/{images_set_name}_model_in.zip -d metrics/
    ''' train epochs '''
    # %cd /content/installed_repos/piss-ant-pix2pix
    # !python pix2pix.py --checkpoint /content/metrics --output_dir /content/metrics --progress_freq 110 --save_freq 150 --summary_freq 0 --display_freq 100 --max_epochs 5 --mode train --input_dir /content/{images_set_name}images/_combined/train --which_direction 'BtoA'
    # %cd /content/
    '''clean old models,test images,logs etc before zipping'''
    # delete unwanted
    # !rm -r /content/metrics/index.html
    # clean up images
    img_list=hlp.Me(['globx','/content/metrics/images','*.*g'])
    img_list = sorted(img_list)
    print(img_list)
    n=6 #pop the last 6 items off the list (latest images)
    latest = img_list[-n:]
    # NOTE(review): this inner loop variable `i` shadows the outer round
    # counter `i`; harmless here because the outer `i` is never read after
    # this point, but worth renaming.
    for i in range(n):
        img = dlib.load_rgb_image(latest[i])
        plt.imshow(img)
    del img_list[-n:]
    # delete the files left in the list
    for i_file in img_list:
        img = dlib.load_rgb_image(i_file)
        plt.imshow(img)
        print('deleting : ' + i_file)
        # !rm -r {i_file}
    plt.show()
    # clean up models
    models_list=hlp.Me(['globx','/content/metrics','model-*'])
    models_list = sorted(models_list)
    print(models_list)
    n=3 #pop the last 3 items off the list (latest model)
    del models_list[-n:]
    # delete the files left in the list
    for m_file in models_list:
        print('deleting : ' + m_file)
        # !rm -r {m_file}
    # clean up events
    events_list=hlp.Me(['globx','/content/metrics','events*'])
    events_list = sorted(events_list)
    print(events_list)
    n=1 #pop the last item off the list (latest event)
    del events_list[-n:]
    # delete the files left in the list
    for e_file in events_list:
        print('deleting : ' + e_file)
        # !rm -r {e_file}
    ''' zip metrics up to gdrive '''
    # %cd /content/
    Helpers.Core()
    obj=Helpers.core.Core()
    folder_of_model='metrics'
    result=obj.H.zip(images_set_name+'model',obj.Gdrive_root+'/models',folder_of_model).ZipUp
    zip_hash=result.split('(id) ')[1]
    print(zip_hash)
    # delete old metrics before unpacking new in the beginning of the loop
    # !rm -r /content/metrics
    # !rm -r /content/{images_set_name}model.zip

# + id="ypa7AzDYG-Ca" colab_type="code" colab={}
# (Disabled) manual repair of the TensorFlow checkpoint index file.
# # rewrite checkpointfile
# # !echo 'model_checkpoint_path: "/content/metrics/model-21880"' > /content/metrics/checkpoint
# # !echo 'all_model_checkpoint_paths: "/content/metrics/model-21880"' >> /content/metrics/checkpoint

# + id="xPvviEG3EcsD" colab_type="code" colab={}
# (Disabled) one-off fetch of the pretrained checkpoint from Drive.
# # download/unzip metrics
# Get pretrained metrics
# # %cd /content/
# Helpers.GdriveD.GdriveD(images_model_code, '/content/'+images_set_name+'_model_in.zip')
# # !unzip /content/{images_set_name}_model_in.zip -d metrics/

# + id="-SdaTrm4g_KU" colab_type="code" colab={}
# Standalone cleanup cell: same pruning logic as inside the training loop,
# for use before re-zipping a checkpoint manually.
# # remove old crap before rezipping
# # !rm -r /content/metrics/images
# # !rm -r /content/metrics/index.html
# clean up models
models_list=hlp.Me(['globx','/content/metrics','model-*'])
models_list = sorted(models_list)
print(models_list)
n=3 #pop the last 3 items off the list (latest model)
del models_list[-n:]
for m_file in models_list:
    print('deleting : ' + m_file)
    # !rm -r {m_file}
# clean up events
events_list=hlp.Me(['globx','/content/metrics','events*'])
events_list = sorted(events_list)
print(events_list)
n=1 #pop the last 1 items off the list (latest event)
del events_list[-n:]
for e_file in events_list:
    print('deleting : ' + e_file)
    # !rm -r {e_file}

# + colab_type="code" id="gQn7oz9xVXKW" colab={}
# (Disabled) manual re-zip of the metrics folder to Drive.
# # rezip
# # %cd /content/
# Helpers.Core()
# obj=Helpers.core.Core()
# folder_of_model='metrics'
# print(obj.Gdrive_root+'/models')
# result=obj.H.zip(images_set_name+'model',obj.Gdrive_root+'/models',folder_of_model).ZipUp
# # print(result)
# zip_hash=result.split('(id) ')[1]
# print(zip_hash)

# + id="uZJRLD5ESxD_" colab_type="code" colab={}
# Evaluate the current checkpoint on the validation split, then export it.
# test the current checkpoint
# get metrics
# %cd /content/
Helpers.GdriveD.GdriveD(images_model_code, '/content/'+images_set_name+'_model_in.zip')
# !unzip /content/{images_set_name}_model_in.zip -d metrics/
# test
# %cd /content/installed_repos/piss-ant-pix2pix
# !python pix2pix.py --mode test --output_dir /content/{images_set_name}_pretrained/test --input_dir /content/{images_set_name}images/_combined/val --max_steps 100 --which_direction "BtoA" --seed 0 --checkpoint /content/metrics
# export
# !python pix2pix.py --mode export --output_dir /content/{images_set_name}_pretrained/exp --checkpoint /content/metrics

# + id="4wanq7fpUvuV" colab_type="code" colab={}
# Zip the exported model directory up to Drive.
# /content/apes_pretrained/exp
# rezip
# %cd /content/
Helpers.Core()
obj=Helpers.core.Core()
folder_of_model=images_set_name+'_pretrained/exp'
print(obj.Gdrive_root+'/models')
result=obj.H.zip(images_set_name+'model_exp',obj.Gdrive_root+'/models',folder_of_model).ZipUp
# print(result)
zip_hash=result.split('(id) ')[1]
print(zip_hash)

# + colab_type="code" id="1B5pz7RvyOGy" colab={}
# Example python rename.py '/root/Bureaublad/data/boefjes/front'
def rename(directory_in,directory_out):
    """Convert every image matching directory_in/*.*g to RGB PNGs named
    img1.png, img2.png, ... under directory_out.

    NOTE(review): this function is defined but never called in this notebook.
    Because outfilename starts with "/", os.path.join discards directory_out,
    so `outfile` is just "/imgN.png" and the working path is rebuilt by the
    `directory_out+outfile` concatenation — correct result, misleading code.
    Opened images are also never closed.
    """
    from PIL import Image
    import os,sys,glob
    # directory=sys.argv[1]
    i=int(0)
    for infilename in glob.iglob(directory_in+'/*.*g'):
        im = Image.open(infilename)
        rgb_im = im.convert('RGB')
        outfilename = "/img%d.png" % int(i + 1)
        outfile=os.path.join(directory_out, outfilename)
        print(directory_out+outfile)
        rgb_im.save(directory_out+outfile)
        i += 1

# Lazy iterators over the three image roles produced by pix2pix test runs.
edged_images = glob.iglob('/content/metrics/images/*inputs.png')
predicted_images = glob.iglob('/content/metrics/images/*outputs.png')
original_images = glob.iglob('/content/metrics/images/*targets.png')

# zipping lists of lists
# using map() + __add__
import itertools

# initializing lists
test_list1 = [[1, 3], [4, 5], [5, 6]]
test_list2 = [[7, 9], [3, 2], [3, 10]]

# printing original lists
print ("The original list 1 is : " + str(test_list1))
print ("The original list 2 is : " + str(test_list2))

# using map() + __add__
# zipping lists of lists
res = [list(itertools.chain(*i)) for i in zip(test_list1, test_list2)]

# printing result
print ("The modified zipped list is : " + str(res))
modeltransferv2_apes_imshow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # RGI19 (Antarctic and Subantarctic)
#
# <NAME> & <NAME>
#
# Same as RGI6

import pandas as pd
import geopandas as gpd
import subprocess
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import numpy as np
from utils import mkdir, submission_summary, needs_size_filter, size_filter, plot_map, plot_date_hist
import os

# ## Files and storage paths

# +
# Region of interest
reg = 19

# go down from rgi7_scripts/workflow
data_dir = '../../rgi7_data/'

# Level 2 GLIMS files
l2_dir = os.path.join(data_dir, 'l2_sel_reg_tars')

# Output directories
output_dir = mkdir(os.path.join(data_dir, 'l3_rgi7a'))
output_dir_tar = mkdir(os.path.join(data_dir, 'l3_rgi7a_tar'))

# RGI v6 file for comparison later
rgi6_reg_file = os.path.join(data_dir, 'l0_RGIv6', '19_rgi60_AntarcticSubantarctic.zip')
# -

# ### Load the input data

# Read L2 files (geopandas can read a shapefile straight out of the tarball).
shp = gpd.read_file('tar://' + l2_dir + f'/RGI{reg:02d}.tar.gz/RGI{reg:02d}/RGI{reg:02d}.shp')

# ### List of submissions

sdf, _ = submission_summary(shp)
sdf

# +
# # Optional: write out selection in intermediate shape files for manual GIS review
# tmp_output_dir = mkdir(os.path.join(data_dir, 'l0_tmp_data', f'rgi{reg:02d}_inventories'))
# tmp_output_dir_tar = mkdir(os.path.join(data_dir, 'l0_tmp_data'))
# for subid in shp.subm_id.unique():
#     s_loc = shp.loc[shp.subm_id == subid]
#     s_loc.to_file(tmp_output_dir + f'/subm_{int(subid):03d}.shp')
# print('Taring...')
# print(subprocess.run(['tar', '-zcvf', f'{tmp_output_dir_tar}/rgi{reg:02d}_inventories.tar.gz', '-C',
#                       os.path.join(data_dir, 'l0_tmp_data'), f'rgi{reg:02d}_inventories']))
# -

# ## Outline selection

# Keep only submission 585 for this region (the RGI6 outlines, per the header note).
rgi7 = shp.loc[shp['subm_id']==585].copy()
rgi7['is_rgi6'] = True

# Size filter?
needs_size_filter(rgi7)

# ### Some sanity checks

sdf, df_class = submission_summary(rgi7)
df_class

# Check the orphaned rock outcrops
orphan_f = os.path.join(data_dir, 'l1_orphan_interiors', f'RGI{reg:02d}', f'RGI{reg:02d}.shp')
if os.path.exists(orphan_f):
    orphan_f = gpd.read_file(orphan_f)
    check = np.isin(rgi7.subm_id.unique(), orphan_f.subm_id.unique())
    if np.any(check):
        print(f'Orphan rock outcrops detected in subm_id {rgi7.subm_id.unique()[check]}')
        # equal-area (cea) projection so .area is in m²
        orphan_f['area'] = orphan_f.to_crs({'proj':'cea'}).area
        orphan_f = orphan_f.loc[orphan_f.subm_id.isin(rgi7.subm_id.unique()[check])]

# Total orphan area in km² (only meaningful if orphans were detected above).
orphan_f['area'].sum() * 1e-6

# ### Plots

plot_map(rgi7, reg, linewidth=3)

plot_map(rgi7, reg, linewidth=3, is_rgi6=True)

plot_date_hist(rgi7, reg, figsize=(16, 7))

# ### Text for github

fgh = sdf.T
fgh

print(fgh.to_markdown(headers=np.append(['subm_id'], fgh.columns)))

# ## Write out and tar

# +
dd = mkdir(f'{output_dir}/RGI{reg:02d}/', reset=True)

print('Writing...')
rgi7.to_file(dd + f'RGI{reg:02d}.shp')

print('Taring...')
print(subprocess.run(['tar', '-zcvf', f'{output_dir_tar}/RGI{reg:02d}.tar.gz', '-C', output_dir, f'RGI{reg:02d}']))
# -

# ## Consistency check with RGI6

# #### load reference data (here RGI6 original) to enable comparison

# load reference data
from utils import open_zip_shapefile
ref_odf = open_zip_shapefile(rgi6_reg_file)

# #### Number of elements (differences depict problems)

print('Number of glaciers in new RGI subset:', len(rgi7))
print('Number of glaciers in reference data:', len(ref_odf))
print('Difference:', len(rgi7)-len(ref_odf))

# #### Check for 'nominal glaciers' in the RGI6 original data and delete them from new RGI subset from GLIMS if they are in there

# how many nominals in RGI06 (identifiable via 'Status' attribute in RGI 06)
nom = ref_odf.loc[ref_odf.Status == 2]
len(nom)

# #### Total area

# add an area field to RGI_ss and reference data
ref_odf['area'] = ref_odf.to_crs({'proj':'cea'}).area

# print and compare area values
# NOTE(review): rgi7['area'] is assumed to come from the L2 input file — it is
# not computed in this notebook; verify upstream. The 'kmยฒ' in the strings
# below is mojibake for 'km²' — left byte-identical here, fix the encoding at
# the source.
Area_Rep = rgi7['area'].sum()/1000000
print('Area Rep [kmยฒ]:', Area_Rep)
Area_RGI6 = ref_odf['area'].sum()/1000000
print('Area RGI6 [kmยฒ]:', Area_RGI6)
d = (Area_Rep - Area_RGI6)
d_perc = (d/Area_Rep*100)
print('Area difference [km²]:'.replace('km²', 'kmยฒ') if False else 'Area difference [kmยฒ]:',d,'/','percentage:', d_perc)

# 3 km² area difference... which mainly originate from "small" outline differences between RGI06 original and the outlines of RGI06 extracted from the Glims data base. Main sources may be glaciers RGI60-19.00707 and RGI60-19.00168 (and possibly others). This is similar to what happened in Greenland (figure of 8 in the rock outcrops), but much less area is added so its ok.
#
# ![image.png](attachment:599c8e15-1bba-433a-8383-7456d8a0fba7.png)
workflow/RGI19.ipynb
# --- # title: "Series Object" # author: "TACT" # date: 2019-04-20 # description: "-" # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- The pandas series object the base data structure of pandas is the sereis object,which is designed to operate similar to a numpy array but also add index capablilities import pandas as pd from pandas import Series s = Series([1, 2, 3, 4]) #create a four item s s[[1, 3]] #return a series with the rows with labels 1 and 3 s = Series([1, 2, 3, 4], index = ['a', 'b', 'c', 'd']) #create a item with index s s[['a', 'd']] s.index #it will show index s.values #it will show values dates = pd.date_range('2019-05-18', '2019-05-25') dates temp_chennai = Series([36, 37, 36, 37, 37, 37, 37, 37], index = dates) temp_delhi = Series([34, 39, 41, 41, 41, 41, 41, 42], index = dates) temp_chennai.mean() temp_delhi.mean() temp_diffs_between_chennai_and_delhi = abs(temp_delhi - temp_chennai) temp_diffs_between_chennai_and_delhi temp_diffs_between_chennai_and_delhi['2019-05-20']
docs/python/pandas/Series_object.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Libraries # !pip install xgboost import pandas as pd import numpy as np import os, sys from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler from xgboost import XGBClassifier # Read The Data data = pd.read_csv("parkinsons.data") data.shape data.head() data.isnull().sum() data.nunique() data.dtypes data.describe() # --------------- # Here we can see that there is great difference between values of different columns... # So, we have to rescale the values X = data.drop(['name', 'status'], axis=1) y = data['status'] # Scaler init scaler = MinMaxScaler((-1, 1)) X = scaler.fit_transform(X) # split the data bw training and testing data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 22) # + # initialize classifier clf = XGBClassifier() # - # %time clf.fit(X_train, y_train) y_pred = clf.predict(X_test) metrics.accuracy_score(y_pred, y_test)*100
parkinsons disease.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # **AIM**
#
# ## 1. Plot Planck's law & Rayleigh-Jean's Law of Black body radiation w.r.t. wavelength at different temperatures.
# ## 2. Compare both at high & low temperatures.
# ## 3. Verify Wien's Displacement Law

# ### Brief about BlackBody Radiation
#
# * "Blackbody radiation" or "Cavity radiation" refers to an object or system which absorbs all radiation incident upon it and re-radiates energy which is characteristic of this radiating system only, not dependent upon the type of radiation which is incident upon it.
#
# * The radiated energy can be considered to be produced by standing wave or resonant modes of the cavity which is radiating.

# ## Step-1 : Importing necessary libraries

import numpy as np
from scipy.constants import h,c,k,pi
import matplotlib.pyplot as plt

# ## Step-2 : Define an array for wavelength in micrometers & then convert it in meters

L = (np.arange(0.1,30,0.005))*(1e-6) #0.1 um to 30 um with step size 0.005um

# ## Step-3 : Define function planck_lamda for Plancks Law of Black Body Radiation

# Plancks Radiation Formula in terms of Wavelength :
#
# ![Plancks Radiation Formula in terms of wavelength](https://www.linkpicture.com/q/planks-radiation-formula.png)
#
# Image Source : [BlackBody Radiation](http://hyperphysics.phy-astr.gsu.edu/hbase/mod6.html).

def planck_lamda(L,T):
    """Planck spectral energy density u(lambda, T) = (8*pi*h*c/L**5) / (exp(h*c/(L*k*T)) - 1).

    L : wavelength in metres (scalar or ndarray), T : temperature in kelvin.
    NOTE(review): for very small L*T the exponential overflows to inf and the
    result underflows to 0 with a NumPy RuntimeWarning — harmless for plotting.
    """
    a = (8*pi*h*c)/(L**5)   # prefactor 8*pi*h*c / lambda^5 (c, h, k, pi from scipy.constants)
    b = (h*c)/(L*k*T)       # dimensionless exponent h*c/(lambda*k*T)
    c1 = np.exp(b)-1        # denominator; named c1 so it doesn't clobber the speed of light c
    d = a/c1
    return d

# ## Step-4 : Find Intensity at 4 different temperatures (ex: 500K, 700K, 900K & 1100K)

T500 = planck_lamda(L , 500)
T700 = planck_lamda(L , 700)
T900 = planck_lamda(L , 900)
T1100 = planck_lamda(L , 1100)

# ## Step-5 : Plotting Planck's Law of Radiation at different temperatures

# +
plt.figure(figsize=(15, 8)) #Changing Figure Size

# Font dictionaries reused by the later plotting cells (Step-8 depends on these).
fontji = {'family':'serif','size':20}
fontji2 = {'family':'serif','size':30}

plt.plot(L, T500,label='T=500 K')
plt.plot(L, T700 ,label='T=700 K')
plt.plot(L, T900 ,label='T=900 K')
plt.plot(L, T1100 ,label='T=1100 K')
plt.legend()
plt.xlabel(r"$\lambda$ (in meters)",fontdict=fontji)
plt.ylabel(r"Intensity (in terms of $\lambda$)",fontdict=fontji)
plt.title("Planck's Law of Radiation",fontdict=fontji2)
plt.ylim(0,300)
plt.xlim(0,0.00002)
# -

# ## Step-6 : Define function rayleigh_lamda for Rayleigh Jeans Formula

def r_lamda(L,T):
    """Rayleigh-Jeans energy density u(lambda, T) = 8*pi*k*T / L**4 (classical limit;
    diverges as lambda -> 0, the 'ultraviolet catastrophe')."""
    i = 8*pi*k*T/(L**4)
    return i

# ## Step-7 : Finding Intensity at different temperatures using r_lamda

Tr500 = r_lamda(L , 500)
Tr700 = r_lamda(L , 700)
Tr900 = r_lamda(L , 900)
Tr1100 = r_lamda(L , 1100)

# ## Step-8 : Plotting Rayleigh Jeans formula for different temperatures

# +
plt.figure(figsize=(15, 8)) #Changing Figure Size

plt.plot(L, Tr500,label='T=500 K')
plt.plot(L, Tr700 ,label='T=700 K')
plt.plot(L, Tr900 ,label='T=900 K')
plt.plot(L, Tr1100 ,label='T=1100 K')
plt.legend()
# NOTE: fontji/fontji2 are defined in the Step-5 cell, so that cell must run first.
plt.xlabel(r"$\lambda$ (in meters)",fontdict=fontji)
plt.ylabel(r"Intensity (in terms of $\lambda$)",fontdict=fontji)
plt.title("Rayleigh-Jeans Law of Radiation",fontdict=fontji2)
#plt.ylim(0,1.2)
plt.xlim(0,0.000001)
# -

# ## Step-9 : Comparing Rayleigh Jeans & Plancks Formula at low & high temperatures

# +
#plt.suptitle("Comparing Rayleigh-Jeans & Plancks Law for BBR at low & high temperatures")
plt.figure(figsize=(15, 10)) #Changing Figure Size

plt.subplot(2,1,1)
plt.plot(L, (planck_lamda(L,200)),label='Planck Law')
plt.plot(L, (r_lamda(L,200)) , "--" , label="Rayleigh-Jeans Law")
plt.legend(loc="best")
plt.xlabel(r"$\lambda$ ")
plt.ylabel("Intensity")
plt.title("T=200 K (For Low Temperature)")
plt.ylim(0,0.4)
plt.xlim(0,0.00003)

plt.subplot(2,1,2)
plt.plot(L, T1100 ,label='Planck Law')
plt.plot(L, Tr1100 , "--" , label="Rayleigh-Jeans Law")
plt.legend(loc="best")
plt.xlabel(r"$\lambda$ ")
plt.ylabel("Intensity")
plt.title("T=1100 K (For High Temperature)")
plt.ylim(0,350)
# -

# >**Conclusion** : The Rayleigh-Jeans curve agrees with the Planck radiation formula for long wavelengths or low frequencies.

# ### Step-10 : Verifying Wien's Displacement Law

# * When the temperature of a blackbody radiator increases, the overall radiated energy increases and the peak of the radiation curve moves to shorter wavelengths.
#
# * When the maximum is evaluated from the Planck radiation formula, the product of the peak wavelength and the temperature is found to be a constant.
#
# * Formula : ![Wien's Displacement Law](https://www.linkpicture.com/q/temp_7.png)
#
# * This relationship is called **Wien's displacement law**.
#
# > **Note** : It should be noted that the peak of the radiation curve in the Wien relationship is the peak only because the intensity is plotted as a function of wavelength. If frequency or some other variable is used on the horizontal axis, the peak will be at a different wavelength.
#
# * Source : [Wien's Displacement Law](http://hyperphysics.phy-astr.gsu.edu/hbase/wien.html#c2)
8. Planck's Law vs Rayleigh Jeans Law for Black Body Radiation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # Metadata This notebook should be used for retrieving info from the database and for any kind of supporting data for agu. * Number of pits * Number of pits with integrating sphere * Number of SMP profiles # + from snowexsql.db import get_db from snowexsql.data import * db_name = 'snow:hackweek@db.snowexdata.org/snowex' engine, session = get_db(db_name) # Number of pits at gm result = session.query(LayerData.site_id, LayerData.date).filter(LayerData.type =='hand_hardness').distinct().all() print(f'Number of pits dug at grand mesa: {len(result)}') # Number of pits with integrating sphere result = session.query(LayerData.site_id, LayerData.date).filter(LayerData.type=='specific_surface_area').distinct().all() print(f'Number of pits at grand mesa with integrating sphere: {len(result)}') # Number of SMP profiles result = session.query(LayerData.site_id, LayerData.date, LayerData.time).filter(LayerData.instrument=='snowmicropen').distinct().all() print(f'Number of pits at grand mesa with snowmicropen: {len(result)}') # -
notebooks/metadata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Note, this *only* works for observing things in the future. Dealing with the pointing corrections in the analysis is not supported here. # # # # # # First thing to do, make a copy of this notebook under File->Make a Copy # # # --- # # Set your observing times here. # # ## NB: This will be a broader window that you'll actually use. The final output will give you orbit-by-orbit pointing information. So you can use a subset of the orbits that are chosen here. # + from datetime import datetime tstart = '2017-07-17T00:00:00' tend = '2017-07-19T00:00:00' # Turn these into datetime objects tstart = datetime.strptime(tstart, '%Y-%m-%dT%H:%M:%S') tend = datetime.strptime(tend, '%Y-%m-%dT%H:%M:%S') # - # --- # # Step 1, get the nominal RA/Dec Position for Jupiter at the start. # # ## We use this to determine what the occultation times are. 
# + from skyfield.api import Loader from astropy.time import Time import astropy.units as u load = Loader('../../data') ts = load.timescale() planets = load('jup310.bsp') astro_time = Time(tstart) t = ts.from_astropy(astro_time) jupiter, earth = planets['jupiter'], planets['earth'] astrometric = earth.at(t).observe(jupiter) ra, dec, distance = astrometric.radec() occstring = "./occ {0:0.4f} {1:0.4f} Latest_TLE.txt {2}:{3}:00:00:00 {4}:{5}:00:00:00 jupiter_{3}_{5}.occ".format( ra.to(u.deg).value, dec.to(u.deg).value, tstart.timetuple().tm_year, tstart.timetuple().tm_yday, tend.timetuple().tm_year, tend.timetuple().tm_yday) outfile = '../orbit_engine/jupiter_{0}to{1}.sh'.format(tstart.timetuple().tm_yday, tend.timetuple().tm_yday) print(outfile) f = open(outfile, 'w') f.write(occstring) f.close() import os import stat st = os.stat(outfile) os.chmod(outfile, st.st_mode | stat.S_IEXEC) # - # --- # # Step 2: Go run the code that figures out the unocculted periods for the RA/Dec and the date range reported above. # # This works on lif. There are example shell scripts in the ../orbit_engine directory that use the version that Karl already compiled for the nuops users. # # First, get the latest TLE archive: # # `./get_latest_TLE.sh` # # Run the script that was produced above. # # # Step 3: Initialize your libraries and parse the resulting occultation file: # # + from nustar_planning import io occfile= "../orbit_engine/jupiter_{0}_{1}.occ".format(tstart.timetuple().tm_yday,tend.timetuple().tm_yday ) orbits = io.parse_occ(occfile) # NB: The "head" command here only shows the first couple of rows. Do a "print(orbits)" to see them all. orbits.head() # - # # Use SkyField to get the location of Jupiter for each orbit: # # This puts the output into the provided text file. This shows the *aim* time that was used to determine the pointing. You should slew while the source is occulted. 
from nustar_planning import jupiter_planning
# Fix: `imp` has been deprecated since Python 3.4 and removed in 3.12;
# importlib.reload is the drop-in supported replacement.
from importlib import reload

# Per-orbit pointing output file, named by the day-of-year range.
outfile = 'jupiter_{0}_{1}_pointing.txt'.format(
    tstart.timetuple().tm_yday, tend.timetuple().tm_yday)

# Compute the orbit-by-orbit aim points, including the parallax correction
# for NuSTAR's orbit, and write them to `outfile`.
jupiter_planning.position(orbits, load_path='../../data',
                          parallax_correction=True,
                          outfile=outfile)

print("Output is stored in: {}".format(outfile))
jupiter_planning/Jupiter_Planning_20170712_standalone.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="Jxv6goXm7oGF" # ##### Copyright 2018 The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + colab={} colab_type="code" id="llMNufAK7nfK" #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="8Byow2J6LaPl" # # tf.function and AutoGraph in TensorFlow 2.0 # + [markdown] colab_type="text" id="kGXS3UWBBNoc" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/autograph"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download 
notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="CydFK2CL7ZHA" # TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs. # # A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`. # # This tutorial will walk you through the basic features of `tf.function` and AutoGraph. # + [markdown] colab_type="text" id="n4EKOpw9mObL" # ## Setup # # Import TensorFlow 2.0: # + colab={} colab_type="code" id="V9oECvVSI1Kj" from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np # + colab={} colab_type="code" id="mT7meGqrZTz9" try: # # %tensorflow_version only exists in Colab. # %tensorflow_version 2.x except Exception: pass import tensorflow as tf # + [markdown] colab_type="text" id="77AsVr1GGtBP" # ## The `tf.function` decorator # # When you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel. 
# + colab={} colab_type="code" id="FhIg7-z6HNWj"
@tf.function
def simple_nn_layer(x, y):
    """Single dense-style op, relu(x @ y), traced into a TensorFlow graph."""
    product = tf.matmul(x, y)
    return tf.nn.relu(product)


x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))

simple_nn_layer(x, y)

# + [markdown] colab_type="text" id="U-LAE4pMNR9g"
# If we examine the result of the annotation, we can see that it's a special
# callable that handles all interactions with the TensorFlow runtime.

# + colab={} colab_type="code" id="q4t2iuS7Nqc0"
simple_nn_layer

# + [markdown] colab_type="text" id="DqeefLGNXjZQ"
# If your code uses multiple functions, you don't need to annotate them all -
# any functions called from an annotated function will also run in graph mode.

# + colab={} colab_type="code" id="3VGF7tlVXiZY"
def linear_layer(x):
    """Plain Python function; runs in graph mode when called from deep_net."""
    return 2 * x + 1


@tf.function
def deep_net(x):
    """Annotated entry point; linear_layer below is traced along with it."""
    hidden = linear_layer(x)
    return tf.nn.relu(hidden)


deep_net(tf.constant((1, 2, 3)))

# + [markdown] colab_type="text" id="yQvg6ZSKWyqE"
# Functions can be faster than eager code, for graphs with many small ops. But
# for graphs with a few expensive ops (like convolutions), you may not see much
# speedup.
# # + colab={} colab_type="code" id="0EL6lVwEWuFo" import timeit conv_layer = tf.keras.layers.Conv2D(100, 3) @tf.function def conv_fn(image): return conv_layer(image) image = tf.zeros([1, 200, 200, 100]) # warm up conv_layer(image); conv_fn(image) print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10)) print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10)) print("Note how there's not much difference in performance for convolutions") # + colab={} colab_type="code" id="L4zj-jpH0jKH" lstm_cell = tf.keras.layers.LSTMCell(10) @tf.function def lstm_fn(input, state): return lstm_cell(input, state) input = tf.zeros([10, 10]) state = [tf.zeros([10, 10])] * 2 # warm up lstm_cell(input, state); lstm_fn(input, state) print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10)) print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10)) # + [markdown] colab_type="text" id="ohbSnA79mcJV" # ## Use Python control flow # # When using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`. # # In the example below, `x` is a `Tensor` but the `if` statement works as expected: # + colab={} colab_type="code" id="aA3gOodCBkOw" @tf.function def square_if_positive(x): if x > 0: x = x * x else: x = 0 return x print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2)))) print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2)))) # + [markdown] colab_type="text" id="GMiCUkdyoq98" # Note: The previous example uses simple conditionals with scalar values. <a href="#batching">Batching</a> is typically used in real-world code. 
# + [markdown] colab_type="text" id="m-jWmsCmByyw" # AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop. # + colab={} colab_type="code" id="toxKBOXbB1ro" @tf.function def sum_even(items): s = 0 for c in items: if c % 2 > 0: continue s += c return s sum_even(tf.constant([10, 12, 15, 20])) # + [markdown] colab_type="text" id="AtDaLrbySw4j" # AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code. # + colab={} colab_type="code" id="aRsde3x_SjTQ" print(tf.autograph.to_code(sum_even.python_function)) # + [markdown] colab_type="text" id="rvJXCfk8VkLf" # Here's an example of more complicated control flow: # + colab={} colab_type="code" id="h-Z87IJqVlKl" @tf.function def fizzbuzz(n): for i in tf.range(n): if (i % 3) == 0: tf.print('Fizz') elif (i % 5) == 0: tf.print('Buzz') else: tf.print(i) fizzbuzz(tf.constant(15)) # + [markdown] colab_type="text" id="h_Y4uC1R1B55" # ## Keras and AutoGraph # # AutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`. # + colab={} colab_type="code" id="cR6mpLKP1HLe" class CustomModel(tf.keras.models.Model): @tf.function def call(self, input_data): if tf.reduce_mean(input_data) > 0: return input_data else: return input_data // 2 model = CustomModel() model(tf.constant([-2, -4])) # + [markdown] colab_type="text" id="NTEvpBK9f8kj" # ## Side effects # # Just like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order. 
# + colab={} colab_type="code" id="-Wd6i8S9gcuC"
v = tf.Variable(5)


@tf.function
def find_next_odd():
    # The assigns are side effects; tf.function inserts control dependencies
    # so they run in order.
    v.assign(v + 1)
    if v % 2 == 0:
        v.assign(v + 1)


find_next_odd()
v

# + [markdown] colab_type="text" id="kdMrkVjelpQy"
#

# + [markdown] colab_type="text" id="4LfnJjm0Bm0B"
# ## Debugging
#
# `tf.function` and AutoGraph work by generating code and tracing it into
# TensorFlow graphs. This mechanism does not yet support step-by-step debuggers
# like `pdb`. However, you can call `tf.config.run_functions_eagerly(True)` to
# temporarily enable eager execution inside the `tf.function` and use your
# favorite debugger:

# + colab={} colab_type="code" id="Yci8ve6hmgpF"
@tf.function
def f(x):
    if x > 0:
        # Try setting a breakpoint here!
        # Example:
        #   import pdb
        #   pdb.set_trace()
        x = x + 1
    return x


# Consistency fix: the prose above documents tf.config.run_functions_eagerly;
# the experimental_ alias used previously is deprecated in favor of this one.
tf.config.run_functions_eagerly(True)

# You can now set breakpoints and run the code in a debugger.
f(tf.constant(1))

tf.config.run_functions_eagerly(False)

# + [markdown] colab_type="text" id="Em5dzSUOtLRP"
# ### Download data

# + colab={} colab_type="code" id="xqoxumv0ssQW"
def prepare_mnist_features_and_labels(x, y):
    """Scale images to [0, 1] float32 and cast labels to int64."""
    x = tf.cast(x, tf.float32) / 255.0
    y = tf.cast(y, tf.int64)
    return x, y


def mnist_dataset():
    """Return a shuffled, batched tf.data pipeline over 20k MNIST examples."""
    (x, y), _ = tf.keras.datasets.mnist.load_data()
    ds = tf.data.Dataset.from_tensor_slices((x, y))
    ds = ds.map(prepare_mnist_features_and_labels)
    ds = ds.take(20000).shuffle(20000).batch(100)
    return ds


train_dataset = mnist_dataset()

# + [markdown] colab_type="text" id="znmy4l8ntMvW"
# ### Define the model

# + colab={} colab_type="code" id="ltxyJVWTqNAO"
model = tf.keras.Sequential((
    tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()

# + [markdown] colab_type="text" id="oeYV6mKnJGMr"
# ### Define the training loop

# + colab={} colab_type="code" id="3xtg_MMhJETd"
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()


def train_one_step(model, optimizer, x, y):
    # One optimization step: forward pass, loss, gradients, update, metric.
    with tf.GradientTape() as tape:
        logits = model(x)
        loss = compute_loss(y, logits)

    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    compute_accuracy(y, logits)
    return loss


@tf.function
def train(model, optimizer):
    # The whole loop is traced: AutoGraph turns the Python `for` over the
    # dataset into a graph-mode dataset loop.
    train_ds = mnist_dataset()
    step = 0
    loss = 0.0
    accuracy = 0.0
    for x, y in train_ds:
        step += 1
        loss = train_one_step(model, optimizer, x, y)
        if step % 10 == 0:
            # tf.print so the message is emitted while the graph executes.
            tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
    return step, loss, accuracy


step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())

# + [markdown] colab_type="text" id="SnsumiP6eRYL"
# ## Batching
#
# In real applications batching is essential for performance. The best code to
# convert to AutoGraph is code where the control flow is decided at the
# _batch_ level. If making decisions at the individual _example_ level, try to
# use batch APIs to maintain performance.
#
# For example, if you have the following code in Python:
#

# + colab={} colab_type="code" id="t31QoERiNccJ"
def square_if_positive(x):
    """Square the positive entries of x, pass the rest through; returns a list."""
    out = []
    for value in x:
        out.append(value ** 2 if value > 0 else value)
    return out


square_if_positive(range(-5, 5))

# + [markdown] colab_type="text" id="kSeEJ76uNgwD"
# You may be tempted to write it in TensorFlow as such (and this would work!):
#

# + colab={} colab_type="code" id="RqR8WzSzNf87"
@tf.function
def square_if_positive_naive(x):
    """Element-by-element TensorArray version: correct, but per-example control flow is slow."""
    acc = tf.TensorArray(tf.int32, size=x.shape[0])
    for idx in tf.range(x.shape[0]):
        if x[idx] > 0:
            acc = acc.write(idx, x[idx] ** 2)
        else:
            acc = acc.write(idx, x[idx])
    return acc.stack()


square_if_positive_naive(tf.range(-5, 5))

# + [markdown] colab_type="text" id="gTcyWXVGN3gS"
# But in this case, it turns out you can write the following:
#

# + colab={} colab_type="code" id="VO2f6x-lNfVj"
def square_if_positive_vectorized(x):
    """Batch-level version: a single vectorized tf.where over the whole tensor."""
    squared = x ** 2
    return tf.where(x > 0, squared, x)


square_if_positive_vectorized(tf.range(-5, 5))
site/en/guide/autograph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # [๋ชจ๋“ˆ 4.5] ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ๋กœ ๋ถ€ํ„ฐ ๋ชจ๋ธ ๋ฐฐํฌ ๋ฐ ๋žŒ๋‹ค ์Šคํ… ๊ฐœ๋ฐœ (SageMaker Lambda Step) # # ์ด ๋…ธํŠธ๋ถ์€ ์•„๋ž˜์™€ ๊ฐ™์€ ๋ชฉ์ฐจ๋กœ ์ง„ํ–‰ ๋ฉ๋‹ˆ๋‹ค. ์ „์ฒด๋ฅผ ๋ชจ๋‘ ์‹คํ–‰์‹œ์— ์™„๋ฃŒ ์‹œ๊ฐ„์€ ์•ฝ 5๋ถ„-10๋ถ„ ์†Œ์š” ๋ฉ๋‹ˆ๋‹ค. # # - 1. ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ๋ฅผ ํ†ตํ•œ ๋ชจ๋ธ ๋ฐฐํฌ # - (1) ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ์—์„œ ๋ชจ๋ธ ๋“ฑ๋ก ํ™•์ธ # - (2) ๋ชจ๋ธ ๋ฒ„์ „ ์Šน์ธ ์ƒํƒœ ๋ณ€๊ฒฝ # - (3) ๋ชจ๋ธ ๋ฐฐํฌ # - 2. ๋žŒ๋‹ค ์Šคํ… ๊ฐœ์š” # - ์œ„์˜ `"๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ๋ฅผ ํ†ตํ•œ ๋ชจ๋ธ ๋ฐฐํฌ"` ๋ฅผ ๋žŒ๋‹ค ์Šคํ… ๋ฐ ๋ชจ๋ธ ์ƒ์„ฑ ์Šคํ…์„ ํ†ตํ•˜์—ฌ ๊ตฌํ˜„ ํ•ฉ๋‹ˆ๋‹ค. # - 3. ๋ฆฌ์†Œ์Šค ์ •๋ฆฌ # # # --- # # 1. ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ๋ฅผ ํ†ตํ•œ ๋ชจ๋ธ ๋ฐฐํฌ # # --- # `import ` ์‹œ๋งˆ๋‹ค ์›๋ณธ ์†Œ์Šค์—์„œ ์žฌ๋กœ๋”ฉ์„ ์„ค์ •ํ•จ. (๋‹ค๋ฅธ ์†Œ์Šค ํŒŒ์ผ์„ ์ˆ˜์ •ํ›„์— ๋””๋ฒ„๊น…ํ•˜๊ธฐ์— ํŽธํ•จ) # %load_ext autoreload # %autoreload 2 # + import boto3 import sagemaker import pandas as pd import os sagemaker_session = sagemaker.session.Session() role = sagemaker.get_execution_role() sm_client = boto3.client("sagemaker") # %store -r # ๋…ธํŠธ๋ถ์— ์ €์žฅ๋˜์–ด ์žˆ๋Š” ๋ณ€์ˆ˜๋ฅผ ๋ณด๊ธฐ ์œ„ํ•ด์„œ๋Š” ์ฃผ์„์„ ์ œ๊ฑฐํ•˜๊ณ  ์‹คํ–‰ํ•˜์‹œ๋ฉด ๋ฉ๋‹ˆ๋‹ค. # # %store # - # ## (1) ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ์—์„œ ๋ชจ๋ธ ๋“ฑ๋ก ํ™•์ธ # ์œ„์—์„œ ๋“ฑ๋กํ•œ ๋ชจ๋ธ ๊ทธ๋ฃน ์ด๋ฆ„์„ ํ†ตํ•ด์„œ ์–ด๋–ค ๋ชจ๋ธ์ด ๋“ฑ๋ก๋˜์—ˆ๋Š”์ง€๋ฅผ ํ™•์ธ ํ•ฉ๋‹ˆ๋‹ค. # - ๋“ฑ๋ก๋œ ๋ชจ๋ธ ๋ฒ„์ „์— ๋Œ€ํ•œ ๋ณด๊ธฐ --> [๋ชจ๋ธ ๋ฒ„์ „์˜ ์„ธ๋ถ€ ์ •๋ณด ๋ณด๊ธฐ](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/model-registry-details.html) # + import boto3 sm_client = boto3.client('sagemaker') # ์œ„์—์„œ ์ƒ์„ฑํ•œ model_package_group_name ์„ ์ธ์ž๋กœ ์ œ๊ณต ํ•ฉ๋‹ˆ๋‹ค. 
response = sm_client.list_model_packages(ModelPackageGroupName= model_package_group_name)
response
# -

# #### Inspect the details of the registered model version

# NOTE(review): assumes entry [0] is the latest version - confirm the list
# ordering (SortBy/SortOrder) if the group holds more than one version.
ModelPackageArn = response['ModelPackageSummaryList'][0]['ModelPackageArn']
sm_client.describe_model_package(ModelPackageName=ModelPackageArn)

# ## (2) Change the model approval status

# Flip the model package to "Approved" so that it can be deployed.
model_package_update_input_dict = {
    "ModelPackageArn" : ModelPackageArn,
    "ModelApprovalStatus" : "Approved"
}
model_package_update_response = sm_client.update_model_package(**model_package_update_input_dict)

# Fetch the approved model's inference container image and artifact S3 path.
response = sm_client.describe_model_package(ModelPackageName=ModelPackageArn)
image_uri_approved = response["InferenceSpecification"]["Containers"][0]["Image"]
ModelDataUrl_approved = response["InferenceSpecification"]["Containers"][0]["ModelDataUrl"]
print("image_uri_approved: ", image_uri_approved)
print("ModelDataUrl_approved: ", ModelDataUrl_approved)

# ## (3) Deploy the model to an endpoint

# +
# %%time
from sagemaker import ModelPackage

model = ModelPackage(role=role,
                     model_package_arn=ModelPackageArn,
                     sagemaker_session=sagemaker_session)
_ = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
# -

# Delete the deployed endpoint.

# +
predictor = sagemaker.predictor.Predictor(
    endpoint_name= model.endpoint_name,
    sagemaker_session= sagemaker_session,
)
predictor.delete_endpoint()
# -

# # 2. Lambda step overview
#
# - SageMaker model-building pipelines support a Lambda step (since 2021-08).
#   A Lambda function can implement any suitable stage of a machine-learning
#   workflow.
# - As an example, the Lambda step here changes the approval status of the
#   latest model in the registry's model package group and then deploys it.
# - ๋žŒ๋‹ค ์Šคํ… ์‚ฌ์šฉ ๋ฒ• # - LambdaStep์„ ์ •์˜ํ•  ๋•Œ SageMaker Lambda ๋„์šฐ๋ฏธ ํด๋ž˜์Šค๋Š” Lambda ํ•จ์ˆ˜๋ฅผ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•œ ๋„์šฐ๋ฏธ ํ•จ์ˆ˜๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค.์‚ฌ์šฉ์ž๋Š” lambda_func ์ธ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋ฏธ ๋ฐฐํฌ๋œ Lambda ํ•จ์ˆ˜์— ํ•จ์ˆ˜ ARN์„ ์ œ๊ณตํ•˜๊ฑฐ๋‚˜ Lambda ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ Lambda ํ•จ์ˆ˜์— ๋Œ€ํ•œ ์Šคํฌ๋ฆฝํŠธ, ํ•จ์ˆ˜ ์ด๋ฆ„ ๋ฐ ์—ญํ• ์„ ์ œ๊ณตํ•˜์—ฌ Lambda ํ•จ์ˆ˜๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. # # - ์ž…๋ ฅ์„ Lambda์— ์ „๋‹ฌํ•  ๋•Œ inputs ์ธ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์œผ๋ฉฐ Lambda ํ•จ์ˆ˜์˜ ํ•ธ๋“ค๋Ÿฌ ๋‚ด์—์„œ event ์ธ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ž…๋ ฅ์„ ๊ฒ€์ƒ‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. # # - Lambda ํ•จ์ˆ˜์˜ ๋”•์…”๋„ˆ๋ฆฌ ์‘๋‹ต์€ outputs ์ธ์ˆ˜์— ์ œ๊ณต๋œ lambdaOutput ๊ฐ์ฒด๋ฅผ ํ†ตํ•ด ๊ตฌ๋ฌธ ๋ถ„์„๋ฉ๋‹ˆ๋‹ค.LambdaOutput ์˜ output_name์€ ๋žŒ๋‹ค์˜ ๋ฆฌํ„ด ๋”•์…”๋„ˆ๋ฆฌ์— ์žˆ๋Š” ๋”•์…”๋„ˆ๋ฆฌ ํ‚ค์— ํ•ด๋‹นํ•ฉ๋‹ˆ๋‹ค. # # # - ์ฐธ๊ณ  # - ๊ฐœ๋ฐœ์ž ๊ฐ€์ด๋“œ์˜ ๋žŒ๋‹ค ๋‹จ๊ณ„ ์ฐธ๊ณ  --> [๋žŒ๋‹ค ๋‹จ๊ณ„](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/build-and-manage-steps.html#step-type-lambda) # # # # # #### ๋žŒ๋‹ค ํ•จ์ˆ˜ ์ •์˜ # - ์—ฌ๊ธฐ์„œ๋Š” 2๊ฐœ์˜ ๋žŒ๋‹ค ํ•จ์ˆ˜๋ฅผ ์ •์˜ ํ–ˆ์Šต๋‹ˆ๋‹ค. # - (1) iam_change_model_approval.py # - ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ์—์„œ ํ•ด๋‹น ๋ชจ๋ธ ํŒจํ‚ค์ง€ ๊ทธ๋ฃน์„ ์กฐํšŒํ•˜๊ณ , ๊ฐ€์žฅ ์ตœ์‹  ๋ฒ„์ „์˜ ๋ชจ๋ธ์— ๋Œ€ํ•ด์„œ '๋ชจ๋ธ ์Šน์ธ ์ƒํƒœ ๋ณ€๊ฒฝ' ์„ ํ•ฉ๋‹ˆ๋‹ค. # - (2) iam_create_endpoint.py # - ์ž…๋ ฅ์œผ๋กœ ์„ธ์ด์ง€ ๋ฉ”์ด์ปค ๋ชจ๋ธ, ์•ค๋“œ ํฌ์ธํŠธ ์ปจํ”ผ๊ทธ ๋ฐ ์•ค๋“œ ํฌ์ธํŠธ ์ด๋ฆ„์„ ๋ฐ›์•„์„œ, ์•ค๋“œํฌ์ธํŠธ๋ฅผ ์ƒ์„ฑ ํ•จ. # # # - ์˜ˆ์‹œ๋กœ์จ ์ฒซ๋ฒˆ์งธ ๋žŒ๋‹ค ํ•จ์ˆ˜๋ฅผ ์ •์˜ํ•œ ๊ฒƒ์˜ ์˜ˆ์ œ ์ž…๋‹ˆ๋‹ค. # # !pygmentize src/iam_change_model_approval.py # #### ๋žŒ๋‹ค IAM Role # # Lambda ํ•จ์ˆ˜์—๋Š” SageMaker ์—์„œ ์ˆ˜ํ–‰ํ•  ์žก(์˜ˆ: ์—”๋“œํฌ์ธํŠธ๋ฅผ ๋ฐฐํฌ) ์— ๋Œ€ํ•œ IAM ์—ญํ• ์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค.์—ญํ•  ARN์€ ๋žŒ๋‹ค์Šคํ…์—์„œ ์ œ๊ณต๋˜์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 
# # Lambda ์—ญํ• ์—๋Š” ์ตœ์†Œํ•œ์˜ ๋žŒ๋‹ค ์‹คํ–‰ ์ •์ฑ… ์™ธ์— ์—ฌ๊ธฐ์„œ๋Š” `์„ธ์ด์ง€๋ฉ”์ด์ปค:ํฌ๋ฆฌ์—์ดํ„ฐ ๋ชจ๋ธ', `์„ธ์ด์ง€๋ฉ”์ด์ปค:์ƒ์„ฑ์—”๋“œํฌ์ธํŠธ๊ตฌ์„ฑ`, `์„ธ์ด์ง€๋ฉ”์ด์ปค:์ƒ์„ฑ์—”๋“œํฌ์ธํŠธ'๋“ฑ์˜ ํ—ˆ์šฉํ•˜๋Š” ์ •์ฑ…์ด ์žˆ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. # # `iam_helper.py`์˜ ๋„์šฐ๋ฏธ ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ Lambda ํ•จ์ˆ˜ ์—ญํ• ์„ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.์ด ์—ญํ• ์€ ์•„๋งˆ์กด ๊ด€๋ฆฌํ˜• ์ •์ฑ… (`์„ธ์ด์ง€๋ฉ”์ด์ปคํ’€์•ก์„ธ์Šค') ์„ ์‚ฌ์šฉํ•œ๋‹ค๋Š” ์ ์— ์œ ์˜ํ•˜์‹ญ์‹œ์˜ค.์ด๋Š” AWS IAM ๋ชจ๋ฒ” ์‚ฌ๋ก€์— ๋”ฐ๋ผ ์ตœ์†Œ ๊ถŒํ•œ์„ ๊ฐ€์ง„ IAM ์ •์ฑ…์œผ๋กœ ๋Œ€์ฒดํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. # + from src.iam_helper import create_lambda_role lambda_role = create_lambda_role("lambda-deployment-role") print("lambda_role: \n", lambda_role) # - # ## ์Šคํ… ์ƒ์„ฑ # ### (1) ๋žŒ๋‹ค ์Šคํ…: ๋ชจ๋ธ ๋ฒ„์ „ ์ƒํƒœ๋ฅผ ์Šน์ธ ์œผ๋กœ ๋ณ€๊ฒฝ # + from sagemaker.lambda_helper import Lambda from sagemaker.workflow.lambda_step import ( LambdaStep, LambdaOutput, LambdaOutputTypeEnum, ) import time current_time = time.strftime("%m-%d-%H-%M-%S", time.localtime()) function_name = "sagemaker-lambda-step-approve-model-deployment-" + current_time print("function_name: \n", function_name) # + # Lambda helper class can be used to create the Lambda function func_approve_model = Lambda( function_name=function_name, execution_role_arn=lambda_role, script="src/iam_change_model_approval.py", handler="iam_change_model_approval.lambda_handler", ) output_param_1 = LambdaOutput(output_name="statusCode", output_type=LambdaOutputTypeEnum.String) output_param_2 = LambdaOutput(output_name="body", output_type=LambdaOutputTypeEnum.String) output_param_3 = LambdaOutput(output_name="other_key", output_type=LambdaOutputTypeEnum.String) step_approve_lambda = LambdaStep( name="LambdaApproveModelStep", lambda_func=func_approve_model, inputs={ "model_package_group_name" : model_package_group_name, "ModelApprovalStatus": "Approved", }, outputs=[output_param_1, output_param_2, output_param_3], ) # - # ### (2) 
์„ธ์ด์ง€ ๋ฉ”์ด์ปค ๋ชจ๋ธ ์Šคํ… ์ƒ์„ฑ # - ์•„๋ž˜ ๋‘ ํŒŒ๋ฆฌ๋ฏธํ„ฐ์˜ ์ž…๋ ฅ์ด ์ด์ „ ์Šคํ…์˜ ๊ฒฐ๊ณผ๊ฐ€ ์ œ๊ณต๋ฉ๋‹ˆ๋‹ค. # - image_uri= step_train.properties.AlgorithmSpecification.TrainingImage, # - model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts, # # # ๋ชจ๋ธ ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ์˜ ํ•ด๋‹น ๋ชจ๋ธ ํŒจํ‚ค์ง€ ๊ทธ๋ฃน์—์„œ ์ตœ์‹  ๋ฒ„์ „ ๋ชจ๋ธ์— ๋Œ€ํ•œ '์ถ”๋ก  ๋„์ปค ์ด๋ฏธ์ง€', '๋ชจ๋ธ ์•„ํ‹ฐํŽ™ํŠธ ๊ฒฝ๋กœ' ๋ฅผ ์„ธ์ด์ง€ ๋ฉ”์ด์ปค ๋ชจ๋ธ ์ƒ์„ฑ์‹œ์— ์ž…๋ ฅ์œผ๋กœ ์ œ๊ณต ํ•ฉ๋‹ˆ๋‹ค. print("image_uri_approved: ", image_uri_approved) print("ModelDataUrl_approved: ", ModelDataUrl_approved) # + from sagemaker.model import Model model = Model( image_uri= image_uri_approved, model_data= ModelDataUrl_approved, sagemaker_session=sagemaker_session, role=role, ) # + from sagemaker.inputs import CreateModelInput from sagemaker.workflow.steps import CreateModelStep inputs = CreateModelInput( instance_type="ml.m5.large", # accelerator_type="ml.eia1.medium", ) step_create_model = CreateModelStep( name="CreateFraudhModel", model=model, inputs=inputs, ) step_create_model.add_depends_on([step_approve_lambda]) # step_approve_lambda ์™„๋ฃŒ ํ›„ ์‹คํ–‰ ํ•จ. 
# - # ### (3) ๋žŒ๋‹ค ์Šคํ…: ์—”๋“œํฌ์ธํŠธ ๋ฐฐํฌ # + # model_name = project_prefix + "-lambda-model" + current_time endpoint_config_name = "lambda-deploy-endpoint-config-" + current_time endpoint_name = "lambda-deploy-endpoint-" + current_time function_name = "sagemaker-lambda-step-endpoint-deploy-" + current_time # print("model_name: \n", model_name) print("endpoint_config_name: \n", endpoint_config_name) print("endpoint_config_name: \n", len(endpoint_config_name)) print("endpoint_name: \n", endpoint_name) print("function_name: \n", function_name) # + # Lambda helper class can be used to create the Lambda function func_deploy_model = Lambda( function_name=function_name, execution_role_arn=lambda_role, script="src/iam_create_endpoint.py", handler="iam_create_endpoint.lambda_handler", timeout = 900, # ๋””ํดํŠธ๋Š” 120์ดˆ ์ž„. 10๋ถ„์œผ๋กœ ์—ฐ์žฅ ) output_param_1 = LambdaOutput(output_name="statusCode", output_type=LambdaOutputTypeEnum.String) output_param_2 = LambdaOutput(output_name="body", output_type=LambdaOutputTypeEnum.String) output_param_3 = LambdaOutput(output_name="other_key", output_type=LambdaOutputTypeEnum.String) step_deploy_lambda = LambdaStep( name="LambdaDeployStep", lambda_func=func_deploy_model, inputs={ "model_name": step_create_model.properties.ModelName, "endpoint_config_name": endpoint_config_name, "endpoint_name": endpoint_name, }, outputs=[output_param_1, output_param_2, output_param_3], ) # - # ## ๋ชจ๋ธ ๋นŒ๋”ฉ ํŒŒ์ดํ”„๋ผ์ธ ๋ณ€์ˆ˜ ์ƒ์„ฑ # # from sagemaker.workflow.parameters import ( ParameterInteger, ParameterString, ) model_approval_status = ParameterString( name="ModelApprovalStatus", default_value="PendingManualApproval" ) # ## ๋ชจ๋ธ ๋นŒ๋”ฉ ํŒŒ์ดํ”„๋ผ์ธ ์ •์˜ # + from sagemaker.workflow.pipeline import Pipeline from sagemaker.workflow.execution_variables import ExecutionVariables from sagemaker.workflow.pipeline_experiment_config import PipelineExperimentConfig pipeline_name = project_prefix + "-Lambda-step" pipeline = 
Pipeline( name=pipeline_name, parameters=[ model_approval_status, ], pipeline_experiment_config=PipelineExperimentConfig( ExecutionVariables.PIPELINE_NAME, ExecutionVariables.PIPELINE_EXECUTION_ID ), steps=[step_approve_lambda, step_create_model, step_deploy_lambda], sagemaker_session=sagemaker_session, ) # + import json definition = json.loads(pipeline.definition()) # definition # - # ## ํŒŒ์ดํ”„๋ผ์ธ์„ SageMaker์— ์ œ์ถœํ•˜๊ณ  ์‹คํ–‰ํ•˜๊ธฐ # # ํŒŒ์ดํ”„๋ผ์ธ ์ •์˜๋ฅผ ํŒŒ์ดํ”„๋ผ์ธ ์„œ๋น„์Šค์— ์ œ์ถœํ•ฉ๋‹ˆ๋‹ค. ํ•จ๊ป˜ ์ „๋‹ฌ๋˜๋Š” ์—ญํ• (role)์„ ์ด์šฉํ•˜์—ฌ AWS์—์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ƒ์„ฑํ•˜๊ณ  ์ž‘์—…์˜ ๊ฐ ๋‹จ๊ณ„๋ฅผ ์‹คํ–‰ํ•  ๊ฒƒ์ž…๋‹ˆ๋‹ค. pipeline.upsert(role_arn=role) execution = pipeline.start() execution.wait() execution.list_steps() # # 3. ๋ฆฌ์†Œ์Šค ์ •๋ฆฌ # # #### ํŒŒ์ดํ”„๋ผ์ธ ์‚ญ์ œ # # - ์œ„์—์„œ ์ƒ์„ฑํ•œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ œ๊ฑฐ ํ•ฉ๋‹ˆ๋‹ค. # - isDeletePipeline=False, verbose=Fasle # - ํŒŒ์ดํ”„๋ผ์ธ์„ ์ง€์šฐ์ง€ ์•Š๊ณ , ์กด์žฌํ•˜๋Š”์ง€ ํ™•์ธ ํ•ฉ๋‹ˆ๋‹ค. # - isDeletePipeline=False, verbose=True # - ํŒŒ์ดํ”„๋ผ์ธ์˜ ์ •์˜๋ฅผ ์ž์„ธํ•˜ ํ™•์ธ ํ•ฉ๋‹ˆ๋‹ค. # - isDeletePipeline=True, verbose=True or False # - ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ญ์ œ ํ•ฉ๋‹ˆ๋‹ค. # + from src.p_utils import clean_pipeline # clean_pipeline(pipeline_name = pipeline_name, isDeletePipeline=False, verbose=False) clean_pipeline(pipeline_name = pipeline_name, isDeletePipeline=True, verbose=False) # - # #### ๋žŒ๋‹ค ํ•จ์ˆ˜ ์‚ญ์ œ # Delete the Lambda function func_deploy_model.delete() func_approve_model.delete() # ### ์•ค๋“œํฌ์ธํŠธ ์ปจํ”ผ๊ทธ ๋ฐ ์•ค๋“œํฌ์ธํŠธ ์‚ญ์ œ # - ์œ„์˜ ํŒŒ์ดํ”„๋ผ์ธ ์Šคํƒฌ์—์„œ Async ๋กœ ์—”๋“œํฌ์ธํŠธ ์ƒ์„ฑ์„ ์š”์ฒญํ•จ. ๊ทธ๋ž˜์„œ ์•„๋ž˜ ์—”๋“œํฌ์ธํŠธ ์‚ญ์ œ์‹œ์— ์•ค๋“œํฌ์ธํŠธ๊ฐ€ ์ƒ์„ฑ๋œ ํ›„์— ์‚ญ์ œ # ํ•จ. # - [์•Œ๋ฆผ] `An exception occurred: list index out of range` ๋ฉ”์„ธ์ œ์ง€๊ฐ€ ์ถœ๋ ฅ์ด ๋˜๋ฉด ํ•ด๋‹น ์•ค๋“œํฌ์ธํŠธ๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š์œผ๋‹ˆ ์ค‘๋‹จํ•ด์ฃผ์„ธ์š”. 
# + from src.p_utils import is_available_endpoint while not is_available_endpoint(endpoint_name): time.sleep(30) print("Endpoint is being creating") sm_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name) sm_client.delete_endpoint(EndpointName=endpoint_name) print("endpoint is deleted")
phase02/4.5.Endpoint_Lambda.ipynb