code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="6ZiJWyW4_7u7"
# # Ungraded Lab: Saliency
#
# Like class activation maps, saliency maps also tell us what parts of the image
# the model is focusing on when making its predictions.
# - The main difference is that in saliency maps we are just shown the relevant
#   pixels instead of the learned features.
# - You can generate saliency maps by getting the gradient of the loss with
#   respect to the image pixels.
# - This means that changes in certain pixels that strongly affect the loss will
#   be shown brightly in your saliency map.
#
# Let's see how this is implemented in the following sections.

# + [markdown] id="4k8CUIO5g78a"
# ## Imports

# + id="jeuqmYSvrn9w"
# %tensorflow_version 2.x
import tensorflow as tf
import tensorflow_hub as hub
import cv2
import numpy as np
import matplotlib.pyplot as plt

# + [markdown] id="cn6Xzhpcg_co"
# ## Build the model
#
# For the classifier, you will use the
# [Inception V3 model](https://arxiv.org/abs/1512.00567) available in
# [Tensorflow Hub](https://tfhub.dev/google/tf2-preview/inception_v3/classification/4).
# This has pre-trained weights that is able to detect 1001 classes. You can read
# more [here](https://tfhub.dev/google/tf2-preview/inception_v3/classification/4).

# + id="S2GDglRGrvIl"
# grab the model from Tensorflow hub and append a softmax activation
model = tf.keras.Sequential([
    hub.KerasLayer('https://tfhub.dev/google/tf2-preview/inception_v3/classification/4'),
    tf.keras.layers.Activation('softmax')
])

# build the model based on a specified batch input shape
model.build([None, 300, 300, 3])

# + [markdown] id="QyW_KpZWkSne"
# ## Get a sample image
#
# You will download a photo of a Siberian Husky that our model will classify.
# We left the option to download a Tabby Cat image instead if you want.
# + id="LlAXGlVhuFXb" colab={"base_uri": "https://localhost:8080/"} outputId="e2c60553-3473-4177-e09e-a77d73baae76" # !wget -O image.jpg https://cdn.pixabay.com/photo/2018/02/27/14/11/the-pacific-ocean-3185553_960_720.jpg # If you want to try the cat, uncomment this line # # !wget -O image.jpg https://cdn.pixabay.com/photo/2018/02/27/14/11/the-pacific-ocean-3185553_960_720.jpg # + [markdown] id="vjJq5MFbk8T9" # ## Preprocess the image # # The image needs to be preprocessed before being fed to the model. This is done in the following steps: # + id="9sDUZf6Sui1_" # read the image img = cv2.imread('image.jpg') # format it to be in the RGB colorspace img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # resize to 300x300 and normalize pixel values to be in the range [0, 1] img = cv2.resize(img, (300, 300)) / 255.0 # add a batch dimension in front image = np.expand_dims(img, axis=0) # + [markdown] id="p4W3B9bflaBU" # We can now preview our input image. # + id="b2bGeCW_-vbl" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="18c9a8d0-d047-4ec4-bbea-00f879d5758b" plt.figure(figsize=(8, 8)) plt.imshow(img) plt.axis('off') plt.show() # + [markdown] id="ZpZ9Fw8UlgYQ" # ## Compute Gradients # # You will now get the gradients of the loss with respect to the input image pixels. This is the key step to generate the map later. 
# + id="hoBGoGcqsOfi" # <NAME>'s class ID in ImageNet class_index = 251 # If you downloaded the cat, use this line instead #class_index = 282 # Tabby Cat in ImageNet # number of classes in the model's training data num_classes = 1001 # convert to one hot representation to match our softmax activation in the model definition expected_output = tf.one_hot([class_index] * image.shape[0], num_classes) with tf.GradientTape() as tape: # cast image to float inputs = tf.cast(image, tf.float32) # watch the input pixels tape.watch(inputs) # generate the predictions predictions = model(inputs) # get the loss loss = tf.keras.losses.categorical_crossentropy( expected_output, predictions ) # get the gradient with respect to the inputs gradients = tape.gradient(loss, inputs) # + [markdown] id="8x9N7FS7oAv-" # ## Visualize the results # # Now that you have the gradients, you will do some postprocessing to generate the saliency maps and overlay it on the image. # + id="ODpAzA-vsTTi" # reduce the RGB image to grayscale grayscale_tensor = tf.reduce_sum(tf.abs(gradients), axis=-1) # normalize the pixel values to be in the range [0, 255]. # the max value in the grayscale tensor will be pushed to 255. # the min value will be pushed to 0. normalized_tensor = tf.cast( 255 * (grayscale_tensor - tf.reduce_min(grayscale_tensor)) / (tf.reduce_max(grayscale_tensor) - tf.reduce_min(grayscale_tensor)), tf.uint8, ) # remove the channel dimension to make the tensor a 2d tensor normalized_tensor = tf.squeeze(normalized_tensor) # + [markdown] id="d2n74-pAs2ir" # Let's do a little sanity check to see the results of the conversion. 
# + id="IXOyqipj5Fz6" colab={"base_uri": "https://localhost:8080/"} outputId="b7c0465c-927a-4ac4-92df-07e799a8e001" # max and min value in the grayscale tensor print(np.max(grayscale_tensor[0])) print(np.min(grayscale_tensor[0])) print() # coordinates of the first pixel where the max and min values are located max_pixel = np.unravel_index(np.argmax(grayscale_tensor[0]), grayscale_tensor[0].shape) min_pixel = np.unravel_index(np.argmin(grayscale_tensor[0]), grayscale_tensor[0].shape) print(max_pixel) print(min_pixel) print() # these coordinates should have the max (255) and min (0) value in the normalized tensor print(normalized_tensor[max_pixel]) print(normalized_tensor[min_pixel]) # + [markdown] id="FMgNIk3jte62" # You should get something like: # # ``` # 1.2167013 # 0.0 # # (203, 129) # (0, 299) # # tf.Tensor(255, shape=(), dtype=uint8) # tf.Tensor(0, shape=(), dtype=uint8) # ``` # + [markdown] id="hVkRpp2VtoVK" # Now let's see what this looks like when plotted. The white pixels show the parts the model focused on when classifying the image. # + id="xW1C4JGLvYMk" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="59781640-0d57-4d8e-f33c-8676ba703076" plt.figure(figsize=(8, 8)) plt.axis('off') plt.imshow(normalized_tensor, cmap='gray') plt.show() # + [markdown] id="wX1M6A6Ct48Y" # Let's superimpose the normalized tensor to the input image to get more context. You can see that the strong pixels are over the husky and that is a good indication that the model is looking at the correct part of the image. # + id="OeULunlW2Vln" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="0fdb7b1c-12d2-4fdc-a1c5-fb867c89b219" gradient_color = cv2.applyColorMap(normalized_tensor.numpy(), cv2.COLORMAP_HOT) gradient_color = gradient_color / 255.0 super_imposed = cv2.addWeighted(img, 0.5, gradient_color, 0.5, 0.0) plt.figure(figsize=(8, 8)) plt.imshow(super_imposed) plt.axis('off') plt.show()
Advanced Computer Vision with TensorFlow/Week 4 Visualization and Interpretability/Lab_3_Saliency.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # DWD_historical_weather: example notebook

# #### Set the federal state ("Bundesland") as a global parameter

BUNDESLAND = 'Berlin'

# +
from DWD_hist_weather import tagestemp_land, tageswerte_land

import pandas as pd
import pickle

import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
# -

# #### Actually read the data: from a pickle if one exists, otherwise call
# **tageswerte_land** from the module and pull the data from the DWD

pickle_dateiname = f'{BUNDESLAND}.pickle'

try:
    # cached daily values for the chosen state
    tageswerte = pickle.load(open(pickle_dateiname, 'rb'))
    print(f'Wetterdaten für {BUNDESLAND} aus pickle eingelesen.')
except (OSError, IOError):
    # no cache yet: fetch from the DWD and store for next time
    tageswerte = tageswerte_land(BUNDESLAND)
    pickle.dump(tageswerte, open(pickle_dateiname, 'wb'))
    print(f'\nWetterdaten für {BUNDESLAND} in pickle geschrieben.')

# ### Show the DataFrame

display(tageswerte)

# ### Heatmap of the daily mean temperatures

# +
ana = tageswerte.pivot(index='Jahr', columns='Tag_des_Jahres', values='TempMean')

f, ax = plt.subplots(figsize=(20, 10))
sns.heatmap(ana, vmin=-10, vmax=23, cmap="RdBu_r")
ax.axes.set_title("Tagesmitteltemperaturen", y=1.01)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
# -

# ### Annual mean temperatures plus 5-year rolling mean

# +
ana = tageswerte.pivot(index='Jahr', columns='Tag_des_Jahres', values='TempMean')
ana['Jahresmittel'] = ana.mean(axis=1)
ana['Jahresmittel5'] = ana['Jahresmittel'].rolling(5).mean()

plt.subplots(figsize=(20, 10))
sns.lineplot(data=ana, x='Jahr', y='Jahresmittel')
sns.lineplot(data=ana, x='Jahr', y='Jahresmittel5', color='red')
example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# !pip install --quiet arcgis

import os
from arcgis.gis import GIS
from arcgis.mapping import WebMap
from arcgis.features import SpatialDataFrame
import pandas as pd
import arcpy

# +
# locations of the footprint file geodatabase and the NED 1/3 arc-second rasters
fp_path = r"F:\SitingStagingCache"
fp_fgdb = r"footprint.gdb"
fp_full = fp_path + os.sep + fp_fgdb
fp_layer = "NED13"
fp_trg = fp_path + os.sep + "NED13" + os.sep + "trg"

# each raster resource lives in its own subdirectory under fp_trg
resources = [f.name for f in os.scandir(fp_trg) if f.is_dir()]
len(resources)

# +
# (re)create the target polygon feature class in NAD83 (EPSG:4269)
if not os.path.exists(fp_full):
    arcpy.CreateFileGDB_management(fp_path, fp_fgdb)

if arcpy.Exists(fp_full + os.sep + fp_layer):
    arcpy.Delete_management(fp_full + os.sep + fp_layer)

arcpy.CreateFeatureclass_management(
    out_path=fp_full,
    out_name=fp_layer,
    geometry_type="POLYGON",
    has_m="DISABLED",
    has_z="DISABLED",
    spatial_reference=arcpy.SpatialReference(4269),
    config_keyword=None,
)

arcpy.env.workspace = fp_full

arcpy.management.AddFields(
    fp_layer,
    [
        ['resource_name', 'TEXT', 'Resource_Name', 255, None, ''],
        ['resource_path', 'TEXT', 'Resource_Path', 255, None, ''],
    ],
)

# +
# insert one footprint polygon per raster extent
cursor = arcpy.da.InsertCursor(
    fp_full + os.sep + fp_layer,
    ["resource_name", "resource_path", "SHAPE@"],
)

for item in resources:
    # 7-character resource names follow the img<name>_13.img convention
    if len(item) == 7:
        img = 'img' + item + '_13.img'
    else:
        img = item + '.img'

    raster = fp_trg + os.sep + item + os.sep + img
    ar = arcpy.Raster(raster)

    ll = ar.extent.lowerLeft
    lr = ar.extent.lowerRight
    ur = ar.extent.upperRight
    ul = ar.extent.upperLeft

    # build the ring, repeating the upper-left corner to close the polygon
    rg = arcpy.Array([
        arcpy.Point(ul.X, ul.Y),
        arcpy.Point(ll.X, ll.Y),
        arcpy.Point(lr.X, lr.Y),
        arcpy.Point(ur.X, ur.Y),
        arcpy.Point(ul.X, ul.Y),
    ])
    fp = arcpy.Polygon(rg, ar.spatialReference)
    cursor.insertRow([img, raster, fp])

if cursor:
    del cursor

'footprints generated'

# +
# display the footprints on a web map
gis = GIS()
sdf = pd.DataFrame.spatial.from_featureclass(fp_full + os.sep + fp_layer)

wmap = gis.map("North America")
sdf.spatial.plot(
    kind='map',
    map_widget=wmap,
    symbol_type='simple',
    colors='Greens_r',
    outline_color='Dark2_r',
    line_width=0.5,
)
wmap
# -
SitingStaging/NED 13ArcSec Footprints.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from autogluon.tabular import TabularDataset, TabularPredictor
import os

os.listdir('../data/walmart_recruiting')

train_data = TabularDataset('../data/walmart_recruiting/train.csv')

train_data.shape

train_data.head()

label = 'Weekly_Sales'

save_path = 'agModels-kaggle_walmart_recruiting'  # folder that stores the trained models
predictor = TabularPredictor(label=label, path=save_path).fit(train_data)

test_data_nolab = TabularDataset('../data/walmart_recruiting/test.csv')

y_pred = predictor.predict(test_data_nolab)
y_pred

# build the submission frame: key columns plus the predicted weekly sales
id_ = ['Id']
sub = test_data_nolab[['Store', 'Dept', 'Date']].copy()
sub[label] = list(y_pred.values)


# +
def cols_concat(df, con_list):
    """Create a column named '__'.join(con_list) that holds the values of the
    listed columns joined with '_' (the Kaggle Id format), and return df."""
    name = "__".join(con_list)
    df[name] = df[con_list[0]].astype(str)
    for item in con_list[1:]:
        df[name] = df[name] + '_' + df[item].astype(str)
    return df

sub = cols_concat(sub, ['Store', 'Dept', 'Date'])
# -

# drop the key columns now that they are folded into the concatenated Id
del_cols = ['Store', 'Dept', 'Date']
sub.drop(del_cols, axis=1, inplace=True)

sub.rename({'Store__Dept__Date': 'Id'}, axis=1, inplace=True)

sub = sub[['Id', 'Weekly_Sales']]

sub.to_csv("./autogluon_sub_walmart_recruiting.csv", index=False)

# +
# # !zip -r autogluon_sub_kaggle_ventilator.csv.zip autogluon_sub_kaggle_ventilator.csv
# -
demo/walmart_recruiting/autogluon_kaggle_walmart_recruiting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.2 64-bit ('3.10.2')
#     language: python
#     name: python3
# ---

# + tags=["parameters"]
project = 'saga-nvdb-prod-vlmh'
use_colab_auth = True

# Insert your own project here, e.g. 'saga-olanor-playground-ab12'
bq_job_project = ''
# -

if (use_colab_auth):
    from google.colab import auth
    auth.authenticate_user()
    print('Authenticated')

# +
import warnings

from google.cloud import bigquery

warnings.filterwarnings('ignore')

client = bigquery.Client(project=bq_job_project)
# -

# This query fetches the tunnel id, tunnel name, opening year and number of
# tunnel bores per tunnel. It also shows the short form of the road system
# reference for each of the tunnel's bores.

# +
query = f"""
SELECT
  tunnel.id,
  ANY_VALUE(tunnel.egenskaper.navn) as tunnelnavn,
  tunnel.egenskaper.aapningsaar,
  COUNT(tunnellop) antall_lop,
  ARRAY_AGG(tunnellop_vegsystemreferanser.kortform) tunnellop_vegsystemreferanse_kortform,
FROM `{project}.curated.tunneler_med_lop` tunnel,
  UNNEST(tunnellop) tunnellop,
  UNNEST(tunnellop.lokasjon.vegsystemreferanser) tunnellop_vegsystemreferanser
GROUP BY tunnel.id, tunnel.egenskaper.aapningsaar
ORDER BY aapningsaar DESC, antall_lop DESC
"""
print(query)

client.query(query).to_dataframe()
# -

# This query shows the total length of the tunnel's bores, the length of the
# longest bore, the tunnel's construction type, and a geometry for the whole
# tunnel assembled from the bore geometries.

# +
query = f"""
SELECT
  tunnel.id,
  ANY_VALUE(tunnel.egenskaper.navn),
  SUM(COALESCE(tunnellop.egenskaper.lengde, ST_LENGTH(tunnellop.lokasjon.geometri))) AS sum_lengde_tunnellop,
  ST_UNION_AGG(tunnellop.lokasjon.geometri) AS geometri,
  MAX(COALESCE(tunnellop.egenskaper.lengde, ST_LENGTH(tunnellop.lokasjon.geometri))) AS lengde_lengste_tunnellop,
  ARRAY_AGG(DISTINCT tunnellop.egenskaper.typeTunnelLop IGNORE NULLS) AS typer
FROM `{project}.curated.tunneler_med_lop` AS tunnel,
  UNNEST(tunnellop) AS tunnellop
GROUP BY tunnel.id
"""
print(query)

client.query(query).to_dataframe()
notebooks/bigquery:nvdb.curated.tunneler_med_lop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_style("whitegrid")
plt.rcParams["figure.figsize"] = (20, 20)

import os
import json
import nltk
import numpy as np
import pandas as pd
from PIL import Image
from scipy.spatial.distance import cdist
from tqdm import tqdm_notebook as tqdm

import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torchvision import models, transforms

nltk.download("punkt")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# -

# # load coco images and captions

# +
with open("/mnt/efs/images/coco/annotations/captions_val2014.json") as f:
    meta = json.load(f)

# join the image metadata with the caption annotations and keep
# (caption, file_name) pairs
df = pd.merge(
    pd.DataFrame(meta["images"]).set_index("id"),
    pd.DataFrame(meta["annotations"]).set_index("image_id"),
    left_index=True,
    right_index=True,
).reset_index()[["caption", "file_name"]]

df["file_name"] = "/mnt/efs/images/coco/val2014/" + df["file_name"]

# strip non-letter characters, lowercase, and collapse whitespace
df["caption"] = (
    df["caption"]
    .apply(lambda x: "".join([c for c in x if c.isalpha() or c.isspace()]))
    .apply(str.lower)
    .apply(lambda x: " ".join(x.split()))
)
# -

# # train test splits

# +
split_ratio = 0.8
train_size = int(split_ratio * len(df))

train_df = df.loc[:train_size]
test_df = df.loc[train_size:]

len(train_df), len(test_df)
# -

# # load InferSent model

from InferSent import InferSent

# +
MODEL_PATH = "/mnt/efs/models/infersent2.pkl"
params_model = {
    "bsize": 1024,
    "word_emb_dim": 300,
    "enc_lstm_dim": 2048,
    "pool_type": "max",
    "dpout_model": 0.0,
    "version": 2,
}

infersent_model = InferSent(params_model)
infersent_model.load_state_dict(torch.load(MODEL_PATH))
# -

W2V_PATH = "/mnt/efs/nlp/word_vectors/fasttext/crawl-300d-2M.vec"
infersent_model.set_w2v_path(W2V_PATH)
infersent_model.build_vocab_k_words(K=100000)

infersent_model = infersent_model.to(device)

# # embed captions with infersent

# +
train_embeddings = infersent_model.encode(train_df["caption"].values, tokenize=True)
test_embeddings = infersent_model.encode(test_df["caption"].values, tokenize=True)

len(train_embeddings), len(test_embeddings)
# -

# # pytorch datasets and dataloaders

# ### dataset

class CaptionsDataset(Dataset):
    """Pairs each image with the InferSent embedding of its caption."""

    def __init__(self, path_df, caption_embeddings, transform=transforms.ToTensor()):
        self.ids = path_df.index.values
        self.image_paths = path_df["file_name"].values
        self.titles = path_df["caption"].values
        self.caption_embeddings = caption_embeddings
        self.transform = transform

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index]).convert("RGB")
        if self.transform is not None:
            image = self.transform(image)
        target = self.caption_embeddings[index]
        return image, target

    def __len__(self):
        return len(self.ids)

transform = transforms.Compose(
    [
        transforms.RandomResizedCrop(224, scale=[0.5, 0.9]),
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(0.25),
        transforms.ToTensor(),
    ]
)

train_dataset = CaptionsDataset(train_df, train_embeddings, transform=transform)
test_dataset = CaptionsDataset(test_df, test_embeddings, transform=transform)

train_dataset.__getitem__(0)

# ### dataloader

# +
batch_size = 128

train_loader = DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=5
)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, num_workers=5)
# -

# # create DeViSE model

backbone = models.vgg16_bn(pretrained=True).features

# freeze all but the last convolutional block of VGG16
for param in backbone[:34].parameters():
    param.requires_grad = False

class DeViSE(nn.Module):
    """Projects VGG16 features into the caption-embedding space."""

    def __init__(self, backbone, target_size=300):
        super(DeViSE, self).__init__()
        self.backbone = backbone
        self.head = nn.Sequential(
            nn.Linear(in_features=(25088), out_features=target_size * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(in_features=target_size * 2, out_features=target_size),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(in_features=target_size, out_features=target_size),
        )

    def forward(self, x):
        x = self.backbone(x)
        x = x.view(x.size(0), -1)
        x = self.head(x)
        x = x / x.max()
        return x

devise_model = DeViSE(backbone, target_size=4096).to(device)

# # train

# +
losses = []

def train(
    model,
    train_loader,
    n_epochs,
    loss_function,
    additional_metric,
    optimiser,
    device=device,
):
    """ do some training """
    model.train()
    for epoch in range(n_epochs):
        loop = tqdm(train_loader)
        for data, target in loop:
            data, target, flags = (
                data.cuda(non_blocking=True),
                target.cuda(non_blocking=True),
                torch.ones(len(target)).cuda(non_blocking=True),
            )

            optimiser.zero_grad()
            prediction = model(data)
            loss = loss_function(prediction, target, flags)
            mean_sq_error = additional_metric(prediction, target)
            losses.append([loss.item(), mean_sq_error.item()])

            loss.backward()
            optimiser.step()

            loop.set_description("Epoch {}/{}".format(epoch + 1, n_epochs))
            loop.set_postfix(loss=loss.item(), mse=mean_sq_error.item())
# -

# +
torch.backends.cudnn.benchmark = True

trainable_parameters = filter(lambda p: p.requires_grad, devise_model.parameters())
loss_function, mse = nn.CosineEmbeddingLoss(), nn.MSELoss()
optimiser = optim.Adam(trainable_parameters, lr=0.001)
# -

train(
    model=devise_model,
    train_loader=train_loader,
    loss_function=loss_function,
    additional_metric=mse,
    optimiser=optimiser,
    n_epochs=3,
)

# +
# smooth the per-batch losses with a rolling mean before plotting
loss_data = pd.DataFrame(losses).rolling(window=15).mean()
loss_data.columns = ["cosine loss", "mse"]

ax = loss_data.plot(subplots=True)
ax[0].set_xlim(0,)
ax[0].set_ylim(0.3, 0.6)
ax[1].set_ylim(0,);
# -

# # evaluate on test set

# +
preds = []
test_loss = []

with torch.no_grad():
    test_loop = tqdm(test_loader)
    for data, target in test_loop:
        data, target, flags = (
            data.cuda(non_blocking=True),
            target.cuda(non_blocking=True),
            torch.ones(len(target)).cuda(non_blocking=True),
        )

        prediction = devise_model.eval()(data)
        loss = loss_function(prediction, target, flags)
        preds.append(prediction.cpu().data.numpy())
        test_loss.append(loss.item())

        test_loop.set_description("Test set")
        test_loop.set_postfix(loss=loss.item())
# -

preds = np.concatenate(preds).reshape(-1, 4096)

np.mean(test_loss)

# # run a test search

def search(query):
    """Embed `query` with InferSent and tile the 20 nearest test images."""
    query_embedding = infersent_model.encode([query], tokenize=True)
    distances = cdist(query_embedding, preds, "cosine").squeeze()
    nearby_image_paths = test_df["file_name"].values[np.argsort(distances)][:20]

    nearby_images = [
        np.array((Image.open(path).convert("RGB").resize((224, 224), Image.BILINEAR)))
        for path in nearby_image_paths
    ]

    # arrange the 20 results into a 4x5 grid
    return Image.fromarray(
        np.concatenate(
            [
                np.concatenate(nearby_images[:5], axis=1),
                np.concatenate(nearby_images[5:10], axis=1),
                np.concatenate(nearby_images[10:15], axis=1),
                np.concatenate(nearby_images[15:20], axis=1),
            ],
            axis=0,
        )
    )

search("a man playing tennis")
notebooks/devise/notebooks/06 - devise against sentence embeddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Number Theory # # By <NAME> # In the blog post below, I will outline my solution to a twist on a Google recruitment question. In solving the problem, I rely on smaller helper functions. I also implement unit testing for each helper function and my overall function. # ### The Problem # We are asked to find the first 10-digit prime in the decimal expansion of 17$\pi$. # # The first 5 digits in the decimal expansion of $\pi$ are 14159. The first 4-digit prime in the decimal expansion of $\pi$ are 4159. We are asked to find the first 10-digit prime in the decimal expansion of 17$\pi$. # # To accomplish this we use three helper functions to: # 1. Generate a large decimal expansion of any number # 2. Check if a number is prime # 3. Generate sliding windows of a specified width from a long iterable # ### Generating a Large Decimal Expansion # # In this first helper function we want to generate a large decimal expansion of any given number. To do this, we use the mpmath library. The function, called "num_times", takes in the number we wish to expand, the number of digits we desire for the expansion, and a multiplier called "times". The function notes the number of desired digits, and then return the decimal expansion of that number, multiplied by the multiplier. # # Below the function we have a unit test. The test shows that our function can correctly expand pi to 3.14159. The second part of the test shows that it can multiply and create much larger expansions with the 100-digit expansion of $17\pi$. 
# explicit import instead of `from mpmath import *`; `mp` is the only name used
from mpmath import mp


def num_times(num, digits, times):
    """Return the decimal expansion of `times * num` as a string.

    Parameters:
        num:    the number to expand (e.g. mp.pi)
        digits: how many significant digits the expansion should have
        times:  a constant multiplier applied to `num`
    """
    # set mpmath's working precision (decimal places) from the input
    mp.dps = digits
    # multiply the number by the desired constant and render it as a string
    return str(times * num)


# Unit Test
# 6 digit expansion of pi should be 3.14159
print(num_times(mp.pi, 6, 1))
# 100 digit expansion of 17 pi should be 53.___
print(num_times(mp.pi, 100, 17))

# ### Checking if a Number is Prime
#
# This helper takes a given number and returns whether or not it is prime.
# Starting from the factor 2, while x is greater than or equal to the square of
# the factor, we check whether x is divisible by it. If it is, x is not prime.
# Otherwise we increase the factor by 1 and loop again. Once the square of the
# factor exceeds x, all possible factors have been explored and x is prime.
#
# The unit test shows that the function correctly identifies 5 as prime,
# 8 as not prime, and 17 as prime.


def is_prime_num(x):
    """Return True if the integer x is prime, using trial division.

    Numbers below 2 (0, 1 and negatives) are not prime. (The original version
    incorrectly reported them as prime because the loop was never entered.)
    """
    if x < 2:
        return False
    p = 2
    # only candidate factors up to sqrt(x) need to be checked
    while (p * p <= x):
        if (x % p == 0):
            # x is divisible by p, so it is not prime
            return False
        else:
            p += 1
    # no factor found - x is prime
    return True


# Unit test
# Is 5 a prime number? True
print(is_prime_num(5))
# Is 8 a prime number? False
print(is_prime_num(8))
# Is 17 a prime number? True
print(is_prime_num(17))

# ### Generating Sliding Windows of a Specified Width
#
# This helper takes a long string and returns a list of sliding windows of
# length n. We first clean the input string of any decimal points, then loop
# through each character (stopping n-1 before the end to avoid running past
# the string) and collect each window of length n.
#
# The unit test shows sliding windows of size 5 for 'abcdefghijklmnop' and of
# size 7 for the decimal '5.467892346'.


def slide_n(n, long_str):
    """Return all contiguous windows of length n from long_str.

    Decimal points are removed first, so for numeric strings the windows run
    over the digits only; plain letter strings also work.
    """
    windows = []
    # clean string of decimal points
    long_str = long_str.replace('.', '')
    # loop through each character, until n-1 before the end of the string
    for i in range(0, len(long_str) - (n - 1)):
        windows.append(long_str[i:i + n])
    return windows


# Unit Test
# Sliding ranges of 5 letters for the first part of the alphabet
print(slide_n(5, 'abcdefghijklmnop'))
# Sliding ranges of 7 digits for a decimal
print(slide_n(7, '5.467892346'))

# ### Find The First n-digit Prime in The Decimal Expansion of a Number
#
# The final function combines all helpers to solve the question of interest -
# finding the first 10-digit prime in the decimal expansion of $17\pi$. It
# takes the number (ex. $\pi$), a multiplier (ex. 17), the desired window
# length (ex. 10), and the desired decimal expansion length (ex. 1000).
# It calls num_times for the full decimal expansion, slide_n for the sliding
# windows, then checks each window for primality and returns the first prime
# found, or a message suggesting a longer expansion if none is found.
#
# The unit test seeks the first ten-digit prime in the decimal expansion of e
# (7427466391): a 100-digit expansion is too short, but a 1000-digit expansion
# returns the correct answer. Finally, we answer the original question: the
# first 10-digit prime in the decimal expansion of $17\pi$ is '8649375157'.


def first_n_prime(num, multiplier, window, dec_length):
    """Find the first `window`-digit prime in the decimal expansion of
    `multiplier * num`, searching the first `dec_length` digits.

    Returns the prime as a string, or a message suggesting a longer expansion
    if none is found.
    """
    # get the full decimal expansion of the number
    full_str = num_times(num, dec_length, multiplier)
    # create the sliding windows of size `window`
    windows = slide_n(window, full_str)
    for w in windows:
        # BUGFIX: a window with a leading '0' converts to an int with fewer
        # than `window` digits, so it cannot be an n-digit prime - skip it
        if w[0] == '0':
            continue
        # return the first prime window found
        if is_prime_num(int(w)):
            return w
    # no primes were found - ask the user for a longer expansion
    return str("Try a longer decimal expansion.")


# Unit Test
# With only 100 decimals of e we do not find the first ten-digit prime
y = first_n_prime(mp.e, 1, 10, 100)
# The first ten-digit prime of e should be 7427466391; found with 1000 decimals
z = first_n_prime(mp.e, 1, 10, 1000)
print(z)

# Answer the original question: first 10-digit prime in the expansion of 17 pi
first_n_prime(mp.pi, 17, 10, 1000)

# Check that it is prime
is_prime_num(8649375157)
_notebooks/2021-09-16-Number-Theory.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from __future__ import print_function import os import sys import numpy as np import matplotlib.pyplot as plt import plot_domain fig = plot_domain.henry_domain() # ##Model background # Here is an example based on the Henry saltwater intrusion problem. The synthetic model is a 2-dimensional SEAWAT model (X-Z domain) with 1 row, 120 columns and 20 layers. The left boundary is a specified flux of freshwater, the right boundary is a specified head and concentration saltwater boundary. The model has two stress periods: an initial steady state (calibration) period, then a transient period with less flux (forecast). # The inverse problem has 603 parameters: 600 hydraulic conductivity pilot points, 1 global hydraulic conductivity, 1 specified flux multiplier for history matching and 1 specified flux multiplier for forecast conditions. The inverse problem has 36 obseravtions (21 heads and 15 concentrations) measured at the end of the steady-state calibration period. The forecasts of interest of the distance from the left model edge to the 10% seawater concentration in the basal model layer and the concentration at location 10. Both of there forecasts are "measured" at the end of the forecast stress period. The forecasts are both in the Jacobian matrix as zero-weight observations named `pd_ten` and `C_obs10_2`.I previously calculated the jacobian matrix, which is in the `henry/` folder, along with the PEST control file. # # Unlike the Schur's complement example notebook, here we will examine the consequences of not adjusting the specified flux multiplier parameters (```mult1``` and ```mult2```) during inversion, since these types of model inputs are not typically considered for adjustment. 
# # ##Using `pyemu` import pyemu # First create a linear_analysis object. We will use `err_var` derived type, which replicates the behavior of the `PREDVAR` suite of PEST as well as `ident_par` utility. We pass it the name of the jacobian matrix file. Since we don't pass an explicit argument for `parcov` or `obscov`, `pyemu` attempts to build them from the parameter bounds and observation weights in a pest control file (.pst) with the same base case name as the jacobian. Since we are interested in forecast uncertainty as well as parameter uncertainty, we also pass the names of the forecast sensitivity vectors we are interested in, which are stored in the jacobian as well. Note that the `forecasts` argument can be a mixed list of observation names, other jacobian files or PEST-compatible ASCII matrix files. Remember you can pass a filename to the `verbose` argument to write log file. # # Since most groundwater model history-matching analyses focus on adjusting hetergeneous hydraulic properties and not boundary condition elements, let's identify the ```mult1``` and ```mult2``` parameters as `omitted` in the error variance analysis. We can conceptually think of this action as excluding the ```mult1``` and ```mult2``` parameters from the history-matching process. Later we will explicitly calculate the penalty for not adjusting this parameter. la = pyemu.ErrVar(jco=os.path.join("henry", "pest.jcb"), omitted_parameters=["mult1","mult2"]) print(la.jco.shape) #without the omitted parameter or the prior info la.forecast_names # #Parameter identifiability # The `errvar` dervied type exposes a method to get a `pandas` dataframe of parameter identifiability information. 
Recall that parameter identifiability is expressed as $d_i = \Sigma(\mathbf{V}_{1i})^2$, where $d_i$ is the parameter identifiability, which ranges from 0 (not identified by the data) to 1 (fully identified by the data), and $\mathbf{V}_1$ are the right singular vectors corresponding to non-(numerically) zero singular values. First let's look at the singular spectrum of $\mathbf{Q}^{\frac{1}{2}}\mathbf{J}$, where $\mathbf{Q}$ is the cofactor matrix and $\mathbf{J}$ is the jacobian: s = la.qhalfx.s import pylab as plt figure = plt.figure(figsize=(10, 5)) ax = plt.subplot(111) ax.plot(s.x) ax.set_title("singular spectrum") ax.set_ylabel("power") ax.set_xlabel("singular value") ax.set_xlim(0,20) plt.show() # We see that the singular spectrum decays rapidly (not uncommon) and that we can really only support about 3 right singular vectors even though we have 600+ parameters in the inverse problem. # # Let's get the identifiability dataframe at 3 singular vectors: ident_df = la.get_identifiability_dataframe(3) # the method is passed the number of singular vectors to include in V_1 ident_df.sort_values(by="ident").iloc[0:10] # Plot the identifiability: # We see that the `global_k` parameter has a much higher identifiability than any one of the 600 pilot points # #Forecast error variance # # Now let's explore the error variance of the forecasts we are interested in. 
We will use an extended version of the forecast error variance equation: # # $\sigma_{s - \hat{s}}^2 = \underbrace{\textbf{y}_i^T({\bf{I}} - {\textbf{R}})\boldsymbol{\Sigma}_{{\boldsymbol{\theta}}_i}({\textbf{I}} - {\textbf{R}})^T\textbf{y}_i}_{1} + \underbrace{{\textbf{y}}_i^T{\bf{G}}\boldsymbol{\Sigma}_{\mathbf{\epsilon}}{\textbf{G}}^T{\textbf{y}}_i}_{2} + \underbrace{{\bf{p}}\boldsymbol{\Sigma}_{{\boldsymbol{\theta}}_o}{\bf{p}}^T}_{3}$ # # Where term 1 is the null-space contribution, term 2 is the solution space contribution and term 3 is the model error term (the penalty for not adjusting uncertain parameters). Remember the `mult1` and `mult2` parameters that we marked as omitted? The consequences of that action can now be explicitly evaluated. See Moore and Doherty (2005) and White and others (2014) for more explanation of these terms. Note that if you don't have any `omitted_parameters`, only terms 1 and 2 contribute to the error variance. 
Since we have $\lt40$ data, we only need to test up to $40$ singular values because that is where the action is: sing_vals = np.arange(40) # The `errvar` derived type exposes a convenience method to get a multi-index pandas dataframe with each of the terms of the error variance equation: errvar_df = la.get_errvar_dataframe(sing_vals) errvar_df.iloc[0:10] # plot the error variance components for each forecast: # + fig = plt.figure(figsize=(10, 10)) ax_1, ax_2= plt.subplot(211), plt.subplot(212) axes = [ax_1,ax_2] colors = {"first": 'g', "second": 'b', "third": 'c'} max_idx = 19 idx = sing_vals[:max_idx] for ipred, pred in enumerate(la.forecast_names): pred = pred.lower() ax = axes[ipred] ax.set_title(pred) first = errvar_df[("first", pred)][:max_idx] second = errvar_df[("second", pred)][:max_idx] third = errvar_df[("third", pred)][:max_idx] ax.bar(idx, first, width=1.0, edgecolor="none", facecolor=colors["first"], label="first",bottom=0.0) ax.bar(idx, second, width=1.0, edgecolor="none", facecolor=colors["second"], label="second", bottom=first) ax.bar(idx, third, width=1.0, edgecolor="none", facecolor=colors["third"], label="third", bottom=second+first) ax.set_xlim(-1,max_idx+1) ax.set_xticks(idx+0.5) ax.set_xticklabels(idx) if ipred == 2: ax.set_xlabel("singular value") ax.set_ylabel("error variance") ax.legend(loc="upper right") plt.show() # - # Here we see the trade-off between getting a good fit to push down the null-space (1st) term and the penalty for overfitting (the rise of the solution space (2nd) term). The sum of the first two terms is the "apparent" error variance (e.g. the uncertainty that standard analyses would yield) without considering the contribution from the omitted parameters. You can verify this by checking the prior uncertainty from the Schur's complement notebook against the zero singular value result using only terms 1 and 2. # # We also see the added penalty for not adjusting the `mult1` and `mult2` parameters (3rd term). 
The ability to forecast the distance from the left edge of the model to the 10% saltwater concentration and to forecast the concentration at location 10 has been compromised by not adjusting `mult1` and `mult2` during calibration. # # Let's check the ```errvar``` results against the results from ```schur```. This is simple with ```pyemu```, we simply cast the ```errvar``` type to a ```schur``` type: schur = la.get(astype=pyemu.Schur) schur_prior = schur.prior_forecast schur_post = schur.posterior_forecast print("{0:10s} {1:>12s} {2:>12s} {3:>12s} {4:>12s}" .format("forecast","errvar prior","errvar min", "schur prior", "schur post")) for ipred, pred in enumerate(la.forecast_names): first = errvar_df[("first", pred)][:max_idx] second = errvar_df[("second", pred)][:max_idx] min_ev = np.min(first + second) prior_ev = first[0] + second[0] prior_sh = schur_prior[pred] post_sh = schur_post[pred] print("{0:12s} {1:12.6f} {2:12.6f} {3:12.6} {4:12.6f}" .format(pred,prior_ev,min_ev,prior_sh,post_sh)) # We see that the prior from the ```schur``` class matches the two-term ```errvar``` result at zero singular values. We also see, as expected, the posterior from ```schur``` is slightly lower than the two-term ```errvar``` result. This shows us that the "apparent" uncertainty in these predictions, as found through application of Bayes equation, is being underestimated because of the ill effects of the omitted ```mult1``` and ```mult2``` parameters.
examples/errvarexample_henry.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. The ASF licenses this
# file to you under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
# applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

# + colab={} colab_type="code" id="6rfq5oMWj5EY"
import datetime

import theano
import theano.tensor as tt
import pandas as pd
import numpy as np
import pymc3 as pm

# theano.config.gcc.cxxflags = "-Wno-c++11-narrowing"
theano.config.mode = "FAST_COMPILE"

# ADD THIS LOGIC TO THE APPLY_DELAY FUNCTION DIRECTLY
# def lognormal_tensor(x, mean, sigma):
#     dist = tt.exp(-((tt.log(x) - mean) ** 2) / (2 * sigma ** 2))
#     return dist / tt.sum(dist, axis=0)


def make_delay_matrix(n_rows, n_columns, initial_delay=0):
    """
    Build the (n_rows x n_columns) matrix whose entry [i, j] is the delay
    between input step i and output step j, i.e. initial_delay + (j - i).

    `initial_delay` is the top-left element; entries grow by one along each
    row and shrink by one down each column.
    """
    size = max(n_rows, n_columns)
    mat = np.zeros((size, size))
    # main diagonal and the upper diagonals: initial_delay + i
    for i in range(size):
        diagonal = np.ones(size - i) * (initial_delay + i)
        mat += np.diag(diagonal, i)
    # lower diagonals: initial_delay - i
    for i in range(1, size):
        diagonal = np.ones(size - i) * (initial_delay - i)
        mat += np.diag(diagonal, -i)
    return mat[:n_rows, :n_columns]


def delay_cases(new_I_t, len_new_I_t, len_out, delay, delay_diff):
    """
    Delays (time shifts) the input new_I_t by `delay`.

    Parameters
    ----------
    new_I_t : ~numpy.ndarray or theano vector
        Input to be delayed.
    len_new_I_t : integer
        Length of new_I_t (think len(new_I_t)). Make sure it is larger than
        len(cum_confirmed_positive) - delay, otherwise the simulated data is
        not long enough to be fitted to the data.
    len_out : integer
        Length of the output.
    delay : number
        If delay is an integer the array is shifted exactly; otherwise the
        data is shifted and interpolated (convolved with a hat function of
        width one). Take care that delay <= delay_diff, otherwise zeros are
        returned, which could potentially lead to errors.
    delay_diff : integer
        The difference in length between new_I_t and the output.

    Returns
    -------
    An array of length len_out that was time-shifted by `delay`.
    """
    # elementwise delay of input to output
    delay_mat = make_delay_matrix(
        n_rows=len_new_I_t, n_columns=len_out, initial_delay=delay_diff
    )
    inferred_cases = itplt(new_I_t, delay, delay_mat)
    return inferred_cases


def delay(rows, columns, delay=0):
    """
    Duplicate of make_delay_matrix (kept for backward compatibility): builds
    a (rows x columns) matrix of delays, with `delay` on the main diagonal
    and values following an arithmetic progression away from it.

    FIX: the original version computed the lower-diagonal values but never
    added them to `out`, leaving everything below the main diagonal at zero
    and disagreeing with make_delay_matrix. The accumulation is restored.
    """
    size = max(rows, columns)
    out = np.zeros((size, size))
    for i in range(size):
        del_new = np.ones(size - i) * (delay + i)
        out = out + np.diag(del_new, i)
    for i in range(1, size):
        del_new = np.ones(size - i) * (delay - i)
        # FIX: accumulate the lower diagonals (mirrors make_delay_matrix)
        out = out + np.diag(del_new, -i)
    return out[:rows, :columns]


# Function lognormal_tensor added to the function delayed
def itplt(arr, delay, datadelay):
    """
    Data smoothing: interpolated shift of `arr` by `delay` using the delay
    matrix `datadelay` (hat-function weights of width one).
    """
    itplt_data = tt.maximum(1 - tt.abs_(datadelay - delay), 0)
    dotprod = tt.dot(arr, itplt_data)
    return dotprod


def delay_cases_lognormal(
    input_arr,
    len_input_arr,
    len_output_arr,
    median_delay,
    scale_delay,
    delay_t,
):
    """
    Shift input_arr with a lognormal delay kernel (median `median_delay`,
    shape `scale_delay`), producing an output of length len_output_arr.
    """
    delay_mat = delay(
        rows=len_input_arr,
        columns=len_output_arr,
        delay=delay_t,
    )
    # needed because negative values lead to nans in the lognormal distribution
    delay_mat[delay_mat < 0.01] = 0.01
    delayed_arr = delayed(input_arr, median_delay, scale_delay, delay_mat)
    return delayed_arr


def delayed(arr, delay, delay_shape, datadelay):
    """
    apply_delay with a tt_lognormal kernel: weight each output step by a
    lognormal density over the delay matrix, normalised column-wise.
    """
    distribution = tt.exp(
        -((tt.log(datadelay) - np.log(delay)) ** 2) / (2 * delay_shape ** 2))
    arr2 = distribution / tt.sum(distribution, axis=0)
    return tt.dot(arr, arr2)


def infer_delayed(I_rate_t, I_tperiod, output_length, delay, diff_I_output):
    """
    Interpolated shift of I_rate_t by `delay`, output of length output_length.

    FIX: the parameter `delay` shadowed the module-level delay() function, so
    the original call delay(rows=..., ...) tried to call a number and raised
    TypeError. The delay matrix is now built with the equivalent
    make_delay_matrix(), avoiding the shadowed name.
    """
    delayed_initial = make_delay_matrix(
        n_rows=I_tperiod, n_columns=output_length, initial_delay=diff_I_output)
    delay_inferred = itplt(I_rate_t, delay, delayed_initial)
    return delay_inferred


def dist_smooth(v1, vk, t1, tk, t_total):
    """
    Piecewise-linear ramp over t_total days: value v1 before t1, v_k after
    t_k, linear in between (used to smooth change points).
    """
    t = np.arange(t_total)
    # Smoothing with delta values
    smooth = tt.clip((t - t1) / (tk - t1), 0, 1) * (vk - v1) + v1
    return smooth


# output = delay(rows=6, columns=4, delay=2)
# print(output)


def SIR_MOD(daily_positive_cases, ordered_list_of_gov_interventions, date1,
            constant_parameters):
    """
    Build the PyMC3 SIR model with change points for government interventions.

    Parameters
    ----------
    daily_positive_cases : array
        Observed daily new cases.
    ordered_list_of_gov_interventions : list of dict
        Priors per intervention period (first entry is the baseline); entries
        after the first also carry change-point date/length priors.
    date1 : datetime
        Start date of the simulation (change-point dates are referenced to it).
    constant_parameters : dict
        Scalar priors and simulation lengths (see keys used below).

    Returns
    -------
    The (un-sampled) pm.Model instance.
    """
    # Adding default values for when shapes and parameters are not defined in
    # the change point list:
    # for key, value in prior_information_0.items():
    #     if key not in prior_information:
    #         prior_information[key] = value
    # svi_theme_4_wt_avg = 2.0995
    svi_score = constant_parameters['svi_score']
    with pm.Model() as sim:
        # prior on initial infected, scaled down by the SVI score
        I_start = pm.HalfNormal(
            name='start_inf',
            sigma=constant_parameters['prior_beta_I_start'] / (1 + svi_score))

        # one lognormal spreading-rate prior per intervention period
        list_infections = []
        for i, sd_pt in enumerate(ordered_list_of_gov_interventions):
            list_infections.append(
                pm.Lognormal(
                    name=f'Inf_rate_{i}',
                    mu=np.log(sd_pt['prior_inf_rate_median']),
                    sigma=sd_pt['prior_inf_rate_sigma']))

        # priors on the start (in days since date1) of each transient
        cp_transient_list = []
        prev_date = date1
        for i, sd_pt in enumerate(ordered_list_of_gov_interventions[1:]):
            transient_start = sd_pt['prior_mean_transient']
            prior_mean = (transient_start - prev_date).days
            tr_start = pm.Normal(
                name=f"transient_start_{i}",
                mu=prior_mean,
                sigma=sd_pt["prior_variance_date_start_transient"],
            )
            cp_transient_list.append(tr_start)
            # NOTE(review): dt_before is never read and prev_date is never
            # updated, so every change point is referenced to date1 — confirm
            # this is intended.
            dt_before = transient_start

        # same for transient times
        tr_len_list = []
        for i, cp in enumerate(ordered_list_of_gov_interventions[1:]):
            tr_len = pm.Lognormal(
                name=f"transient_len_{i}",
                mu=np.log(cp["prior_median_transient_len"]),
                sigma=cp["prior_variance_transient_len"],)
            tr_len_list.append(tr_len)

        # time-dependent spreading rate: baseline plus one smooth ramp per
        # change point (each ramp moves the rate from the previous value to
        # the next one over the transient length)
        Inf_rate_t_list = [
            list_infections[0]
            * tt.ones(constant_parameters['num_days_simulation'])]
        Inf_rate_before = list_infections[0]
        for tr_start, tr_len, Inf_rate_after in zip(
                cp_transient_list, tr_len_list, list_infections[1:]):
            Inf_rate_t = dist_smooth(
                v1=0, vk=1, t1=tr_start, tk=tr_start + tr_len,
                t_total=constant_parameters['num_days_simulation'],
            ) * (Inf_rate_after - Inf_rate_before)
            Inf_rate_before = Inf_rate_after
            Inf_rate_t_list.append(Inf_rate_t)
        Inf_rate_t = sum(Inf_rate_t_list)

        # fraction of people that recover each day, recovery rate mu
        mu = pm.Lognormal(
            name="mu",
            mu=np.log(constant_parameters["prior_median_mu"]),
            sigma=constant_parameters["prior_variance_mu"],
        )

        # delay in days between contracting the disease and being recorded
        delay = pm.Lognormal(
            name="delay",
            mu=np.log(constant_parameters["prior_median_delay"]),
            sigma=constant_parameters["prior_variance_delay"],
        )

        # prior of the error of observed cases
        sigma_obs = pm.HalfNormal(
            "sigma_obs", sigma=constant_parameters["prior_beta_variance_obs"])

        # ------------------------------------------------------------------ #
        # training the model with loaded data provided as argument
        # ------------------------------------------------------------------ #
        S_start = constant_parameters['tot_pop'] - I_start
        # kept for parity with the original; the scan seed lives in
        # Model_simulation()
        new_I_0 = tt.zeros_like(I_start)
        S, I, new_I = Model_simulation(
            Inf_rate_t=Inf_rate_t, mu=mu, S_start=S_start, I_start=I_start,
            N=constant_parameters['tot_pop'])
        # Former inline scan implementation, superseded by Model_simulation():
        # def subsequent_day_parameters(Inf_rate_t, S_t, I_t, _, mu, tot_pop):
        #     new_I_t = Inf_rate_t / tot_pop * I_t * S_t
        #     S_t = S_t - new_I_t
        #     I_t = I_t + new_I_t - mu * I_t
        #     I_t = tt.clip(I_t, 0, tot_pop)  # for stability
        #     return S_t, I_t, new_I_t
        # outputs, _ = theano.scan(
        #     fn=subsequent_day_parameters,
        #     sequences=[Inf_rate_t],
        #     outputs_info=[S_start, I_start, new_I_0],
        #     non_sequences=[mu, constant_parameters['tot_pop']],)
        # S, I, new_I = outputs

        # shift the simulated new infections by the reporting delay
        new_cases_inferred = delay_cases(
            new_I_t=new_I,
            len_new_I_t=constant_parameters['num_days_simulation'],
            len_out=constant_parameters['num_days_simulation']
            - constant_parameters['diff_data_simulation'],
            delay=delay,
            delay_diff=constant_parameters['diff_data_simulation'],)
        new_cases_inferred_eff = new_cases_inferred

        num_days_data = daily_positive_cases.shape[-1]
        pm.StudentT(
            name="_new_cases_studentT",
            nu=4,
            mu=new_cases_inferred_eff[:num_days_data],
            # +1 and tt.abs to avoid nans
            sigma=tt.abs_(new_cases_inferred[:num_days_data] + 1) ** 0.5
            * sigma_obs,
            observed=daily_positive_cases,)

        pm.Deterministic("Inf_rate_t", Inf_rate_t)
        pm.Deterministic("new_cases", new_cases_inferred_eff)
        pm.Deterministic("new_cases_raw", new_cases_inferred)
    return sim


def Model_simulation(Inf_rate_t, mu, S_start, I_start, N):
    """
    Implements the susceptible-infected-recovered model.

    Parameters
    ----------
    Inf_rate_t : ~numpy.ndarray
        time series of spreading rate; its length sets the number of steps
        to run the model for
    mu : number
        recovery rate
    S_start : number
        initial number of susceptible at first time step
    I_start : number
        initial number of infected
    N : number
        population size

    Returns
    -------
    S : array
        time series of the susceptible
    I : array
        time series of the infected
    new_I : array
        time series of the new infected
    """
    new_I_0 = tt.zeros_like(I_start)

    def next_day(Inf_rate_t, S_t, I_t, _, mu, N):
        new_I_t = Inf_rate_t / N * I_t * S_t
        S_t = S_t - new_I_t
        I_t = I_t + new_I_t - mu * I_t
        I_t = tt.clip(I_t, 0, N)  # for stability
        return S_t, I_t, new_I_t

    # theano scan returns two tuples, the first one containing a time series
    # of what we give in outputs_info: S, I, new_I
    outputs, _ = theano.scan(
        fn=next_day,
        sequences=[Inf_rate_t],
        outputs_info=[S_start, I_start, new_I_0],
        non_sequences=[mu, N],
    )
    return outputs
# +
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="NsTyL1tGy50h" outputId="df321afb-af55-41f0-e5fd-055a04d91f70"
import os
import pandas as pd

os.getcwd()

# + colab={"base_uri": "https://localhost:8080/", "height": 391} colab_type="code" id="gWwJCtitSDuy" outputId="ff20af9a-7f50-4272-f6e4-8b77568e3f31"
# NOTE(review): hardcoded absolute Windows paths — consider a configurable
# DATA_DIR so the notebook runs on other machines.
data=pd.read_csv('C:\\Users\\Administrator\\Desktop\\COVID19_Master_File\\Data\\confirmed_cases_by_state_13_5.csv') #FOR STATE DATA
#data=pd.read_csv('C:\\Users\\Aman\\Documents\\covid_confirmed_usafacts_1604.csv') #FOR COUNTY DATA
population_lockdown_dates=pd.read_csv('C:\\Users\\Administrator\\Desktop\\COVID19_Master_File\\Data\\LockdownDates_populationMerged.csv')
country = 'United States'
# first column is 'Dates'; the rest are state codes
state_codes=data.columns[1:]
# keep only the last 40 days of observed data
date_data_begin = max(pd.to_datetime(data['Dates'], infer_datetime_format=True)).to_pydatetime() - datetime.timedelta(days=40)
date_data_end = max(pd.to_datetime(data['Dates'], infer_datetime_format=True)).to_pydatetime()
data2=data[pd.to_datetime(data['Dates'], infer_datetime_format=True)>=date_data_begin]
# 'YTA' placeholders (presumably "yet to announce" — confirm) are replaced
# with the last data date
population_lockdown_dates.loc[population_lockdown_dates['lockdown_date']=='YTA','lockdown_date']=date_data_end
# NOTE(review): this line writes 'lockdown_date' based on a 'partial_date'
# condition; it may have been meant to target 'partial_date' — confirm.
population_lockdown_dates.loc[population_lockdown_dates['partial_date']=='YTA','lockdown_date']=date_data_end
population_lockdown_dates.loc[population_lockdown_dates['partial_date']=='YTA','partial_date']=date_data_end
population_lockdown_dates['partial_date']=population_lockdown_dates['partial_date'].fillna(date_data_end)
population_lockdown_dates['lockdown_date']=population_lockdown_dates['lockdown_date'].fillna(date_data_end)
population_lockdown_dates['lockdown_date']=pd.to_datetime(population_lockdown_dates['lockdown_date'], infer_datetime_format=True)
# lockdowns before the data window are pushed to the end date
population_lockdown_dates.loc[population_lockdown_dates['lockdown_date']<date_data_begin,'lockdown_date']=date_data_end
population_lockdown_dates['partial_date']=pd.to_datetime(population_lockdown_dates['partial_date'], infer_datetime_format=True)
# a partial (mild) restriction cannot start after the full lockdown
population_lockdown_dates.loc[population_lockdown_dates['lockdown_date']<population_lockdown_dates['partial_date'],'partial_date']=population_lockdown_dates['lockdown_date']
population_lockdown_dates.dtypes
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
print(data.head())
#FOR STATE DATA (i should correspond to the state code we're running it for in state_code)
i= 34 #4: California; 31: NEw Jersey, 34: New York,20: Maryland, 22 : Michigan, 6 : Conneticut
state=state_codes[i]
print(state)
cases_obs = data2[state].values
population_lockdown_dates_sub=population_lockdown_dates.loc[population_lockdown_dates['State Code']==state]
population_lockdown_dates_sub
#prior_date_mild_dist_begin = datetime.datetime.utcfromtimestamp(population_lockdown_dates_sub['partial_date'].values[0].tolist()/1e9)- datetime.timedelta(days = 3)
#prior_date_mild_dist_begin
# days the simulation starts before the data (reporting-delay buffer)
diff_data_sim = 14
date_begin_sim = date_data_begin - datetime.timedelta(days = diff_data_sim)
# numpy datetime64 -> python datetime via its ns timestamp; the 3/2/1-day
# offsets place the prior means slightly before the announced dates
prior_date_mild_dist_begin = datetime.datetime.utcfromtimestamp(population_lockdown_dates_sub['partial_date'].values[0].tolist()/1e9)- datetime.timedelta(days = 3)
prior_date_strong_dist_begin = datetime.datetime.utcfromtimestamp(population_lockdown_dates_sub['partial_date'].values[0].tolist()/1e9)- datetime.timedelta(days = 2)
prior_date_contact_ban_begin = datetime.datetime.utcfromtimestamp(population_lockdown_dates_sub['lockdown_date'].values[0].tolist()/1e9)- datetime.timedelta(days = 1)
num_days_data = (date_data_end-date_data_begin).days
num_days_future = 110  # forecast horizon in days
date_begin_sim = date_data_begin - datetime.timedelta(days = diff_data_sim)
date_end_sim = date_data_end + datetime.timedelta(days = num_days_future)
num_days_sim = (date_end_sim-date_begin_sim).days
cases_obs = data2[state].values
svi_table = pd.read_csv('C:\\Users\\Administrator\\Desktop\\COVID19_Master_File\\Data\\SVI2018_US_COUNTY.csv')
#state name
state_code = state
#svi parameter name
svi_param = 'F_TOTAL'
svi_state_table = svi_table.loc[svi_table['ST_ABBR'] == state_code]
# theme score
# population-density-weighted average of the selected SVI parameter
svi_score = sum((svi_state_table['E_TOTPOP']/svi_state_table['AREA_SQMI'])*svi_state_table[svi_param])/sum(svi_state_table['E_TOTPOP']/svi_state_table['AREA_SQMI'])
# NOTE(review): magic scaling constant 15 — document its origin
svi_score = svi_score/15
print(svi_score)
#what if scenarios
#intervention attribute dictionary
###################################################################
#dictionary format for adding new government interventions#########
###################################################################
# NOTE(review): unused template dict; identifier misspelling kept (code)
intervetion_attributes = dict(
    #date of intervention begin
    prior_mean_transient=None,
    prior_variance_date_start_transient=None,
    #time for intervention to take effect
    prior_median_transient_len=None,
    prior_variance_transient_len=None,
    #infection rate
    prior_inf_rate_median= None,
    prior_inf_rate_sigma= None,
)
###################################################################
#define new government interventions###############################
###################################################################
#defining default prior
default_prior = {
    #date of intervention begin
    'prior_mean_transient':None,
    'prior_variance_date_start_transient':None,
    #time for intervention to take effect
    'prior_median_transient_len':None,
    'prior_variance_transient_len':None,
    #infection rate
    'prior_inf_rate_median': 0.4,
    'prior_inf_rate_sigma': 0.9}
#defining changept 2 - mild_social_distancing
mild_social_distancing = {
    #date of intervention begin
    'prior_mean_transient':prior_date_mild_dist_begin,
    'prior_variance_date_start_transient':3,
    #time for intervention to take effect
    'prior_median_transient_len':3,
    'prior_variance_transient_len':0.3,
    #infection rate
    'prior_inf_rate_median': 0.2,
    'prior_inf_rate_sigma': 0.5
}
#defining changept 3 - strong social distancing
strong_social_distancing = {
    #date of intervention begin
    'prior_mean_transient':prior_date_strong_dist_begin,
    'prior_variance_date_start_transient':1,
    #time for intervention to take effect
    'prior_median_transient_len':3,
    'prior_variance_transient_len':0.3,
    #infection rate
    'prior_inf_rate_median': 1/8,
    'prior_inf_rate_sigma': 0.5
}
#defining changept 4 - lockdown
lock_down = {
    #date of intervention begin
    'prior_mean_transient':prior_date_contact_ban_begin,
    'prior_variance_date_start_transient':1,
    #time for intervention to take effect
    'prior_median_transient_len':3,
    'prior_variance_transient_len':0.3,
    #infection rate
    'prior_inf_rate_median': 1/8/2,
    'prior_inf_rate_sigma': 0.5
}
ordered_list_of_gov_interventions = [
    default_prior, mild_social_distancing, strong_social_distancing, lock_down]
constant_parameters = {
    'tot_pop': population_lockdown_dates_sub['Population'].values[0],
    'svi_score' : svi_score,
    'prior_beta_I_start' : 100,
    'prior_median_mu' : 0.12,
    'prior_variance_mu' : 0.2,
    'prior_median_delay' : 5,
    'prior_variance_delay' : 0.2,
    'prior_beta_variance_obs' : 10,
    #simulation information
    'num_days_simulation' : num_days_sim,
    'diff_data_simulation' : diff_data_sim
}
# -

# fit nested intervention scenarios: scenario k uses the first k change-point
# dicts. NOTE(review): np.arange(1,4) yields 1..3, so the lock_down entry is
# never included, yet the labels used later include 'Total Lockdown' — confirm.
traces = []
models=[]
for scenarios in np.arange(1,4):
    model = SIR_MOD(daily_positive_cases= np.diff(cases_obs),
                    ordered_list_of_gov_interventions = ordered_list_of_gov_interventions[:scenarios],
                    date1 = date_begin_sim,
                    constant_parameters = constant_parameters)
    models.append(model)
    traces.append(pm.sample(model=model, init='advi', draws=600, cores=4 ))

# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="kJC3DH9omreF" outputId="f2b5563d-b08b-4c79-a41f-1103a241719d"
import matplotlib

# +
def truncate_number(number, precision):
    # format `number` as a string with `precision` decimal places
    return '{{:.{}f}}'.format(precision).format(number)

def print_median_CI(arr, prec = 2):
    # median and 95% credible interval of a sample array, formatted
    f_trunc = lambda n: truncate_number(n, prec)
    med = f_trunc(np.median(arr))
    perc1, perc2 = f_trunc(np.percentile(arr, q=2.5)), f_trunc(np.percentile(arr, q=97.5))
    return 'Median: {}\n95% CI: [{}, {}]'.format(med, perc1, perc2)

def conv_time_to_mpl_dates(arr):
    # day offsets relative to date_begin_sim -> matplotlib date numbers
    return matplotlib.dates.date2num([datetime.timedelta(days=float(date)) + date_begin_sim for date in arr])

traces_copy=traces
trace = traces[0]
posterior = traces
colors = ['tab:green']
#points = [ 'Mild Social Distancing', 'Strong Social Distancing','Total Lockdown']
points = [ 'Mild Social Distancing','Strong Social Distancing','Total Lockdown']
new_cases_past=pd.DataFrame()
new_cases_Future=pd.DataFrame()
new_cases_Future_percentiles=pd.DataFrame()
new_cases_Future_percentiles_DF=pd.DataFrame()
# per-scenario medians and 95% bands of daily new cases (past and forecast)
for trace_scen, point in zip(posterior, points):
    new_cases_past1 = trace_scen.new_cases[:,:num_days_data]
    new_cases_past[point]=np.median(new_cases_past1, axis=0)
    time2 = np.arange(0, num_days_future+1)
    mpl_dates_fut = conv_time_to_mpl_dates(time2) + diff_data_sim + num_days_data
    end_date = mpl_dates_fut[-10]
    cases_future = trace_scen['new_cases'][:, num_days_data:].T
    new_cases_Future[point] = np.median(cases_future, axis=-1)
    new_cases_Future_percentiles[point] = (
        np.percentile(cases_future, q=2.5, axis=-1),
        np.percentile(cases_future, q=97.5, axis=-1),
    )
    new_cases_Future_percentiles_DF[f'{point} - Lower Bound'] = new_cases_Future_percentiles[point][0]
    new_cases_Future_percentiles_DF[f'{point} - Upper Bound'] = new_cases_Future_percentiles[point][1]

# build the forecast date axis; NOTE(review): the extra +54-day shift is
# unexplained — confirm it lines the axis up with the data
Date_sim=[]
for i in range(1, len(time2)):
    Date_sim.append(date_begin_sim +datetime.timedelta(days=float(time2[i]))+datetime.timedelta(days=float(54)))
date_sim2=[]
for date in Date_sim:
    date_sim2.append(datetime.datetime.strftime(date, '%d/%m/%y'))
print(len(date_sim2[1:]))
date_sim3=date_sim2[1:]
new_cases_Future
# cumulative forecast: running sum of daily medians, offset by the last
# observed cumulative count
new_cases_Future_cum=new_cases_Future.cumsum(axis=0)
new_cases_Future_cum1=new_cases_Future_cum+cases_obs[-1]
new_cases_Future_cum1
new_cases_Future_cum2=new_cases_Future_cum1
new_cases_Future_cum2.index=date_sim2
new_cases_Future_cum2
#SAVE THIS
CUM_Cases_Future_percentiles=pd.DataFrame()
Cumulative_Cases=pd.DataFrame()
cum_cases_Future_percentiles_DF=pd.DataFrame()
# per-scenario cumulative-case bands (percentiles of cumulative trajectories)
for trace_scen, point in zip(posterior, points):
    new_cases_past = trace_scen.new_cases[:,:num_days_data]
    cum_cases = np.cumsum(new_cases_past, axis=1) + cases_obs[0]
    Cumulative_Cases[point]=np.median(cum_cases, axis=0)
    time2 = np.arange(0, num_days_future+1)
    mpl_dates_fut = conv_time_to_mpl_dates(time2) + diff_data_sim + num_days_data
    cases_future = np.cumsum(trace_scen['new_cases'][:, num_days_data:].T, axis=0) + cases_obs[-1]
    #cases_future = np.concatenate([np.ones((1,cases_future.shape[1]))*cases_obs[-1], cases_future], axis=0)
    #Cumulative_Cases[legend] = np.median(cases_future, axis=-1)
    CUM_Cases_Future_percentiles[point] = (
        np.percentile(cases_future, q=2.5, axis=-1),
        np.percentile(cases_future, q=97.5, axis=-1),)
    cum_cases_Future_percentiles_DF[f'{point}- Lower Bound'] = CUM_Cases_Future_percentiles[point][0]
    cum_cases_Future_percentiles_DF[f'{point} - Upper Bound'] = CUM_Cases_Future_percentiles[point][1]

cum_cases_Future_percentiles_DF.index=date_sim2
cum_cases_Future_percentiles_DF
# merge the bounds and the cumulative medians on the date string and export
cum_cases_Future_percentiles_DF['Date']=cum_cases_Future_percentiles_DF.index.astype('str')
new_cases_Future_cum2['Date']=new_cases_Future_cum2.index.astype('str')
FINAL_DF=pd.merge(cum_cases_Future_percentiles_DF, new_cases_Future_cum2, on="Date")
FINAL_DF.index=FINAL_DF['Date']
FINAL_DF2=FINAL_DF.copy()
#FINAL_DF2.to_csv('California_0804_Projection_Data2_1.csv')
#FINAL_DF2.to_csv('California_2604_Projection_Data2_1.csv')
#FINAL_DF2.to_csv('N_1604_Projection_Data2_1.csv')
FINAL_DF2.head()
FINAL_DF2.to_csv(f'C:\\Users\\Administrator\\Desktop\\COVID19_Master_File\\Data\\{state}_State_new_code_projection_13_05_AllScenarios.csv')
# -

# Authors: <NAME> (Cognizant), <NAME> (Cognizant)
#
# Copyright {2020} Cognizant Technology Solutions
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
New_Code_run_SVI_13_5_NY.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Shallow neural-network classification with PaddlePaddle
#
# This lab uses PaddlePaddle to build a deeper network for the cat / non-cat
# image classification task; the data are the same as in the "Logistic
# regression with PaddlePaddle" lab. Compared with that lab, hidden layers
# with different sizes and ReLU activations are added, and the number of
# training passes and the learning rate are changed.
#
# ## 1 - Imports
#
# - numpy: basic Python library for scientific computing
# - dnn_utils: helper utilities (dataset loading)
# - paddle.v2: the PaddlePaddle deep-learning platform
# - matplotlib.pyplot: plotting, used for the accuracy check and cost curve

# +
import matplotlib
import numpy as np
import paddle.v2 as paddle
import matplotlib.pyplot as plt
import dnn_utils

# module-level globals, filled in by load_data()
TRAINING_SET = None
TEST_SET = None
DATADIM = None
# -

# ## 2 - Data preprocessing
#
# Same data as the earlier lab:
#
# - training set: m_train images labelled cat (y=1) or non-cat (y=0)
# - test set: m_test images with the same labelling

# load the data (cat/non-cat)
def load_data():
    """
    Load the dataset and fill the module globals.

    Items loaded:
    train_set_x_orig -- raw training images
    train_set_y      -- raw training labels
    test_set_x_orig  -- raw test images
    test_set_y       -- raw test labels
    classes (cat/non-cat) -- list of classes

    Side effects: sets TRAINING_SET / TEST_SET to arrays whose rows are the
    flattened, [0, 1]-scaled images with the label appended as the last
    column, and DATADIM to the flattened image dimensionality.
    """
    global TRAINING_SET, TEST_SET, DATADIM
    train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = dnn_utils.load_dataset()
    m_train = train_set_x_orig.shape[0]
    m_test = test_set_x_orig.shape[0]
    num_px = train_set_x_orig.shape[1]
    # input dimensionality: height * width * 3 colour channels
    DATADIM = num_px * num_px * 3
    # flatten each image to a row vector; for convenience no .T transpose here
    train_set_x_flatten = train_set_x_orig.reshape(m_train, -1)
    test_set_x_flatten = test_set_x_orig.reshape(m_test, -1)
    # normalise pixel values to [0, 1]
    train_set_x = train_set_x_flatten / 255.
    test_set_x = test_set_x_flatten / 255.
    # append the labels as the last column
    TRAINING_SET = np.hstack((train_set_x, train_set_y.T))
    TEST_SET = np.hstack((test_set_x, test_set_y.T))

# ## 3 - Readers
#
# Two reader() factories read TRAINING_SET and TEST_SET. `yield` makes
# reader() a generator, so samples are produced lazily (memory-friendly and
# matching PaddlePaddle's reader contract) instead of materialising the
# whole list.

# +
# read training or test data; serves train() and test()
def read_data(data_set):
    """
    Build a reader over `data_set`.

    Returns
    -------
    reader -- generator factory yielding (features, label) pairs, where
    data[:-1] is the feature vector and data[-1:] the label.
    """
    def reader():
        for data in data_set:
            yield data[:-1], data[-1:]
    return reader

# training-set reader
def train():
    """Reader over the training set and its labels."""
    global TRAINING_SET
    return read_data(TRAINING_SET)

# test-set reader
def test():
    """Reader over the test set and its labels."""
    global TEST_SET
    return read_data(TEST_SET)
# -

# ## 4 - Network configuration
#
# Unlike the logistic-regression lab, this network has three hidden fully
# connected layers (20, 7 and 5 units, ReLU activations) and a sigmoid
# output unit.
#
# **Cost**: PaddlePaddle's cross-entropy cost
# (multi_binary_label_cross_entropy_cost) over y_predict and the label;
# parameters are then created from the cost with paddle.parameters.create.
#
# **Optimizer**: Momentum with momentum=0 and learning_rate=0.000075
# (the value actually used in the code below).
#
# **Other**: feeding={'image': 0, 'label': 1} maps data-layer names to array
# indices for feeding data during training.

# configure the network structure and create parameters/optimizer
def netconfig():
    """
    Configure the network and set up training objects.

    Returns a list with:
    image      -- input layer, DATADIM-dimensional dense vector
    y_predict  -- output layer with sigmoid activation
    y_label    -- label data layer, 1-dimensional dense vector
    cost       -- cross-entropy cost
    parameters -- model parameters created from the cost
    optimizer  -- Momentum optimizer
    feeding    -- data-layer name -> array index mapping (dict)
    """
    # input layer: data layer named 'image', DATADIM-dim dense vector
    image = paddle.layer.data(
        name='image', type=paddle.data_type.dense_vector(DATADIM))
    # hidden layer 1: fully connected, 20 units, ReLU
    h1 = paddle.layer.fc(
        input=image, size=20, act=paddle.activation.Relu())
    # hidden layer 2: fully connected, 7 units, ReLU
    h2 = paddle.layer.fc(
        input=h1, size=7, act=paddle.activation.Relu())
    # hidden layer 3: fully connected, 5 units, ReLU
    h3 = paddle.layer.fc(
        input=h2, size=5, act=paddle.activation.Relu())
    # output layer: fully connected, 1 unit, sigmoid
    y_predict = paddle.layer.fc(
        input=h3, size=1, act=paddle.activation.Sigmoid())
    # label layer: data layer named 'label', 1-dim dense vector
    y_label = paddle.layer.data(
        name='label', type=paddle.data_type.dense_vector(1))
    # cross-entropy cost between prediction and label
    cost = paddle.layer.multi_binary_label_cross_entropy_cost(input=y_predict, label=y_label)
    # create parameters from the cost
    parameters = paddle.parameters.create(cost)
    # Momentum optimizer; momentum=0, learning rate 0.000075
    optimizer = paddle.optimizer.Momentum(momentum=0, learning_rate=0.000075)
    # data-layer / array-index mapping used when feeding the trainer
    feeding = {
        'image': 0,
        'label': 1}
    data = [image, y_predict, y_label, cost, parameters, optimizer, feeding]
    return data

# ## 5 - Training
#
# **Initialisation**: paddle.init(use_gpu=False, trainer_count=1) trains on
# CPU with a single trainer; load_data() fetches and preprocesses the data.

# +
# initialise
paddle.init(use_gpu=False, trainer_count=1)
# fetch and preprocess the data
load_data()
# configure the network and create parameters
image, y_predict, y_label, cost, parameters, optimizer, feeding = netconfig()
# record the training cost
costs = []
# -

# **Model training**
#
# An SGD trainer is built from cost, parameters and the update rule, then
# trainer.train() runs the actual training:
# - paddle.reader.shuffle(train(), buf_size=5000) shuffles 5000 samples at a
#   time from the train() reader
# - paddle.batch(reader(), batch_size=256) draws batches of 256 from the
#   shuffled data
# - feeding maps the image and label data layers to the training data
# - event_handler reacts to training events (logging, checkpointing)
# - num_passes=3000 stops training after 3000 passes

# +
def event_handler(event):
    """
    Training event handler.

    Every 100 passes: print the current cost, append it to the module-level
    `costs` list, and snapshot the parameters to params_pass_<n>.tar.

    Args:
        event -- event object carrying event.pass_id, event.batch_id,
                 event.cost, etc.
    """
    if isinstance(event, paddle.event.EndIteration):
        if event.pass_id % 100 == 0:
            print("Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, event.cost))
            costs.append(event.cost)
            with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
                parameters.to_tar(f)

# SGD trainer configured with cost, parameters and the update rule
trainer = paddle.trainer.SGD(
    cost=cost, parameters=parameters, update_equation=optimizer)

# shuffle 5000 samples at a time, train in batches of 256 for 3000 passes
trainer.train(
    reader=paddle.batch(
        paddle.reader.shuffle(train(), buf_size=5000), batch_size=256),
    feeding=feeding,
    event_handler=event_handler,
    num_passes=3000)
# -

# **Model validation**
#
# get_data() materialises a reader into plain lists so paddle.infer can
# consume them.

# +
# collect a reader's samples for inference
def get_data(data_creator):
    """
    Materialise `data_creator` (train() or test()) into a dict.

    Returns
    -------
    result -- dict with 'image' (list of 1-tuples of feature vectors) and
              'label' (list of labels).
    """
    data_creator = data_creator
    data_image = []
    data_label = []
    for item in data_creator():
        # paddle.infer expects each sample wrapped in a tuple
        data_image.append((item[0],))
        data_label.append(item[1])
    result = {
        "image": data_image,
        "label": data_label
    }
    return result
# -

# With the data collected, paddle.infer() produces predictions:
# output_layer is the output layer, parameters the trained parameters and
# input the data to score.

# +
# materialise both sets to validate the model
train_data = get_data(train())
test_data = get_data(test())
# predict on train and test data
probs_train = paddle.infer(
    output_layer=y_predict, parameters=parameters, input=train_data['image']
)
probs_test = paddle.infer(
    output_layer=y_predict, parameters=parameters, input=test_data['image']
)
# -

# The probabilities are thresholded at 0.5 and compared with the labels;
# calc_accuracy() computes the accuracy for either set.

# compute accuracy
def calc_accuracy(probs, data):
    """
    Accuracy (in percent) of predictions `probs` against data['label'].

    Note: a predicted probability of exactly 0.5 matches neither branch and
    is counted as incorrect.
    """
    right = 0
    total = len(data['label'])
    for i in range(len(probs)):
        if float(probs[i][0]) > 0.5 and data['label'][i] == 1:
            right += 1
        elif float(probs[i][0]) < 0.5 and data['label'][i] == 0:
            right += 1
    accuracy = (float(right) / float(total)) * 100
    return accuracy

# print train and test accuracy
print("train_accuracy: {} %".format(calc_accuracy(probs_train, train_data)))
print("test_accuracy: {} %".format(calc_accuracy(probs_test, test_data)))

# **Learning curve**
#
# Plot the recorded costs to analyse convergence.

costs = np.squeeze(costs)
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate = 0.000075")
plt.show()

# The cost converges quickly at first; as iterations accumulate the
# convergence slows, finally settling at a small value.
jupyter/5.deep_neural_network/Multi_nn_paddle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:generic_expression] * # language: python # name: conda-env-generic_expression-py # --- # # Analyze generic genes and pathways # # This notebook uses the DEG and GSEA statistics obtained from the previous notebooks [3_gene_DE_analysis](3_gene_DE_analysis.ipynb) and [4_pathway enrichment analysis](4_pathway_enrichment_analysis.ipynb) to: # 1. Determine if our simulation approach can identify a set of generic genes and pathways # 2. Compare our set of generic genes and pathways with what has been previously reported # + # %load_ext autoreload # %load_ext rpy2.ipython # %autoreload 2 import os import sys import pickle import pandas as pd import numpy as np import random import warnings import rpy2.robjects import seaborn as sns from scipy import stats from plotnine import (ggplot, labs, geom_point, aes, ggsave, theme_bw, theme, xlim, ylim, facet_wrap, scale_color_manual, guides, guide_legend, element_blank, element_text, element_rect, element_line, coords) def fxn(): warnings.warn("deprecated", DeprecationWarning) with warnings.catch_warnings(): warnings.simplefilter("ignore") fxn() from ponyo import utils from generic_expression_patterns_modules import calc from numpy.random import seed randomState = 123 seed(randomState) # + # Read in config variables base_dir = os.path.abspath(os.path.join(os.getcwd(),"../")) config_file = os.path.abspath(os.path.join(base_dir, "config_human.tsv")) params = utils.read_config(config_file) # - # Load params local_dir = params["local_dir"] col_to_rank = params['col_to_rank'] # + # Input files gene_summary_file = os.path.join( local_dir, "gene_summary_table_"+col_to_rank+".tsv") pathway_summary_file = os.path.join( local_dir, "pathway_summary_table_"+col_to_rank+".tsv") # - # ## Generic genes # Studies have found that there are some genes that are 
more likely to be differentially expressed even across a wide range of experimental designs. These *generic genes* are not necessarily specific to the biological process being studied but instead represents a more systematic change. We want to compare the ability to detect these generic genes using our method vs those found by Crow et. al. # ### Map gene ids # Our gene ids are ensembl while the published gene ids are using hgnc symbols. We need to map ensembl to hgnc ids in order to compare results. # + # #%%R #suppressWarnings(library("biomaRt")) # + # #%%R -i gene_summary_file -o gene_id_mapping # Convert gene ids from ensembl (ours) to entrez (DE_prior) #source('../generic_expression_patterns_modules/GSEA_analysis.R') #gene_id_mapping <- get_ensembl_symbol_mapping(gene_summary_file) # + # Set ensembl id as index #gene_id_mapping.set_index("ensembl_gene_id", inplace=True) #print(gene_id_mapping.shape) #gene_id_mapping.head() # + # Save #gene_id_file = os.path.join( # local_dir, # "ensembl_hgnc_mapping.tsv") #gene_id_mapping.to_csv(gene_id_file, float_format='%.5f', sep='\t') # - """# Read data gene_stats = pd.read_csv( gene_summary_file, header=0, sep='\t', index_col=0) print(gene_stats.shape) sample_gene_id = gene_stats.index[0].split(".")[0] gene_stats.head()""" """# Read file mapping ensembl ids to hgnc symbols gene_id_file = os.path.join( local_dir, "ensembl_hgnc_mapping.tsv") gene_id_mapping = pd.read_csv( gene_id_file, header=0, sep='\t', index_col=0) gene_id_mapping.set_index("ensembl_gene_id", inplace=True) gene_id_mapping.head()""" """# Replace ensembl ids with gene symbols # Only replace if ensembl ids exist if sample_gene_id in list(gene_id_mapping.index): print("replacing ensembl ids") utils.replace_ensembl_ids(gene_summary_file, gene_id_mapping)""" # ### Our DEGs # Genes are ranked by their adjusted p-value and the median rank reported across 25 simulated experiments is shown in column `Rank (simulated)`. 
# + # Read data gene_stats = pd.read_csv( gene_summary_file, header=0, sep='\t', index_col=0) gene_stats.head() # - # Get list of our genes gene_ids = list(gene_stats.index) # ### Published DEGs # These DEGs are based on the [Crow et. al. publication](https://www.pnas.org/content/pnas/116/13/6491.full.pdf). Their genes are ranked 0 = not commonly DE; 1 = commonly DE. Genes by the number differentially expressed gene sets they appear in and then ranking genes by this score. # + # Get generic genes identified by Crow et. al. DE_prior_file = "https://raw.githubusercontent.com/maggiecrow/DEprior/master/DE_Prior.txt" DE_prior = pd.read_csv(DE_prior_file, header=0, sep="\t") DE_prior.head() # - # Get list of published generic genes published_generic_genes = list(DE_prior['Gene_Name']) # ### Compare DEG ranks # + # Get intersection of gene lists shared_genes = set(gene_ids).intersection(published_generic_genes) print(len(shared_genes)) ## CHECK NUMBERS OF GENES # + # Load shared genes #shared_genes_file = os.path.join( # local_dir, # "shared_gene_ids.pickle") #shared_genes = pickle.load(open(shared_genes_file, "rb" )) #print(len(shared_genes)) # + # check that all our genes are a subset of the published ones, no genes unique to ours # - # Get rank of shared genes our_gene_rank_df = pd.DataFrame(gene_stats.loc[shared_genes,'Rank (simulated)']) print(our_gene_rank_df.shape) our_gene_rank_df.head() # + # Merge published ranking shared_gene_rank_df = pd.merge(our_gene_rank_df, DE_prior[['DE_Prior_Rank','Gene_Name']], left_index=True, right_on='Gene_Name') shared_gene_rank_df.set_index('Gene_Name', inplace=True) print(shared_gene_rank_df.shape) shared_gene_rank_df.head() # - # Scale published ranking to our range max_rank = max(shared_gene_rank_df['Rank (simulated)']) shared_gene_rank_df['DE_Prior_Rank'] = round(shared_gene_rank_df['DE_Prior_Rank']*max_rank) shared_gene_rank_df.head() # Get top ranked genes by both methods shared_gene_rank_df[(shared_gene_rank_df['Rank 
(simulated)']>17500) & (shared_gene_rank_df['DE_Prior_Rank']>17500)] # Get low ranked genes by both methods shared_gene_rank_df[(shared_gene_rank_df['Rank (simulated)']<300) & (shared_gene_rank_df['DE_Prior_Rank']<300)] # + # Plot our ranking vs published ranking fig_file = os.path.join( local_dir, "gene_ranking_"+col_to_rank+".svg") fig = sns.jointplot(data=shared_gene_rank_df, x='Rank (simulated)', y='DE_Prior_Rank', kind='hex', marginal_kws={'color':'white'}) fig.set_axis_labels("Our preliminary method", "DE prior (Crow et. al. 2019)", fontsize=14) fig.savefig(fig_file, format='svg', bbox_inches="tight", transparent=True, pad_inches=0, dpi=300,) # - # ### Calculate correlation # + # Get correlation r, p, ci_high, ci_low = calc.spearman_ci(0.95, shared_gene_rank_df, 1000) print(r, p, ci_high, ci_low) # - # ## Generic pathways """ # Read data pathway_stats = pd.read_csv( pathway_summary_file, header=0, sep='\t', index_col=0) pathway_stats.head()""" """# Define what are the set of generic genes generic_pathway_data = pathway_stats.sort_values(by="Z score", ascending=True)[0:10] generic_pathway_data.head()""" # + # Manually compare against Powers et. al publication # https://academic.oup.com/bioinformatics/article/34/13/i555/5045793
human_general_analysis/archive/5_analyze_generic_genes_and_pathways.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:.conda-ngaml]
#     language: python
#     name: conda-env-.conda-ngaml-py
# ---

# # High-level CNN Keras (TF) Example
# *Modified by <NAME> (jordancaraballo)*
#
# Benchmark script: trains a small CNN on CIFAR data (channels-first,
# 3x32x32 inputs) with standalone Keras on a TensorFlow backend, then
# reports test-set accuracy.

# +
import os
import sys
import numpy as np

# Must be set before keras is imported — keras reads this env var at import time.
os.environ['KERAS_BACKEND'] = "tensorflow"

import warnings

# make notebook more readable and nice
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False

import keras as K
import tensorflow
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# NOTE(review): Dropout is imported twice (also on the line above) — redundant but harmless.
from keras.layers import Conv2D, MaxPooling2D, Dropout
# Star imports — presumably these supply N_CLASSES, LR, MOMENTUM, BATCHSIZE,
# cifar_for_library, get_gpu_name, get_cuda_version, get_cudnn_version.
# TODO confirm against common/params.py and common/utils.py.
from common.params import *
from common.utils import *
# -

# Force one-gpu
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Performance Improvement
# 1. Make sure channels-first (not last)
K.backend.set_image_data_format('channels_first')

# Environment report — useful when comparing benchmark timings across machines.
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Keras: ", K.__version__)
print("Numpy: ", np.__version__)
print("Tensorflow: ", tensorflow.__version__)
print(K.backend.backend())
print(K.backend.image_data_format())
print("GPU: ", get_gpu_name())
print(get_cuda_version())
print("CuDNN Version ", get_cudnn_version())


def create_symbol(n_classes=N_CLASSES):
    """Build the CNN architecture.

    Two convolutional blocks (2x Conv2D -> max-pool -> dropout) followed by
    a dense classifier head. Expects channels-first 3x32x32 inputs, matching
    the global `set_image_data_format('channels_first')` above.

    Args:
        n_classes: width of the final softmax layer (defaults to N_CLASSES
            from common.params).

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()
    # Block 1: 50 filters.
    model.add(Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(3, 32, 32)))
    model.add(Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))
    # Block 2: 100 filters.
    model.add(Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    return model


def init_model(m, lr=LR, momentum=MOMENTUM):
    """Compile a model with SGD(+momentum) and categorical cross-entropy.

    Args:
        m: an uncompiled keras model.
        lr: learning rate (defaults to LR from common.params).
        momentum: SGD momentum term (defaults to MOMENTUM from common.params).

    Returns:
        The same model object, compiled in place.
    """
    m.compile(
        loss = "categorical_crossentropy",
        optimizer = K.optimizers.SGD(lr, momentum),
        metrics = ['accuracy'])
    return m


# %%time
# Data into format for library
x_train, x_test, y_train, y_test = cifar_for_library(channel_first=True, one_hot=True)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)

# %%time
# Load symbol
sym = create_symbol()

# %%time
# Initialise model
model = init_model(sym)

model.summary()

# %%time
# Main training loop: 1m16s
# NOTE(review): EPOCHS is bound here; if common.params also defines EPOCHS,
# this deliberately overrides it for the 40-epoch benchmark run.
EPOCHS=40
model.fit(x_train, y_train, batch_size=BATCHSIZE, epochs=EPOCHS, verbose=1)

# %%time
# Main evaluation loop
y_guess = model.predict(x_test, batch_size=BATCHSIZE)
# Collapse one-hot predictions and labels to class indices before comparing.
y_guess = np.argmax(y_guess, axis=-1)
y_truth = np.argmax(y_test, axis=-1)
print("Accuracy: ", 1.*sum(y_guess == y_truth)/len(y_guess))
notebooks/benchmarks/Keras_TF_CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Dataset loading
#
# Baseline manual model vs. TPOT automated pipeline search on the Iris dataset.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rcParams

# Cleaner figures: drop the top/right axis spines globally.
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False

df = pd.read_csv('data/iris.csv')
df.head()

# <br><br>
#
# ## Exploratory Data Analysis

# Missing-value check per column.
df.isnull().sum()

# Target distribution — the three species are balanced in this dataset.
ax = df.groupby('species').count().plot(kind='bar', figsize=(10, 6), fontsize=13, color='#4f4f4f')
ax.set_title('Iris Dataset target variable distribution', size=20, pad=30)
ax.set_ylabel('Count', fontsize=14)
ax.set_xlabel('Species', fontsize=14)
ax.get_legend().remove()

plt.figure(figsize=(12, 9))
plt.title('Correlation matrix', size=20)
sns.heatmap(df.corr(), annot=True, cmap='Blues');

# <br><br>
#
# ## Modeling

# +
from sklearn.model_selection import train_test_split

X = df.drop('species', axis=1)
y = df['species']

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=3
)

y_train.shape, y_test.shape

# +
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

# Manual baseline: plain logistic regression.
lm = LogisticRegression(random_state=42)
lm.fit(X_train, y_train)

# BUG FIX: predictions must come from the fitted estimator `lm`;
# the original called `dt.predict(...)` on an undefined name `dt`,
# which raised a NameError.
lm_preds = lm.predict(X_test)

print(confusion_matrix(y_test, lm_preds))

# +
from sklearn.metrics import accuracy_score

print(accuracy_score(y_test, lm_preds))

# +
from tpot import TPOTClassifier

# Automated pipeline search; capped at 10 minutes for reproducible runtime.
pipeline_optimizer = TPOTClassifier(
    scoring='accuracy',
    max_time_mins=10,
    random_state=42,
    verbosity=2
)
pipeline_optimizer.fit(X_train, y_train)

# +
tpot_preds = pipeline_optimizer.predict(X_test)
accuracy_score(y_test, tpot_preds)

# -

# Inspect the best pipeline TPOT found, then export it as a standalone script.
pipeline_optimizer.fitted_pipeline_

pipeline_optimizer.export('iris_pipeline.py')
Chapter04/001_IrisDataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Entropy Analysis # ## Prolog # ### Imports # + from importlib import reload from math import log import numpy as np # Numeric Python import scipy.stats as stats # Distribution functions and stuff from scipy.optimize import minimize import sqlite3 as sql # To fetch data import analysis # Own analysis tools reload(analysis); # force reload of analysis, for it will be changed often import seaborn as sb # Plots import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams["figure.figsize"] = analysis.a4_dims import random import warnings warnings.filterwarnings('ignore') # - # ### Table Schemes analysis.print_table_schemes( 'data/k3-v500-r4.1.db', 'experiment', 'algorithm_run', 'search_run', 'dist_1', 'dist_2' ) # ## Analysis # ### Entropy Distribution # + query = """ SELECT search_run.flips, search_run.{} FROM algorithm_run INNER JOIN search_run ON search_run.algorithm_run_id = algorithm_run.id WHERE algorithm_run.experiment_id = ? 
AND search_run.last_unsat {} """ samples = 2 bins_1 = np.arange(4.0,6.25,0.05) bins_2 = np.arange(6.0,10.25,0.05) pdf = stats.norm.pdf bounds = [(0.0001,None),(0.0001,None)] theta_0 = lambda X: [np.average(X), np.var(X)] with sql.connect('data/k3-v500-r4.2.db') as conn: c = conn.cursor() ids, = zip(*c.execute('SELECT id FROM experiment')) # Get experiment indices ids = random.sample(ids, samples) # Choose three experiments randomly print(list(c.execute(query.format('h_1', '= 0'),(1,)))) div = (lambda stuff: stuff[1]/stuff[0]) #div = (lambda stuff: stuff[1]) XS_sat = [list(map(div,c.execute(query.format('h_1', '= 0'),(exp_id,)))) for exp_id in ids] YS_sat = [list(map(div,c.execute(query.format('h_2', '= 0'),(exp_id,)))) for exp_id in ids] XS_unsat = [list(map(div,c.execute(query.format('h_1', '> 0'),(exp_id,)))) for exp_id in ids] YS_unsat = [list(map(div,c.execute(query.format('h_2', '> 0'),(exp_id,)))) for exp_id in ids] print(YS_unsat) #figX, axesX = plt.subplots(1,samples) #for i,X in enumerate(XS_sat): # sb.distplot(X, label = 'Success', ax = axesX[i], hist=True, bins=bins_1) #res = minimize( # fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), X), # x0 = theta_0(X), # bounds = bounds, #) #if res.success: # loc, scale = res.x # axesX[i].plot(bins_1, np.vectorize(lambda x: pdf(x, loc, scale))(bins_1)) #else: # print(loc, scale) #for i,X in enumerate(XS_unsat): # sb.distplot(X, label = 'Failure', ax = axesX[i], hist=True, bins=bins_1) #res = minimize( # fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), X), # x0 = theta_0(X), # bounds = bounds, #) #if res.success: # loc, scale = res.x # axesX[i].plot(bins_1, np.vectorize(lambda x: pdf(x, loc, scale))(bins_1)) #else: # print(loc, scale) #plt.legend() figY, axesY = plt.subplots(1,samples) for i,Y in enumerate(YS_sat): sb.distplot(Y, label = 'Success',ax = axesY[i], hist=True) #res = minimize( # fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), Y), # x0 = 
theta_0(Y), # bounds = bounds, #) #if res.success: # loc, scale = res.x # axesY[i].plot(bins_2, np.vectorize(lambda x: pdf(x, loc, scale))(bins_2)) #else: # print(loc, scale) for i,Y in enumerate(YS_unsat): sb.distplot(Y, label = 'Failure',ax = axesY[i], hist=True) #res = minimize( # fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), Y), # x0 = theta_0(Y), # bounds = bounds, #) #if res.success: # loc, scale = res.x # axesY[i].plot(bins_2, np.vectorize(lambda x: pdf(x, loc, scale))(bins_2)) #else: # print(loc, scale) plt.legend() # - for i,x in enumerate([11,33,44]): print(i,x)
analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt

# Uniform distribution: a single fair die.
valores = np.arange(1,7)
probas = np.zeros(6) + 1/6
plt.bar(valores, probas)
plt.title('Distribución de probabilidad uniforme: lanzamiento de un dado')
plt.show()

# Two normal samples with the same mean but different spread.
mu = 2.0
sigma_1 = 5.0
sigma_2 = 2.0
muestras_1 = np.random.normal(loc = mu, scale = sigma_1, size = 1000)
muestras_2 = np.random.normal(loc = mu, scale = sigma_2, size = 1000)
print(muestras_1, muestras_2)

plt.hist(muestras_1, bins = 20, alpha = 0.5, label = 'Histrograma Muestra 1')
plt.hist(muestras_2, bins = 20, alpha = 0.5, label = 'Histrograma Muestra 2')
plt.legend()
plt.grid()
plt.show()

# NOTE: this cell is duplicated inside the next one; kept for parity with the
# original notebook flow.
mu = 8.5
sigma = 3.0
muestras = np.random.normal(loc = mu, scale = sigma, size = 100)

# +
# Empirical histogram vs. the theoretical normal density.
mu = 8.5
sigma = 3.0
muestras = np.random.normal(loc = mu, scale = sigma, size = 100)

from scipy.stats import norm

plt.hist(muestras, bins=20, density=True, alpha=0.6, color='g')
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, sigma)
plt.plot(x, p, 'k', linewidth=2, label = 'Distribución Teórica')
title = "Muestras obtenidas de una distribución normal con mu = %.2f, sigma = %.2f" % (mu, sigma)
plt.title(title)
plt.legend()
plt.show()
# -

# +
# Distribution of the SUM of two fair dice, built by convolving the two
# uniform distributions.
# BUG FIX: the original loop initialized i=0 but never incremented it, so it
# recomputed the single point (sum 2, probability 1/36) on every iteration and
# re-plotted that one bar six times. This version accumulates the full
# probability of every possible sum (2..12) and plots the distribution once,
# matching the correct nested-loop version at the end of this notebook.
valores = np.arange(1,7)
probas = np.zeros(6) + 1/6
valores2 = np.arange(1,7)
probas2 = np.zeros(6) + 1/6

valores3 = np.arange(2, 13)            # possible sums of two dice
probas3 = np.zeros(valores3.size)
for i in range(valores.size):
    for j in range(valores2.size):
        # P(sum = valores[i] + valores2[j]) accumulates probas[i] * probas2[j]
        probas3[valores[i] + valores2[j] - 2] += probas[i] * probas2[j]

plt.bar(valores3, probas3)
plt.title('Distribución de probabilidad de la suma de dos dados')
plt.show()
# -

# +
# Uncorrelated variables with very different variances.
n = 1000
sigma_1 = 10
sigma_2 = 1000

x = np.random.normal(size = n, scale = sigma_1)
y = np.random.normal(size = n, scale = sigma_2)

# Plot
plt.scatter(x, y)
plt.grid()
plt.xlim([-60,60])
plt.ylim([-60,60])
plt.show()
# -

# Covariance computed by hand from its definition.
cov = np.sum((x - x.mean())*(y - y.mean()))/x.size
print(cov)
corr = cov/(x.std()*y.std()) print(corr) # + n = 100 x = np.linspace(-1,1,n) + 0.25*np.random.normal(size = n) y = 1.2*x + 0.25*np.random.normal(size = n) # Graficamos plt.scatter(x, y) plt.grid() plt.show() # - cov = np.sum((x - x.mean())*(y - y.mean()))/x.size print(cov) corr = cov/(x.std()*y.std()) print(corr) import pandas as pd data = pd.read_csv('DS_Bitácora_04_Iris.csv') data.drop(columns = 'Id', inplace = True) data.head() data.corr() # + import numpy as np D1=np.random.randint(1,7,size=100) D2=np.random.randint(1,7,size=100) suma = D1+D2 plt.hist(suma, bins = np.arange(1.5,13.5,1), density=True, rwidth = 0.8,) plt.show() # + dado = [1, 2, 3, 4, 5, 6] resultados = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] distribucion = np.zeros(11) for i in range(len(dado)): for j in range(len(dado)): if dado[i] + dado[j] in resultados: distribucion[(dado[i] + dado[j]) - 2] = distribucion[(dado[i] + dado[j]) - 2] + 1 print(distribucion/36) # -
Ejs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="7K5EkziVyKW_" colab_type="text" # **Building database with top-30 populated cities** # + id="RZ02h50o_pkH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="48051c0e-e385-4fa2-c95c-d00247439303" import csv from math import sqrt def left_binary_search(arr, x, l, r): if r - l <= 0: return l elif x >= arr[(l+r)//2][1]: return left_binary_search(arr, x, l, (l+r)//2) return left_binary_search(arr, x, (l+r)//2 + 1, r) sorted_population = list() cities = {} with open('cities.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') skip = True # skip first row with the description for row in csv_reader: name = "" if skip: skip = False continue if len(row[6]) < 2: if (row[0] != '366500'): cities[row[2]] = [float(row[17]), float(row[18])] name = row[2] else: cities[row[6]] = [float(row[17]), float(row[18])] name = row[6] if len(sorted_population) < 30 or int(row[-1]) > sorted_population[-1][1]: sorted_population.insert(left_binary_search(sorted_population, int(row[-1]), 0, len(sorted_population)), [name, int(row[-1])]) if len(sorted_population) > 30: sorted_population.pop() print(cities) print(sorted_population) top30 = {} for index, city in enumerate(sorted_population): top30[city[0]] = cities[city[0]] sorted_population[index][1] = top30[city[0]] print(top30) # + [markdown] id="NF3rmSBCyXkc" colab_type="text" # **Features for the SA algo** # + id="F1OI29Us-r42" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="c7559a6b-aa21-4766-bb4f-249bba55dd1d" import geopy.distance import random import math def distance(cityA, cityB): return geopy.distance.vincenty(cities[cityA], cities[cityB]).km/1000 def paths_length(path): length = 0 for i in range(1, len(path)): length += 
distance(path[i-1][0], path[i][0]) return length def random_traversal(vertices): random.shuffle(vertices) path = vertices return path def P_star(new_loss, old_loss, T): ### energy function that is p*(x) in the task try: p_val = math.exp((old_loss - new_loss)/T) except OverflowError: p_val = float('inf') return p_val print(sorted_population) travers = random_traversal(sorted_population) print(travers) print("random path is {0} km length".format(round(1000*paths_length(travers), 3))) # + [markdown] id="ILLmCMxZycOi" colab_type="text" # **SA algo from the task for optimal route search** # + id="ZtsgTwYMokMp" colab_type="code" colab={} T = 400 a = 0.95 iters_number = 500 curr_iter = 0 p_old = P_star(paths_length(travers), 0, T) history = list() history.append(paths_length(travers)) while curr_iter < iters_number: rand1 = random.randint(0, len(travers) - 2) rand2 = random.randint(rand1 + 1, len(travers) - 1) new_travers = list() for i in range(0, rand1): new_travers.append(travers[i]) new_travers.append(travers[rand2]) for i in range(rand1 + 1, rand2): new_travers.append(travers[i]) new_travers.append(travers[rand1]) for i in range(rand2 + 1, len(travers)): new_travers.append(travers[i]) p = P_star(paths_length(new_travers), paths_length(travers), T) u = random.random() if u < p: travers = new_travers history.append(paths_length(travers)) T *= a curr_iter += 1 # + [markdown] id="9T6rhwSTx3sk" colab_type="text" # **Drawing sub-optimal route that was found by SA** # + id="NxAzB5Cyx0pO" colab_type="code" colab={} # !pip install transliterate # + id="EvGGFumJdl1p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="6e955917-5a66-4cc5-cf02-a672bfb1285b" import cv2 import numpy as np # from google.colab.patches import cv2_imshow # uncomment this if you want to test on google colab import transliterate min_lon, min_lat = float('inf'), float('inf') max_lon, max_lat = -1, -1 for i in travers: if i[1][0] < min_lat: min_lat = i[1][0] if 
i[1][0] > max_lat: max_lat = i[1][0] if i[1][1] < min_lon: min_lon = i[1][1] if i[1][1] > max_lon: max_lon = i[1][1] x = int(100*(max_lat - min_lat)) y = int(100*(max_lon - min_lon)) pict = np.zeros([x+200,y+400,3]) cv2.line(pict, (0, 0), (10470, 1690), color=(255, 255, 255), thickness=30000) for i, city in enumerate(travers[:-1]): a = (int(100*(city[1][1] - min_lon)) + 100, 100+ int(100*(city[1][0] - min_lat))) b = (100+ int(100*(travers[i + 1][1][1] - min_lon)), \ 100+int(100*(travers[i + 1][1][0] - min_lat))) cv2.line(pict, a, b, color=(0, 0, 150), thickness=5) cv2.circle(pict, a, 50, color=(100, 150, 0), thickness=10) cv2.putText(pict, transliterate.translit(city[0], reversed=True), \ a, fontFace=cv2.FONT_HERSHEY_SIMPLEX, color=(0,0,0), fontScale=1.6, thickness=4, bottomLeftOrigin=True) cv2.circle(pict, (100 + int(100*(travers[-1][1][1] - min_lon)), \ 100+int(100*(travers[-1][1][0] - min_lat))), 50, color=(30, 20, 250), thickness=12) cv2.putText(pict, transliterate.translit(travers[-1][0], reversed=True), \ (100+int(100*(travers[-1][1][1] - min_lon)), \ 100+int(100*(travers[-1][1][0] - min_lat))), fontFace=cv2.FONT_HERSHEY_SIMPLEX, \ color=(0,0,0), fontScale=1.5, thickness=5, bottomLeftOrigin=True) cv2.circle(pict, (100+int(100*(travers[0][1][1] - min_lon)), 100+int(100*(travers[0][1][0] - min_lat))), \ 50, color=(200, 20, 20), thickness=10) pict = cv2.flip(pict, 0) # cv2_imshow(pict) # uncomment this line and line from the top if you test code on google colab cv2.imwrite('suboptimal_route.png', pict) # + [markdown] id="3IIVwmlTylZx" colab_type="text" # **The history of the results of the search, the final route and its length is here:** # + id="HMKj9bjYsCjr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 817} outputId="8b484505-6f9d-4d87-8c6c-03362cf7bf0a" import matplotlib.pyplot as plt plt.xlabel("iterations") plt.ylabel("length of path in thousands of km") plt.plot(history) print("final temperature: {0}".format(T)) print("final 
path length: {0} km".format(int(1000*round(paths_length(travers), 3)))) print("The first city is {0}".format(travers[0][0])) for index, city in enumerate(travers[1:]): print("next city number {0} is {1}, it is {2} km away".format(index+2, city[0], round(distance(travers[index - 1][0], city[0])*1000, 1)))
second_task.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="fg7ioxFP93Ma"
# -*- coding: utf-8 -*-
import glob
import json

# + id="ASlokxKR9um3" colab={"base_uri": "https://localhost:8080/"} outputId="5bdea2ab-2782-422d-eebd-4dd4db1f59db"
# !git clone https://github.com/chinese-poetry/chinese-poetry

# + colab={"base_uri": "https://localhost:8080/"} id="Nbfe7EtiB3Qe" outputId="56ecadf0-315f-4f19-eb1d-3885f80f1e33"
# !pip install opencc

# + id="IRPYu6-W958-" colab={"base_uri": "https://localhost:8080/"} outputId="11643c56-3d00-4034-e51c-9f1bcaeae001"
# Ci (词) lives under ~/chinese-poetry/ci, shi (诗) under ~/chinese-poetry/json.
# A poet's name may appear in traditional or simplified characters in the
# corpus, so we search for both forms.
import opencc

converter_t2s = opencc.OpenCC('t2s.json')  # traditional -> simplified
converter_s2t = opencc.OpenCC('s2t.json')  # simplified -> traditional

# Extract the poems of one poet.
poet_name_simple = "王安石"
poet_name = [poet_name_simple, converter_s2t.convert(poet_name_simple)]
dynasty = "song"  # choice from ["tang", "song"]
shiciji_file_name = poet_name_simple

shiciji5 = []  # five-character (五言) poems
shiciji7 = []  # seven-character (七言) poems
yan = set([])

for p_name in poet_name:
    files = glob.glob("./chinese-poetry/json/poet.{}*.json".format(dynasty))
    #files1 = glob.glob("./chinese-poetry/ci/*.json")
    #files.extend(files1)
    for file in files:
        with open(file) as fi:
            fi_json = json.load(fi)
        for poem in fi_json:
            if 'author' in poem and poem["author"] == p_name:
                # Fix: guard against records with an empty "paragraphs" list;
                # the original indexed [0] unconditionally and could raise
                # IndexError on malformed entries.
                if not poem.get("paragraphs"):
                    continue
                # Length of the first paragraph decides the verse form:
                # 6/12 characters -> five-character lines, 8/16 -> seven.
                sen_len = len(poem["paragraphs"][0])
                poem_str = converter_t2s.convert(poem['title'])+":"+converter_t2s.convert("".join(poem["paragraphs"]))
                if sen_len in [6, 12]:
                    # five-character lines (五言)
                    shiciji5.append(poem_str)
                elif sen_len in [8, 16]:
                    # seven-character lines (七言)
                    shiciji7.append(poem_str)

print(len(shiciji5))
print(len(shiciji7))

# Fix: write the corpora inside context managers so all three files are
# flushed and closed (the original opened fo5/fo7/fo and never closed them,
# risking truncated output when the kernel dies).
with open(shiciji_file_name+"5", 'w') as fo5, \
     open(shiciji_file_name+"7", 'w') as fo7, \
     open(shiciji_file_name, 'w') as fo:
    for item in shiciji5:
        fo5.write(item)
        fo5.write("\n")
        fo.write(item)
        fo.write("\n")
    for item in shiciji7:
        fo7.write(item)
        fo7.write("\n")
        fo.write(item)
        fo.write("\n")

# + colab={"base_uri": "https://localhost:8080/"} id="MRtfbtJlDMoV" outputId="18adb5fd-3953-42c4-d00f-f308b9920d04"
# !pip install keras

# + id="x_3eWFXcAEbc"
import re
import pandas as pd
import numpy as np
from keras.utils import np_utils

# Pick the training corpus (the 5-character file, the 7-character file,
# or the combined file written above).
input_file = shiciji_file_name
with open(input_file) as f:
    raw_text = f.read()
lines = raw_text.split("\n")[:-1]
# Each line is "title:body"; keep only the body.
poem_text = [i.split(':')[1] for i in lines]
# Split each poem into characters; the first alternative captures a
# 3-byte UTF-8 sequence so multi-byte CJK characters stay intact.
char_list = [re.findall('[\x80-\xff]{3}|[\w\W]', s) for s in poem_text]

# Build the vocabulary: id 1 is the most frequent character.
all_words = []
for i in char_list:
    all_words.extend(i)
word_dataframe = pd.DataFrame(pd.Series(all_words).value_counts())
word_dataframe['id'] = list(range(1, len(word_dataframe)+1))

word_index_dict = word_dataframe['id'].to_dict()  # char -> id
index_dict = {}                                   # id -> char
for k in word_index_dict:
    index_dict.update({word_index_dict[k]: k})

seq_len = 2     # input window length
rotate_len = 1  # stride between windows

# Sliding-window next-character dataset.
dataX = []
dataY = []
for sentence in char_list:
    for i in range(0, len(sentence) - seq_len, rotate_len):
        seq_in = sentence[i: i + seq_len]
        seq_out = sentence[i + seq_len]
        dataX.append([word_index_dict[x] for x in seq_in])
        dataY.append(word_index_dict[seq_out])

X = np.array(dataX)
y = np_utils.to_categorical(np.array(dataY))

# + colab={"base_uri": "https://localhost:8080/"} id="ClaFrKanMXHT" outputId="5c804950-43a3-4371-b7fb-82317b18d45d"
# Sanity check: show a few (input window, target character) pairs.
for i in range(7):
    print([index_dict[word_index] for word_index in dataX[i]])
    print(index_dict[dataY[i]])

# + id="HNfTzkupMjb8"
import tensorflow as tf

# + id="9TUe2IJg_Ljx"
from keras import Sequential
from keras.layers import Embedding, GRU, LSTM, Dense, Activation
import os

def make_model():
    """Build the character-level language model: Embedding -> GRU -> softmax
    over the vocabulary (uses the module-level word_dataframe and y)."""
    model = Sequential()
    model.add(Embedding(len(word_dataframe)+1, 512))
    #model.add(LSTM(512, return_sequences = True))
    model.add(GRU(512))
    model.add(Dense(y.shape[1]))
    model.add(Activation('softmax'))
    return model

# (TPU setup removed; to train on TPU, build the model inside a
# tf.distribute TPUStrategy scope.)
training_model = make_model()
training_model.compile(loss='categorical_crossentropy', optimizer='adam')

# + colab={"base_uri": "https://localhost:8080/"} id="fV5eKNVBEymp" outputId="a07b993b-6d93-4354-c922-248258ca35ea"
# define the checkpoint
from keras.callbacks import ModelCheckpoint
import time

ts = int(time.time())
checkpoint_dir = "./"+input_file+"-checkpoints-"+str(ts)
os.makedirs(checkpoint_dir)
#filepath=checkpoint_dir+"/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
# Saving to a directory path stores the best model in SavedModel format.
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='loss', verbose=1,
                             save_best_only=True, mode='min')
callbacks_list = [checkpoint]
# fit the model
training_model.fit(X, y, epochs=25, batch_size=256, callbacks=callbacks_list)

# + id="iy7Z3wvNDkWw"
import random
import re
from keras.models import load_model

# training_model = make_model()
# training_model.load_weights(filepath)

def gen_poem(seed_text):
    """Sample a poem from the trained model, seeded with seq_len characters.

    Returns the generated poem as a single string, or "" if the seed does
    not contain exactly seq_len characters.
    """
    rows = 4  # number of lines to emit
    cols = 6  # characters per line: 6 for 五言 (five-char), 8 for 七言 (seven-char)
    chars = re.findall('[\x80-\xff]{3}|[\w\W]', seed_text)
    if len(chars) != seq_len:
        return ""
    arr = [word_index_dict[k] for k in chars]
    for i in range(seq_len, rows * cols):
        if (i+1) % cols == 0:
            # End of a line: ids 1 and 2 are assumed to map to the two most
            # frequent corpus tokens (punctuation 、。) — TODO confirm.
            if (i+1) / cols == 2 or (i+1) / cols == 4:
                arr.append(2)
            else:
                arr.append(1)
        else:
            # NOTE(review): predict() is fed a 1-D array of the last seq_len
            # ids and proba[1] is indexed afterwards; presumably this relies
            # on Keras treating each id as a separate sample — verify whether
            # a (1, seq_len) batch was intended.
            proba = training_model.predict(np.array(arr[-seq_len:]), verbose=0)
            predicted = np.argsort(proba[1])[-5:]
            # Sample among the top-5 candidates, rejecting the punctuation ids.
            index = random.randint(0, len(predicted)-1)
            new_char = predicted[index]
            while new_char == 1 or new_char == 2:
                index = random.randint(0, len(predicted)-1)
                new_char = predicted[index]
            arr.append(new_char)
    poem = [index_dict[i] for i in arr]
    return "".join(poem)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="9ox35-XUET0D" outputId="464e6d25-f22e-4b10-b05d-7733b0488941"
gen_poem("槿花")

# + id="UeTo9M1fnIR8"
shixu.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # katex-extension # ## Example 1 # $$ # f(x) = \int_{-\infty}^\infty\hat f(\xi)\,e^{2 \pi i \xi x}\,d\xi # $$ # ## Example 2 # $$ # \frac{1}{\Bigl(\sqrt{\phi \sqrt{5}}-\phi\Bigr) e^{\frac25 \pi}} = 1+\frac{e^{-2\pi}} {1+\frac{e^{-4\pi}} {1+\frac{e^{-6\pi}} {1+\frac{e^{-8\pi}} {1+\cdots} } } } # $$ # ## Example 3 # $$ # 1 + \frac{q^2}{(1-q)}+\frac{q^6}{(1-q)(1-q^2)}+\cdots = \prod_{j=0}^{\infty}\frac{1}{(1-q^{5j+2})(1-q^{5j+3})}, \quad\quad \text{for }\lvert q\rvert<1. # $$ # ## Rendering bug in Chrome on macOS # $$ # \frac{a}{b} # $$
notebooks/katex-extension.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Crawling #3 : Selenium Browser # + # 기존의 정적인 페이지 ( HTML, CSS 에 모든 정보가 로딩 초기에 Rendering 되서 내려오는 페이지 )나 # AJAX 기반의 동적인 페이지 중에서 API를 직접 까볼 수 있는 페이지 들은 # 기존처럼 requests, beautifulsoup4, json 등을 이용해서 크롤링하실 수 있습니다. # 다만, # AJAX를 이용한 것 같기는 한데, 대체 어디서 정보를 가져오는지 모르겠는 페이지나 # 네이버 카페 등과 같이 Frame 을 이용해서 사이트를 렌더링하는 경우에는 # 이렇게 Selenium으로 직접 브라우져를 띄우고 크롤링을 하실 수 있습니다. # + import pandas as pd from selenium import webdriver # - room_df = pd.DataFrame(columns=[ "연락처", ]) driver = webdriver.Firefox() driver.get("""http://www.dabangapp.com/search#/search?type=subway&id=446&filters={"deposit-range":[0,999999],"price-range":[0,999999],"room-type":[0,1,2,3,4,5],"location":[[127.00468076340508,37.50362345510044],[127.03536523453545,37.52894892024945]]}&position={"center":[127.02002299897026,37.51628726200644],"zoom":15}""") driver.implicitly_wait(10) # + # 세부 페이지를 크롤링할 새로운 브라우저를 키자. new_driver = webdriver.Firefox() room_elements = driver.find_elements_by_css_selector('ul.items li.item') # 여기서는 샘플로 5개만 데이터를 가져오겠습니다. for room_element in room_elements[:5]: room_detail_page_url = room_element.find_element_by_css_selector('a').get_attribute('href') new_driver.get(room_detail_page_url) new_driver.implicitly_wait(10) new_driver.find_element_by_css_selector('div.contact-view-button-wrap button').click() phonenumber = new_driver.find_element_by_css_selector('div.agent-profile-wrap span.number').text room_df.loc[len(room_df)] = [phonenumber] # 웹 브라우저는 메모리를 많이 잡아먹으니, # 꼭 종료를 합시다! driver.quit() new_driver.quit() # - room_df.to_csv("dabang_room.csv", index=False)
crawling/crawling-with-browser.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # PyTorch: Defining New autograd Functions
# ----------------------------------------
#
# A third order polynomial, trained to predict $y=\sin(x)$ from $-\pi$
# to $\pi$ by minimizing squared Euclidean distance. Instead of writing the
# polynomial as $y=a+bx+cx^2+dx^3$, we write the polynomial as
# $y=a+b P_3(c+dx)$ where $P_3(x)=\frac{1}{2}\left(5x^3-3x\right)$ is
# the `Legendre polynomial`_ of degree three.
#
# https://en.wikipedia.org/wiki/Legendre_polynomials
#
# This implementation computes the forward pass using operations on PyTorch
# Tensors, and uses PyTorch autograd to compute gradients.
#
# In this implementation we implement our own custom autograd function to perform
# $P_3'(x)$. By mathematics, $P_3'(x)=\frac{3}{2}\left(5x^2-1\right)$
#

# +
import torch
import math


class LegendrePolynomial3(torch.autograd.Function):
    """Custom autograd Function for the degree-3 Legendre polynomial.

    Subclassing torch.autograd.Function and supplying static forward and
    backward methods lets autograd differentiate through P3.
    """

    @staticmethod
    def forward(ctx, x):
        """Compute P3(x) = 0.5 * (5x^3 - 3x).

        ctx is a context object; tensors needed later by backward are
        stashed with ctx.save_for_backward.
        """
        ctx.save_for_backward(x)
        return 0.5 * (5 * x ** 3 - 3 * x)

    @staticmethod
    def backward(ctx, grad_output):
        """Chain rule: given dL/dy, return dL/dx = dL/dy * P3'(x),
        where P3'(x) = 1.5 * (5x^2 - 1)."""
        (x,) = ctx.saved_tensors
        return grad_output * 1.5 * (5 * x ** 2 - 1)


dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0")  # Uncomment this to run on GPU

# Training data: 2000 points on [-pi, pi] with targets sin(x).
# requires_grad stays False — we never differentiate w.r.t. the data.
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

# Trainable scalar coefficients of y = a + b * P3(c + d*x), initialised
# close enough to a known solution that plain gradient descent converges.
# requires_grad=True makes autograd track them.
a = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
b = torch.full((), -1.0, device=device, dtype=dtype, requires_grad=True)
c = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
d = torch.full((), 0.3, device=device, dtype=dtype, requires_grad=True)

learning_rate = 5e-6

# Function.apply is the entry point for a custom autograd op; alias it.
P3 = LegendrePolynomial3.apply

for step in range(2000):
    # Forward pass through the custom op.
    y_pred = a + b * P3(c + d * x)

    # Squared-error loss; report progress every 100 steps.
    loss = (y_pred - y).pow(2).sum()
    if step % 100 == 99:
        print(step, loss.item())

    # Backward pass fills .grad on each coefficient.
    loss.backward()

    # Plain gradient descent, performed outside autograd tracking.
    with torch.no_grad():
        for coef in (a, b, c, d):
            coef -= learning_rate * coef.grad

        # Reset gradients so the next backward() does not accumulate.
        for coef in (a, b, c, d):
            coef.grad = None

print(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')
Learning PyTorch/Learning PyTorch with Example/polynomial_custom_function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # The Python Standard Library

# String
for i in range(5):
    for j in range(2):
        print("this is ({},{}) integer".format(i, j))

import re
s = 'hellow'
# Raw string avoids the invalid "\S" escape warning in newer Pythons;
# 'hellow' contains no '@', so this yields [].
lst = re.findall(r'\S+@\S', s)
print(lst)

# # collections

from collections import namedtuple
# Fix: the typename passed to namedtuple was 'print'; it should match the
# variable name so instances repr as point(x=..., y=...).
point = namedtuple('point', ['x', 'y'])
point(11, 12)

point(2, 3)

from collections import Counter
A = [1, 3, 5, 6, 5, 8, 3, 4, 3]
Counter(A)

# Itertools

from itertools import permutations
L = ["a", "b", "c", "d", "e"]
for item in permutations(L, 2):
    print(item)

import os
path = "plot"
os.path.isdir(path)

path = "l. DataStructure.ipynb"
os.path.isfile(path)

os.listdir()

import os
# 'wmic' exists only on Windows; elsewhere this returns an empty string.
output = os.popen('wmic').read()
print(output[0:1000])

import glob
# Fix: '.ipynb' matched only a file literally named ".ipynb"; a wildcard is
# needed to list all notebooks in the current directory.
glob.glob('*.ipynb')

# +
import time

t1 = time.time()
for i in range(1000001):
    # Fix: the colon after the condition was missing (SyntaxError).
    if i == 1000000:
        print('done!')
t2 = time.time()
t2 - t1
# -
standard library.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Remember to execute this cell with Control+Enter
import sys; sys.path.append('../'); import jupman;

# # Functions 6 - exercises with sets
#
# ## [Download exercises zip](../_static/generated/functions.zip)
#
# [Browse files online](https://github.com/DavidLeoni/softpython-en/tree/master/functions)

# ### Exercise - syllabs
#
# Write a function `syllabs` which given a string `word` made by only bisyllabs and a set `found`,
# finds all the distinct bisyllabs and puts them into the set `found`.
#
# * **NOTE**: the function `syllabs` returns NOTHING !
#
# Example 1:
#
# ```python
# >>> found = set()
# >>> syllabs("banana", found)
# >>> found
# {'an', 'ba'}
# ```
#
# Example 2:
#
# ```python
# >>> found = set()
# >>> syllabs("bonobo", found)
# >>> found
# {'bo', 'on'}
# ```

# +
#jupman-purge-output
# write here

def syllabs(word, t):
    # Adds every adjacent 2-character window of `word` to the set `t`.
    # NOTE(review): for "banana" this yields {'ba', 'an', 'na'}, which does
    # not match the example output {'an', 'ba'} above — the solution steps
    # by 1 instead of 2; the code or the examples need reconciling.
    for i in range(len(word)//2):
        t.add(word[i:i+2])

found = set()
syllabs("banana", found)
print(found)

found = set()
syllabs("bonobo", found)
print(found)
# -

# ### Exercise - distinguish
#
# ✪✪ Write a function `distinguish` which given a list `big_list` containing sublists of _two_
# characters each, RETURN a NEW LIST containing all the _distinct_ sublists (ignoring the
# duplicated sublists)
#
# * the returned list must have the elements _in the same order_ in which they were found in `big_list`
# * to know fast whether a sublist was already found, **use a set**
# * **DO NOT** search in lists (so no `count`, `index`, `in` in lists - they're slow!)
# * **DO NOT** remove from lists (so no `remove` from lists - it's slow!)
# * **HINT**: lists are _mutable,_ can we place them in a set? If it's not possible, what can we do?
# Example:
#
# ```python
# >>> big_list = [ ['d','d'],['a','b'],['d','d'],['c','a'],['c','a'],['d','d'],['a','b'] ]
# >>> distinguish( big_list)
# [['d', 'd'], ['a', 'b'], ['c', 'a']]
# #NOTE: variable big_list MUST NOT be modified:
# >>> big_list
# [ ['d','d'],['a','b'],['d','d'],['c','a'],['c','a'],['d','d'],['a','b'] ]
# ```

# +
# write here

def distinguish(blist):
    # Set of already-seen sublists (as tuples) and the ordered result.
    s = set()
    ret = []
    for sublist in blist:
        # In sets we can't place lists because they are mutable,
        # but we can insert tuples
        tup = tuple(sublist)
        # Checking whether an element belongs to a set it's very fast:
        # it is independent from the set dimension!
        if tup not in s:
            ret.append(sublist)
            # Adding an element to a set is very fast:
            # it is independent from the set dimension!
            s.add(tup)
    return ret

big_list = [ ['d','d'],['a','b'],['d','d'],['c','a'],['c','a'],['d','d'],['a','b'] ]

#print('distincts:', distinguish(big_list))
#print('big_list:', big_list)
# -

# ### Exercise - intersectron
#
# ![intersectron](img/abc-intersection.png)

# Given a list `sets` containing an arbitrary number of sets, RETURN a NEW set which contains
# the elements common to all sets.
#
# To solve the exercise, you can intersect one set at a time with a `for` cycle (slow) or with
# the technique [described here](https://stackoverflow.com/a/2541814) (short and fast).
#
# * try to solve it in **both** ways
# * **BEWARE** of the empty list!
# * your code must work with **any** number of sets (the image is just an example)

# +
def inter_for(sets):
    #jupman-raise
    # No sets at all: the intersection is defined here as the empty set.
    if len(sets) == 0:
        return set()
    # Copy the first set, then narrow it against each remaining set.
    first = True
    for el in sets:
        if first:
            ret = set(el)
            first = False
        else:
            ret.intersection_update(el)
    return ret
    #/jupman-raise

# TEST START - DO NOT TOUCH !
assert inter_for([]) == set()
assert inter_for([set(),set()]) == set()
assert inter_for([set(),set(),set()]) == set()
assert inter_for([{'a'},{'a'},{'a'}]) == {'a'}
assert inter_for([{'a','b'},{'b'},{'b'}]) == {'b'}
assert inter_for([{'a'},{'a','b'},{'a'}]) == {'a'}
assert inter_for([{'c'},{'c'},{'c','b'}]) == {'c'}
assert inter_for([{'a','b'},{'a','b'},{'a','b'}]) == {'a','b'}
assert inter_for([{'a','b','c'},{'a','b','c','d'},{'b','c','d'}, {'b','c'}]) == {'b','c'}

# check we didn't modify the input sets
s = {'a','b'}
assert inter_for([s,{'b','c'}]) == {'b'}
assert s == {'a','b'}
# TEST END
# -

# +
def inter_fast(sets):
    #jupman-raise
    if len(sets) == 0:
        return set()
    # Unpack the list so set.intersection receives all sets at once;
    # it returns a new set and leaves the inputs untouched.
    return set.intersection(*sets)
    #/jupman-raise

# TEST START - DO NOT TOUCH !
assert inter_fast([]) == set()
assert inter_fast([set(),set()]) == set()
assert inter_fast([set(),set(),set()]) == set()
assert inter_fast([{'a'},{'a'},{'a'}]) == {'a'}
assert inter_fast([{'a','b'},{'b'},{'b'}]) == {'b'}
assert inter_fast([{'a'},{'a','b'},{'a'}]) == {'a'}
assert inter_fast([{'c'},{'c'},{'c','b'}]) == {'c'}
assert inter_fast([{'a','b'},{'a','b'},{'a','b'}]) == {'a','b'}
assert inter_fast([{'a','b','c'},{'a','b','c','d'},{'b','c','d'}, {'b','c'}]) == {'b','c'}

# check we didn't modify the input sets
s = {'a','b'}
assert inter_fast([s,{'b','c'}]) == {'b'}
assert s == {'a','b'}
# TEST END
# -

# ## --------
#
# <!--Continue
#
# Go on with exercises about [functions and dictionaries](https://en.softpython.org/functions/fun7-dictionaries-sol.html) -->
functions/fun6-sets-sol.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # KernelSHAP: combining preprocessor and predictor

# <div class="alert alert-info">
# To enable SHAP support, you may need to run
#
# ```bash
# pip install alibi[shap]
# ```
#
# </div>

# ## Introduction

# In [this](kernel_shap_adult_lr.ipynb) example, we showed that the categorical variables can be
# handled by fitting the explainer on preprocessed data and passing preprocessed data to the
# `explain` call. To handle the categorical variables, we either group them explicitly or sum the
# estimated shap values for each encoded shap dimension. An alternative way is to define our
# black-box model to include the preprocessor, as shown in [this](anchor_tabular_adult.ipynb)
# example. We now show that these methods give the same results.

# +
import shap
shap.initjs()

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from alibi.explainers import KernelShap
from alibi.datasets import fetch_adult
from scipy.special import logit
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, plot_confusion_matrix
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# -

# ## Data preparation

# ### Load and split

# The `fetch_adult` function returns a `Bunch` object containing the features, the targets, the
# feature names and a mapping of categorical variables to numbers.

adult = fetch_adult()
adult.keys()

data = adult.data
target = adult.target
target_names = adult.target_names
feature_names = adult.feature_names
category_map = adult.category_map

# Note that for your own datasets you can use our utility function `gen_category_map` to create
# the category map.

from alibi.utils.data import gen_category_map

# Shuffle features and targets together so the train/test split is random.
np.random.seed(0)
data_perm = np.random.permutation(np.c_[data, target])
data = data_perm[:,:-1]
target = data_perm[:,-1]

idx = 30000
X_train,y_train = data[:idx,:], target[:idx]
X_test, y_test = data[idx+1:,:], target[idx+1:]

# ### Create feature transformation pipeline

# Create feature pre-processor. Needs to have 'fit' and 'transform' methods. Different types of
# pre-processing can be applied to all or part of the features. In the example below we will
# standardize ordinal features and apply one-hot-encoding to categorical features.
#
# Ordinal features:

ordinal_features = [x for x in range(len(feature_names)) if x not in list(category_map.keys())]
ordinal_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
                                      ('scaler', StandardScaler())])

# Categorical features:

categorical_features = list(category_map.keys())
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
                                          ('onehot', OneHotEncoder(drop='first', handle_unknown='error'))])

# Note that in order to be able to interpret the coefficients corresponding to the categorical
# features, the option `drop='first'` has been passed to the `OneHotEncoder`. This means that for
# a categorical variable with `n` levels, the length of the code will be `n-1`. This is necessary
# in order to avoid introducing feature multicollinearity, which would skew the interpretation of
# the results. For more information about the issue of multicollinearity in the context of linear
# modelling see [[1]](#References).
# <a id='src_1'></a>

# Combine and fit:

preprocessor = ColumnTransformer(transformers=[('num', ordinal_transformer, ordinal_features),
                                               ('cat', categorical_transformer, categorical_features)])
preprocessor.fit(X_train)

# ## Fit a binary logistic regression classifier to the preprocessed Adult dataset

# ### Preprocess the data

X_train_proc = preprocessor.transform(X_train)
X_test_proc = preprocessor.transform(X_test)

# ### Training

classifier = LogisticRegression(multi_class='multinomial',
                                random_state=0,
                                max_iter=500,
                                verbose=0,
                                )
classifier.fit(X_train_proc, y_train)

# ### Model assessment

y_pred = classifier.predict(X_test_proc)

cm = confusion_matrix(y_test, y_pred)

title = 'Confusion matrix for the logistic regression classifier'
# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and
# removed in 1.2 (use ConfusionMatrixDisplay.from_estimator) — confirm the
# pinned sklearn version before upgrading.
disp = plot_confusion_matrix(classifier,
                             X_test_proc,
                             y_test,
                             display_labels=target_names,
                             cmap=plt.cm.Blues,
                             normalize=None,
                             )
disp.ax_.set_title(title)

print('Test accuracy: ', accuracy_score(y_test, classifier.predict(X_test_proc)))

# ## Explaining the model with an explainer fitted on the preprocessed data

# To speed up computation, we will use a background dataset with only `100` samples.

start_example_idx = 0
stop_example_idx = 100
background_data = slice(start_example_idx, stop_example_idx)

# First, we group the categorical variables.

# +
def make_groups(num_feats_names, cat_feats_names, feat_enc_dim):
    """
    Given a list with numerical feature names, a list with categorical
    feature names and a list specifying the length of the one-hot encoding
    for each categorical variable, the function outputs a list of group
    names, and a list of the same length where each entry contains the
    column indices occupied by the corresponding feature in the
    preprocessed array.
    """
    group_names = num_feats_names + cat_feats_names
    groups = []
    cat_var_idx = 0

    for name in group_names:
        if name in num_feats_names:
            # Numerical features occupy a single column each.
            groups.append(list(range(len(groups), len(groups) + 1)))
        else:
            # Categorical features occupy feat_enc_dim[cat_var_idx]
            # consecutive columns, starting right after the previous group.
            start_idx = groups[-1][-1] + 1 if groups else 0
            groups.append(list(range(start_idx, start_idx + feat_enc_dim[cat_var_idx] )))
            cat_var_idx += 1

    return group_names, groups

def sparse2ndarray(mat, examples=None):
    """
    Converts a scipy.sparse.csr_matrix to a numpy.ndarray.
    If specified, examples is a slice object that selects a number of
    rows from mat, and only that slice is converted.
    """
    if examples:
        return mat[examples, :].toarray()

    return mat.toarray()

# -

# obtain the indices of the categorical and the numerical features from the pipeline.

numerical_feats_idx  = preprocessor.transformers_[0][2]
categorical_feats_idx  = preprocessor.transformers_[1][2]
num_feats_names = [feature_names[i] for i in numerical_feats_idx]
cat_feats_names = [feature_names[i] for i in categorical_feats_idx]
perm_feat_names = num_feats_names + cat_feats_names
ohe = preprocessor.transformers_[1][1].named_steps['onehot']
# drop='first' was used, so each encoding is one shorter than its category count.
feat_enc_dim = [len(cat_enc) - 1 for cat_enc in ohe.categories_]

# create the groups

X_train_proc_d = sparse2ndarray(X_train_proc, examples=background_data)
group_names, groups = make_groups(num_feats_names, cat_feats_names, feat_enc_dim)

# Having created the groups, we are now ready to instantiate the explainer and explain our set.

pred_fcn = classifier.predict_proba
grp_lr_explainer = KernelShap(pred_fcn, link='logit', feature_names=perm_feat_names, seed=0)
grp_lr_explainer.fit(X_train_proc_d, group_names=group_names, groups=groups)

# We select only a small fraction of the testing set to explain for the purposes of this example.
def split_set(X, y, fraction, random_state=0):
    """
    Given a set X and associated labels y, splits a fraction of
    the records from X and returns it together with its labels.
    """
    _, X_split, _, y_split = train_test_split(X,
                                              y,
                                              test_size=fraction,
                                              random_state=random_state,
                                             )
    print("Number of records: {}".format(X_split.shape[0]))
    print("Number of class {}: {}".format(0, len(y_split) - y_split.sum()))
    print("Number of class {}: {}".format(1, y_split.sum()))

    return X_split, y_split

fraction_explained = 0.01
X_explain, y_explain = split_set(X_test,
                                 y_test,
                                 fraction_explained,
                                 )
X_explain_proc = preprocessor.transform(X_explain)
X_explain_proc_d = sparse2ndarray(X_explain_proc)

grouped_explanation = grp_lr_explainer.explain(X_explain_proc_d)

# ### Explaining with an explainer fitted on the raw data

# To explain with an explainer fitted on the raw data, we make the preprocessor part of the
# predictor, as shown below.

pred_fcn = lambda x: classifier.predict_proba(preprocessor.transform(x))
lr_explainer = KernelShap(pred_fcn, link='logit', feature_names=feature_names, seed=0)

# We use the same background dataset to fit the explainer.

lr_explainer.fit(X_train[background_data])

# We explain the same dataset as before.

explanation = lr_explainer.explain(X_explain)

# ### Results comparison

# To show that fitting the explainer on the raw data and combining the preprocessor with the
# classifier gives the same results as grouping the variables and fitting the explainer on the
# preprocessed data, we check to see that the same features are considered as most important
# when combining the two approaches.

# +
def get_ranked_values(explanation):
    """
    Retrieves a tuple of (feature_effects, feature_names) for
    each class explained. A feature's effect is its average
    shap value magnitude across an array of instances.
    """
    ranked_shap_vals = []
    for cls_idx in range(len(explanation.shap_values)):
        this_ranking = (
            explanation.raw['importances'][str(cls_idx)]['ranked_effect'],
            explanation.raw['importances'][str(cls_idx)]['names']
        )
        ranked_shap_vals.append(this_ranking)

    return ranked_shap_vals

def compare_ranking(ranking_1, ranking_2, methods=None):
    # For each class, print whether the two rankings agree; where they
    # differ, print a table of the diverging ranks.
    for i, (combined, grouped) in enumerate(zip(ranking_1, ranking_2)):
        print(f"Class: {i}")
        c_names, g_names = combined[1], grouped[1]
        c_mag, g_mag = combined[0], grouped[0]

        different = []
        for i, (c_n, g_n) in enumerate(zip(c_names, g_names)):
            if c_n != g_n:
                different.append((i, c_n, g_n))
        if different:
            method_1 = methods[0] if methods else "Method_1"
            method_2 = methods[1] if methods else "Method_2"
            i, c_ns, g_ns = list(zip(*different))
            data = {"Rank": i, method_1: c_ns, method_2: g_ns}
            df = pd.DataFrame(data=data)
            print("Found the following rank differences:")
            print(df)
        else:
            print("The methods provided the same ranking for the feature effects.")
            print(f"The ranking is: {c_names}")
        print("")

def reorder_feats(vals_and_names, src_vals_and_names):
    """Given two tuples, each containing a list of ranked feature
    shap values and the corresponding feature names, the function
    reorders the values in vals according to the order specified in
    the list of names contained in src_vals_and_names.
    """

    _, src_names = src_vals_and_names
    vals, names = vals_and_names
    reordered = np.zeros_like(vals)

    for i, name in enumerate(src_names):
        alt_idx = names.index(name)
        reordered[i] = vals[alt_idx]

    return reordered, src_names

def compare_avg_mag_shap(class_idx, comparisons, baseline, **kwargs):
    """
    Given a list of tuples, baseline, containing the feature values and a list with feature names
    for each class, and comparisons, a list of lists of tuples with the same structure, the
    function reorders the values of the features in the comparisons entries according to the
    order of the feature names provided in the baseline entries and displays the feature values
    for comparison as a horizontal bar chart.
    """

    methods = kwargs.get("methods", [f"method_{i}" for i in range(len(comparisons) + 1)])
    n_features = len(baseline[class_idx][0])

    # bar settings
    bar_width = kwargs.get("bar_width", 0.05)
    bar_space = kwargs.get("bar_space", 2)

    # x axis
    x_low = kwargs.get("x_low", 0.0)
    x_high = kwargs.get("x_high", 1.0)
    x_step = kwargs.get("x_step", 0.05)
    x_ticks = np.round(np.arange(x_low, x_high + x_step, x_step), 3)

    # y axis (these are the y coordinate of start and end of each group
    # of bars)
    start_y_pos = np.array(np.arange(0, n_features))*bar_space
    end_y_pos = start_y_pos + bar_width*len(methods)
    y_ticks = 0.5*(start_y_pos + end_y_pos)

    # figure
    fig_x = kwargs.get("fig_x", 10)
    fig_y = kwargs.get("fig_y", 7)

    # fontsizes
    title_font = kwargs.get("title_fontsize", 20)
    legend_font = kwargs.get("legend_fontsize", 20)
    tick_labels_font = kwargs.get("tick_labels_fontsize", 20)
    axes_label_fontsize = kwargs.get("axes_label_fontsize", 10)

    # labels
    title = kwargs.get("title", None)
    ylabel = kwargs.get("ylabel", None)
    xlabel = kwargs.get("xlabel", None)

    # process input data
    methods = list(reversed(methods))
    base_vals = baseline[class_idx][0]
    ordering = baseline[class_idx][1]
    comp_vals = []

    # reorder the features so that they match the order of the baseline (ordering)
    for comparison in comparisons:
        vals, ord_ = reorder_feats(comparison[class_idx], baseline[class_idx])
        comp_vals.append(vals)
        # reorder_feats returns the baseline's own name list, so identity holds.
        assert ord_ is ordering

    all_vals = [base_vals] + comp_vals
    data = dict(zip(methods, all_vals))
    df = pd.DataFrame(data=data, index=ordering)

    # plotting logic
    fig, ax = plt.subplots(figsize=(fig_x, fig_y))

    for i, col in enumerate(df.columns):
        values = list(df[col])
        y_pos = [y + bar_width*i for y in start_y_pos]
        ax.barh(y_pos, list(values), bar_width, label=col)

    # add ticks, legend and labels
    ax.set_xticks(x_ticks)
    ax.set_xticklabels([str(x) for x in x_ticks], rotation=45, fontsize=tick_labels_font)
    ax.set_xlabel(xlabel, fontsize=axes_label_fontsize)
    ax.set_yticks(y_ticks)
    ax.set_yticklabels(ordering, fontsize=tick_labels_font)
    ax.set_ylabel(ylabel, fontsize=axes_label_fontsize)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.legend(fontsize=legend_font)

    plt.grid(True)
    plt.title(title, fontsize=title_font)

    return ax, fig, df

# -

ranked_grouped_shap_vals = get_ranked_values(grouped_explanation)
ranked_shal_vals_raw = get_ranked_values(explanation)
compare_ranking(ranked_grouped_shap_vals, ranked_shal_vals_raw)

# Above we can see that both methods returned the same feature importances.

# +
class_idx = 0
ax, fig, _ = compare_avg_mag_shap(class_idx,
                                  [ranked_shal_vals_raw],
                                  ranked_grouped_shap_vals,
                                  methods=('raw_data', 'grouped'),
                                  bar_width=0.5,
                                  tick_labels_fontsize=12,
                                  legend_fontsize=12,
                                  title_fontsize=15,
                                  xlabel="Features effects (class {})".format(0),
                                  ylabel="Feature",
                                  axes_label_fontsize=15,
                                  )
# -

# We can see that the shap values are very similar. The differences appear because the regression
# dataset generated in order to compute the shap values differs slightly between the two runs due
# to the difference in the order of the features in the background dataset.
#
# ### References
# <a id='References'></a>
#
# [[1]](#src_1) *<NAME>., 2019. "One-Hot-Encoding, Multicollinearity and the Dummy Variable Trap".
# Retrieved 02 Feb 2020* [(link)](https://towardsdatascience.com/one-hot-encoding-multicollinearity-and-the-dummy-variable-trap-b5840be3c41a)
examples/kernel_shap_adult_categorical_preproc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.core.display import display, HTML with open('style.css') as file: css = file.read() HTML(css) # # Loading a Program # # The file `min_sort.py` contains a *Python* program that we want to load. First, let us display the content of this file. # To do this, we can use the command # ``` # type min_sort.py # ``` # if we are working with a *Windows* operating system. # !type min_sort.py # If you are working with either a *Linux* or *MacOS* operating system, the the command # ``` # cat min_sort.py # ``` # should display the contents of the file `min_sort.py`. # !cat min_sort.py # We can load this program file using the `import` command. import min_sort # Now the function `minSort` is available, provided we prefix it with the *module name* `min_sort`. min_sort.minSort([7, 8, 3, 2, 15, -4]) # If we prefer to import this function directly without having to reference the module name, then we can do this using the keyword `from` as follows. from min_sort import minSort minSort([13, 4, 25, 2, 11]) # We can even import *everything*, i.e. every function and every variable defined in a file using the following syntax: from min_sort import * # However, in general this is not a good idea because it could create name clashes between variables defined in the module and variables defined in the *Jupyter* notebook.
Python/Min-Sort.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tensorflow_p36] # language: python # name: conda-env-tensorflow_p36-py # --- # + # Import Libraries from __future__ import print_function from config import * from utils import * from model import * # - # Global Variables class_names=[] batch_size = 0 input_shape=(0,0,0) def init(): global batch_size batch_size = batch_size_train logging.debug('batch_size {}'.format(batch_size)) global class_names class_names = sorted(get_subdir_list(dataset_train_path)) logging.debug('class_names {}'.format(class_names)) global input_shape input_shape = (img_width, img_height, img_channel) logging.debug('input_shape {}'.format(input_shape)) if not os.path.exists(output_path_name): os.makedirs(output_path_name) if not os.path.exists(logs_path_name): os.makedirs(logs_path_name) if not os.path.exists(btl_path): os.makedirs(btl_path) if not os.path.exists(btl_train_path): os.makedirs(btl_train_path) if not os.path.exists(btl_val_path): os.makedirs(btl_val_path) def save_bottleneck(): logging.debug('class_names {}'.format(class_names)) logging.debug('batch_size {}'.format(batch_size)) logging.debug('epochs {}'.format(epochs)) logging.debug('input_shape {}'.format(input_shape)) ## Build the VGG16 network model = applications.VGG16(include_top=False, weights='imagenet', input_shape=input_shape) #model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=input_shape) for train_val in ['train', 'validation']: with open('bottleneck/btl_' + train_val + '.txt', 'w') as f_image: for class_name in class_names: dataset_train_class_path = os.path.join(dataset_path, train_val, class_name) logging.debug('dataset_train_class_path {}'.format(dataset_train_class_path)) images_list = [] images_name_list = [] images_path_name = sorted(glob.glob(dataset_train_class_path + 
'/*.jpg')) logging.debug('images_path_name {}'.format(len(images_path_name))) for index, image in enumerate(images_path_name): # logging.debug('image {}'.format(image)) img = Image.open(image) img = preprocess_image(img) current_batch_size = len(images_list) # logging.debug('current_batch_size {}'.format(current_batch_size)) images_list.append(img) image_name = image.split('/')[-1].split('.jpg')[0] images_name_list.append(image) images_list_arr = np.array(images_list) # TODO: Skipping n last images of a class which do not sum up to batch_size if (current_batch_size < batch_size-1): continue X = images_list_arr bottleneck_features_train_class = model.predict(X, batch_size) # bottleneck_features_train_class = model.predict(X, nb_train_class_samples // batch_size) ## Save bottleneck file btl_save_file_name = btl_path + train_val + '/btl_' + train_val + '_' + class_name + '.' + str(index).zfill(7) + '.npy' logging.info('btl_save_file_name {}'.format(btl_save_file_name)) np.save(open(btl_save_file_name, 'wb'), bottleneck_features_train_class) for name in images_name_list: f_image.write(str(name) + '\n') images_list = [] images_name_list = [] def train_model(): ## Build network model = applications.VGG16(include_top=False, weights='imagenet', input_shape=input_shape) #model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=input_shape) # Get sorted bottleneck file names in a list btl_train_names = sorted(glob.glob(btl_train_path + '/*.npy')) btl_val_names = sorted(glob.glob(btl_val_path + '/*.npy')) ## Train Labels btl_train_list = [] train_labels_class = [] train_labels_iou = [] # Get list of image IoU values with open('bottleneck/btl_train.txt') as f_btl_train: btl_train_list = f_btl_train.readlines() # logging.debug('btl_train_list {}'.format(btl_train_list)) for btl_train_image in btl_train_list: train_labels_class.append(btl_train_image.split('/')[2]) iou_value = np.round(np.float( 
btl_train_image.split('_')[-1].split('.jpg')[0] ), 2) train_labels_iou.append(iou_value) # logging.debug('val {}'.format(val)) # logging.debug('class_names {}'.format(class_names)) # logging.debug('train_labels_class {}'.format(train_labels_class)) train_labels_class_int = [] for index, class_name in enumerate(train_labels_class): train_labels_class_int.append(class_names.index(class_name)) train_labels_class = train_labels_class_int # logging.debug('train_labels_class {}'.format(train_labels_class)) train_labels_class = np.array(train_labels_class) train_labels_iou = np.array(train_labels_iou) logging.debug('train_labels_iou {}'.format(train_labels_iou)) logging.debug('train_labels_iou {}'.format(type(train_labels_iou))) logging.debug('train_labels_class {}'.format(type(train_labels_class))) logging.debug('train_labels_class {}'.format((train_labels_class.shape))) # Load bottleneck files to create train set train_data = [] for index, btl_name in enumerate(btl_train_names): temp = np.load(open(btl_name)) train_data.append(temp) train_data = np.array(train_data) n1, n2, w, h, c = train_data.shape logging.info('train_data {}'.format(train_data.shape)) train_data_ = train_data train_data = np.reshape(train_data_, (n1*n2, w, h, c)) logging.info('train_data {}'.format(train_data.shape)) ## Validation Labels btl_val_list = [] val_labels_class = [] val_labels_iou = [] # Get list of image IoU values with open('bottleneck/btl_validation.txt') as f_btl_val: btl_val_list = f_btl_val.readlines() # logging.debug('btl_val_list {}'.format(btl_val_list)) for btl_val_image in btl_val_list: val_labels_class.append(btl_val_image.split('/')[2]) val = np.round(np.float( btl_val_image.split('_')[-1].split('.jpg')[0] ), 2) val_labels_iou.append(val) # logging.debug('val {}'.format(val)) # logging.debug('val_labels_class {}'.format(val_labels_class)) val_labels_class_int = [] for index, class_name in enumerate(val_labels_class): val_labels_class_int.append(class_names.index(class_name)) 
val_labels_class = val_labels_class_int # logging.debug('val_labels_class {}'.format(val_labels_class)) val_labels_class = np.array(val_labels_class) # logging.debug('val_labels_class {}'.format(val_labels_class)) val_labels_iou = np.array(val_labels_iou) # logging.debug('val_labels_iou {}'.format(val_labels_iou)) logging.debug('val_labels_iou {}'.format(type(val_labels_iou))) logging.debug('val_labels_class {}'.format(type(val_labels_class))) logging.debug('val_labels_class {}'.format(val_labels_class.shape)) # Load bottleneck files to create validation set val_data = [] for index, btl_name in enumerate(btl_val_names): temp = np.load(open(btl_name,'rb')) val_data.append(temp) val_data = np.array(val_data) n1, n2, w, h, c = val_data.shape logging.info('val_data {}'.format(val_data.shape)) val_data_ = val_data val_data = np.reshape(val_data_, (n1*n2, w, h, c)) logging.info('val_data {}'.format(val_data.shape)) ## Register Callbacks filename = 'output/model_train.csv' csv_log = CSVLogger(filename, separator=' ', append=False) early_stopping = EarlyStopping( monitor='loss', patience=early_stopping_patience, verbose=1, mode='min') #filepath = "output/best-weights-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5" filepath = "output/best-weights-{epoch:03d}-{val_loss:.4f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='min', period=1) tensorboard = TensorBoard(log_dir=logs_path_name, histogram_freq=0, write_graph=True, write_images=True) callbacks_list = [csv_log, early_stopping, checkpoint, tensorboard] logging.info('callbacks_list {}'.format(callbacks_list)) ## Generate weights based on images count for each class class_weight_val = class_weight.compute_class_weight('balanced', np.unique(train_labels_class), train_labels_class) logging.debug('class_weight_val {}'.format(class_weight_val)) input_shape_btl_layer = train_data.shape[1:] logging.debug('input_shape_btl_layer 
{}'.format(input_shape_btl_layer)) #model = create_model(is_input_bottleneck=True, is_load_weights=False, input_shape, optimizer, learn_rate, decay, momentum, activation, dropout_rate) model = create_model(True, False, input_shape_btl_layer, len(class_names), optimizer, learn_rate, decay, momentum, activation, dropout_rate) logging.info('train_labels_iou {}'.format(train_labels_iou.shape)) logging.info('train_labels_class {}'.format(train_labels_class.shape)) logging.info('train_data {}'.format(train_data.shape)) logging.info('val_labels_iou {}'.format(val_labels_iou.shape)) logging.info('val_labels_class {}'.format(val_labels_class.shape)) logging.info('val_data {}'.format(val_data.shape)) # TODO: class_weight_val wrong model.fit(train_data, [train_labels_class, train_labels_iou], class_weight=[class_weight_val, class_weight_val], # dictionary mapping classes to a weight value, used for scaling the loss function (during training only). epochs=epochs, batch_size=batch_size, validation_data=(val_data, [val_labels_class, val_labels_iou]), callbacks=callbacks_list) # TODO: These are not the best weights model.save_weights(top_model_weights_path_save) # Initialize Input, Output Data init() # Save Bottlenecks (As Numpy Arrays) save_bottleneck() train_model()
DeepFashion2/fashion_train_alt.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import random

import cv2
import matplotlib.pyplot as plt
import numpy as np
import albumentations as A
# -


def show(image):
    """Display ``image`` in a fresh 6x6-inch matplotlib figure."""
    plt.figure(figsize=(6, 6))
    plt.imshow(image)


# Load the demo picture; OpenCV reads BGR, so convert to RGB for plotting.
image = cv2.cvtColor(cv2.imread('./images/parrot.jpg'), cv2.COLOR_BGR2RGB)
show(image)

# ### `ReplayCompose` lets you debug an augmentation pipeline. Apply a pipeline to the image
transform = A.ReplayCompose(
    [
        A.Resize(512, 512, always_apply=True),
        A.RandomCrop(200, 200, always_apply=True),
        A.OneOf([
            A.RGBShift(),
            A.HueSaturationValue(),
        ]),
    ]
)

data = transform(image=image)
show(data['image'])

# ### Augmentations are random, and we may want to know exactly which random parameters were drawn. The `'replay'` and `'params'` keys record them
data['replay']

# ### The recorded parameters can be replayed verbatim on other images

image2 = cv2.cvtColor(cv2.imread('./images/image_2.jpg'), cv2.COLOR_BGR2RGB)
show(image2)

image3 = cv2.cvtColor(cv2.imread('./images/image_3.jpg'), cv2.COLOR_BGR2RGB)
show(image3)

image2_transformed = A.ReplayCompose.replay(data['replay'], image=image2)['image']
show(image2_transformed)

image3_transformed = A.ReplayCompose.replay(data['replay'], image=image3)['image']
show(image3_transformed)
notebooks/replay.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Les types de modèles de propagation
# * Le modèle SI est un modèle où il n'y a que deux possibilités : ou bien l'individu est susceptible d'être infecté (S), ou bien il est infecté à vie (I). Cela s'applique par exemple au cas du SIDA. Pour le coronavirus, nous ne l'utiliserons pas.
# * Le modèle SIR est un modèle où l'individu peut être guéri du virus, et être donc immunisé (R). Ce modèle s'applique bien au problème du coronavirus, et nous l'avons déjà modélisé.
# * Le modèle SIS est un modèle où l'individu peut guérir, mais est alors de nouveau susceptible d'être infecté. Il n'y a pas d'immunité. On va tester ce modèle là, puisque nous n'avons pas la certitude qu'il est impossible de contracter plusieurs fois le coronavirus.
# * Dans le modèle SEIR, il y a un état intermédiaire, exposé (E). Un individu qui a été au contact d'une personne infectée devient infecté avec une certaine probabilité. L'individu peut être susceptible, exposé, infecté puis immunisé.
# * Le modèle SEIS est identique au modèle SEIR mais sans immunité.
#
# D'autres caractéristiques peuvent être ajoutées à ces modèles, telles que la possibilité de décéder (D), d'être mis en quarantaine (Q) ou encore d'être immunisé à la naissance (M).

# # Choix d'un modèle de propagation du virus
# Je vais donc tester un modèle SIS où les individus qui guérissent du coronavirus sont de nouveau susceptibles de tomber malade.
# +
import datetime
import os

import yaml
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
# +
# Lecture du fichier d'environnement.  os.path.join keeps the path portable:
# the original '..\env.yaml' only worked on Windows and relied on the
# invalid '\e' escape sequence.
ENV_FILE = os.path.join('..', 'env.yaml')
with open(ENV_FILE) as f:
    params = yaml.load(f, Loader=yaml.FullLoader)

# Initialisation des chemins vers les fichiers
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
                         params['directories']['processed'],
                         params['files']['all_data'])

# Lecture du fichier de données: one row per country/region, province and
# day, keeping observations up to 2020-03-12.
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=['Last Update'])
               .assign(day=lambda _df: _df['Last Update'].dt.date)
               .drop_duplicates(subset=['Country/Region', 'Province/State', 'day'])
               [lambda df: df['day'] <= datetime.date(2020, 3, 12)]
               )
# -

# Aggregate provinces per country/day and derive daily new infections from
# the cumulative 'Confirmed' column.
epidemie_df = epidemie_df.groupby(['Country/Region', 'day']).sum()
epidemie_df['Infected'] = epidemie_df['Confirmed'].groupby(['Country/Region']).diff()
epidemie_df = epidemie_df.reset_index()

# +
# Importation de la base de données de la Banque Mondiale pour la population
# par pays en 2018 (path made portable, duplicate pandas import removed).
pop_df = pd.read_excel(os.path.join('..', 'Population.xlsx'))
# -

pop_df = pop_df[['Country Name', '2018 [YR2018]']]
pop_df = pop_df.rename(columns={"Country Name": "Country",
                                "2018 [YR2018]": "Population"})
epidemie_df = epidemie_df.rename(columns={'Country/Region': 'Country'})

# +
# Pour pouvoir merger les deux bases de données, je fais en sorte que les
# noms des pays soient identiques.  A single replace() mapping supersedes the
# thirteen copy-pasted .loc assignments; Series.replace only substitutes
# exact full-value matches, so behaviour is unchanged.
pop_df['Country'] = pop_df['Country'].replace({
    'Brunei Darussalam': 'Brunei',
    'Congo, Dem. Rep.': 'Congo (Kinshasa)',
    'United States': 'US',
    'Egypt, Arab Rep.': 'Egypt',
    'United Kingdom': 'UK',
    'Hong Kong SAR, China': 'Hong Kong',
    'Iran, Islamic Rep.': 'Iran',
    'Korea, Rep.': 'South Korea',
    'Macao SAR, China': 'Macau',
    'China': 'Mainland China',
    'Russian Federation': 'Russia',
    'St. Martin (French part)': 'Saint Martin',
    'Slovak Republic': 'Slovakia',
})
# +
# Je merge les deux bases de données; certaines régions n'ont pas pu être
# mergées — pour celles-là, il faudra entrer manuellement la population.
final_df = pd.merge(epidemie_df, pop_df, on='Country', how='outer', indicator=True)
final_df = final_df[final_df['_merge'] == "both"]
# +
# Cette fonction définit la taille de la population selon le pays.
def pop_pays(pays):
    """Set the global ``pop`` to the 2018 population of ``pays``.

    The value is also returned; the global assignment is kept for
    backward compatibility with the cells below that read ``pop``.
    """
    global pop
    pop = final_df[final_df['Country'] == pays]['Population'].iloc[0]
    return pop


# +
# Modèle SIS.  The simulation and the plot were previously copy-pasted once
# per country; they are factored into two reusable helpers instead.
def run_sis(population, beta=0.2, gamma=0.01, steps=500):
    """Simulate a SIS epidemic and return the two trajectories.

    Starts from a single infected individual.  At each step:
        S' = S - beta * S * I / population + gamma * I
        I' = I + beta * S * I / population - gamma * I

    Returns (sus, inf): lists of length ``steps`` with the susceptible and
    infected counts after each step.
    """
    S = population - 1
    I = 1
    sus = []
    inf = []
    for t in range(0, steps):
        S, I = (S - beta * (S * I / population) + gamma * I,
                I + beta * (S * I / population) - gamma * I)
        sus.append(S)
        inf.append(I)
    return sus, inf


def plot_sis(sus, inf):
    """Plot susceptible/infected trajectories of a SIS simulation."""
    figure = plt.figure()
    # canvas.set_window_title was removed in Matplotlib 3.6; the manager
    # method is the supported spelling.
    figure.canvas.manager.set_window_title('SIS model')
    infectes, = plt.plot(inf, label='Infectes')
    susceptibles, = plt.plot(sus, label='Susceptibles')
    plt.legend(handles=[infectes, susceptibles])
    plt.xlabel('Temps')
    plt.ylabel('Population')
    plt.show()


# +
# Choix du pays à modéliser
pop_pays('France')
pop

sus, inf = run_sis(pop)
plot_sis(sus, inf)
# -

pop_pays('Mainland China')
pop

# +
sus, inf = run_sis(pop)
plot_sis(sus, inf)
# -
notebooks/Nouveau_modele.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Task 1
#
# Here we will see how our nn and autograd works.
#
# Also we will review some Catalyst's abstractions, implement callbacks and datasets.
#
# Unfortunately, python is slow, and implementing dynamic computational graph in pure
# python for product-ready solution is not a good idea. But this task will help you to
# understand what's happening when you call `backward` method for variable or tensor.
# Also it will help you in learning Catalyst framework and will teach how you to write
# your code in more Catalyst-like way.

# + pycharm={"name": "#%%\n"}
import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split

# Project-local micrograd-style modules (nn/optim/engine live next to this
# notebook; they are not PyPI packages).
from nn import Linear, ReLU, CrossEntropyLoss, Module
from optim import SGD
from engine import Value

from matplotlib import pyplot as plt
import seaborn as sns

sns.set(style="whitegrid", font_scale=1.4)

# %pylab inline
# -

# ### Defining toy dataset
#
# To be more human-readable and easy to understand, we want to store every data in
# key-value format.
#
# So, the dataset should yield dict, moreover we will store train/valid datasets in a dict.

# + pycharm={"name": "#%%\n"}
class Dataset:
    """Minimal map-style dataset: each item is a ``{"features", "targets"}`` dict."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __getitem__(self, item):
        # NOTE(review): student placeholder — the Ellipsis values must be
        # replaced with the feature row and target for ``item``.
        return {"features": ..., "targets": ...}

    def __len__(self):
        return len(self.y)


X, y = make_moons(200, noise=0.2)
# NOTE(review): student placeholder — the right-hand side is missing, so this
# line is not valid Python until the split (e.g. train_test_split) is filled in.
X_train, X_val, y_train, y_val = # Split data to Train and Valid

datasets = {"train": Dataset(X_train, y_train),
            "valid": Dataset(X_val, y_val)}
# -

# Take a look on a data.

# + pycharm={"name": "#%%\n"}
plt.figure(figsize=(10, 8))
plt.title("Data", fontsize=14)
colors = list(map(lambda x: "green" if x == 0 else "orange", y))
plt.scatter(X[:, 0], X[:, 1], c=colors)
plt.show()
# -

# ### Model
#
# Let's define our model in PyTorch-style. But don't forget to implement `parameters()` method.

# + pycharm={"name": "#%%\n"}
class SimpleModel(Module):
    """Student-implemented classifier built from the custom nn layers."""

    def __init__(self):
        # NOTE(review): placeholder — the body is only a comment, which
        # leaves this method empty (invalid) until the network is created.
        # Create your own network!

    def forward(self, inp):
        # Placeholder: should return the model output for ``inp``.
        return ...

    def parameters(self):
        # Placeholder: should return the flat list of trainable parameters.
        return ...
# -

# ## For loop
#
# Let's start with simple train/test loop.

# + pycharm={"name": "#%%\n"}
criterion = CrossEntropyLoss()
model = SimpleModel()
optimizer = SGD(model.parameters(), lr=0.1)

num_epochs = 100
batch_size = 4
log_period = 5

for epoch in range(num_epochs):
    current_batch = []
    metrics = {}
    for k, dataset in datasets.items():
        loader_metrics = {}
        epoch_loss = 0
        epoch_accuracy = 0
        for idx, data in enumerate(dataset):
            last = idx == (len(dataset) - 1)
            current_batch.append(data)
            if last or len(current_batch) == batch_size:
                current_batch = []
                # NOTE(review): student placeholder — the training step for
                # the gathered batch is missing, so the loop body below is
                # empty (invalid) until implemented.
                for data in dataset:
                    # Train your model!
# -

# ## General training loop - Catalyst intro - Runner¶
#
# Code above can be reused for almost all machine learning task. Let's take a look on
# experiment structure
#
# ```
# for stage in stage:
#     for epoch in epochs:
#         for loader on loaders:
#             for batch in loader:
#                 # do something
# ```
#
# ### Runner
#
# In most cases we only need to adapt our batch handling method. And here comes the Runner.
#
# Runner is the main part of your experiment. It runs train loop, calls callbacks
# (we will discuss them later) and keeps track on your model. And the only thing you
# need to change is _handle_batch method.

# +
from tqdm.notebook import tqdm


class Runner:
    """Bare training loop: iterates datasets, assembles batches by hand and
    delegates all per-batch work to ``_handle_batch``."""

    def __init__(
        self,
        model,
        criterion,
        optimizer,
        datasets,
        batch_size
    ):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.datasets = datasets
        self.batch_size = batch_size
        self.input = {}
        self.output = {}
        self.batch_metrics = {}
        # NOTE(review): attribute name is misspelled ("metrcis");
        # ``train`` below assigns self.loader_metrics separately.
        self.loader_metrcis = {}
        self.epoch_metrics = {}

    def _handle_batch(self, batch, train=True):
        """
        Stores the main logic of data aggregating.
        """
        loss = 0
        correct = 0
        # NOTE(review): student placeholder — the loop body is missing
        # (invalid Python until the forward pass / loss / metric are added).
        for data in batch:
            # Calculate predictions, loss and metric
        loss = loss / len(batch)
        accuracy = correct / len(batch)
        if train:
            # Optimize model's parameters
        self.batch_metrics = {"loss": loss.item(), "accuracy": accuracy}

    def train(self, num_epochs: int = 100, verbose=False):
        for epoch in range(num_epochs):
            self.epoch_metrics = {}
            self.epoch = epoch
            for dataset_name, dataset in self.datasets.items():
                self.dataset_name = dataset_name
                self.loader_metrics = {}
                current_batch = []
                if verbose:
                    iter_ = tqdm(enumerate(dataset), total=len(dataset))
                else:
                    iter_ = enumerate(dataset)
                for idx, data in iter_:
                    last = idx == (len(dataset) - 1)
                    current_batch.append(data)
                    if last or len(current_batch) == self.batch_size:
                        # Handle batch
                        current_batch = []
                        for k, v in self.batch_metrics.items():
                            if k not in self.loader_metrics:
                                self.loader_metrics[k] = []
                            self.loader_metrics[k].append(v)
                for metric, value in self.loader_metrics.items():
                    value = np.mean(self.loader_metrics[metric])
                    self.loader_metrics[metric] = value
                    print(f"epoch {epoch}: {dataset_name} {metric} - {value}")
# -

# ### Run training

criterion = CrossEntropyLoss()
model = SimpleModel()
optimizer = SGD(model.parameters(), lr=0.1)

runner = Runner(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    datasets=datasets,
    batch_size=3,
)
runner.train(10)

# ### Visualization

# +
h = 0.25
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Xmesh = np.c_[xx.ravel(), yy.ravel()]
inputs = [list(map(Value, xrow)) for xrow in Xmesh]
scores = list(map(model, inputs))
# Softmax over the two logits gives P(class 1) for each grid point.
Z = np.array([s[1].exp().data / (s[0].exp() + s[1].exp()).data for s in scores])
Z = Z.reshape(xx.shape)

fig = plt.figure(figsize=(10, 8))
plt.title("Decision boundary", fontsize=14)
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.6)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.show()
# -

# ## General training loop - Catalyst intro - Callbacks
#
# But could we make it even more general? Should we rewrite loss.backward or compute
# accuracy again and again? Do we really need it?
#
# I don't think so, I think, we could introduce another general abstaction for typical
# train-loop logic. Let's introduce Callbacks!
#
# ### Callbacks
#
# In catalyst callbacks have significant impact in everything you do.
# Let's try to implement some of them.
#
# There are a list of moments, where callbacks can be integrated. We will need only
# three of them.
# ```
# on_stage_start
#    on_epoch_start
#       on_loader_start
#          on_batch_start
#          -------> on_batch_end
#       -----> on_loader_end
#    --> on_epoch_end
# on_stage_end
# ```

# + pycharm={"name": "#%%\n"}
class Callback:
    """No-op base class; subclasses override only the hooks they need."""

    def on_stage_start(self):
        pass

    def on_stage_end(self):
        pass

    def on_epoch_start(self):
        pass

    def on_epoch_end(self, runner):
        pass

    def on_loader_start(self):
        pass

    def on_loader_end(self, runner):
        pass

    def on_batch_start(self):
        pass

    def on_batch_end(self, runner):
        pass


class LossCallback(Callback):
    """
    Aggregating loss value.
    """

    def __init__(self):
        self.cum_loss = 0
        self.num_batches = 0

    def on_batch_end(self, runner):
        """
        On batch end action.
        Accumulates loss and num batches.

        Args:
            runner: the Runner; its ``output`` dict holds the batch loss.
        """
        self.cum_loss += runner.output["loss"]
        self.num_batches += 1

    def on_loader_end(self, runner):
        """
        On loader end action.
        Writes the mean loss over the loader into ``runner.epoch_metrics``
        and resets the accumulators.
        """
        runner.epoch_metrics["loss"] = self.cum_loss / self.num_batches
        self.cum_loss = 0
        self.num_batches = 0


class AccuracyCallback(Callback):
    """
    Aggregating accuracy value.
    """

    def __init__(self):
        self.correct = 0

    def on_batch_end(self, runner):
        """
        On batch end action.
        Accumulates number of correct predictions.
        """
        # NOTE(review): student placeholder — right-hand side is missing,
        # so this line is not valid Python until implemented.
        self.correct += # Calculate Accuracy

    def on_loader_end(self, runner):
        """
        On loader end action.
        Writes the accuracy over the loader into ``runner.epoch_metrics``.
        """
        # NOTE(review): student placeholder — right-hand side is missing.
        runner.epoch_metrics["accuracy"] = # Sum up metrics
        self.correct = 0


class LoggerCallback(Callback):
    """
    Log metrics to output.
    """

    def __init__(self, log_period):
        self.log_period = log_period

    def on_epoch_end(self, runner):
        """
        On epoch end action.
        Prints all epoch metrics if log_period is suitable.
        """
        if runner.epoch % self.log_period == 0:
            log_string = f"Epoch: {runner.epoch}\n"
            for metric, value in runner.epoch_metrics.items():
                # NOTE(review): student placeholder — right-hand side missing.
                log_string += # Logging all metrics
            print(log_string)


class OptimizerCallback(Callback):
    """Runs the optimization step (zero grad / backward / step) around batches."""

    def on_batch_start(self, runner):
        if runner.dataset_name == "train":
            # NOTE(review): student placeholder — empty body (invalid) until
            # gradient reset is implemented.
            # Reset gradients

    def on_batch_end(self, runner):
        loss = 0
        for data, outp in zip(runner.input, runner.output):
            current_loss = runner.criterion(outp, data["targets"])
            loss += current_loss
        loss = loss / len(runner.input)
        if runner.dataset_name == "train":
            # NOTE(review): student placeholder — empty body (invalid) until
            # backward + optimizer step are implemented.
            # Optimize model's parameters
# + pycharm={"name": "#%%\n"}
from tqdm.notebook import tqdm


class Runner:
    """Callback-driven training loop: the loop only assembles batches and
    fires callback hooks; metrics and optimization live in the callbacks."""

    def __init__(
        self,
        model,
        criterion,
        optimizer,
        datasets,
        batch_size,
        callbacks,
    ):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.datasets = datasets
        self.batch_size = batch_size
        self.callbacks = callbacks
        self.input = {}
        self.output = {}
        self.batch_metrics = {}
        # NOTE(review): misspelled attribute kept as in the original.
        self.loader_metrcis = {}
        self.epoch_metrics = {}

    def _handle_batch(self, batch):
        """
        Stores the main logic of data aggregating.
        """
        output = []
        # NOTE(review): student placeholder — loop body missing (invalid)
        # until the forward pass is implemented.
        for data in batch:
            # Calculate predictions
        self.input = batch
        self.output = output

    def train(self, num_epochs: int = 100, verbose=False):
        for epoch in range(num_epochs):
            self.epoch_metrics = {}
            self.epoch = epoch
            for dataset_name, dataset in self.datasets.items():
                self.dataset_name = dataset_name
                self.loader_metrics = {}
                current_batch = []
                if verbose:
                    iter_ = tqdm(enumerate(dataset), total=len(dataset))
                else:
                    iter_ = enumerate(dataset)
                for idx, data in iter_:
                    last = idx == (len(dataset) - 1)
                    current_batch.append(data)
                    if last or len(current_batch) == self.batch_size:
                        for clb in self.callbacks.values():
                            clb.on_batch_start(self)
                        # Handle batch
                        current_batch = []
                        for clb in self.callbacks.values():
                            clb.on_batch_end(self)
                for clb in self.callbacks.values():
                    clb.on_loader_end(self)
# -

# ### Run training

# + pycharm={"name": "#%%\n"}
criterion = CrossEntropyLoss()
model = SimpleModel()
optimizer = SGD(model.parameters(), lr=0.1)

runner = Runner(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    datasets=datasets,
    batch_size=3,
    callbacks={
        "loss": LossCallback(),
        "accuracy": AccuracyCallback(),
        "logger": LoggerCallback(log_period=5),
        "optimizer": OptimizerCallback(),
    }
)
runner.train(50)
# -

# ### Visualization

# + pycharm={"name": "#%%\n"}
h = 0.25
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Xmesh = np.c_[xx.ravel(), yy.ravel()]
inputs = [list(map(Value, xrow)) for xrow in Xmesh]
scores = list(map(model, inputs))
Z = np.array([s[1].exp().data / (s[0].exp() + s[1].exp()).data for s in scores])
Z = Z.reshape(xx.shape)

fig = plt.figure(figsize=(10, 8))
plt.title("Decision boundary", fontsize=14)
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.6)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.show()
# -

# ## MNIST
#
# Try to train model on MNIST task!

# + pycharm={"name": "#%%\n"}
import mnist

X_train, y_train, X_val, y_val, X_test, y_test = mnist.load_dataset()
# -

# + pycharm={"name": "#%%\n"}
class MNISTDataset:
    """Map-style MNIST dataset yielding ``{"features", "targets"}`` dicts."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __getitem__(self, idx):
        # NOTE(review): student placeholder — fill in the item values.
        return {"features": ..., "targets": ...}

    def __len__(self):
        return len(self.y)


# Python is slow, that's why we use only small group of object
datasets = {
    "train": MNISTDataset(X_train[:2000], y_train[:2000]),
    "valid": MNISTDataset(X_val[:200], y_val[:200])
}
# -

# + pycharm={"name": "#%%\n"}
class MnistModel(Module):
    """Student-implemented MLP for 28x28 MNIST digits."""

    def __init__(
        self,
        inp_shape=28*28,
        out_shape=10,
        hidden_shapes=[10, 10]
    ):
        # NOTE(review): placeholder — empty body (invalid) until the
        # network is created.
        # Create your model!

    def forward(self, inp):
        return ...

    def parameters(self):
        parameters = []
        # Don't forget about parameters!
        return parameters
# -

# + pycharm={"name": "#%%\n"}
criterion = CrossEntropyLoss()
model = MnistModel()
optimizer = SGD(model.parameters(), lr=0.1)

runner = Runner(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    datasets=datasets,
    batch_size=16,
    callbacks={
        "loss": LossCallback(),
        "accuracy": AccuracyCallback(),
        "logger": LoggerCallback(log_period=1),
        "optimizer": OptimizerCallback(),
    }
)
runner.train(5, verbose=True)
# -

# + pycharm={"name": "#%%\n"}
homework-1/micrograd/test_nn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # # Insurance database # # # Using MySQL commands and Python to import a database from the workbench to a Pandas df. An insurance's database is analyzed. # import mysql.connector from mysql.connector import Error from sqlalchemy import create_engine import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # # ## Connecting to MySQL and defining some functions # # + #Se crea la conexion def create_server_connection(host_name, user_name, user_password): try: connection = mysql.connector.connect( host = host_name, user = user_name, passwd = <PASSWORD> ) print("MySQL Database connection succesful") except Error as err: print(f"Error: '{err}'") return connection pw = "<PASSWORD>" #Put our MySQL Terminal password connection = create_server_connection("localhost", "root", pw) #Se crea la base de datos def create_database(connection, query): cursor = connection.cursor() #to make statements to communicate with mysql database try: cursor.execute(query) print('Database created successufully') except Error as err: print(f"Error: '{err}'") db = 'aseguradora_db' #nombre de la base de datos a la cual conectar #Se conecta a la base de datos creada def create_db_connection(host_name, user_name, user_password, db_name): connection = None try: connection = mysql.connector.connect( host = host_name, user = user_name, passwd = <PASSWORD>, database = db_name) print('MySQL database connection successful') except Error as err: print(f"Error: '{err}'") return connection # + #Definiendo las funciones para ejecutar y leer queries def execute_query(connection, query): cursor = connection.cursor() try: cursor.execute(query) connection.commit() print('Query was successful') except Error as err: print(f"Error: '{err}'") def read_query(connection, 
query): cursor = connection.cursor() result = None try: cursor.execute(query) result = cursor.fetchall() #will return all results in table return result except Error as err: print(f"Error: '{err}'") # + view_users = """ SELECT * FROM users; """ connection = create_db_connection("localhost", "root", pw, db) results = read_query(connection, view_users) for result in results: print(result) # - # # ### Joining the four tables as a view for visualization # # # ***The database from MySQL is imported to a pandas dataframe*** # # + view_insurance_table = """ CREATE OR REPLACE VIEW insurance_table AS SELECT plan_subs.plan_id, plan_subs.user_id, users.gender, users.date_of_birth, plan_subs.subscription_date, TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) AS plan_age, plan_prices.annual_price_of_plan, claim_report_date, claims_reports.claim_amount FROM users, plan_prices, plan_subs LEFT OUTER JOIN claims_reports ON plan_subs.plan_id = claims_reports.plan_id WHERE plan_subs.user_id = users.user_id AND plan_prices.age = TIMESTAMPDIFF(YEAR, users.date_of_birth, plan_subs.subscription_date) AND plan_prices.gender = users.gender ORDER BY plan_subs.plan_id; """ connection = create_db_connection("localhost", "root", pw, db) execute_query(connection, view_insurance_table) # + view_insurance_table_2 = """ CREATE OR REPLACE VIEW insurance_table_2 AS SELECT plan_subs.plan_id, plan_subs.user_id, users.gender, users.date_of_birth, plan_subs.subscription_date, TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) AS plan_age, CASE WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 20 AND 24 THEN '20-24' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 25 AND 29 THEN '25-29' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 30 AND 34 THEN '30-34' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 35 AND 39 THEN '35-39' WHEN 
TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 40 AND 44 THEN '40-44' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 45 AND 49 THEN '45-49' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 50 AND 54 THEN '50-54' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 55 AND 59 THEN '55-59' WHEN TIMESTAMPDIFF(YEAR, users.date_of_birth,plan_subs.subscription_date) BETWEEN 60 AND 64 THEN '60-64' END AS age_group, plan_prices.annual_price_of_plan, claim_report_date, claims_reports.claim_amount, (claim_amount/annual_price_of_plan) AS claim_plan FROM users, plan_prices, plan_subs LEFT OUTER JOIN claims_reports ON plan_subs.plan_id = claims_reports.plan_id WHERE plan_subs.user_id = users.user_id AND plan_prices.age = TIMESTAMPDIFF(YEAR, users.date_of_birth, plan_subs.subscription_date) AND plan_prices.gender = users.gender ORDER BY plan_subs.plan_id; """ connection = create_db_connection("localhost", "root", pw, db) execute_query(connection, view_insurance_table_2) read_insurance_table_2 = """ SELECT * FROM insurance_table_2; """ connection = create_db_connection("localhost", "root", pw, db) results = read_query(connection, read_insurance_table_2) #Create a pandas dataframe from the view from_db = [] #creating an empty list for result in results: result = list(result) from_db.append(result) #appending the results to the empty list columns = ['Plan_id', 'User_id', 'Gender', 'Date_of_birth', 'Subscription_date', 'Plan_age', 'Age_group', 'Annual_price_of_plan', 'Claim_report_date', 'Claim_amount', 'Claim/plan'] insurance_table_2_df = pd.DataFrame(from_db, columns = columns) insurance_table_2_df # - # # ## Summarizing database info # # + view_insurance_results_4col = """ CREATE OR REPLACE VIEW insurance_results_4col AS SELECT '20-24' AS Age_group, 'M' AS Gender, ROUND(AVG(annual_price_of_plan), 2) AS Avg_annual_price_of_plan, ROUND(AVG(claim_amount), 2) 
AS Avg_claim_amount FROM insurance_table WHERE ((plan_age BETWEEN 20 AND 24) AND gender = 'M') UNION SELECT '25-29', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 25 AND 29) AND gender = 'M') UNION SELECT '30-34', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 30 AND 34) AND gender = 'M') UNION SELECT '35-39', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 35 AND 39) AND gender = 'M') UNION SELECT '40-44', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 40 AND 44) AND gender = 'M') UNION SELECT '45-49', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 45 AND 49) AND gender = 'M') UNION SELECT '50-54', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 50 AND 54) AND gender = 'M') UNION SELECT '55-59', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 55 AND 59) AND gender = 'M') UNION SELECT '60-64', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 60 AND 64) AND gender = 'M') UNION SELECT '20-24', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 20 AND 24) AND gender = 'F') UNION SELECT '25-29', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 25 AND 29) AND gender = 'F') UNION SELECT '30-34', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 30 AND 34) AND gender = 'F') UNION SELECT '35-39', 'F', ROUND(AVG(annual_price_of_plan), 2), 
ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 35 AND 39) AND gender = 'F') UNION SELECT '40-44', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 40 AND 44) AND gender = 'F') UNION SELECT '45-49', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 45 AND 49) AND gender = 'F') UNION SELECT '50-54', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 50 AND 54) AND gender = 'F') UNION SELECT '55-59', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 55 AND 59) AND gender = 'F') UNION SELECT '60-64', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2) FROM insurance_table WHERE ((plan_age BETWEEN 60 AND 64) AND gender = 'F'); """ connection = create_db_connection("localhost", "root", pw, db) execute_query(connection, view_insurance_results_4col) # + read_insurance_r4col = """ SELECT * FROM insurance_results_4col; """ connection = create_db_connection("localhost", "root", pw, db) results = read_query(connection, read_insurance_r4col) for result in results: print(result) # + #Create a pandas dataframe from the view from_db = [] #creating an empty list for result in results: result = list(result) from_db.append(result) #appending the results to the empty list columns = ['Age_group', 'Gender', 'Avg_annual_price_of_plan', 'Avg_claim_amount'] insurance_results_4col_df = pd.DataFrame(from_db, columns = columns) insurance_results_4col_df # + view_insurance_results_6col = """ CREATE OR REPLACE VIEW insurance_results_6col AS SELECT '20-24' AS Age_group, 'M' AS Gender, ROUND(AVG(annual_price_of_plan), 2) AS Avg_annual_price_of_plan, ROUND(AVG(claim_amount), 2) AS Avg_claim_amount, COUNT(plan_id) AS Plans_group, COUNT(claim_amount) AS Claims_group FROM insurance_table WHERE 
((plan_age BETWEEN 20 AND 24) AND gender = 'M') UNION SELECT '25-29', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 25 AND 29) AND gender = 'M') UNION SELECT '30-34', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 30 AND 34) AND gender = 'M') UNION SELECT '35-39', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 35 AND 39) AND gender = 'M') UNION SELECT '40-44', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 40 AND 44) AND gender = 'M') UNION SELECT '45-49', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 45 AND 49) AND gender = 'M') UNION SELECT '50-54', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 50 AND 54) AND gender = 'M') UNION SELECT '55-59', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 55 AND 59) AND gender = 'M') UNION SELECT '60-64', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 60 AND 64) AND gender = 'M') UNION SELECT '20-24', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 20 AND 24) AND gender = 'F') UNION SELECT '25-29', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), 
COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 25 AND 29) AND gender = 'F') UNION SELECT '30-34', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 30 AND 34) AND gender = 'F') UNION SELECT '35-39', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 35 AND 39) AND gender = 'F') UNION SELECT '40-44', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 40 AND 44) AND gender = 'F') UNION SELECT '45-49', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 45 AND 49) AND gender = 'F') UNION SELECT '50-54', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 50 AND 54) AND gender = 'F') UNION SELECT '55-59', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 55 AND 59) AND gender = 'F') UNION SELECT '60-64', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount) FROM insurance_table WHERE ((plan_age BETWEEN 60 AND 64) AND gender = 'F'); """ connection = create_db_connection("localhost", "root", pw, db) execute_query(connection, view_insurance_results_6col) # + read_insurance_r6col = """ SELECT * FROM insurance_results_6col; """ connection = create_db_connection("localhost", "root", pw, db) results = read_query(connection, read_insurance_r6col) for result in results: print(result) # + #Create a pandas dataframe from the view from_db = [] #creating an empty list for result in results: result = 
list(result) from_db.append(result) #appending the results to the empty list columns = ['Age_group', 'Gender', 'Avg_annual_price_of_plan', 'Avg_claim_amount', 'Plans/group', 'Claims/group'] insurance_results_6col_df = pd.DataFrame(from_db, columns = columns) insurance_results_6col_df # + view_insurance_results_8col = """ CREATE OR REPLACE VIEW insurance_results_8col AS SELECT '20-24' AS Age_group, 'M' AS Gender, ROUND(AVG(annual_price_of_plan), 2) AS Avg_annual_price_of_plan, ROUND(AVG(claim_amount), 2) AS Avg_claim_amount, COUNT(plan_id) AS Plans_group, COUNT(claim_amount) AS Claims_group, (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)) AS Claim_plan_avg, (COUNT(claim_amount)/COUNT(plan_id)) AS Claim_plan_no FROM insurance_table WHERE ((plan_age BETWEEN 20 AND 24) AND gender = 'M') UNION SELECT '25-29', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 25 AND 29) AND gender = 'M') UNION SELECT '30-34', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 30 AND 34) AND gender = 'M') UNION SELECT '35-39', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 35 AND 39) AND gender = 'M') UNION SELECT '40-44', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 40 AND 44) AND gender = 'M') 
UNION SELECT '45-49', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 45 AND 49) AND gender = 'M') UNION SELECT '50-54', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 50 AND 54) AND gender = 'M') UNION SELECT '55-59', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 55 AND 59) AND gender = 'M') UNION SELECT '60-64', 'M', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 60 AND 64) AND gender = 'M') UNION SELECT '20-24', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 20 AND 24) AND gender = 'F') UNION SELECT '25-29', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 25 AND 29) AND gender = 'F') UNION SELECT '30-34', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), 
(COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 30 AND 34) AND gender = 'F') UNION SELECT '35-39', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 35 AND 39) AND gender = 'F') UNION SELECT '40-44', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 40 AND 44) AND gender = 'F') UNION SELECT '45-49', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 45 AND 49) AND gender = 'F') UNION SELECT '50-54', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 50 AND 54) AND gender = 'F') UNION SELECT '55-59', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 55 AND 59) AND gender = 'F') UNION SELECT '60-64', 'F', ROUND(AVG(annual_price_of_plan), 2), ROUND(AVG(claim_amount), 2), COUNT(plan_id), COUNT(claim_amount), (ROUND(AVG(claim_amount)/AVG(annual_price_of_plan), 5)), (COUNT(claim_amount)/COUNT(plan_id)) FROM insurance_table WHERE ((plan_age BETWEEN 60 AND 64) AND gender = 'F'); """ connection = create_db_connection("localhost", "root", pw, db) execute_query(connection, 
view_insurance_results_6col) read_insurance_r8col = """ SELECT * FROM insurance_results_8col; """ connection = create_db_connection("localhost", "root", pw, db) results = read_query(connection, read_insurance_r8col) #Create a pandas dataframe from the view from_db = [] #creating an empty list for result in results: result = list(result) from_db.append(result) #appending the results to the empty list columns = ['Age_group', 'Gender', 'Avg_annual_price_of_plan', 'Avg_claim_amount', 'Plans/group', 'Claims/group', 'Claims/plan avg', 'Claims/plan no.'] insurance_results_8col_df = pd.DataFrame(from_db, columns = columns) insurance_results_8col_df # - # # ### Some graphics are shown # sns.relplot(x="Age_group", y="Avg_annual_price_of_plan", hue='Gender', ci=None, kind="line", data=insurance_results_8col_df); sns.relplot(x="Age_group", y="Avg_claim_amount", hue='Gender', ci=None, kind="line", data=insurance_results_8col_df); sns.relplot(x="Age_group", y="Claims/plan avg", hue='Gender', ci=None, kind="line", data=insurance_results_8col_df); sns.relplot(x="Age_group", y="Claims/plan no.", hue='Gender', ci=None, kind="line", data=insurance_results_8col_df); sns.relplot(x="Age_group", y="Plans/group", hue='Gender', ci=None, kind="line", data=insurance_results_8col_df); sns.relplot(x="Age_group", y="Claims/group", hue='Gender', ci=None, kind="line", data=insurance_results_8col_df); from matplotlib.pyplot import figure figure(figsize=(8, 6), dpi=80) fig.set_size_inches(12, 8) fig.set_dpi(100) # + #sns.lmplot(x=x, y=y, hue=hue, data=df, height=10, fit_reg=False, scatter_kws={"s": 5}) sns.set_theme() # Plot sepal width as a function of sepal_length across days g = sns.lmplot( data=insurance_table_2_df, x="Plan_age", y="Claim_amount", hue="Gender") #g.set_size_inches(12, 8) #g.set_dpi(100) # Use more informative axis labels than are provided by default g.set_axis_labels("Age", "Claims") # + def scatter_plot_df(df, x, y, hue, title, xlabel, ylabel): sns.set(style="whitegrid") g = 
sns.lmplot(x=x, y=y, hue=hue, data=df, height=10, fit_reg=False, scatter_kws={"s": 5}) #g.set_size_inches(12, 8) #g.set_dpi(100) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.show() scatter_plot_df(insurance_table_2_df, 'Plan_age', 'Claim_amount', 'Gender', 'Claims amount vs Age', 'Age', 'Claims')
insurancedb/Insurance - 2 From MySQL to Pandas df .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# environment set up
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras.layers import Reshape, Conv1D, MaxPooling1D, Flatten, Dense, Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras import metrics
from tensorflow.keras import regularizers
from sklearn.ensemble import RandomForestClassifier
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import accuracy_score
import os
import random
import pandas as pd
import ast
from scipy import stats as st
import time

spectrum_len = 250  # automate this

# Resolve data locations from the environment (assumes PWD and DATA_DIR are set).
parent_dir = os.environ['PWD']
stddata_path = os.path.join(os.environ['DATA_DIR'], "StdData-" + str(spectrum_len))
os.chdir(os.path.join(parent_dir, "lab-notebook", "smunukutla"))

# data.csv layout (by row): 0 = record numbers, 1 = spectrum names, 2 = integer labels.
data = pd.read_csv("data.csv", sep=",")
record_nums = data.iloc[0, :].tolist()
spectrum_names = data.iloc[1, :].tolist()
y = data.iloc[2, :].astype(int).tolist()
y = np.reshape(y, (len(y), 1))
num_samples = len(y)

# Load each record's standardized spectrum (second column of its per-record csv).
spectra = np.zeros((num_samples, spectrum_len))
for i in range(len(record_nums)):
    # NOTE(review): this rebinds `data`, clobbering the metadata frame loaded above;
    # after the loop `data` holds only the LAST record's csv.
    data = pd.read_csv(os.path.join(stddata_path, "{}.csv".format(record_nums[i])))
    # if i == 0:
    #     wavelengths[i,:] = data.iloc[:, 0].to_numpy()
    spectra[i, :] = data.iloc[:, 1].to_numpy()

spectra.shape

y_cat = to_categorical(y)

from sklearn.decomposition import FastICA

model = FastICA(n_components=3)
# NOTE(review): because of the loop above, `data` here is the last per-record csv,
# not the assembled `spectra` matrix -- presumably `spectra` was intended; confirm
# with the author before relying on `results`.
results = model.fit_transform(data)
results
results.shape


# Hand-rolled FastICA implementation (tanh nonlinearity), kept for comparison with
# sklearn's FastICA above.

def g(x):
    """FastICA nonlinearity g(x) = tanh(x)."""
    return np.tanh(x)


def g_der(x):
    """Derivative of g: g'(x) = 1 - tanh(x)^2."""
    return 1 - g(x) * g(x)


def center(X):
    """Subtract the per-row mean so each signal in X has zero mean."""
    X = np.array(X)
    mean = X.mean(axis=1, keepdims=True)
    return X - mean


def whitening(X):
    """Whiten X (rows = signals) so its covariance becomes the identity.

    Uses the eigendecomposition of the covariance matrix: X_w = E D^{-1/2} E^T X.
    Assumes the covariance is positive definite (non-degenerate signals).
    """
    cov = np.cov(X)
    d, E = np.linalg.eigh(cov)
    D = np.diag(d)
    D_inv = np.sqrt(np.linalg.inv(D))
    X_whiten = np.dot(E, np.dot(D_inv, np.dot(E.T, X)))
    return X_whiten


def calculate_new_w(w, X):
    """One fixed-point FastICA update of the unmixing vector w (returned normalized)."""
    w_new = (X * g(np.dot(w.T, X))).mean(axis=1) - g_der(np.dot(w.T, X)).mean() * w
    w_new /= np.sqrt((w_new ** 2).sum())
    return w_new


def ica(X, iterations, tolerance=1e-5):
    """Recover independent sources from mixtures X (rows = mixed signals).

    Centers and whitens X, then extracts one unmixing vector per component by
    fixed-point iteration with Gram-Schmidt deflation against previously found
    vectors. Iteration stops early once |<w, w_new>| is within `tolerance` of 1.
    Returns S = W X, the estimated sources. Uses np.random, so results vary
    between runs unless the global seed is set.
    """
    X = center(X)
    X = whitening(X)
    components_nr = X.shape[0]
    W = np.zeros((components_nr, components_nr), dtype=X.dtype)
    for i in range(components_nr):
        w = np.random.rand(components_nr)
        for j in range(iterations):
            w_new = calculate_new_w(w, X)
            if i >= 1:
                # Deflation: remove projections onto already-extracted components.
                w_new -= np.dot(np.dot(w_new, W[:i].T), W[:i])
            distance = np.abs(np.abs((w * w_new).sum()) - 1)
            w = w_new
            if distance < tolerance:
                break
        W[i, :] = w
    S = np.dot(W, X)
    return S


def plot_mixture_sources_predictions(X, original_sources, S):
    """Plot mixtures, true sources, and recovered sources in three stacked panels."""
    fig = plt.figure()
    plt.subplot(3, 1, 1)
    for x in X:
        plt.plot(x)
    plt.title("mixtures")
    plt.subplot(3, 1, 2)
    for s in original_sources:
        plt.plot(s)
    plt.title("real sources")
    plt.subplot(3, 1, 3)
    for s in S:
        plt.plot(s)
    plt.title("predicted sources")
    fig.tight_layout()
    plt.show()
lab-notebook/smunukutla/2020-07-07-SAM - ICA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# # Libraries

# +
import numpy as np
import matplotlib.pyplot as plt
from simple_markov_chain_lib import markov_chain
from numpy import random, pi, mean
from mpl_toolkits.mplot3d import Axes3D

# + [markdown]
# # Deliverable 1
# ## Question 1
# Each time the chain reaches state 1, by the Markov property (memorylessness) it is
# as if we restarted the chain from scratch, since we are back in the same initial
# state with the same transition matrix. So there is no need to stop and restart from
# state 1 every time; we simply keep going.
#
# ## Questions 2 & 3

# +
## Build Markov Chain
markov_table = {
    1: {2: 0.5, 3: 0.5},
    2: {1: 1 / 3, 4: 2 / 3},
    3: {3: 0.8, 4: 0.2},
    4: {1: 0.6, 4: 0.4}
}

for starting in [1, 2, 3, 4]:
    # The chain starts from `starting`, since we study excursions around that state.
    init_dist = {starting: 1.0}
    mc = markov_chain(markov_table, init_dist)
    N = 100_000

    # Initialize the dictionary "visits": every state is a key with value 0.
    visits = {state: 0 for state in (1, 2, 3, 4)}
    mc.start()
    completed = 0
    stoppingTimeCounter = 0
    stoppingTimes = []

    # Simulate N excursions: each return to the starting state completes one
    # excursion and records its length (the stopping time).
    while completed < N:
        visits[mc.running_state] += 1
        mc.move()
        stoppingTimeCounter += 1
        if mc.running_state == starting:
            stoppingTimes.append(stoppingTimeCounter)
            stoppingTimeCounter = 0
            completed += 1

    # Empirical invariant distribution: visit frequency divided by the mean
    # excursion length (renewal-reward estimate).
    print("Invariant Distribution starting from {}:".format(starting))
    for x, y in visits.items():
        print("%d: %.3f" % (x, (y / N) / np.mean(stoppingTimes)))

# + [markdown]
# Regarding question 2: having computed the invariant distribution theoretically, we
# observe that the simulation results differ only marginally from the theoretical
# values.
#
# We also observe that whatever the initial state is, the invariant distribution
# remains the same.

# + [markdown]
# # Deliverable 2
# ## Question 1
#
# The next cell contains the answer to the question; the cell after it visualizes
# the result.

# +
# Monte Carlo estimate of the volume of the unit ball in R^3 by sampling the cube
# (-1, 1)^3 and counting hits inside the sphere.
Ntrials, Nhits = 1_000_000, 0
for n in range(Ntrials):
    x, y, z = random.uniform(-1, 1, 3)  # draw 3 samples, each uniform over (-1, 1)
    if x**2 + y**2 + z**2 < 1:
        Nhits += 1
print("Monte Carlo estimator of V(3): %.5f" % ((2**3) * (Nhits / Ntrials)))
print("Actual value of V(3) up to 5 decimal digits: %.5f" % (4 * pi / 3))
print("The relative error is %.5f%%" % (100 * abs((2**3) * (Nhits / Ntrials) - (4 * pi / 3))))

# +
# Rejection sampling: keep drawing points in the cube until N of them land inside
# the unit sphere, then scatter-plot accepted (green) vs rejected (red) points.

# Parameters
N = 10_000  # number of required points
Ntrials, Nhits = 0, 0
acc_x, acc_y, acc_z = [], [], []  # accepted x, y, z
rej_x, rej_y, rej_z = [], [], []  # rejected x, y, z

# Rejection Sampling
while Nhits < N:
    Ntrials += 1
    x, y, z = random.uniform(-1, 1, 3)
    if x**2 + y**2 + z**2 < 1:
        acc_x.append(x)
        acc_y.append(y)
        acc_z.append(z)
        Nhits += 1
    else:
        rej_x.append(x)
        rej_y.append(y)
        rej_z.append(z)

print("Total number of samples drawn %d" % Ntrials)
print("Number of samples in the sphere %d" % N)

plt.rcParams['figure.figsize'] = (6, 6)
fig = plt.figure()
# FIX: Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and removed in
# 3.6; add_subplot(projection='3d') is the supported way to get a 3-D axes.
ax = fig.add_subplot(projection='3d')
# parameter s determines the size of each dot in the scatter plot
ax.scatter(acc_x, acc_y, acc_z, color='green', s=1)
ax.scatter(rej_x, rej_y, rej_z, color='red', s=1)
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-1, 1])
# ax.set_aspect('equal')  # set aspect ratio 1:1
plt.show()

# + [markdown]
# ## Question 2

# +
# Rejection sampling over the region L = {(x, y): (x^2 + y^2)^2 <= 2|xy|}.
N = 10_000  # number of required points
Ntrials, Nhits = 0, 0
acc_x, acc_y = [], []  # accepted x, y
rej_x, rej_y = [], []  # rejected x, y

# Rejection Sampling
while Nhits < N:
    Ntrials += 1
    x, y = random.uniform(-1, 1, 2)
    if (x**2 + y**2)**2 <= 2 * abs(x * y):
        acc_x.append(x)
        acc_y.append(y)
        Nhits += 1
    else:
        rej_x.append(x)
        rej_y.append(y)

print("Total number of samples drawn S = %d" % Ntrials)
print("Number of samples in the disk N = %d" % N)

# FIX: original read `ig, ax = plt.subplots()` -- typo for `fig`.
fig, ax = plt.subplots()
# parameter s determines the size of each dot in the scatter plot
plt.scatter(acc_x, acc_y, color='green', s=1)
plt.scatter(rej_x, rej_y, color='red', s=1)
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_aspect('equal')  # set aspect ratio 1:1
plt.show()

# + [markdown]
# ## Question 3

# +
# Estimate the area V(L) of the region and the integral I of |x + y| over it.
Ntrials, Nhits = 1_000_000, 0
absXPlusY = []
for n in range(Ntrials):
    x, y = random.uniform(-1, 1, 2)  # draw 2 samples, each uniform over (-1, 1)
    if (x**2 + y**2)**2 <= 2 * abs(x * y):
        absXPlusY.append(abs(x + y))
        Nhits += 1
VL = 4 * Nhits / Ntrials
print("Monte Carlo estimator of V(L): %.5f" % (VL))
print("Monte Carlo estimator of I: %.5f" % (VL * mean(absXPlusY)))
Lab5_2020/Lab5_03112163.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# <a href="https://colab.research.google.com/github/JulioHenri/Tecnicas-de-amostragem/blob/main/tec_amostragem.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown]
# # Importing libraries

# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

# + [markdown]
# # Loading and reading the database

# +
dados = pd.read_csv('HR_Engagement_Sat_Sales_UpdatedV4.0.csv')  # loading the csv file
dados.head()  # reading the first 5 records of the dataset

# + [markdown]
# ### Dropping variables that are not significant for this study
#
# Variables with zero or very small mean and/or variance, or with many missing values.

# +
dados = dados.drop(['ID', 'Name', 'Rising_Star', 'Critical', 'Trending Perf',
                    'Talent_Level', 'Validated_Talent_Level', 'EMP_Sat_OnPrem_1',
                    'CSR Factor', 'sales', 'Women_Leave', 'Men_Leave',
                    'EMP_Sat_OnPrem_2', 'EMP_Sat_OnPrem_3', 'EMP_Sat_OnPrem_4',
                    'EMP_Sat_OnPrem_5', 'EMP_Sat_Remote_3', 'EMP_Sat_Remote_4',
                    'EMP_Engagement_1', 'EMP_Engagement_2', 'EMP_Engagement_3',
                    'EMP_Engagement_4', 'EMP_Engagement_5'], axis=1)

# +
dados.describe()

# +
dados.info()

# + [markdown]
# We can see above that there are 14999 records and 62 columns. There are also
# several attributes with missing values, which will be analyzed later.

# + [markdown]
# # Exercises

# + [markdown]
# Exercise 1.1
#
# a. research unit: whether or not the employee stays at the company
#
# b. population: employees of a company
#
# c. data-collection instrument: the company's registration system
#
# d. responding unit: companies in the Human Resources area
#
# e. possible reference system:
#
# f. most likely sampling unit: only employees with a "low" salary
#
# g. alternative sampling units: employees of the Finance department and those
# not promoted in the last 5 years

# + [markdown]
# Exercise 1.5
#
# a. Trees in a forest: here I would use systematic random sampling, since it is
# quick and easy to execute; we could select the various trees, separate them by
# specific characteristics, and then choose an equal number of trees for each
# group, trying as much as possible to obtain a sample representative of the
# population. We could run a study predicting whether a tree will be cut down or
# not, based on its characteristics, age, and location.
#
# b. Children under 5 who had measles: since the population in question is
# already quite narrow (specific), simple random sampling is the most suitable
# due to its ease of application. The variable studied could be the sequelae
# each child had and the severity of the disease; for that we would need to know
# how long each child was hospitalized, how long they had the disease, and the
# physical and mental characteristics of each child.
#
# c. Workers in textile industries: stratified sampling is the best way to
# obtain a sample for this population, because we split the population into
# groups and subgroups and can then use simple random sampling within each
# group. Through interviews, we could study employee satisfaction with their
# work and classify each employee's evaluation into three classes (good, medium,
# or bad). To obtain this information, the questionnaires would contain personal
# information about each worker (age, race, gender) and work-related information
# such as how long they have worked in this industry, whether they have
# relatives working in the same sector, etc.

# + [markdown]
# # Simple Sampling

# + [markdown]
# Simply select some records from the database at random. In this case I will
# select 40% of the records.

# +
amostra1 = dados.sample(frac=.4)
amostra1.head()

# +
amostra1.info()

# +
print('a média é: {:.2f}'.format(amostra1['Sensor_Heartbeat(Average/Min)'].mean()))
# FIX: the label says variance ("variância") but the original computed .std();
# use .var() so the printed quantity matches the label.
print('A variância é: {:.2f}'.format(amostra1['Sensor_Heartbeat(Average/Min)'].var()))

# + [markdown]
# With this sample we obtain 6000 records, selecting 40% of the population at
# random.

# + [markdown]
# # Systematic Sampling

# + [markdown]
# The elements of the population are placed in a list and every n-th element of
# the list is chosen (systematically) to compose the sample.

# +
# FIX: np.random.choice(10, 1) could return 0, which would make np.arange's step
# zero and raise ValueError (and a size-1 array as step relies on deprecated
# array->scalar conversion). Draw a scalar step from 1..10 instead.
seed = np.random.choice(np.arange(1, 11))  # selecting a random number from 1 to 10
seed

# +
indices = np.arange(0, len(dados), seed)
indices

# + [markdown]
# In the first parameter of the code above I select the number at which the
# array should start, in the second the number at which it should stop, and
# lastly the interval at which the numbers are picked.
#
# **Note 1**: the 'np.arange' method returns an array (list) of values.
#
# **Note 2**: len(dados) returns the total number of records of the **dados**
# DataFrame.

# +
amostra2 = dados.loc[indices]

# + [markdown]
# In the code above I located, within my data, only the indices corresponding to
# the 'indices' array explained earlier.
#
# **NOTE**: the index of a DataFrame is the first value of each record from left
# to right, the one with no label above it.

# +
amostra2.head()

# +
amostra2.info()

# +
print('a média é: {:.2f}'.format(amostra2['Sensor_Heartbeat(Average/Min)'].mean()))
# FIX: same label/method mismatch as above -- use .var() to match "variância".
print('A variância é: {:.2f}'.format(amostra2['Sensor_Heartbeat(Average/Min)'].var()))

# + [markdown]
# As we can observe, the generated sample varies its number of records according
# to the randomly generated step, thus selecting systematically.

# + [markdown]
# # Example 2.1

# +
# creating a new sample with only 3 records.
nova_amostra = amostra2.sample(n=3)
nova_amostra

# + [markdown]
# For the data described in the new sample, the following population parameters
# can be defined:
#
# i. Each employee's department

# +
# All departments of the records obtained in the sample
nova_amostra['Department'].unique()

# +
print('D = {} = Y'.format(nova_amostra['Department'].unique()))

# + [markdown]
# ii. For the vector (FiTi)'

# +
# Obtaining the values of each employee's last evaluation score
nova_amostra.last_evaluation

# +
# Obtaining the values of the number of projects each employee worked on
nova_amostra.number_project

# + [markdown]
# B = <br/>
# (0.99 0.94 0.61 <br/>
# 3.00 7.00 5.00)<br/>

# + [markdown]
# Regarding population parametric functions, we have:

# + [markdown]
# 1. Average time spent at the company.

# +
# The mean() method returns the arithmetic mean of the numbers given to it.
nova_amostra['time_spend_company'].mean()

# + [markdown]
# 2. Mean of the 'last_evaluation' and 'number_project' variables.
# + id="aMEOqbkeGnkd" colab={"base_uri": "https://localhost:8080/"} outputId="689d189b-b3d9-403c-ab47-5ffac4515650"
print('média da última avaliação: {:.2f}'.format(nova_amostra['last_evaluation'].mean()))
print('média do número de projetos feitos pelos funcionários {}'.format(nova_amostra['number_project'].mean()))

# + [markdown] id="o99BrUxEKhpm"
# 3. Média de vezes que o funcionário visita o LinkedIn por nível de salário.

# + id="NZg8pK0jL_Jg" colab={"base_uri": "https://localhost:8080/"} outputId="92502f6a-df5c-4a5b-9a16-1901b4c28ee1"
# IMPROVED: select the column of interest *before* aggregating. The original
# `.mean()['LinkedIn_Hits']` averaged every column first (and raises a
# TypeError on non-numeric columns in pandas >= 2.0); the result for this
# column is identical.
nova_amostra.groupby(by='salary')['LinkedIn_Hits'].mean()

# + [markdown] id="M3KxEJVPNxk7"
# a. Total populacional
#

# + id="9uAv7DsbMXXd" colab={"base_uri": "https://localhost:8080/"} outputId="92ebf906-2316-4d9a-94d2-5c2bb43d10c8"
nova_amostra['time_spend_company'].sum()

# + [markdown] id="KIi6zazPP-vi"
# b. Média populacional

# + id="xjGfjaD9OABf" colab={"base_uri": "https://localhost:8080/"} outputId="34bff980-9fbc-4881-f4a6-2becaf0bd3bc"
nova_amostra['time_spend_company'].mean()

# + [markdown] id="b7mvcSxyQg6I"
# c. Variância populacional

# + id="opqt2U55Q4a6" colab={"base_uri": "https://localhost:8080/"} outputId="b9aa3e1c-c28d-4be7-c2f7-92d035750dfb"
nova_amostra['time_spend_company'].var()

# + [markdown] id="nc8ZCjj0QrLN"
# d. Covariância populacional

# + id="VHV-l4_DRANt" colab={"base_uri": "https://localhost:8080/"} outputId="d0ec7df1-a68e-4e03-9ac2-ab90a686ccba"
import numpy as np  # NOTE: redundant (numpy is already in scope above); kept so this cell runs standalone

# Stack the two variables as rows and compute their 2x2 covariance matrix.
X = np.stack((nova_amostra['time_spend_company'], nova_amostra['number_project']), axis=0)
np.cov(X)

# + [markdown] id="w7nfimQaQtj1"
# e.
Correlação populacional # + id="0iknBNMhS5-L" colab={"base_uri": "https://localhost:8080/"} outputId="ace18c65-3fc1-4877-a6f1-335bf6d68605" #correlação corr = nova_amostra.corr() #valores absolutos corr_target = abs(corr['time_spend_company']) #Selecionando as features com maior correlação com a variável de interesse relevant_features = corr_target[corr_target>0.8] relevant_features # + [markdown] id="h6s5CoY_QwrB" # f. Razão populacional # + id="v3vZOrhRQOZR" colab={"base_uri": "https://localhost:8080/"} outputId="b5f69671-d028-4ae3-e1e9-85068217f70f" x = nova_amostra['time_spend_company'].sum() y = nova_amostra['number_project'].sum() r_populacional = x/y r_populacional # + [markdown] id="KfOknRN6VQXR" # g. Razão média populacional # + id="eY1P7TdNU7_D" colab={"base_uri": "https://localhost:8080/"} outputId="5b60c9e7-cf33-4c73-9efa-0f908b256665" r_populacional/3 # + [markdown] id="4ps17kxWm8Iq" # # Amostragem Estratificada # + [markdown] id="fpgH55jApb7m" # Consiste em dividir toda a população em estratos, de maneira que um indivíduo pode fazer parte apenas de um único estrato. Após tais camadas serem definidas, selecionam-se os indivíduos utilizando qualquer técnica de amostragem em cada um dos estratos de forma separada. # # Esse método é muito utilizado em métodos de Aprendizagem de Máquina Supervisionada quando é separada uma amostra de teste e outra de treino. Nesse sentido, separamos a amostra abaixo de maneira estratificada para, quem sabe, futuros projetos de Machine Learning. 
#
#
# + id="8HEcWIpkVha7"
# Drop only the variable that says whether the employee left the company.
x = dados.drop('left_Company', axis=1)
# Select only the left_Company variable (explained above) from the dataset.
y = dados['left_Company']

# Split the data into train and test, stratifying on y so both parts keep
# the same class proportions.
x_train, amostra3, y_train, y_test = train_test_split(x, y, test_size=0.3, stratify=y)

# + [markdown] id="z_EfaOV7rBub"
# Agora, nosso banco de dados foi dividido em 4 'amostras', onde em duas (y_train e y_test) estão somente registros da variável 'left_Company', e outras duas (x_train e amostra3) que estão os restantes das variáveis.
#
# O que difere nessas amostras é a quantidade de registros, pois os que tem o final 'train', contêm 70% dos registros do banco de dados original, enquanto que os que tem final 'test' eu defini apenas 30% dos dados, como mostrado a seguir.

# + colab={"base_uri": "https://localhost:8080/"} id="6EcQ-r-Ql72q" outputId="9aa2d508-5ce3-4793-b4c5-df419c52f798"
# First number is the row count, second is the number of variables.
amostra3.shape

# + colab={"base_uri": "https://localhost:8080/"} id="P-9rShN0emNp" outputId="4e0c694a-4204-407d-a2dc-e739216c24cc"
print('a média é: {:.2f}'.format(amostra3['Sensor_Heartbeat(Average/Min)'].mean()))
# BUG FIX: label says "variância" but the original computed .std(); use .var().
print('A variância é: {:.2f}'.format(amostra3['Sensor_Heartbeat(Average/Min)'].var()))

# + [markdown] id="23ZSOgBlnFEX"
# # Amostragem por conglomerado

# + [markdown] id="GoppicNio-D6"
# Consiste na seleção de pessoas em grupos. Esse método é muito usado em questionários por questão de praticidade. Diferentemente da amostragem estratificada, no qual todos os estratos devem fazer parte da amostra, a amostragem por conglomerado pode selecionar por amostragem.
#
# No nosso caso queremos mapear somente os registros de funcionários do banco de dados que trabalham no setor de vendas (um público bem específico).
Isso pode ser visto com os códigos abaixo: # + id="WwPk-kTEnGyc" #Agrupando por Departamento e o transformando em um dataframe com 30% dos registros grupo = dados.groupby('Department').apply(pd.DataFrame.sample, frac=.3) # + id="FwINLnTNn_e2" #dentro do grupo, selecionando somente o departamento de vendas amostra4 = grupo[grupo.Department=='Sales'] # + colab={"base_uri": "https://localhost:8080/", "height": 323} id="SbqWwsLVoKvP" outputId="84bcfcf7-bca0-4f86-f6e0-e6de858b5118" #mostrando a amostra selecionada amostra4.head() # + colab={"base_uri": "https://localhost:8080/"} id="F4a4h8nGdgWn" outputId="b5bf894a-9182-41b4-915f-a00b65536a3c" #750 registros com 39 colunas amostra4.shape # + colab={"base_uri": "https://localhost:8080/"} id="5XraYplmdkFc" outputId="d22fa9d7-1c33-4505-9297-e35467497654" print('a média é: {:.2f}'.format(amostra4['Sensor_Heartbeat(Average/Min)'].mean())) print('A variância é: {:.2f}'.format(amostra4['Sensor_Heartbeat(Average/Min)'].std())) # + [markdown] id="-Wxu2Euca5zc" # # Comparando o valor das médias e das variâncias das amostras e da população. 
# + id="5ryJ7N53oLdS" colab={"base_uri": "https://localhost:8080/"} outputId="2edb53ae-21bb-4674-8ee1-7107bcc62e88"
# Compare the mean and variance of each sample against the population.
# BUG FIX (all "variância" lines): labels said "variância" but the original code
# computed .std() (standard deviation); .var() now matches the labels.
print('A média da amostra 1 é: {:.2f}'.format(amostra1['Sensor_Heartbeat(Average/Min)'].mean()))
print('A variância da amostra 1 é: {:.2f}'.format(amostra1['Sensor_Heartbeat(Average/Min)'].var()))
print('A média da amostra 2 é: {:.2f}'.format(amostra2['Sensor_Heartbeat(Average/Min)'].mean()))
print('A variância da amostra 2 é: {:.2f}'.format(amostra2['Sensor_Heartbeat(Average/Min)'].var()))
print('A média da amostra 3 é: {:.2f}'.format(amostra3['Sensor_Heartbeat(Average/Min)'].mean()))
print('A variância da amostra 3 é: {:.2f}'.format(amostra3['Sensor_Heartbeat(Average/Min)'].var()))
# BUG FIX: the two lines below reported amostra4 but were labeled "amostra 3".
print('A média da amostra 4 é: {:.2f}'.format(amostra4['Sensor_Heartbeat(Average/Min)'].mean()))
print('A variância da amostra 4 é: {:.2f}'.format(amostra4['Sensor_Heartbeat(Average/Min)'].var()))
print('A média da população é: {:.2f}'.format(dados['Sensor_Heartbeat(Average/Min)'].mean()))
print('A variância da população é: {:.2f}'.format(dados['Sensor_Heartbeat(Average/Min)'].var()))

# + id="n0JyCJxYigfe"
tec_amostragem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import math
from scipy import stats
import scipy

# Upper 5% critical value of the F distribution, dfn=15, dfd=10.
scipy.stats.f.ppf(q=1-0.05,dfn=15,dfd=10)

# Lower 5% critical value of the F distribution, dfn=15, dfd=10.
scipy.stats.f.ppf(q=0.05,dfn=15,dfd=10)

# Two-sample F test for equality of variances.
X=[3,7,25,10,15,6,12,25,15,7]
Y=[48,44,40,38,33,21,20,12,1,18]
# IMPROVED: use the *sample* variance (ddof=1), as the F statistic requires.
# With equal sample sizes the ratio is unchanged from the original np.var(),
# but this is now also correct for samples of different lengths.
F=np.var(X, ddof=1)/np.var(Y, ddof=1)
dfn=len(X)-1
dfd=len(Y)-1
# One-tailed p-value: P(F_{dfn,dfd} <= F).
p_value=scipy.stats.f.cdf(F,dfn,dfd)


def sample_size(alpha,beta,mu1,mu2,sigma):
    """Required sample size for a two-sample z test on means.

    alpha: significance level (type I error probability).
    beta:  type II error probability (power = 1 - beta).
    mu1, mu2: the two means to be distinguished.
    sigma: common standard deviation.

    Prints and returns the (unrounded) required sample size n.
    """
    z1=-1*stats.norm.ppf(alpha)
    z2=-1*stats.norm.ppf(beta)
    n=(((z1+z2)**2)*(sigma**2))/((mu1-mu2)**2)
    print(n)
    # BUG FIX: the original only printed n and returned None, so the result
    # could not be used programmatically; keep the print for compatibility.
    return n


sample_size(0.05,0.1,12,12.75,3.2)
Week 5 Two sample Test -3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Randomized Benchmarking # ## Introduction # # One of the main challenges in building a quantum information processor is the non-scalability of completely # characterizing the noise affecting a quantum system via process tomography. In addition, process tomography is sensitive to noise in the pre- and post rotation gates plus the measurements (SPAM errors). Gateset tomography can take these errors into account, but the scaling is even worse. A complete characterization # of the noise is useful because it allows for the determination of good error-correction schemes, and thus # the possibility of reliable transmission of quantum information. # # Since complete process tomography is infeasible for large systems, there is growing interest in scalable # methods for partially characterizing the noise affecting a quantum system. A scalable (in the number $n$ of qubits comprising the system) and robust algorithm for benchmarking the full set of Clifford gates by a single parameter using randomization techniques was presented in [1]. The concept of using randomization methods for benchmarking quantum gates is commonly called **Randomized Benchmarking # (RB)**. # # # ### References # # 1. <NAME>, <NAME>, and <NAME>, *Robust randomized benchmarking of quantum processes*, # https://arxiv.org/pdf/1009.3639 # # 2. <NAME>, <NAME>, and <NAME>, *Characterizing Quantum Gates via Randomized Benchmarking*, # https://arxiv.org/pdf/1109.6887 # # 3. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, *Process verification of two-qubit quantum gates by randomized benchmarking*, https://arxiv.org/pdf/1210.7011 # # 4. 
<NAME>, <NAME>´orcoles, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, # *Characterization of addressability by simultaneous randomized benchmarking*, https://arxiv.org/pdf/1204.6308 # # 5. <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, *Three Qubit Randomized Benchmarking*, https://arxiv.org/pdf/1712.06550 # ## The Randomized Benchmarking Protocol # # A RB protocol (see [1,2]) consists of the following steps: # # (We should first import the relevant qiskit classes for the demonstration). # + #Import general libraries (needed for functions) import numpy as np import matplotlib.pyplot as plt from IPython import display #Import the RB Functions import qiskit.ignis.verification.randomized_benchmarking as rb #Import Qiskit classes import qiskit from qiskit.providers.aer.noise import NoiseModel from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error # - # ### Step 1: Generate RB sequences # # The RB sequences consist of random Clifford elements chosen uniformly from the Clifford group on $n$-qubits, # including a computed reversal element, # that should return the qubits to the initial state. # # More precisely, for each length $m$, we choose $K_m$ RB sequences. # Each such sequence contains $m$ random elements $C_{i_j}$ chosen uniformly from the Clifford group on $n$-qubits, and the $m+1$ element is defined as follows: $C_{i_{m+1}} = (C_{i_1}\cdot ... \cdot C_{i_m})^{-1}$. It can be found efficiently by the Gottesmann-Knill theorem. # # For example, we generate below several sequences of 2-qubit Clifford circuits. 
# + #Generate RB circuits (2Q RB) #number of qubits nQ=2 rb_opts = {} #Number of Cliffords in the sequence rb_opts['length_vector'] = [1, 10, 20, 50, 75, 100, 125, 150, 175, 200] #Number of seeds (random sequences) rb_opts['nseeds'] = 5 #Default pattern rb_opts['rb_pattern'] = [[0,1]] rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts) # - # As an example, we print the circuit corresponding to the first RB sequence print(rb_circs[0][0]) # One can verify that the Unitary representing each RB circuit should be the identity (with a global phase). # We simulate this using Aer unitary simulator. # Create a new circuit without the measurement qregs = rb_circs[0][-1].qregs cregs = rb_circs[0][-1].cregs qc = qiskit.QuantumCircuit(*qregs, *cregs) for i in rb_circs[0][-1][0:-nQ]: qc.data.append(i) # The Unitary is an identity (with a global phase) backend = qiskit.Aer.get_backend('unitary_simulator') basis_gates = ['u1','u2','u3','cx'] # use U,CX for now job = qiskit.execute(qc, backend=backend, basis_gates=basis_gates) print(np.around(job.result().get_unitary(),3)) # ### Step 2: Execute the RB sequences (with some noise) # # We can execute the RB sequences either using Qiskit Aer Simulator (with some noise model) or using IBMQ provider, and obtain a list of results. # # By assumption each operation $C_{i_j}$ is allowed to have some error, represented by $\Lambda_{i_j,j}$, and each sequence can be modeled by the operation: # $$\textit{S}_{\textbf{i}_\textbf{m}} = \bigcirc_{j=1}^{m+1} (\Lambda_{i_j,j} \circ C_{i_j})$$ # where ${\textbf{i}_\textbf{m}} = (i_1,...,i_m)$ and $i_{m+1}$ is uniquely determined by ${\textbf{i}_\textbf{m}}$. 
# + # Run on a noisy simulator noise_model = NoiseModel() # Depolarizing_error dp = 0.005 noise_model.add_all_qubit_quantum_error(depolarizing_error(dp, 1), ['u1', 'u2', 'u3']) noise_model.add_all_qubit_quantum_error(depolarizing_error(2*dp, 2), 'cx') backend = qiskit.Aer.get_backend('qasm_simulator') # - # ### Step 3: Get statistics about the survival probabilities # # For each of the $K_m$ sequences the survival probability $Tr[E_\psi \textit{S}_{\textbf{i}_\textbf{m}}(\rho_\psi)]$ # is measured. # Here $\rho_\psi$ is the initial state taking into account preparation errors and $E_\psi$ is the # POVM element that takes into account measurement errors. # In the ideal (noise-free) case $\rho_\psi = E_\psi = | \psi {\rangle} {\langle} \psi |$. # # In practice one can measure the probability to go back to the exact initial state, i.e. all the qubits in the ground state $ {|} 00...0 {\rangle}$ or just the probability for one of the qubits to return back to the ground state. Measuring the qubits independently can be more convenient if a correlated measurement scheme is not possible. Both measurements will fit to the same decay parameter according to the properties of the *twirl*. # ### Step 4: Find the averaged sequence fidelity # # Average over the $K_m$ random realizations of the sequence to find the averaged sequence **fidelity**, # $$F_{seq}(m,|\psi{\rangle}) = Tr[E_\psi \textit{S}_{K_m}(\rho_\psi)]$$ # where # $$\textit{S}_{K_m} = \frac{1}{K_m} \sum_{\textbf{i}_\textbf{m}} \textit{S}_{\textbf{i}_\textbf{m}}$$ # is the average sequence operation. # ### Step 5: Fit the results # # Repeat Steps 1 through 4 for different values of $m$ and fit the results for the averaged sequence fidelity to the model: # $$ \textit{F}_{seq}^{(0)} \big(m,{|}\psi {\rangle} \big) = A_0 \alpha^m +B_0$$ # where $A_0$ and $B_0$ absorb state preparation and measurement errors as well as an edge effect from the # error on the final gate. 
# # $\alpha$ determines the average error-rate $r$, which is also called **Error per Clifford (EPC)** # according to the relation $$ r = 1-\alpha-\frac{1-\alpha}{2^n} = \frac{2^n-1}{2^n}(1-\alpha)$$ # (where $n=nQ$ is the number of qubits). # # As an example, we calculate the average sequence fidelity for each of the RB sequences, fit the results to the exponential curve, and compute the parameters $\alpha$ and EPC. # Create the RB fitter backend = qiskit.Aer.get_backend('qasm_simulator') basis_gates = ['u1','u2','u3','cx'] shots = 200 qobj_list = [] rb_fit = rb.RBFitter(None, xdata, rb_opts['rb_pattern']) for rb_seed,rb_circ_seed in enumerate(rb_circs): print('Compiling seed %d'%rb_seed) new_rb_circ_seed = qiskit.compiler.transpile(rb_circ_seed, basis_gates=basis_gates) qobj = qiskit.compiler.assemble(new_rb_circ_seed, shots=shots) print('Simulating seed %d'%rb_seed) job = backend.run(qobj, noise_model=noise_model, backend_options={'max_parallel_experiments': 0}) qobj_list.append(qobj) # Add data to the fitter rb_fit.add_data(job.result()) print('After seed %d, alpha: %f, EPC: %f'%(rb_seed,rb_fit.fit[0]['params'][1], rb_fit.fit[0]['epc'])) # ### Plot the results # + plt.figure(figsize=(8, 6)) ax = plt.subplot(1, 1, 1) # Plot the essence by calling plot_rb_data rb_fit.plot_rb_data(0, ax=ax, add_label=True, show_plt=False) # Add title and label ax.set_title('%d Qubit RB'%(nQ), fontsize=18) plt.show() # - # ### The intuition behind RB # # The depolarizing quantum channel has a parameter $\alpha$, and works like this: with probability $\alpha$, the state remains the same as before; with probability $1-\alpha$, the state becomes the totally mixed state, namely: # # $$\rho_f = \alpha \rho_i + \frac{1-\alpha}{2^n} * \mathbf{I}$$ # # Suppose that we have a sequence of $m$ gates, not necessarily Clifford gates, # where the error channel of the gates is a depolarizing channel with parameter $\alpha$ # (same $\alpha$ for all the gates). 
# Then with probability $\alpha^m$ the state is correct at the end of the sequence,
# and with probability $1-\alpha^m$ it becomes the totally mixed state, therefore:
#
# $$\rho_f^m = \alpha^m \rho_i + \frac{1-\alpha^m}{2^n} * \mathbf{I}$$
#
# Now suppose that in addition we start with the ground state;
# that the entire sequence amounts to the identity;
# and that we measure the state at the end of the sequence with the standard basis.
# We derive that the probability of success at the end of the sequence is:
#
# $$\alpha^m + \frac{1-\alpha^m}{2^n} = \frac{2^n-1}{2^n}\alpha^m + \frac{1}{2^n} = A_0\alpha^m + B_0$$
#
# It follows that the probability of success, aka fidelity, decays exponentially with the sequence length, with exponent $\alpha$.
#
# The last statement is not necessarily true when the channel is other than the depolarizing channel. However, it turns out that if the gates are uniformly-randomized Clifford gates, then the noise of each gate behaves on average as if it was the depolarizing channel, with some parameter that can be computed from the channel, and we obtain the exponential decay of the fidelity.
#
# Formally, taking an average over a finite group $G$ (like the Clifford group) of a quantum channel $\bar \Lambda$ is also called a *twirl*:
# $$ W_G(\bar \Lambda) = \frac{1}{|G|} \sum_{u \in G} U^{\dagger} \circ \bar \Lambda \circ U$$
# Twirling over the entire unitary group yields exactly the same result as the Clifford group. The Clifford group is a *2-design* of the unitary group.

# ## Simultaneous Randomized Benchmarking
#
#
# RB is designed to address fidelities in multiqubit systems in two ways. For one, RB over the full $n$-qubit space
# can be performed by constructing sequences from the $n$-qubit Clifford group. Additionally, the $n$-qubit space
# can be subdivided into sets of qubits $\{n_i\}$ and $n_i$-qubit RB performed in each subset simultaneously [4].
# Both methods give metrics of fidelity in the $n$-qubit space.
# # For example, it is common to perform 2Q RB on the subset of two-qubits defining a CNOT gate while the other qubits are quiescent. As explained in [4], this RB data will not necessarily decay exponentially because the other qubit subspaces are not twirled. Subsets are more rigorously characterized by simultaneous RB, which also measures some level of crosstalk error since all qubits are active. # # An example of simultaneous RB (1Q RB and 2Q RB) can be found in: # https://github.com/Qiskit/qiskit-tutorials/blob/master/qiskit/ignis/randomized_benchmarking.ipynb # ## Predicted Gate Fidelity # # If we know the errors on the underlying gates (the gateset) we can predict the fidelity. First we need to count the number of these gates per Clifford. # # Then, the two qubit Clifford gate error function gives the error per 2Q Clifford. It assumes that the error in the underlying gates is depolarizing. This function is derived in the supplement to [5]. #Count the number of single and 2Q gates in the 2Q Cliffords gates_per_cliff = rb.rb_utils.gates_per_clifford(qobj_list, xdata[0],basis_gates, rb_opts['rb_pattern'][0]) for i in range(len(basis_gates)): print("Number of %s gates per Clifford: %f"%(basis_gates[i], np.mean([gates_per_cliff[0][i],gates_per_cliff[1][i]]))) # + # Prepare lists of the number of qubits and the errors ngates = np.zeros(7) ngates[0:3] = gates_per_cliff[0][0:3] ngates[3:6] = gates_per_cliff[1][0:3] ngates[6] = gates_per_cliff[0][3] gate_qubits = np.array([0, 0, 0, 1, 1, 1, -1], dtype=int) gate_errs = np.zeros(len(gate_qubits)) gate_errs[[1, 4]] = dp/2 #convert from depolarizing error to epg (1Q) gate_errs[[2, 5]] = 2*dp/2 #convert from depolarizing error to epg (1Q) gate_errs[6] = dp*3/4 #convert from depolarizing error to epg (2Q) #Calculate the predicted epc pred_epc = rb.rb_utils.twoQ_clifford_error(ngates,gate_qubits,gate_errs) print("Predicted 2Q Error per Clifford: %e"%pred_epc)
content/ch-quantum-hardware/randomized-benchmarking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/marianvinas/DS-Unit-2-Applied-Modeling/blob/master/Marian_Vinas_DSPT6_LS_DS_231_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="k6dnf2dZxgwt" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 3, Module 1* # # --- # + [markdown] id="PpjBYa_Sxiar" colab_type="text" # # Define ML problems # - Choose a target to predict, and check its distribution # - Avoid leakage of information from test to train or from target to features # - Choose an appropriate evaluation metric # # + [markdown] id="ulaaBNkuxLVt" colab_type="text" # ## Challenge # # You will use your portfolio project dataset for all assignments this sprint. (If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset.) # # Complete these tasks for your project, and document your decisions. # # - Choose your target. Which column in your tabular dataset will you predict? # - Is your problem regression or classification? # - How is your target distributed? # - Classification: How many classes? Are the classes imbalanced? # - Regression: Is the target right-skewed? If so, you may want to log transform the target. # - Choose your evaluation metric(s). # - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy? 
# - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics? # - Choose which observations you will use to train, validate, and test your model. # - Are some observations outliers? Will you exclude them? # - Will you do a random split or a time-based split? # - Begin to clean and explore your data. # - Begin to choose which features, if any, to exclude. Would some features "leak" future information? # # Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393) # + [markdown] id="5-HiuATzG5_R" colab_type="text" # # House Price # <NAME> - June 3, 2020 # # # # + [markdown] id="GCWRrL50Hzwi" colab_type="text" # # #New home sales # # New home sales is a housing market statistic that measures the sales of newly built homes over a given period. It provides a broad view of activity in the housing market. For example, an increase in new home sales suggests demand is picking up. Because changes are often seen in new home sales before the market at large, new home sales is considered a leading indicator. The statistic is also a sign of the health of the U.S. economy because an increase in new home sales suggests an increase in consumer confidence and spending. The most closely watched data on new home sales is the U.S. Census Bureau’s New Residential Sales, released around the 20th of each month. 
# + id="B3LXWG573SF5" colab_type="code" colab={}
# %%capture
import sys

# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://query.data.world/s/4bmd5zk5xyaeuvzdeqbd754m3lqg3l'
    # !pip install category_encoders==2.*

# + id="r6NPmnZl4An3" colab_type="code" outputId="9ad46d20-3054-45db-b1e1-621f9fa49273" colab={"base_uri": "https://localhost:8080/", "height": 217}
import pandas as pd
pd.options.display.max_columns = None

# BUG FIX: removed a duplicate `import pandas as pd` that appeared twice in
# this cell. NOTE(review): the URL duplicates DATA_PATH above — consider
# reusing that constant.
df = pd.read_csv('https://query.data.world/s/4bmd5zk5xyaeuvzdeqbd754m3lqg3l')
df.head()

# + id="IKBoMLGR5zic" colab_type="code" outputId="392967d5-18b5-43f0-b7e5-c057a2af3e34" colab={"base_uri": "https://localhost:8080/", "height": 34}
df.shape

# + id="WeEXrQQD8As9" colab_type="code" outputId="1f2c56fb-8878-4673-d6c5-a36f7dfb126f" colab={"base_uri": "https://localhost:8080/", "height": 336}
#columns
df.columns

# + id="8ub9Xyj6Sf0a" colab_type="code" outputId="2b748422-7c30-4eec-910a-760a5294f907" colab={"base_uri": "https://localhost:8080/", "height": 176}
#Target
df['SalePrice'].describe()

# + id="_vm_KkmKiU_I" colab_type="code" outputId="68a75c54-3a04-43ae-a6b8-c97d06b83103" colab={"base_uri": "https://localhost:8080/", "height": 217}
#Which column in your tabular dataset will you predict?
#BedroomAbvGr
df = df.dropna(subset=['BedroomAbvGr'])
# Binary target: does the home have 4+ bedrooms above grade?
df['Great'] = df['BedroomAbvGr'] >= 4
df.head()

# + [markdown] id="TtEFqQnz1Nhn" colab_type="text"
# #How is your target distributed?
#
# Classification: How many classes? Are the classes imbalanced?
#
# Regression: Is the target right-skewed? If so, you may want to log transform the target.
# + id="e9RDbDHLjR4-" colab_type="code" outputId="e5bd90fb-11ae-4995-8fe2-b2ec9f59d4f2" colab={"base_uri": "https://localhost:8080/", "height": 34} y = df['Great'] y.nunique() # + id="9f-1ARVltnr4" colab_type="code" outputId="dd4d13be-d233-4d19-c1c7-de60d966aafe" colab={"base_uri": "https://localhost:8080/", "height": 70} y.value_counts() # + id="TYGFbfJUttzy" colab_type="code" outputId="b289bc84-d475-4b69-b6cb-e0c0b349ec82" colab={"base_uri": "https://localhost:8080/", "height": 70} y.value_counts(normalize=True) # + id="_fj5_NYMSIUX" colab_type="code" outputId="208f861d-2501-4565-8b1a-083954435ea6" colab={"base_uri": "https://localhost:8080/", "height": 34} #Cleaning data df['BldgType'].unique() # + id="7amWzPF7TnVk" colab_type="code" outputId="00c3546e-404c-491a-e42d-80ba91edb208" colab={"base_uri": "https://localhost:8080/", "height": 123} df['BldgType'].value_counts() # + id="urXW3F-6TnM3" colab_type="code" outputId="f21b6620-ca28-4022-fe6b-b2b72cb87e37" colab={"base_uri": "https://localhost:8080/", "height": 34} cardinality = df.select_dtypes(exclude='number').nunique() high_cardinality_feat = cardinality[cardinality > 30].index.tolist() high_cardinality_feat # + id="MBEN5xxrTnAB" colab_type="code" outputId="da823f59-fb33-4f2e-8446-6ad756b42171" colab={"base_uri": "https://localhost:8080/", "height": 426} df = df.drop(columns = high_cardinality_feat) df # + id="KF61KC-PXv17" colab_type="code" outputId="0d64cadf-dd0d-4f14-b8af-4d654934244c" colab={"base_uri": "https://localhost:8080/", "height": 230} df.isna().sum().sort_values() # + id="vnaqomkRxEcQ" colab_type="code" colab={} df = df.fillna('Missing') # + id="eNyzSgPxXwJy" colab_type="code" outputId="f3d13070-bda1-4181-8801-f07e3ccd9848" colab={"base_uri": "https://localhost:8080/", "height": 426} df # + [markdown] id="sar7Od_GZaC8" colab_type="text" # **time-based split:** # # - Train on reviews from 2008 & earlier. # - Validate on 2009. # - Test on 2010 & later. 
# + id="wDzqpgOLf0ie" colab_type="code" outputId="eb00f2a9-3b44-404e-bb2e-fa3401792c76" colab={"base_uri": "https://localhost:8080/", "height": 426}
#df['YrSold'] = pd.to_datetime(df['YrSold'])
df

# + id="6NUqWWOEZnhQ" colab_type="code" outputId="b774bdbd-f167-4540-ce2d-945ffbc69ce7" colab={"base_uri": "https://localhost:8080/", "height": 34}
# BUG FIX: the original split (train <= 2016, val == 2007, test <= 2008) made
# val and test *subsets of train*, leaking every validation/test row into
# training. This now matches the time-based split stated above: train on
# 2008 & earlier, validate on 2009, test on 2010 & later.
train = df[df['YrSold'] <= 2008]
val = df[df['YrSold'] == 2009]
test = df[df['YrSold'] >= 2010]
train.shape, val.shape, test.shape

# + [markdown] id="ZeT6U9Ouajfz" colab_type="text"
# Begin to choose which features, if any, to exclude. **Would some features “leak” future information?**
#
# What happens if we _DON’T_ drop features with leakage?

# + id="6ByUBTALZnk8" colab_type="code" outputId="1f3ec24b-37e5-4aad-f974-f13e87161be9" colab={"base_uri": "https://localhost:8080/", "height": 336}
import category_encoders as ce
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier

target = 'Great'
# NOTE: 'BedroomAbvGr' is deliberately left in the features — the target Great
# was derived from it, so this demonstrates target leakage (see the question above).
features = train.columns.drop([target, 'YrSold'])
features

# + id="4KA2QzT7avEM" colab_type="code" colab={}
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]

# + id="aFnB35ElaxoV" colab_type="code" outputId="19fcfda7-7b0f-434f-8662-f70ed2d236fc" colab={"base_uri": "https://localhost:8080/", "height": 34}
pipeline = make_pipeline(
    ce.ordinal.OrdinalEncoder(),
    DecisionTreeClassifier(max_depth=5)
)
pipeline.fit(X_train, y_train)
print(f'Validation accuracy: {pipeline.score(X_val, y_val)}')

# + id="Kts47IrO0D_2" colab_type="code" outputId="a58615ef-2577-4548-f2e7-9890e475948c" colab={"base_uri": "https://localhost:8080/", "height": 240}
import graphviz
from sklearn.tree import export_graphviz

# Visualize the fitted decision tree.
tree = pipeline.named_steps['decisiontreeclassifier']
dot_data = export_graphviz(
    tree,
    out_file=None,
    feature_names=X_train.columns,
    class_names=y_train.unique().astype(str),
    filled=True,
    impurity=False,
    proportion=True
)
graphviz.Source(dot_data)

# + [markdown]
id="I95jl2r40JLh" colab_type="text"
# #Column with "leakage"

# + id="MqLl7cFW0RW-" colab_type="code" outputId="0f5850d8-188f-41f4-e75b-2b4e0228d7c3" colab={"base_uri": "https://localhost:8080/", "height": 34}
# NOTE(review): despite the section title, this cell drops the same columns as
# the earlier pipeline — 'BedroomAbvGr' (the column the target Great was derived
# from) is still included, so the leakage is still present here. Only max_depth
# changed (5 -> 3). Presumably intentional for the demonstration; confirm.
target = 'Great'
features = train.columns.drop([target, 'YrSold'])

X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]

pipeline = make_pipeline(
    ce.ordinal.OrdinalEncoder(),
    DecisionTreeClassifier(max_depth=3)
)
pipeline.fit(X_train, y_train)
print(f'Validation accuracy: {pipeline.score(X_val, y_val)}')

# + id="sBwigZ961WEV" colab_type="code" outputId="bd8746c2-1304-4697-f9ff-9cef756adb1e" colab={"base_uri": "https://localhost:8080/", "height": 240}
# Visualize the fitted tree to inspect which features it split on.
tree = pipeline.named_steps['decisiontreeclassifier']
dot_data = export_graphviz(
    tree,
    out_file=None,
    feature_names=X_train.columns,
    class_names=y_train.unique().astype(str),
    filled=True,
    impurity=False,
    proportion=True
)
graphviz.Source(dot_data)

# + [markdown] id="CK0IfD1ch_CB" colab_type="text"
# #Difference between Regression and Classification:
# The main difference between them is that the output variable in regression is numerical (or continuous) while that for classification is categorical (or discrete).
# + id="4rim4DSm38_y" colab_type="code" outputId="b3c17451-79cc-46af-c58a-5b4c111714fa" colab={"base_uri": "https://localhost:8080/", "height": 34} from sklearn.metrics import roc_auc_score y_pred_proba = pipeline.predict_proba(X_val)[:, 1] roc_auc_score(y_val, y_pred_proba) # + id="yiKe8zBX1ggg" colab_type="code" colab={} from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_val, y_pred_proba) # + id="FViog4cH1jk6" colab_type="code" outputId="a3be497d-f329-43c9-f05b-43cab1ee1114" colab={"base_uri": "https://localhost:8080/", "height": 137} pd.DataFrame( { 'False Positive Rate': fpr, 'True Positive Rate': tpr, 'Thresholds': thresholds } ) # + id="1SrgM-QD1ng5" colab_type="code" outputId="a254a0c0-9bdb-4271-ecae-f72be76b7d9e" colab={"base_uri": "https://localhost:8080/", "height": 312} import matplotlib.pyplot as plt plt.scatter(fpr, tpr) plt.plot(fpr, tpr) plt.title('ROC curve') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # + [markdown] id="IZtICEpP4Hix" colab_type="text" # #ROC curve for a test with no overlap # # We have a worthless test. # A worthless test has a discriminating ability equal to flipping a coin.
Marian_Vinas_DSPT6_LS_DS_231_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="AeE_RnZ_Pkda" outputId="575bd30b-7ed9-4a9b-c6c2-39951d9b34a8" executionInfo={"status": "ok", "timestamp": 1642237159173, "user_tz": -480, "elapsed": 5589, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "00979579278418875317"}} colab={"base_uri": "https://localhost:8080/"} # !pip install torch # !pip3 install torchvision # + id="muANAQ-IfEsO" import torch from torch import nn import torch.nn.functional as F import os from torch.utils.data import Dataset import cv2 from tqdm import tqdm import numpy as np # %matplotlib inline import matplotlib.pyplot as plt # + id="BeyvrU0cDAwq" outputId="4e873a47-038c-4a35-db09-172377ff89ee" executionInfo={"status": "ok", "timestamp": 1642249361132, "user_tz": -480, "elapsed": 18327, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "00979579278418875317"}} colab={"base_uri": "https://localhost:8080/"} # Load the Drive helper and mount from google.colab import drive # This will prompt for authorization. 
drive.mount('/content/drive')

# +
# Fail fast if the Colab runtime was not started with a GPU.
import tensorflow as tf

device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))


# +
class UnetModel(nn.Module):
    """Standard U-Net: four down-sampling stages (64 -> 1024 channels), four
    up-sampling stages with skip connections, and a 1x1 conv + sigmoid head.

    Input/output spatial size must be divisible by 16 (four 2x max-pools).
    """

    def conv(self, in_channels, out_channels):
        """Two 3x3 conv + BatchNorm + ReLU layers (U-Net double-conv block)."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=(1, 1)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=(1, 1)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        return block

    def up_conv(self, in_channels, out_channels):
        """2x upsample followed by a 3x3 conv + BatchNorm + ReLU."""
        block = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=(1, 1)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        return block

    def __init__(self, in_channel, out_channel):
        super(UnetModel, self).__init__()
        # Contracting path.
        self.conv1 = self.conv(in_channel, 64)
        self.conv1_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = self.conv(64, 128)
        self.conv2_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = self.conv(128, 256)
        self.conv3_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv4 = self.conv(256, 512)
        self.conv4_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5 = self.conv(512, 1024)
        # Expanding path: each up_convN halves channels, then upN_conv consumes
        # the channel-concatenated skip connection.
        self.up_conv4 = self.up_conv(1024, 512)
        self.up4_conv = self.conv(1024, 512)
        self.up_conv3 = self.up_conv(512, 256)
        self.up3_conv = self.conv(512, 256)
        self.up_conv2 = self.up_conv(256, 128)
        self.up2_conv = self.conv(256, 128)
        self.up_conv1 = self.up_conv(128, 64)
        self.up1_conv = self.conv(128, 64)
        self.conv_1x1 = nn.Conv2d(64, out_channel, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Returns a sigmoid-activated map with the same spatial size as x."""
        # Encoder.
        out1 = self.conv1(x)
        out2 = self.conv1_maxpool(out1)
        out2 = self.conv2(out2)
        out3 = self.conv2_maxpool(out2)
        out3 = self.conv3(out3)
        out4 = self.conv3_maxpool(out3)
        out4 = self.conv4(out4)
        out5 = self.conv4_maxpool(out4)
        out5 = self.conv5(out5)
        # Decoder with skip connections.
        exp5 = self.up_conv4(out5)
        exp5 = torch.cat((out4, exp5), dim=1)
        exp5 = self.up4_conv(exp5)
        exp4 = self.up_conv3(exp5)
        exp4 = torch.cat((out3, exp4), dim=1)
        exp4 = self.up3_conv(exp4)
        exp3 = self.up_conv2(exp4)
        exp3 = torch.cat((out2, exp3), dim=1)
        exp3 = self.up2_conv(exp3)
        exp2 = self.up_conv1(exp3)
        exp2 = torch.cat((out1, exp2), dim=1)
        exp2 = self.up1_conv(exp2)
        exp1 = self.conv_1x1(exp2)
        exp1 = self.sigmoid(exp1)
        return exp1


# +
class MyDataset(Dataset):
    """Paired grayscale input/mask images loaded from
    <home_directory>/<mode>/{input,mask}, resized to 128x128, scaled to [0, 1].

    Notes:
    - `len` (shadows the builtin) and `noise` (unused) are kept with these
      names for backward compatibility with existing callers.
    - self.Y is a long tensor, so assigning the float mask truncates values
      toward zero (matches the original behavior).
    """

    def __init__(self, len, home_directory, noise=2, mode="Train"):
        self.len = len
        self.examples = []
        self.iter_index = 0
        self.X = torch.empty((len, 128, 128))
        self.Y = torch.empty((len, 128, 128), dtype=torch.long)
        self.input_directory = os.path.join(home_directory, mode, 'input')
        self.mask_directory = os.path.join(home_directory, mode, 'mask')
        print("dataset input path {}".format(self.input_directory))
        print("dataset mask path {}".format(self.mask_directory))
        # Sort so inputs and masks pair up by name order.
        input_names = sorted(os.listdir(self.input_directory))
        mask_names = sorted(os.listdir(self.mask_directory))
        self.set_dataset(self.input_directory, input_names, True)
        self.set_dataset(self.mask_directory, mask_names, False)

    def set_dataset(self, directory, names, input_na=True):
        """Read every image in `names`, convert to grayscale, rescale to
        [0, 1], resize to 128x128, and store into self.X (input_na=True)
        or self.Y (input_na=False)."""
        for index, name in enumerate(names):
            img_path = os.path.join(directory, name)
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = img / 255
            resize_img = cv2.resize(img, (128, 128))
            if input_na:
                self.X[index] = torch.tensor(resize_img)
            else:
                self.Y[index] = torch.from_numpy(resize_img).float()

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        return (self.X[idx], self.Y[idx])


# +
dataset_train = MyDataset(60, '/content/drive/My Drive/A3/cat_data/cat_data')
trainloader = torch.utils.data.DataLoader(dataset_train, batch_size=20, shuffle=True)
# dataset_test = MyDataset(20,'/content/drive/My Drive/A3/cat_data/cat_data', 'Test')

model = UnetModel(1, 1)
criterion = nn.MSELoss()
# NOTE(review): lr=0.7 is far above typical Adam learning rates (~1e-3);
# kept as-is, but confirm it is intentional.
optimizer = torch.optim.Adam(model.parameters(), lr=0.7)

epochs = 10
model.train()
for e in range(epochs):
    running_loss = 0
    for images, labels in tqdm(trainloader):
        optimizer.zero_grad()
        images = images.unsqueeze(1)  # add channel dim: (B,128,128) -> (B,1,128,128)
        labels = labels.unsqueeze(1)
        labels = labels.float()       # MSELoss requires a float target
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        print(f"Traning loss: {running_loss/len(trainloader)}")

# +
dataset_test = MyDataset(21, '/content/drive/My Drive/A3/cat_data/cat_data', mode='Test')
testloader = torch.utils.data.DataLoader(dataset_test, batch_size=20, shuffle=True)

# Fixes vs. original: reset the loss accumulator (it previously carried over
# the last training epoch's loss), cast the long target to float to match
# training, use BatchNorm eval statistics, and drop the pointless
# optimizer.zero_grad() inside the no-grad block.
model.eval()
running_loss = 0
with torch.no_grad():
    for images, labels in tqdm(testloader):
        images = images.unsqueeze(1)
        labels = labels.unsqueeze(1).float()
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        running_loss += loss.item()
print(f"Test loss: {running_loss/len(testloader)}")
A3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

import shutil
import numpy as np
import pandas as pd
from pathlib import Path
import json
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sn
from src.data import IFCNetNumpy
from src.models.models import MeshNet
from torch.utils.data import DataLoader, Subset, Dataset
import torch.nn.functional as F
import sklearn.metrics as metrics
import torch.nn as nn
from sklearn.preprocessing import label_binarize

# +
data_root = Path("../data/processed/MeshNet/IFCNetCore")
with open("../IFCNetCore_Classes.json", "r") as f:
    class_names = json.load(f)

# +
# Both train and val wrap the "train" partition; a fixed permutation below
# carves out disjoint 70/30 train/val subsets.
train_dataset = IFCNetNumpy(data_root, 2048, class_names, partition="train")
val_dataset = IFCNetNumpy(data_root, 2048, class_names, partition="train")
test_dataset = IFCNetNumpy(data_root, 2048, class_names, partition="test")

np.random.seed(42)
perm = np.random.permutation(range(len(train_dataset)))
train_len = int(0.7 * len(train_dataset))
train_dataset = Subset(train_dataset, sorted(perm[:train_len]))
val_dataset = Subset(val_dataset, sorted(perm[train_len:]))

train_loader = DataLoader(train_dataset, batch_size=32, num_workers=8)
val_loader = DataLoader(val_dataset, batch_size=32, num_workers=8)
test_loader = DataLoader(test_dataset, batch_size=32, num_workers=8)

# +
# Rebuild the MeshNet from its saved hyper-parameters and load the weights
# (the checkpoint also stores the optimizer state, which we discard).
model_dir = Path("../models/")
with (model_dir/"MeshNetParams.json").open("r") as f:
    config = json.load(f)

model = MeshNet(config["num_kernel"], config["sigma"],
                config["aggregation_method"], output_channels=len(class_names))
model_state, _ = torch.load(model_dir/"MeshNetWeights+Optimizer")
model.load_state_dict(model_state)
# -

device = torch.device("cuda")
model.eval()
model.to(device)


# +
def calc_metrics(probabilities, labels):
    """Weighted classification metrics computed from class probabilities.

    probabilities: (n_samples, n_classes) array; labels: (n_samples,) ints.
    Returns a dict of accuracy / balanced accuracy / precision / recall / F1.
    """
    predictions = np.argmax(probabilities, axis=1)
    return {
        "accuracy_score": metrics.accuracy_score(labels, predictions),
        "balanced_accuracy_score": metrics.balanced_accuracy_score(labels, predictions),
        "precision_score": metrics.precision_score(labels, predictions, average="weighted"),
        "recall_score": metrics.recall_score(labels, predictions, average="weighted"),
        "f1_score": metrics.f1_score(labels, predictions, average="weighted"),
    }


def plot_confusion_matrix(confusion_matrix, display_labels, fname=None):
    """Render the confusion matrix as a heatmap; save it when fname is given.

    The first three characters of each label are stripped for display
    (presumably the 'Ifc' prefix — confirm against the class list).
    """
    labels = list(map(lambda x: x[3:], display_labels))
    df = pd.DataFrame(confusion_matrix, index=labels, columns=labels)
    plt.figure(figsize=(7, 5))
    sn.heatmap(df, cmap="Blues", annot=True, fmt="d", cbar=False)
    plt.ylabel("Actual class")
    plt.xlabel("Predicted class")
    if fname:
        plt.savefig(fname, dpi=300, bbox_inches="tight")


def eval(model, loader, device, class_names, fname=None):
    """Run the model over `loader`, plot its confusion matrix, and return
    (labels, probabilities). NOTE: the name shadows the builtin `eval`."""
    model.eval()
    prob_batches = []
    label_batches = []
    with torch.no_grad():
        for data, labels in tqdm(loader):
            data, labels = data.to(device), labels.to(device)
            probs = F.softmax(model(data), dim=1)
            prob_batches.append(probs.cpu().detach().numpy())
            label_batches.append(labels.cpu().numpy())

    all_probs = np.concatenate(prob_batches)
    all_labels = np.concatenate(label_batches)
    # Computed for its side-effect-free summary; kept for parity with the
    # original (the result is not returned).
    result = calc_metrics(all_probs, all_labels)

    predictions = np.argmax(all_probs, axis=1)
    confusion_matrix = metrics.confusion_matrix(all_labels, predictions)
    plot_confusion_matrix(confusion_matrix, class_names, fname=fname)
    return all_labels, all_probs
# -

eval(model, train_loader, device, class_names)

eval(model, val_loader, device, class_names)

test_labels, test_probs = eval(model, test_loader, device, class_names,
                               fname="../reports/figures/meshnet_confusion.png")

np.savez("MeshNetProbs.npz", labels=test_labels, probs=test_probs)

test_predictions = np.argmax(test_probs, axis=1)
wrong_predictions = np.where(test_labels != test_predictions)[0]
# Copy each misclassified test mesh into a per-true-class folder, tagging the
# copied file name with the predicted class for quick manual inspection.
wrong_pred_dir = Path("../data/external/MeshNet/wrong_classes/IFCNetCore")
raw_data_dict = {
    path.stem: path
    for path in Path("../data/raw/IFCNetCore").glob("**/test/*.obj")
}
wrong_pred_dir.mkdir(parents=True, exist_ok=True)

for i in wrong_predictions:
    filename = test_dataset.files[i]
    label_str = class_names[test_labels[i]]
    prediction_str = class_names[test_predictions[i]]
    print(f"{filename.stem}, Label: {label_str}, Prediction: {prediction_str}")

    target_dir = wrong_pred_dir / label_str
    target_dir.mkdir(exist_ok=True)
    destination = target_dir / f"{filename.stem}_{prediction_str}.obj"
    shutil.copy(str(raw_data_dict[filename.stem]), str(destination))
notebooks/1.1-cemunds-MeshNet-Evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ___
#
# <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Copyright by Pierian Data Inc.</em></center>
# <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>

# # Series
# The first main data type we will learn about for pandas is the Series data type. Let's import Pandas and explore the Series object.
#
# A Series is very similar to a NumPy array (in fact it is built on top of the NumPy array object). What differentiates the NumPy array from a Series, is that a Series can have axis labels, meaning it can be indexed by a label, instead of just a number location. It also doesn't need to hold numeric data, it can hold any arbitrary Python Object.
#
# Let's explore this concept through some examples:

# ## Imports

import numpy as np
import pandas as pd

# ## Creating a Series from Python Objects

help(pd.Series)

# ### Index and Data Lists
#
# We can create a Series from Python lists (also from NumPy arrays)

# Country founding years, keyed by country below.
myindex = ['USA', 'Canada', 'Mexico']
mydata = [1776, 1867, 1821]

# Without an explicit index, pandas uses 0..n-1.
myser = pd.Series(data=mydata)

myser

pd.Series(data=mydata, index=myindex)

ran_data = np.random.randint(0, 100, 4)
ran_data

names = ['Andrew', 'Bobo', 'Claire', 'David']
ages = pd.Series(ran_data, names)
ages

# ### From a Dictionary
# Dictionary keys become the index automatically.

ages = {'Sammy': 5, 'Frank': 10, 'Spike': 7}
ages

pd.Series(ages)

# # Key Ideas of a Series

# ## Named Index

# Imaginary Sales Data for 1st and 2nd Quarters for Global Company
q1 = {'Japan': 80, 'China': 450, 'India': 200, 'USA': 250}
q2 = {'Brazil': 100, 'China': 500, 'India': 210, 'USA': 260}

# Convert into Pandas Series
sales_Q1 = pd.Series(q1)
sales_Q2 = pd.Series(q2)

sales_Q1

# Call values based on Named Index
sales_Q1['Japan']

# Integer Based Location information also retained!
# FIX: positional access with plain [] on a labeled Series is deprecated
# (FutureWarning in pandas 2.x, removed in pandas 3.0) — use .iloc instead.
sales_Q1.iloc[0]

# **Be careful with potential errors!**

# +
# Wrong Name
# sales_Q1['France']

# +
# Accidental Extra Space
# sales_Q1['USA ']

# +
# Capitalization Mistake
# sales_Q1['usa']
# -

# ## Operations

# Grab just the index keys
sales_Q1.keys()

# Can Perform Operations Broadcasted across entire Series
sales_Q1 * 2

sales_Q2 / 100

# ## Between Series

# Notice how Pandas informs you of mismatch with NaN
sales_Q1 + sales_Q2

# You can fill these with any value you want
sales_Q1.add(sales_Q2, fill_value=0)

# That is all we need to know about Series, up next, DataFrames!
practical_ai/archive/02-Pandas-and-Scikit-Learn/00-Series.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Automatic Speech Recognition combined with Speaker Diarization # + """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies # !pip install wget # !apt-get install sox libsndfile1 ffmpeg # !pip install unidecode # ## Install NeMo BRANCH = 'main' # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr] ## Install TorchAudio # !pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html # - # # Introduction # In the early years, speaker diarization algorithms were developed for speech recognition on multispeaker audio recordings to enable speaker adaptive processing, but also gained its own value as a stand-alone application over # time to provide speaker-specific meta information for downstream tasks such as audio retrieval. # Automatic Speech Recognition output when combined with Speaker labels has shown immense use in many tasks, ranging from analyzing telephonic conversation to decoding meeting transcriptions. # # In this tutorial we demonstrate how one can get ASR transcriptions combined with Speaker labels along with voice activity time stamps using NeMo asr collections. 
# # For detailed understanding of transcribing words with ASR refer to this
# [ASR tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb),
# and for detailed understanding of speaker diarizing an audio refer to this
# [Diarization inference](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb)
# tutorial

# Let's first import nemo asr and other libraries for visualization purposes
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt

# We demonstrate this tutorial using a merged an4 audio that has two speakers
# (male and female) speaking dates in different formats. Download the data if
# it does not exist already and listen to it.

# +
ROOT = os.getcwd()
data_dir = os.path.join(ROOT, 'data')
os.makedirs(data_dir, exist_ok=True)

an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir, 'an4_diarize_test.wav')):
    AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
    AUDIO_FILENAME = os.path.join(data_dir, 'an4_diarize_test.wav')

signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal, rate=sample_rate))
# -


def show_figure(signal, text='Audio', overlay_color=[]):
    """Scatter-plot the waveform; an optional per-sample color array
    (one entry per sample) highlights labelled segments over the black base.

    Note: uses the module-level `sample_rate` for the time axis labels.
    """
    fig, ax = plt.subplots(1, 1)
    fig.set_figwidth(20)
    fig.set_figheight(2)
    plt.scatter(np.arange(len(signal)), signal, s=1, marker='o', c='k')
    if len(overlay_color):
        plt.scatter(np.arange(len(signal)), signal, s=1, marker='o', c=overlay_color)
    fig.suptitle(text, fontsize=16)
    plt.xlabel('time (secs)', fontsize=18)
    plt.ylabel('signal strength', fontsize=14)
    plt.axis([0, len(signal), -0.5, +0.5])
    time_axis, _ = plt.xticks()
    plt.xticks(time_axis[:-1], time_axis[:-1] / sample_rate)


# plot the audio
show_figure(signal)

# We start our demonstration by first transcribing the audio using our
# pretrained model `QuartzNet15x5Base-En` and use the CTC output probabilities
# to get timestamps for words spoken. We then later use these timestamps to
# get speaker label information using the speaker diarizer model.

# Download and load pretrained quartznet asr model
asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name='QuartzNet15x5Base-En', strict=False)

# Transcribe the audio
files = [AUDIO_FILENAME]
transcript = asr_model.transcribe(paths2audio_files=files)[0]
print(f'Transcript: "{transcript}"')

# Get CTC log probabilities with output labels

# +
def softmax(logits):
    """Softmax over the last axis of a (time, vocab) logit matrix; the global
    max is subtracted first for numerical stability."""
    e = np.exp(logits - np.max(logits))
    return e / e.sum(axis=-1).reshape([logits.shape[0], 1])


# let's do inference once again but without decoder
logits = asr_model.transcribe(files, logprobs=True)[0]
probs = softmax(logits)

# 20ms is duration of a timestep at output of the model
time_stride = 0.02

# get model's alphabet; index 0 is the word separator, last entry is CTC blank
labels = list(asr_model.decoder.vocabulary) + ['blank']
labels[0] = 'space'
# -

# We use CTC labels for voice activity detection. To detect speech and
# non-speech segments in the audio, we use the blank and space labels in the
# CTC outputs: consecutive runs of spaces or blanks longer than a threshold
# are considered non-speech segments.

# +
def collect_symbol_runs(probs, target_idx):
    """Return [start, end] frame-index pairs of runs of `target_idx`.

    A run opens when the argmax of a frame equals `target_idx` and is only
    closed when a regular character appears (neither space, index 0, nor CTC
    blank, index 28) — i.e. runs persist through the other separator symbol.
    """
    runs = []
    run_open = np.argmax(probs[0]) == target_idx
    run_start = 0
    for idx in range(1, probs.shape[0]):
        current_char_idx = np.argmax(probs[idx])
        if run_open and current_char_idx != 0 and current_char_idx != 28:
            runs.append([run_start, idx - 1])
            run_open = False
        if not run_open and current_char_idx == target_idx:
            run_open = True
            run_start = idx
    if run_open:
        runs.append([run_start, len(probs) - 1])
    return runs


# frame runs of the CTC blank symbol (index 28)
blanks = collect_symbol_runs(probs, 28)

threshold = 20  # minimum width (in frames) to consider non-speech activity
non_speech = list(filter(lambda x: x[1] - x[0] > threshold, blanks))

# get timestamps for space symbols (word boundaries).
# BUGFIX: the original duplicated state machine referenced an undefined
# variable (`len(pred)-1`) when closing the final run; the shared helper
# correctly uses `len(probs)-1`.
spaces = collect_symbol_runs(probs, 0)

# calibration offset for timestamps: 180 ms
offset = -0.18

# split the transcript into words
words = transcript.split()
# -

# Frame level stamps for non speech frames
print(non_speech)

# write to rttm type file for later use in extracting speaker labels
frame_offset = offset / time_stride
speech_labels = []
uniq_id = os.path.basename(AUDIO_FILENAME).split('.')[0]
with open(uniq_id + '.rttm', 'w') as f:
    # Speech segments are the gaps between consecutive non-speech runs.
    for idx in range(len(non_speech) - 1):
        start = (non_speech[idx][1] + frame_offset) * time_stride
        end = (non_speech[idx + 1][0] + frame_offset) * time_stride
        f.write("SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> speech <NA>\n".format(uniq_id, start, end - start))
        speech_labels.append("{:.3f} {:.3f} speech".format(start, end))
    # Trailing speech after the last non-speech run, if any.
    if non_speech[-1][1] < len(probs):
        start = (non_speech[-1][1] + frame_offset) * time_stride
        end = (len(probs) + frame_offset) * time_stride
        f.write("SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> speech <NA>\n".format(uniq_id, start, end - start))
        speech_labels.append("{:.3f} {:.3f} speech".format(start, end))

# Time stamps for speech frames
print(speech_labels)

COLORS = "b g c m y".split()


def get_color(signal, speech_labels, sample_rate=16000):
    """Per-sample color codes: 'speech' segments red, other labels colored by
    their numeric suffix via COLORS, everything else black.

    FIX: segment boundaries now honor the `sample_rate` parameter instead of
    a hard-coded 16000 (behavior is unchanged for the default).
    Note: the array dtype is '<U1', so 'red' is stored as 'r' — still red
    for matplotlib.
    """
    c = np.array(['k'] * len(signal))
    for time_stamp in speech_labels:
        start, end, label = time_stamp.split()
        start, end = int(float(start) * sample_rate), int(float(end) * sample_rate)
        if label == "speech":
            code = 'red'
        else:
            code = COLORS[int(label.split('_')[-1])]
        c[start:end] = code
    return c


# With voice activity time stamps extracted from CTC outputs, here we show the
# Voice Activity signal in red color and background speech in black color
color = get_color(signal, speech_labels)
show_figure(signal, 'an4 audio signal with vad', color)

# We use a helper function from speaker utils to convert the voice activity
# rttm file to a manifest for the clustering diarizer.
from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest

output_dir = os.path.join(ROOT, 'oracle_vad')
os.makedirs(output_dir, exist_ok=True)
oracle_manifest = os.path.join(output_dir, 'oracle_manifest.json')
write_rttm2manifest(paths2audio_files=files,
                    paths2rttm_files=[uniq_id + '.rttm'],
                    manifest_file=oracle_manifest)

# !cat {output_dir}/oracle_manifest.json

# Set up diarizer model

# +
from omegaconf import OmegaConf

MODEL_CONFIG = os.path.join(data_dir, 'speaker_diarization.yaml')
if not os.path.exists(MODEL_CONFIG):
    config_url = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/speaker_diarization.yaml"
    MODEL_CONFIG = wget.download(config_url, data_dir)

config = OmegaConf.load(MODEL_CONFIG)

pretrained_speaker_model = 'speakerdiarization_speakernet'
config.diarizer.paths2audio_files = files
config.diarizer.out_dir = output_dir  # directory for intermediate files and prediction outputs
config.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
# Oracle VAD: just pass the manifest file we created above.
config.diarizer.speaker_embeddings.oracle_vad_manifest = oracle_manifest
config.diarizer.oracle_num_speakers = 2
# -

# Diarize the audio at provided time stamps
from nemo.collections.asr.models import ClusteringDiarizer
oracle_model = ClusteringDiarizer(cfg=config);
oracle_model.diarize();

from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_rttm = os.path.join(output_dir, 'pred_rttms', uniq_id + '.rttm')
# NOTE: this rebinds `labels` (previously the CTC alphabet) to the
# diarization output strings "start end speaker_N".
labels = rttm_to_labels(pred_rttm)
print("speaker labels with time stamps\n", labels)

# Now let us see the audio plot color coded per speaker
color = get_color(signal, labels)
show_figure(signal, 'audio with speaker labels', color)
display(Audio(signal, rate=16000))

# Finally transcribe audio with time stamps and speaker label information:
# walk through the word-boundary (space) midpoints and advance to the next
# speaker segment whenever a boundary crosses the current segment's end.

# +
pos_prev = 0
idx = 0
start_point, end_point, speaker = labels[idx].split()
print("{} [{:.2f} - {:.2f} sec]".format(speaker, float(start_point), float(end_point)), end=" ")
for j, spot in enumerate(spaces):
    # midpoint of the space run, in seconds (with calibration offset)
    pos_end = offset + (spot[0] + spot[1]) / 2 * time_stride
    if pos_prev < float(end_point):
        print(words[j], end=" ")
    else:
        print()
        idx += 1
        start_point, end_point, speaker = labels[idx].split()
        print("{} [{:.2f} - {:.2f} sec]".format(speaker, float(start_point), float(end_point)), end=" ")
        print(words[j], end=" ")
    pos_prev = pos_end

# last word after the final space
print(words[j + 1], end=" ")
# -
tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# https://discuss.huggingface.co/t/multilabel-sequence-classification-with-roberta-value-error-expected-input-batch-size-to-match-target-batch-size/1653/2

from transformers import Trainer
import torch


class CustomTrainer(Trainer):
    """Trainer variant for multi-label classification: swaps the default loss
    for BCEWithLogitsLoss over the multi-hot label vector."""

    def compute_loss(self, model, inputs, return_outputs=False):
        outputs = model(
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            token_type_ids=inputs['token_type_ids']
        )
        logits = outputs['logits'].float()
        targets = inputs['labels'].float()
        loss = torch.nn.BCEWithLogitsLoss()(logits, targets)
        if return_outputs:
            return (loss, outputs)
        return loss


from transformers import DataCollatorWithPadding, TrainingArguments, BertTokenizer, BertForSequenceClassification
from datasets import load_dataset

# load dataset, tokenize, adapt columns, and apply datacollator
checkpoint = "bert-base-cased"
transformers_tokenizer = BertTokenizer.from_pretrained(checkpoint)


def transformers_tokenize_function(item):
    """Tokenize one batch of examples with padding and truncation."""
    return transformers_tokenizer(item["text"], padding=True, truncation=True)


# Keep only the tokenized inputs plus the multi-hot label column.
transformers_tokenized_datasets = (
    load_dataset("mdroth/transformers_issues_labels")
    .map(transformers_tokenize_function, batched=True)
    .remove_columns(column_names=["url", "text", "num_labels", "labels"])
    .rename_column("arr_labels", "labels")
)
transformers_data_collator = DataCollatorWithPadding(tokenizer=transformers_tokenizer)

# training arguments
training_args = TrainingArguments(
    "5_try_transformers_dataset",
    evaluation_strategy="epoch",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4
)

# model: one output unit per label
num_labels = 57
transformers_model = BertForSequenceClassification.from_pretrained(checkpoint, num_labels=num_labels)

# trainer (note: train and eval both use the "dev" split)
trainer = CustomTrainer(
    transformers_model,
    training_args,
    train_dataset=transformers_tokenized_datasets["dev"],
    eval_dataset=transformers_tokenized_datasets["dev"],
    data_collator=transformers_data_collator,
    tokenizer=transformers_tokenizer
)

# train
trainer.train()
drop/multilabel_classification/anotherTry.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Pairs Trading
#
# _<NAME> (based on the Quantopian Lecture Series)._
#
# As usual, we first import our libraries:

# +
import math

import numpy as np
import pandas as pd

# statistical analysis
import statsmodels.api as sm
from statsmodels import regression
from statsmodels.tsa.stattools import adfuller, coint

# plotting + styling
import seaborn
import matplotlib.pyplot as plt
from matplotlib import style
style.use('seaborn-whitegrid')
plt.rcParams["figure.figsize"] = (10, 6)

# get pricing data using Yahoo Finance
from quantopian import get_pricing

# fix the random number generator seed for reproducibility
np.random.seed(107)
# -

# Pairs trading is a classic example of a strategy based on mathematical
# analysis. The principle is as follows. Let's say you have a pair of
# securities $X$ and $Y$ that have some underlying economic link. An example
# might be two companies that manufacture the same product (e.g. PEP (Pepsi)
# and KO (Coca-Cola)), or two companies in one supply chain. If we can model
# this economic link with a mathematical model, we can make trades on it.
#
# We'll start by constructing a toy example. We model $X$'s daily returns by
# drawing from a normal distribution. Then we perform a cumulative sum to get
# the value of $X$ on each day (we shift all prices up by $50$ to make them
# positive).

# Generate the daily returns, then cumulate them into a price series.
X_returns = np.random.normal(0, 1, 100)
X = pd.Series(np.cumsum(X_returns), name='X') + 50
X.plot();
# $Y$ is supposed to have a deep economic link to $X$, so we model it as $X$
# shifted up by 5 plus some normally distributed noise.
Y = X + 5 + np.random.normal(0, 1, 100)
Y.name = 'Y'

pd.concat([X, Y], axis=1).plot();


# ## Stationarity
#
# A commonly untested assumption in time series analysis is the stationarity
# of the data. Many statistical tests, deep down in the fine print of their
# assumptions, require that the data being tested are stationary. Also, if you
# naively use certain statistics on a non-stationary data set, you will get
# garbage results. Data are stationary when the parameters of the data
# generating process do not change over time. We can check for stationarity
# using a statistical test, the so-called _Augmented Dickey-Fuller test:_

def check_for_stationarity(X, cutoff=0.01):
    """Augmented Dickey-Fuller test for stationarity.

    H_0 in adfuller is that a unit root exists (non-stationary); we call the
    series stationary when the p-value falls below `cutoff`.
    Prints the verdict and returns True/False.
    """
    pvalue = adfuller(X)[1]
    if pvalue < cutoff:
        print(f'p-value = {str(pvalue)}. The series is likely stationary.')
        return True
    print(f'p-value = {str(pvalue)}. The series is likely non-stationary.')
    return False


# +
T = 100

# Two series: A is stationary white noise, B has a mean drifting upward in t.
A = [np.random.normal(0, 1) for t in range(T)]
B = [np.random.normal(0.1 * t, 1) for t in range(T)]

check_for_stationarity(A)
check_for_stationarity(B);
# -

# ## Order of Integration
#
# An important concept in time series analysis is _moving average
# representation_. This representation expresses any time series $Y_t$ as
#
# \begin{align*}
# Y_t = \sum_{j=0}^\infty b_j \epsilon_{t-j} + \eta_t
# \end{align*}
#
# * $\epsilon$ is the 'innovation' series;
# * $b_j$ are the moving average weights of the innovation series;
# * $\eta$ is a deterministic series.
#
# The key here is as follows. $\eta$ is deterministic, such as a sine wave.
# Therefore we could perfectly model it. The innovation process is stochastic
# and there to simulate new information occuring over time. Specifically,
# $\epsilon_t = \hat Y_t - Y_t$ where $\hat Y_t$ is the optimal forecast of
# $Y_t$ using only information from time before $t$. In other words, the best
# prediction you can make at time $t-1$ cannot account for the randomness in
# $\epsilon$. Each $b_j$ just says how much previous values of $\epsilon$
# influence $Y_t$.
#
# We will note integration order $i$ as $I(i)$. A time series is said to be
# $I(0)$ if the following condition holds in a moving average representation:
#
# \begin{align*}
# \sum_{k=0}^\infty |b_k|^2 < \infty.
# \end{align*}
#
# In hand-wavy english, the autocorrelation of the series decays sufficiently
# quickly. This property turns out to be true of all stationary series, but by
# itself is not enough for stationarity to hold. This means that stationarity
# implies $I(0)$, but $I(0)$ does not imply stationarity.
#
# In practice testing whether the sum of the autocorrelations is finite may
# not be possible. Given a finite set of data and a finite number of estimated
# autocorrelations, the sum will always be finite. Given this difficulty,
# tests for $I(0)$ rely on stationarity implying the property: if we find that
# a series is stationary, then it must also be $I(0)$.
#
# ### Inductively Building Up Orders of Integration
#
# If one takes an $I(0)$ series and cumulatively sums it (_discrete
# integration_), the new series will be $I(1)$. Notice how this is related to
# the calculus concept of integration. The same relation applies in general:
# to get $I(n)$ take an $I(0)$ series and iteratively take the cumulative sum
# $n$ times. Now let's make an $I(1)$ series by taking the cumulative sum of
# $A$.

# +
A1 = np.cumsum(A)

plt.plot(A1)
plt.xlabel('Time')
plt.ylabel('Value')
plt.legend(['Series A1']);
# -

# Now let's make one $I(2)$ by taking the cumulative sum again.
# + A2 = np.cumsum(A1) plt.plot(A2) plt.xlabel('Time') plt.ylabel('Value') plt.legend(['Series A2']); # - # ### Breaking Down Orders of Integration # # Conversely, to find the order of integration of a given series, we perform the inverse of a cumulative sum, which is the $\Delta$ or _difference operator_. Specifically # # \begin{align*} # (1-L) X_t = X_t - X_{t-1} = \Delta X. # \end{align*} # # In this case $L$ is the _lag/backshift operator_. $L$ fetches the second to last elements in a time series, and $L^k$ fetches the $k$-th to last elements. So # # \begin{align*} # L X_t = X_{t-1} # \end{align*} # # and # # \begin{align*} # (1-L) X_t = X_t - X_{t-1}. # \end{align*} # # A series $Y_t$ is $I(1)$ if the $Y_t - Y_{t-1}$ is $I(0)$. In other words, if you take an $I(0)$ series and cumulatively sum it, you should get an $I(1)$ series. Once all the math has settled, remember that any stationary series is $I(0)$. # # Let's try this out on some real pricing data. X = get_pricing('MSFT', fields=['price'], start_date='2019-01-01', end_date='2020-01-01')['price'] check_for_stationarity(X); plt.plot(X.index, X.values) plt.ylabel('Price') plt.legend(['MSFT']); # Now let's take the delta of the series, giving us the additive returns. We'll check if this is stationary. X1 = X.diff()[1:] check_for_stationarity(X1) plt.plot(X1) plt.ylabel('Additive Returns') plt.legend(['MSFT (Additive Returns)']); # Seems like the additive returns are stationary over 2019. That means we will probably be able to model the returns much better than the price. It also means that the price was $I(1)$. Let's also check the multiplicative returns. X1 = X.pct_change()[1:] check_for_stationarity(X1) plt.plot(X1) plt.ylabel('Multiplicative Returns') plt.legend(['MSFT (Multiplicative Returns)']); # It seems like the multiplicative returns are also stationary. Both the multiplicative and additive deltas on a series get at similar pieces of information, so it's not surprising both are stationary. 
# In practice this might not always be the case.
#
# As always, you should not naively assume that because a time series is stationary in the past it will continue to be stationary in the future. Tests for consistency of stationarity such as _cross validation_ and _out of sample testing_ are necessary. This is actually true for any statistical property. Returns may also go in and out of stationarity, and may be stationary or non-stationary depending on the timeframe and sampling frequency. The reason that returns are usually used for modeling in quantitative finance is that they are far more stationary than prices. This makes them easier to model and returns forecasting more feasible. Forecasting prices is more difficult, as there are many trends induced by their $I(1)$ integration. Even using a returns forecasting model to forecast price can be tricky, as any error in the returns forecast will be magnified over time.
#
# ## Cointegration
#
# Now that we've discussed stationarity and order of integration, we can discuss _cointegration_. A set of time series $\{X_1,X_2,\dots,X_k\}$ is _cointegrated_ if all series are $I(1)$ and some linear combination of them is $I(0)$.
#
# The intuition here is that for some linear combination of the series, the result lacks much auto-covariance and is mostly noise. This is useful for cases such as pairs trading, in which we find two assets whose prices are cointegrated. Since the linear combination of their prices $b_1A_1 + b_2A_2$ is noise, we can bet on the relationship $b_1A_1 + b_2A_2$ mean reverting and place trades accordingly. Remember, as with anything else, you should not assume that because some set of assets has passed a cointegration test historically, they will continue to remain cointegrated. You need to verify that consistent behavior occurs, and use various model validation techniques as you would with any model.
#
# There are a bunch of ways to test for cointegration.
In general we're just trying to solve for the coefficients $b_1, \dots b_k$ that will produce an $I(0)$ linear combination. If our best guess for these coefficients does not pass a stationarity check, then we reject the hypothesis that the set is cointegrated. This will lead to risk of Type II errors (false negatives), as we will not exhaustively test for stationarity on all coefficent combinations. However Type II errors are generally okay here, as they are safe and do not lead to us making any wrong forecasts. # # In practice a common way to do this for pairs of time series is to use linear regression to estimate $\beta$ in the following model. # # \begin{align*} # X_2 = \alpha + \beta X_1 + \epsilon. # \end{align*} # # The idea is that if the two are cointegrated we can remove $X_2$'s depedency on $X_1$, leaving behind stationary noise. The combination $X_2 - \beta X_1 = \alpha + \epsilon$ should be stationary. # # # One of the most important things done in finance is to make many independent bets. Here a quant would find many pairs of assets they hypothesize are cointegrated, and evenly distribute their dollars between them in bets. This only requires more than half of the asset pairs to remain cointegrated for the strategy to work. # # ### Testing for Cointegration # # We've constructed an example of two cointegrated series $X$ and $Y$ before. Let's plot the difference between the two series: # + X_returns = np.random.normal(0, 1, 100) X = pd.Series(np.cumsum(X_returns), name='X') + 50 Y = X + 5 + np.random.normal(0, 1, 100) (Y - X).plot() # plot the spread plt.axhline((Y - X).mean(), color='red', linestyle='--') # add the mean plt.xlabel('Time') plt.legend(['Price Spread', 'Mean']); # - # There is a convenient cointegration test that lives in `statsmodels.tsa.stattools`. Let's say that our confidence level is $0.05$. We should see a p-value below our cutoff, as we've artifically created two series that are the textbook definition of cointegration. 
Hence, the spread between the two time series is stationary around its mean. score, pvalue, _ = coint(X,Y) print(pvalue) # ## Hedging # # Because you'd like to protect yourself from bad markets, often short sales will be used to hedge long investments. Because a short sale makes money if the security sold loses value, and a long purchase will make money if a security gains value, one can long parts of the market and short others. That way if the entire market falls off a cliff, we'll still make money on the shorted securities and hopefully break even. In the case of two securities we'll call it a hedged position when we are long on one security and short on the other. # # Because the securities drift towards and apart from each other, there will be times when the distance is high and times when the distance is low. The trick of pairs trading comes from maintaining a hedged position across $X$ and $Y$. If both securities go down, we neither make nor lose money, and likewise if both go up. We make money on the spread of the two reverting to the mean. In order to do this we'll watch for when $X$ and $Y$ are far apart, then short $Y$ and long $X$. Similarly we'll watch for when they're close together, and long $Y$ and short $X$. Recall that the spread is $Y - \beta X$, where $\beta$ is the regression coefficient. # # #### Going Long the Spread # This is when the spread is small and we expect it to become larger. We place a bet on this by longing $Y$ and shorting $X$. # # #### Going Short the Spread # This is when the spread is large and we expect it to become smaller. We place a bet on this by shorting $Y$ and longing $X$. # # One important concept here is that we are placing a bet on one specific thing, and trying to reduce our bet's dependency on other factors such as the market. The best way to find real securities that behave like this is to start with securities you suspect may be cointegrated and perform a statistical test. 
# If you just run statistical tests over all pairs, you'll fall prey to multiple comparison bias.
#
# Here's a method to look through a list of securities and test for cointegration between all pairs. It returns a cointegration test score matrix, a p-value matrix, and any pairs for which the p-value was less than $0.05$.

def find_cointegrated_pairs(data, cutoff=0.05):
    """Test every pair of columns in ``data`` for cointegration.

    Parameters
    ----------
    data : pd.DataFrame
        Price series, one security per column.
    cutoff : float
        p-value threshold below which a pair is reported as cointegrated.

    Returns
    -------
    tuple
        ``(score_matrix, pvalue_matrix, pairs)`` where the two n-by-n
        matrices are filled only in their upper triangle (entry [i, j]
        for i < j) and ``pairs`` lists the column-name pairs whose
        cointegration p-value fell below ``cutoff``.
    """
    n = data.shape[1]
    keys = data.keys()
    score_matrix = np.zeros((n, n))
    pvalue_matrix = np.ones((n, n))
    pairs = []
    for i in range(n):
        for j in range(i + 1, n):
            # coint returns (test statistic, p-value, critical values);
            # we only keep the first two.
            score, pvalue, _ = coint(data[keys[i]], data[keys[j]])
            score_matrix[i, j] = score
            pvalue_matrix[i, j] = pvalue
            if pvalue < cutoff:
                pairs.append((keys[i], keys[j]))
    return score_matrix, pvalue_matrix, pairs

# ## Looking for Cointegrated Pairs of Alternative Energy Securities
# We are looking through a set of solar company stocks to see if any of them are cointegrated. We'll start by defining the list of securities we want to look through. Then we'll get the pricing data for each security for the year of 2014.
#
# Our approach here is somewhere in the middle of the spectrum that we mentioned before. We have formulated an economic hypothesis that there is some sort of link between a subset of securities within the energy sector and we want to test whether there are any cointegrated pairs. This incurs significantly less multiple comparisons bias than searching through hundreds of securities and slightly more than forming a hypothesis for an individual test.
#
# **NOTE:** We include the market in our data. This is because the market drives the movement of so many securities that you often times might find two seemingly cointegrated securities, but in reality they are not cointegrated and just both cointegrated with the market. This is known as a confounding variable and it is important to check for market involvement in any relationship you find.
symbol_list = ['ABG-P.MC','ASTI','CSOL','DQ','FSLR', 'SPY'] prices_df = get_pricing(symbol_list, fields='price', start_date='2014-01-01', end_date='2015-01-01') prices_df.head() # Now we'll run our method on the list and see if any pairs are cointegrated. scores, pvalues, pairs = find_cointegrated_pairs(prices_df) seaborn.heatmap(pvalues, xticklabels=symbol_list, yticklabels=symbol_list, cmap='RdYlGn_r', mask=(pvalues >= 0.05)); # Looks like 'ABG-P.MC' and 'FSLR' are cointegrated. Let's take a look at the prices to make sure there's nothing weird going on. S1 = prices_df['ABG-P.MC'] S2 = prices_df['FSLR'] score, pvalue, _ = coint(S1, S2) pvalue # ## Calculating the Spread # Now we will plot the spread of the two series. In order to actually calculate the spread, we use linear regression to get the coefficient for the linear combination to construct between our two securities. This is known as the Engle-Granger method. # + S1 = sm.add_constant(S1) results = sm.OLS(S2, S1).fit() S1 = S1['ABG-P.MC'] b = results.params['ABG-P.MC'] spread = S2 - b * S1 spread.plot() plt.axhline(spread.mean(), color='red') plt.legend(['Spread']); # - # Alternatively, we could examine the ratio between the two series. ratio = S1/S2 ratio.plot() plt.axhline(ratio.mean(), color='red') plt.legend(['Price Ratio']); # Examining the price ratio of a trading pair is a traditional way to handle pairs trading. Part of why this works as a signal is based in our assumptions of how stock prices move, specifically because stock prices are typically assumed to be log-normally distributed. What this implies is that by taking a ratio of the prices, we are taking a linear combination of the returns associated with them (since prices are just the exponentiated returns). # # This can be a little irritating to deal with for our purposes as purchasing the precisely correct ratio of a trading pair may not be practical. 
# We choose instead to move forward with simply calculating the spread between the cointegrated stocks using linear regression. This is a very simple way to handle the relationship, however, and is likely not feasible for non-toy examples. There are other potential methods for estimating the spread, such as the _Kalman filter_.
#
# So, back to our example. The absolute spread isn't very useful in statistical terms. It is more helpful to normalize our signal by treating it as a z-score.
#
# In practice this is usually done to try to give some scale to the data, but this assumes some underlying distribution, usually a normal distribution. Under a normal distribution, we would know that approximately 84% of all spread values will be smaller. However, much financial data is not normally distributed, and one must be very careful not to assume normality, nor any specific distribution when generating statistics. It could be the case that the true distribution of spreads was very fat-tailed and prone to extreme values. This could mess up our model and result in large losses.

def zscore(series):
    """Center ``series`` on its mean and scale by its standard deviation.

    NOTE(review): ``np.std`` defaults to ddof=0 (population std) while
    pandas' ``Series.std`` defaults to ddof=1 (sample std); the original
    mixes the pandas mean with the numpy std, and that choice is kept
    here to preserve the plotted values — confirm it is intentional.
    """
    return (series - series.mean()) / np.std(series)

spread_z = zscore(spread)
spread_z.plot()
plt.axhline(spread_z.mean(), color='black')
plt.axhline(1.0, color='red', linestyle='--')
plt.axhline(-1.0, color='green', linestyle='--')
plt.legend(['Spread z-score', 'Mean', '+1', '-1']);

# ### Simple Strategy:
# * Go "Long" the spread whenever the z-score is below -1.0
# * Go "Short" the spread when the z-score is above 1.0
# * Exit positions when the z-score approaches zero
#
# This is just the tip of the iceberg, and only a very simplistic example to illustrate the concepts. In practice you would want to compute a more optimal weighting for how many shares to hold for S1 and S2.
#
# ## Trading using constantly updating statistics
# In general taking a statistic over your whole sample size can be bad.
# For example, if the market is moving up, and both securities with it, then your average price over the last 3 years may not be representative of today. For this reason traders often use statistics that rely on rolling windows of the most recent data.
#
# ## Moving Averages
# A moving average is just an average over the last $n$ datapoints for each given time. It will be undefined for the first $n$ datapoints in our series. Shorter moving averages will be more jumpy and less reliable, but respond to new information quickly. Longer moving averages will be smoother, but take more time to incorporate new information.
#
# We also need to use a rolling beta, a rolling estimate of how our spread should be calculated, in order to keep all of our parameters up to date.

from quantopian import RollingOLS

# +
# get the spread between the 2 stocks
# calculate rolling beta coefficient
# NOTE(review): earlier the hedge ratio came from regressing S2 on S1
# (spread = S2 - b * S1), but here RollingOLS is called with y=S1, x=S2,
# which looks like the regression direction is flipped relative to the
# spread formula below — confirm against the RollingOLS API.
rolling_beta = RollingOLS(y=S1, x=S2, window=30)
spread = S2 - rolling_beta['x'] * S1

# get the 1 day and 30 day moving average of the price spread
spread_mavg1 = spread.rolling(window=1).mean()
spread_mavg30 = spread.rolling(window=30).mean()

plt.plot(spread_mavg1)
plt.plot(spread_mavg30)
plt.legend(['1 Day Spread MAVG', '30 Day Spread MAVG'])
plt.ylabel('Spread');
# -

# We can use the moving averages to compute the z-score of the spread at each given time. This will tell us how extreme the spread is and whether it's a good idea to enter a position at this time. Let's take a look at the z-score now.

# +
# take a rolling 30 day standard deviation
std_30 = spread.rolling(window=30).std()

# compute the z-score for each day
# (a 1-day moving average of the spread is just the spread itself, so this
# measures today's spread relative to its 30-day mean, in 30-day std units)
zscore_30_1 = (spread_mavg1 - spread_mavg30)/std_30

zscore_30_1.plot()
plt.axhline(0, color='black')
plt.axhline(1.0, color='red', linestyle='--');
# -

# The z-score doesn't mean much out of context, let's plot it next to the prices to get an idea of what it looks like.
We'll take the negative of the z-score because the spreads were all negative and that is a little counterintuitive to trade on. # Plot the prices scaled down along with the negative z-score # just divide the stock prices by 10 to make viewing it on the plot easier plt.plot(S1.index, S1.values/10) plt.plot(S2.index, S2.values/10) plt.plot(zscore_30_1) plt.legend(['S1 Price / 10', 'S2 Price / 10', 'Price Spread Rolling z-Score']); # ## Out of Sample Test # Now that we have constructed our spread appropriately and have an idea of how we will go about making trades, it is time to conduct some out of sample testing. Our whole model is based on the premise that these securities are cointegrated, but we built it on information from a certain time period. If we actually want to implement this model, we need to conduct an out of sample test to confirm that the principles of our model are still valid going forward. # # Since we initially built the model on the 2014 - 2015 year, let's see if this cointegrated relationship holds for 2015 - 2016. Historical results do not guarantee future results so this is a sanity check to see if the work we have done holds strong. symbol_list = ['ABG-P.MC','FSLR'] prices_df = get_pricing(symbol_list, fields='price', start_date='2015-01-01', end_date='2016-01-01') prices_df.head() S1 = prices_df['ABG-P.MC'] S2 = prices_df['FSLR'] score, pvalue, _ = coint(S1, S2) print('p-value: ', pvalue) # Unfortunately, since our p-value is above the cutoff of $0.05$, we conclude that our model will no longer be valid due to the lack of cointegration between our chosen securities. If we tried to deploy this model without the underlying assumptions holding, we would have no reason to believe that it would actually work. Out of sample testing is a vital step to make sure that our work will actually be viable in the market. 
#
# ## Implementation
# When actually implementing a pairs trading strategy you would normally want to be trading many different pairs at once. If you find a good pair relationship by analyzing data, there is no guarantee that that relationship will continue into the future. Trading many different pairs creates a diversified portfolio to mitigate the risk of individual pairs "falling out of" cointegration.

# _This document is based on the Quantopian Lecture written by <NAME> and <NAME>._
Pairs Trading.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="c1604535" # <a href="https://colab.research.google.com/github/open-mmlab/mmselfsup/blob/master/demo/mmselfsup_colab_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="d76e94d0" # # MMSelfSup Tutorial # In this tutorial, we will introduce the following content: # # - How to install MMSelfSup # - How to train algorithms in MMSelfSup # - How to train downstream tasks # # If you have any other questions, welcome to report issues. # + [markdown] id="8159aadf" # ## How to install MMSelfSup # # Before using MMSelfSup, we need to prepare the environment with the following steps: # # 1. Install Python, CUDA, C/C++ compiler and git # 2. Install PyTorch (CUDA version) # 3. Install dependent codebase (mmcv, mmcls) # 4. Clone mmselfsup source code from GitHub and install it # # Because this tutorial is on Google Colab and all necessary packages have been installed, we can skip the first two steps. # + colab={"base_uri": "https://localhost:8080/"} id="66ed8cfe" outputId="46f7db9f-770d-4339-aff0-d7f8f005634e" # !pwd # + colab={"base_uri": "https://localhost:8080/"} id="86d5SBUQxpOm" outputId="c3521c08-831a-446d-fc35-d86fa89a35de" # Check nvcc version # !nvcc -V # + colab={"base_uri": "https://localhost:8080/"} id="rINWzY4ixpT-" outputId="742c399a-eee9-48d4-a0c5-c7eaf717a817" # Check GCC version # !gcc --version # + colab={"base_uri": "https://localhost:8080/"} id="ab8155aa" outputId="64072a74-c830-4206-8b3c-9f7398d4e7a9" # Check PyTorch installation import torch, torchvision print(torch.__version__) print(torch.cuda.is_available()) # + [markdown] id="18aad462" # ## Install MMCV # # MMCV is the basic package of all OpenMMLab packages. 
We have pre-built wheels on Linux, so we can download and install them directly. # # Please pay attention to PyTorch and CUDA versions to match the wheel. # # In the above steps, we have checked the version of PyTorch and CUDA, and they are 1.10.2 and 11.3 respectively, so we need to choose the corresponding wheel. # # In addition, we can also install the full version of mmcv (mmcv-full). It includes full features and various CUDA ops out of the box, but needs a longer time to build. # + [markdown] id="89532489" # MIM is recommended: https://github.com/open-mmlab/mim # + colab={"base_uri": "https://localhost:8080/"} id="fb3da020" outputId="dfd43ff9-e51d-4dc1-bf7c-f21dde61d7c1" # !pip install openmim # + colab={"base_uri": "https://localhost:8080/"} id="e3e73f09" outputId="4db8c20e-9ce3-4a1c-8755-6c8e17988268" # !mim install mmcv-full # + [markdown] id="86e6d589" # Besides, you can also use pip to install the packages, but you are supposed to check the pytorch and cuda version mannually. The example commmand is provided below, but you need to modify it according to your PyTorch and CUDA version. # + id="5b004a18" # Install mmcv and mmcls # !pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.10/index.html # + [markdown] id="54815b81" # ## Clone and install mmselfsup # + colab={"base_uri": "https://localhost:8080/"} id="207561bb" outputId="cc3ecfd5-f812-4910-ba46-f46fb99aa42c" # Clone MMSelfSup repository # !git clone https://github.com/open-mmlab/mmselfsup.git # %cd mmselfsup/ # Install MMSelfSup from source # !pip install -e . 
# + colab={"base_uri": "https://localhost:8080/"} id="53cda7d3" outputId="9e039eb8-24af-4b11-87c5-fd15d3195eee" # Check MMSelfSup installation import mmselfsup print(mmselfsup.__version__) # + [markdown] id="d36ec528" # ## Example to start a self-supervised task # # Before you start training, you need to prepare your dataset, please check [prepare_data.md](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/prepare_data.md) file carefully. # # **Note**: As we follow the original algorithms to implement our codes, so many algorithms are supposed to run on distributed mode, they are not supported on 1 GPU training officially. You can check it [here](https://github.com/open-mmlab/mmselfsup/blob/master/tools/train.py#L120). # # + colab={"base_uri": "https://localhost:8080/"} id="4fCR3h5nn26l" outputId="7dc71982-7177-4a11-fda7-ae0e99e4bc89" # !pwd # + [markdown] id="i-7LXl36VV--" # Here we provide a example and download a small dataset to display the demo. # + colab={"base_uri": "https://localhost:8080/"} id="VDM0ZHHKUZNP" outputId="c9f13fac-4e23-4bc8-fff1-3244bc1f2956" # !mkdir data # !wget https://download.openmmlab.com/mmselfsup/data/imagenet_examples.zip # !unzip -q imagenet_examples.zip -d ./data/ # + colab={"base_uri": "https://localhost:8080/"} id="tOGY00U4WPGC" outputId="5a54a9aa-5c4f-4ebf-d636-4b4e8af68908" # Check data directory # !apt-get install tree # !tree -d ./data # + [markdown] id="SF5vqi1nmqGh" # ### Create a new config file # To reuse the common parts of different config files, we support inheriting multiple base config files. For example, to train `relative_loc` algorithm, the new config file can create the model's basic structure by inheriting `configs/_base_/models/relative-loc.py`. 
# + colab={"base_uri": "https://localhost:8080/"} id="h2y2bh9DnE8b" outputId="b2a0a019-c5aa-420e-ad6c-4a8c6c3c3c58" # %%writefile configs/selfsup/relative_loc/relative-loc_resnet50_8xb64-steplr-70e_in1k_colab.py _base_ = [ '../_base_/models/relative-loc.py', '../_base_/datasets/imagenet_relative-loc.py', '../_base_/schedules/sgd_steplr-200e_in1k.py', '../_base_/default_runtime.py', ] log_config = dict(interval=10) # optimizer optimizer = dict( type='SGD', lr=0.2, weight_decay=1e-4, momentum=0.9, paramwise_options={ '\\Aneck.': dict(weight_decay=5e-4), '\\Ahead.': dict(weight_decay=5e-4) }) # learning policy lr_config = dict(policy='step', step=[1]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=70) # the max_keep_ckpts controls the max number of ckpt file in your work_dirs # if it is 3, when CheckpointHook (in mmcv) saves the 4th ckpt # it will remove the oldest one to keep the number of total ckpts as 3 checkpoint_config = dict(interval=1, max_keep_ckpts=3) # + [markdown] id="ahdJBk_2xXbQ" # ### Read the config file and modify config # # We can modify the loaded config file. 
# + id="9zlGPEAAx4Z5" # Load the basic config file from mmcv import Config cfg = Config.fromfile('configs/selfsup/relative_loc/relative-loc_resnet50_8xb64-steplr-70e_in1k_colab.py') # Specify the data settings cfg.data.samples_per_gpu = 64 cfg.data.workers_per_gpu = 2 # Modify the path and meta files of validation dataset cfg.data.val.data_source.data_prefix = 'data/imagenet/train' cfg.data.val.data_source.ann_file = 'data/imagenet/meta/train.txt' # Specify the optimizer cfg.optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) cfg.optimizer_config = dict(grad_clip=None) # Specify the learning rate scheduler cfg.lr_config = dict(policy='step', step=[1]) # Modify runtime setting cfg.runner = dict(type='EpochBasedRunner', max_epochs=2) # Specify the work directory cfg.work_dir = './work_dirs/selfsup/relative-loc_resnet50_8xb64-steplr-70e_in1k_colab' # Output logs for every 10 iterations cfg.log_config.interval = 10 # Set the random seed and enable the deterministic option of cuDNN # to keep the results' reproducible. 
from mmselfsup.apis import set_random_seed cfg.seed = 0 set_random_seed(0, deterministic=True) cfg.gpu_ids = range(1) # + [markdown] id="e2j-EtlHukMu" # ### Start self-supervised pre-train task # + colab={"base_uri": "https://localhost:8080/"} id="ufvDKC5Wvmni" outputId="2dafe4cf-01f7-4864-f6c7-bb37c0e6ef3d" import time import mmcv import os.path as osp from mmselfsup.datasets import build_dataset from mmselfsup.models import build_algorithm from mmselfsup.apis import train_model # Create the work directory mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # Build the algorithm model = build_algorithm(cfg.model) model.init_weights() # Build the dataset datasets = [build_dataset(cfg.data.train)] # Start pre-train train_model( model, datasets, cfg, distributed=False, timestamp=time.strftime('%Y%m%d_%H%M%S', time.localtime()), meta=dict()) # + [markdown] id="4a78a656" # ## Example to start a downstream task # # + colab={"base_uri": "https://localhost:8080/"} id="GH7JAKYifrsJ" outputId="972c3ce0-9c68-4213-8bb3-ab877b9cccc6" # !pwd # + [markdown] id="Qy4OauSGomPv" # ### Extract backbone weights from pre-train model # + id="QfFjDyl-ow5M" # !python tools/model_converters/extract_backbone_weights.py \ # work_dirs/selfsup/relative-loc_resnet50_8xb64-steplr-70e_in1k_colab/epoch_2.pth \ # work_dirs/selfsup/relative-loc_resnet50_8xb64-steplr-70e_in1k_colab/relative-loc_backbone-weights.pth # + [markdown] id="zlrOaDo6tQtr" # ### Prepare config file # # Here we create a new config file for demo dataset, actually we provided various config files in directory `configs/benchmarks`. 
# + id="tYVW1halrrtJ" # Load the basic config file from mmcv import Config benchmark_cfg = Config.fromfile('configs/benchmarks/classification/imagenet/resnet50_8xb32-steplr-100e_in1k.py') # Modify the model checkpoint_file = 'work_dirs/selfsup/relative-loc_resnet50_8xb64-steplr-70e_in1k_colab/relative-loc_backbone-weights.pth' # Or directly using pre-train model provided by us # checkpoint_file = 'https://download.openmmlab.com/mmselfsup/moco/mocov2_resnet50_8xb32-coslr-200e_in1k_20220225-89e03af4.pth' benchmark_cfg.model.backbone.frozen_stages=4 benchmark_cfg.model.backbone.init_cfg = dict(type='Pretrained', checkpoint=checkpoint_file) # As the imagenet_examples dataset folder doesn't have val dataset # Modify the path and meta files of validation dataset benchmark_cfg.data.val.data_source.data_prefix = 'data/imagenet/train' benchmark_cfg.data.val.data_source.ann_file = 'data/imagenet/meta/train.txt' # Specify the learning rate scheduler benchmark_cfg.lr_config = dict(policy='step', step=[1]) # Output logs for every 10 iterations benchmark_cfg.log_config.interval = 10 # Modify runtime settings for demo benchmark_cfg.runner = dict(type='EpochBasedRunner', max_epochs=2) # Specify the work directory benchmark_cfg.work_dir = './work_dirs/benchmarks/classification/imagenet/resnet50_8xb32-steplr-100e_in1k_colab' # Set the random seed and enable the deterministic option of cuDNN # to keep the results' reproducible. 
from mmselfsup.apis import set_random_seed

# Seed the downstream run the same way as pre-training for reproducibility.
benchmark_cfg.seed = 0
set_random_seed(0, deterministic=True)
benchmark_cfg.gpu_ids = range(1)

# + [markdown] id="9SeQvcRgzSgZ"
# ### Load extracted backbone weights to start a downstream task

# + colab={"base_uri": "https://localhost:8080/"} id="ZmqFyBjYu8Cx" outputId="ba933799-e4c9-48ac-b244-454548dbd2c6"
import time
import os.path as osp

import mmcv

from mmselfsup.apis import train_model
from mmselfsup.datasets import build_dataset
from mmselfsup.models import build_algorithm

# Ensure the work directory exists before any artifacts are written.
mmcv.mkdir_or_exist(osp.abspath(benchmark_cfg.work_dir))

# Build the classifier; init_cfg loads the extracted backbone weights.
model = build_algorithm(benchmark_cfg.model)
model.init_weights()

# The training API expects a list of datasets.
datasets = [build_dataset(benchmark_cfg.data.train)]

# Run the linear-probing benchmark.
run_timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
train_model(
    model,
    datasets,
    benchmark_cfg,
    distributed=False,
    timestamp=run_timestamp,
    meta=dict())

# + [markdown] id="3DUKbf3Rs2D_"
# **Note: As the demo only has one class in dataset, the model collapsed and the results of loss and acc should be ignored.**
demo/mmselfsup_colab_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from seq2seq_beam import Seq2Seq
import sys

# On Python 2, io.open provides the `encoding` keyword that builtin open lacks.
if sys.version_info[0] == 2:
    from io import open


def read_data(path):
    """Read a UTF-8 text file and return its full contents as one string."""
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()
# end function read_data


def build_map(data):
    """Build index<->character lookup tables for a newline-separated corpus.

    Special tokens come first so their indices are stable. The remaining
    characters are sorted: the original `list(set(...))` made the mapping
    depend on PYTHONHASHSEED, so indices changed between interpreter runs,
    breaking reproducibility of any saved model.

    Returns (idx2char, char2idx) dictionaries.
    """
    specials = ['<GO>', '<EOS>', '<PAD>', '<UNK>']
    chars = sorted(set(char for line in data.split('\n') for char in line))
    idx2char = {idx: char for idx, char in enumerate(specials + chars)}
    char2idx = {char: idx for idx, char in idx2char.items()}
    return idx2char, char2idx
# end function build_map


def preprocess_data():
    """Load the source/target corpora and convert them to index sequences.

    Unknown characters map to <UNK>; every target sequence is terminated
    with <EOS>. Returns the index sequences plus both vocab mappings for
    each side.
    """
    X_data = read_data('temp/letters_source.txt')
    Y_data = read_data('temp/letters_target.txt')

    X_idx2char, X_char2idx = build_map(X_data)
    Y_idx2char, Y_char2idx = build_map(Y_data)

    x_unk = X_char2idx['<UNK>']
    y_unk = Y_char2idx['<UNK>']
    y_eos = Y_char2idx['<EOS>']

    X_indices = [[X_char2idx.get(char, x_unk) for char in line]
                 for line in X_data.split('\n')]
    Y_indices = [[Y_char2idx.get(char, y_unk) for char in line] + [y_eos]
                 for line in Y_data.split('\n')]
    return X_indices, Y_indices, X_char2idx, Y_char2idx, X_idx2char, Y_idx2char
# end function preprocess_data


def main():
    """Train a small Seq2Seq model and run a few beam-search inferences.

    The first BATCH_SIZE lines are held out as a validation set.
    """
    BATCH_SIZE = 128
    X_indices, Y_indices, X_char2idx, Y_char2idx, X_idx2char, Y_idx2char = preprocess_data()

    X_train = X_indices[BATCH_SIZE:]
    Y_train = Y_indices[BATCH_SIZE:]
    X_test = X_indices[:BATCH_SIZE]
    Y_test = Y_indices[:BATCH_SIZE]

    model = Seq2Seq(
        rnn_size = 50,
        n_layers = 2,
        X_word2idx = X_char2idx,
        encoder_embedding_dim = 15,
        Y_word2idx = Y_char2idx,
        decoder_embedding_dim = 15,
    )
    model.fit(X_train, Y_train, val_data=(X_test, Y_test), batch_size=BATCH_SIZE)
    model.infer('common', X_idx2char, Y_idx2char)
    model.infer('apple', X_idx2char, Y_idx2char)
    model.infer('zhedong', X_idx2char, Y_idx2char)
# end function main


if __name__ == '__main__':
    main()
# -
src_nlp/tensorflow/depreciated/seq2seq_beam_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import intake

# Open the Intake catalog and query LA open data for small-cell datasets.
catalog = intake.open_catalog("./catalog.yml")
open_data = catalog.la_open_data
service_requests = open_data.search('Small Cell')

# Show every matching catalog entry so the reader can pick one.
for entry_id, entry in service_requests.items():
    display(entry)

# Read the chosen dataset into a DataFrame and preview it.
small_cells = service_requests['https://data.lacity.org/api/views/3nrm-mq6k'].read()
small_cells.head()

# ## Making a Map using Geopandas
#
# Now that we have a dataframe, we can turn it into a Geodataframe and plot it using geopandas.
#
# However, this is pretty hard to use as a map.

import geopandas as gpd
from ipyleaflet import Map, GeoData, basemaps, LayersControl

gdf = gpd.GeoDataFrame(small_cells)
gdf.plot()

# # Point Map using ipyleaflet
#
# Using ipyleaflet, we can add a geodataframe layer

# Interactive map centered on the LA area with the small-cell points overlaid.
m = Map(center=(33.8711,-117.8628), zoom = 9, basemap= basemaps.Esri.WorldTopoMap)
small_cell_layer = GeoData(geo_dataframe = gdf, name = 'Small Cells')
m.add_layer(small_cell_layer)
m

# # Heatmap, instead
small-cell-map.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## StackOverflow Survey Data analysis
#
#
# In this notebook, I will go through a comparison of some [data from StackOverflow surveys](https://insights.stackoverflow.com/survey/), for the years from 2013 to 2020.
#
# The analysis will follow the **CRISP-DM** (Cross-Industry Standard Process for Data Mining) approach, identifying 6 steps:
#
# 1. **Business Understanding**
# 2. **Data Understanding**
# 3. **Data Preparation**
# 4. **Modeling**
# 5. **Evaluation**
# 6. **Deployment**

# ### 1 - Business Understanding
#
# According to the CRISP-DM definition, this is the section that introduces the question of interest, clarifying what we are trying to achieve through data.
#
# In this case, my intent was to:
#
# * Acquire some insights on the characteristics of a reference population of Software Developers;
# * Identify some patterns that show the evolution of such characteristics through time;
# * Provide some forecast for the future.
#
# I was interested both in professional/technical aspects (education, technology) and demographic ones (gender, country, ethnicity). As a matter of fact, I found the demographic data quite revealing, and decided to build a simple model to provide a future forecast for that.

# ### 2 - Data Understanding
#
# The purpose of this section is to introduce the data sources to be used for the analysis, and clarify whether or not they cover enough scope to answer the question of interest.
#
# As mentioned at the very beginning, as a source here I'm using the [StackOverflow annual surveys](https://insights.stackoverflow.com/survey/).
#
# Through the last decade or so StackOverflow has presented its users with a survey to better understand them.
# Given the popularity of the forum, I thought this was a good place to look to identify change patterns in the characteristics of a SW developer. I focused on the years from 2013 to 2020.
#
# **Note**: looking at this from a **data science process** perspective, this section covers mainly the _gather_ and _assess_ parts.

# First, let's read in the necessary libraries.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
# %matplotlib inline

# Then, let's read in the data for the various years as DataFrames. **Note:** The paths to the files will have to be changed according to the user's local setup.
#
# You can see that the files have different name formats and that some require some adjustments in loading, forcing the data type to be a `string` to avoid error messages and misrepresentations.

# Change data file path as needed
df_13 = pd.read_csv('./data/stack_overflow_2013/2013 Stack Overflow Survey Responses.csv',dtype='string') # 2013
df_14 = pd.read_csv('./data/stack_overflow_2014/2014 Stack Overflow Survey Responses.csv') # 2014
df_15 = pd.read_csv('./data/stack_overflow_2015/2015 Stack Overflow Developer Survey Responses.csv',dtype='string') # 2015
df_16 = pd.read_csv('./data/stack_overflow_2016/2016 Stack Overflow Survey Responses.csv') # 2016
df_17 = pd.read_csv('./data/stack_overflow_2017/survey_results_public.csv') # 2017
df_18 = pd.read_csv('./data/stack_overflow_2018/survey_results_public.csv',dtype='string') # 2018
df_19 = pd.read_csv('./data/stack_overflow_2019/survey_results_public.csv') # 2019
df_20 = pd.read_csv('./data/stack_overflow_2020/survey_results_public.csv') # 2020

# Some further cleaning is needed for some of the years. Namely:
#
# * 2013, 2014 files present no information in the first row
# * 2015 file has the name of the columns in the first row, while the column's name is meaningless.
# You can uncomment any of the rows in the following cell to check this out, or proceed directly to the next one.

# +
# Uncomment to check data for 2013/14/15
# df_13.head()
# df_14.head()
# df_15.head()

# +
# Remove meaningless rows 2013/14/15

# 2013, 2014 - get rid of first row
df_13.drop(0, inplace = True)
df_14.drop(0, inplace = True)

# 2015 - the real column names sit in the first data row: promote them to
# headers, then drop that row.
col_names_dict = {}
for col in df_15.columns:
    col_names_dict[col] = df_15[col][0]
df_15.rename(columns = col_names_dict, inplace = True)
df_15.drop(0, inplace = True)
# -

# You can use the next cell to take a look at the final datasets, their columns etc.

# +
# Look at any df you like
# df_13.head()
# df_20.tail()

# See details of the columns available
# for col in df_13.columns:
#     print(col)
# -

# Once all the data is loaded, let's take a look at the number of rows and columns for the various years, to have a sense of the number of participants and questions asked:

# +
num_rows_13 = df_13.shape[0] # rows, 2013
num_cols_13 = df_13.shape[1] # columns, 2013
num_rows_14 = df_14.shape[0] # rows, 2014
num_cols_14 = df_14.shape[1] # columns, 2014
num_rows_15 = df_15.shape[0] # rows, 2015
num_cols_15 = df_15.shape[1] # columns, 2015
num_rows_16 = df_16.shape[0] # rows, 2016
num_cols_16 = df_16.shape[1] # columns, 2016
num_rows_17 = df_17.shape[0] # rows, 2017
num_cols_17 = df_17.shape[1] # columns, 2017
num_rows_18 = df_18.shape[0] # rows, 2018
num_cols_18 = df_18.shape[1] # columns, 2018
num_rows_19 = df_19.shape[0] # rows, 2019
num_cols_19 = df_19.shape[1] # columns, 2019
num_rows_20 = df_20.shape[0] # rows, 2020
num_cols_20 = df_20.shape[1] # columns, 2020

# BUGFIX: the printed label misspelled "Columns" as "Colums".
print("Year 2013 - Rows in the dataset: ", num_rows_13, "; Columns in the dataset: ", num_cols_13)
print("Year 2014 - Rows in the dataset: ", num_rows_14, "; Columns in the dataset: ", num_cols_14)
print("Year 2015 - Rows in the dataset: ", num_rows_15, "; Columns in the dataset: ", num_cols_15)
print("Year 2016 - Rows in the dataset: ", num_rows_16, "; Columns in the dataset: ", num_cols_16)
print("Year 2017 - Rows in the dataset: ", num_rows_17, "; Columns in the dataset: ", num_cols_17)
print("Year 2018 - Rows in the dataset: ", num_rows_18, "; Columns in the dataset: ", num_cols_18)
print("Year 2019 - Rows in the dataset: ", num_rows_19, "; Columns in the dataset: ", num_cols_19)
print("Year 2020 - Rows in the dataset: ", num_rows_20, "; Columns in the dataset: ", num_cols_20)
# -

# It seems that the number of respondents has grown with time, while the number of questions is reducing.
# We can plot the size of the population vs. the year:

# +
# Group size info together and plot
df_sample_size = pd.DataFrame({'Respondents': [num_rows_13, num_rows_14, num_rows_15, num_rows_16,
                                               num_rows_17, num_rows_18, num_rows_19, num_rows_20],
                               'Questions': [num_cols_13, num_cols_14, num_cols_15, num_cols_16,
                                             num_cols_17, num_cols_18, num_cols_19, num_cols_20]},
                              index=[2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020])
df_sample_size.Respondents.plot(grid=True, figsize = (10,7), xlabel='Year', ylabel='Number of Respondents');
# -

# As an indication of how effective the questions were, we can take a look at the percentage of `null`

# +
# Fraction of missing cells over the whole table, per year.
null_per_13 = df_13.isnull().sum().sum()/(num_rows_13*num_cols_13)
null_per_14 = df_14.isnull().sum().sum()/(num_rows_14*num_cols_14)
null_per_15 = df_15.isnull().sum().sum()/(num_rows_15*num_cols_15)
null_per_16 = df_16.isnull().sum().sum()/(num_rows_16*num_cols_16)
null_per_17 = df_17.isnull().sum().sum()/(num_rows_17*num_cols_17)
null_per_18 = df_18.isnull().sum().sum()/(num_rows_18*num_cols_18)
null_per_19 = df_19.isnull().sum().sum()/(num_rows_19*num_cols_19)
null_per_20 = df_20.isnull().sum().sum()/(num_rows_20*num_cols_20)

print ("Year 2013 - Percentage of Null: ", "{:.2f}".format(null_per_13*100), "%")
print ("Year 2014 - Percentage of Null: ", "{:.2f}".format(null_per_14*100), "%")
print ("Year 2015 - Percentage of Null: ", "{:.2f}".format(null_per_15*100), "%")
print ("Year 2016 - Percentage of Null: ", "{:.2f}".format(null_per_16*100), "%")
print ("Year 2017 - Percentage of Null: ", "{:.2f}".format(null_per_17*100), "%")
print ("Year 2018 - Percentage of Null: ", "{:.2f}".format(null_per_18*100), "%")
print ("Year 2019 - Percentage of Null: ", "{:.2f}".format(null_per_19*100), "%")
print ("Year 2020 - Percentage of Null: ", "{:.2f}".format(null_per_20*100), "%")
# -

# It can be seen that the percentages of `null` were higher in the first years. However, a better look at the dataframes would also show that their structure is different, with the answer to some questions distributed to more than one column.
#
# In the end, for the analysis of technical/professional aspects I decided to focus on the years from 2016/17 onwards given that they seem to present the more detailed information, and more consistency in the format. For the demographic data I will make use of the full data pool.

# ### 3 - Data Preparation
#
# This will be the section where some data subsets are extracted from the original dataframes, eventually further manipulated and then compared to identify existing patterns. From a **data science process** perspective, this section will cover mostly the _clean_ and _visualize_ steps.
#
# I will separate the technical from the demographic characteristics.
#
# #### 3.a - Technical/Professional Characteristics
#
# As a first step, I'll look into the **professional categories** (Professional Developer, Student etc.), to see if/how they change with time. For this part I'll use data from 2017 to 2020.
# Extract columns with data about the professional status
# NOTE: title of the column containing professional information changed from 2017 to 2020
status_vals_17 = df_17.Professional.value_counts() #2017
# Data in 2018 do not seem to include this information, even if it was asked to the respondent
status_vals_19 = df_19.MainBranch.value_counts() #2019
status_vals_20 = df_20.MainBranch.value_counts() #2020

# +
# Bar chart of the proportion of individuals in each professional category
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
fig.suptitle("Developer characterization")
fig.set_figheight(9)
fig.set_figwidth(19.2)

# Percentages are calculated with respect to the non-null answers
(status_vals_17/(num_rows_17 - df_17.Professional.isnull().sum())*100).plot(ax=ax1, kind="bar")
ax1.set_title("2017")
ax1.set_ylim([0, 80])
ax1.grid()

(status_vals_19/(num_rows_19 - df_19.MainBranch.isnull().sum())*100).plot(ax=ax2, kind="bar", color="purple")
ax2.set_title("2019")
ax2.set_ylim([0, 80])
ax2.grid()

(status_vals_20/(num_rows_20 - df_20.MainBranch.isnull().sum())*100).plot(ax=ax3, kind="bar", color="green")
ax3.set_title("2020")
ax3.set_ylim([0, 80])
ax3.grid();
# -

# The distribution of answers seems consistent in indicating a significant majority of professional developers, followed by students. These two categories always cover more than 80% of the total.
#
# Another characteristic worth looking into is the **formal education** of the respondent.
# Extract columns with data about the education level
# NOTE: title of the column containing formal education information changed from 2017 to 2020
ed_vals_17 = df_17.FormalEducation.value_counts() #2017
ed_vals_18 = df_18.FormalEducation.value_counts() #2018
ed_vals_19 = df_19.EdLevel.value_counts() #2019
ed_vals_20 = df_20.EdLevel.value_counts() #2020

# +
# Bar chart of the proportion of individuals in ed_vals
# Split in 2 figures for readability of the labels
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle("Formal Education (1/2)")
fig.set_figheight(9)
fig.set_figwidth(12.8)

# Percentages are calculated with respect to the non-null answers
(ed_vals_17[:5]/(num_rows_17 - df_17.FormalEducation.isnull().sum())*100).plot(ax=ax1, kind="bar");
ax1.set_title("2017")
ax1.set_ylim([0, 50])
ax1.grid()

(ed_vals_18[:5]/(num_rows_18 - df_18.FormalEducation.isnull().sum())*100).plot(ax=ax2, kind="bar", color = "orange");
ax2.set_title("2018")
ax2.set_ylim([0, 50])
ax2.grid();

# +
# Split in 2 figures for readability of the labels
fig, (ax3, ax4) = plt.subplots(1, 2)
fig.suptitle("Formal Education (2/2)")
fig.set_figheight(9)
fig.set_figwidth(12.8)

# Percentages are calculated with respect to the non-null answers
(ed_vals_19[:5]/(num_rows_19 - df_19.EdLevel.isnull().sum())*100).plot(ax=ax3, kind="bar", color = "purple");
ax3.set_title("2019")
ax3.set_ylim([0, 50])
ax3.grid();

(ed_vals_20[:5]/(num_rows_20 - df_20.EdLevel.isnull().sum())*100).plot(ax=ax4, kind="bar", color = "green");
ax4.set_title("2020")
ax4.set_ylim([0, 50])
ax4.grid();
# -

# The distributions in this case seem more diluted, but the combination of Bachelor and Master degrees always covers more than 60% of the population, followed by a more diverse kind of education.

# Finally, let's take a look at **technologies** (languages used/languages wanted).
# For this, some further processing is needed given how the data is collected.
# I will focus on the years from 2016 to 2020, which present some commonality in format, with all the answers to similar questions ("What technology have you used in the past year"/"What technology would you like to use in the next year") grouped in a single column. However, the content of the column will have to be parsed to split the various items composing the answer.
#
# For example, a respondent might answer "C++; Java; SQL", and we'll have to correctly identify and count one occurrence each of "C++", "Java", "SQL". For this I'll use the following function.

# Function to be used to parse the content of the various columns
def total_count(df, col1, col2, separator):
    '''
    Tally the individual items contained in a column of separator-joined strings.

    INPUT:
    df - the pandas dataframe you want to search
    col1 - the name of the column holding the separator-joined item strings
    col2 - the name of the column holding the count associated with each string
    separator - separator between strings (comma, semicolon etc.)

    OUTPUT:
    new_df - a dataframe with one row per distinct item and its total count,
             sorted by descending count
    items_list - a list of the distinct items, in order of first appearance
    '''
    # NOTE: the original docstring documented a nonexistent `look_for`
    # parameter; the function actually discovers the items while parsing.
    counts = defaultdict(int)
    items_list = []

    # Loop through rows, splitting each multi-item answer into its parts and
    # adding that row's count to every part.
    for idx in range(df.shape[0]):
        items = df[col1].iloc[idx].split(separator)
        weight = int(df[col2].iloc[idx])
        for item in items:
            counts[item] += weight
            if item not in items_list:
                items_list.append(item)

    new_df = pd.DataFrame(pd.Series(counts)).reset_index()
    new_df.columns = [col1, col2]
    # BUGFIX: sort by the caller-supplied count column instead of the
    # hard-coded literal 'count' (identical result when col2 == 'count').
    new_df.sort_values(col2, ascending=False, inplace=True)
    return new_df, items_list
# end function total_count

# Let's start with the **most used languages**

# Extract columns with data about the language/technology used
# NOTE: title of the column containing this information changed from 2016 to 2020
lang_used_16 = df_16.tech_do.value_counts() #2016
lang_used_17 = df_17.HaveWorkedLanguage.value_counts() #2017
lang_used_18 = df_18.LanguageWorkedWith.value_counts() #2018
lang_used_19 = df_19.LanguageWorkedWith.value_counts() #2019
lang_used_20 = df_20.LanguageWorkedWith.value_counts() #2020

# Reset data frames
lu_16 = lang_used_16.reset_index()
lu_17 = lang_used_17.reset_index()
lu_18 = lang_used_18.reset_index()
lu_19 = lang_used_19.reset_index()
lu_20 = lang_used_20.reset_index()

# Rename columns
lu_16.rename(columns={'index': 'language', 'tech_do': 'count'}, inplace=True)
lu_17.rename(columns={'index': 'language', 'HaveWorkedLanguage': 'count'}, inplace=True)
lu_18.rename(columns={'index': 'language', 'LanguageWorkedWith': 'count'}, inplace=True)
lu_19.rename(columns={'index': 'language', 'LanguageWorkedWith': 'count'}, inplace=True)
lu_20.rename(columns={'index': 'language', 'LanguageWorkedWith': 'count'}, inplace=True)

# Parse, separate and count the content of the columns
lu_16_df, worked_languages_16 = total_count(lu_16, 'language', 'count', '; ') # NOTE: separator for 2016 requires a space
lu_17_df, worked_languages_17 = total_count(lu_17, 'language', 'count', '; ') # NOTE: separator for 2017 requires a space
lu_18_df, worked_languages_18 = total_count(lu_18, 'language', 'count', ';')
lu_19_df, worked_languages_19 = total_count(lu_19, 'language', 'count', ';')
lu_20_df, worked_languages_20 = total_count(lu_20, 'language', 'count', ';')

# +
# Bar chart of the proportion of the top 5 languages/combinations for the individuals in count_vals
fig, axes = plt.subplots(1, 5)
fig.suptitle("Languages Used")
fig.set_figheight(10)
fig.set_figwidth(15)

# Percentages are calculated with respect to the non-null answers
axes[0].bar(lu_16_df[:5]['language'].to_list(),
            (lu_16_df[:5]['count'].to_list()
             /(num_rows_16 - df_16.tech_do.isnull().sum()))*100,
            color = "red")
axes[0].tick_params('x', labelrotation=90)
axes[0].set_title("2016")
axes[0].set_ylim([0, 75])
axes[0].grid()
axes[1].bar(lu_17_df[:5]['language'].to_list(),
            (lu_17_df[:5]['count'].to_list()
             /(num_rows_17 - df_17.HaveWorkedLanguage.isnull().sum()))*100)
axes[1].tick_params('x', labelrotation=90)
axes[1].set_title("2017")
axes[1].set_ylim([0, 75])
axes[1].grid()

axes[2].bar(lu_18_df[:5]['language'].to_list(),
            (lu_18_df[:5]['count'].to_list()
             /(num_rows_18 - df_18.LanguageWorkedWith.isnull().sum()))*100,
            color = "orange")
axes[2].tick_params('x', labelrotation=90)
axes[2].set_title("2018")
axes[2].set_ylim([0, 75])
axes[2].grid()

axes[3].bar(lu_19_df[:5]['language'].to_list(),
            (lu_19_df[:5]['count'].to_list()
             /(num_rows_19 - df_19.LanguageWorkedWith.isnull().sum()))*100,
            color = "purple")
axes[3].tick_params('x', labelrotation=90)
axes[3].set_title("2019")
axes[3].set_ylim([0, 75])
axes[3].grid()

axes[4].bar(lu_20_df[:5]['language'].to_list(),
            (lu_20_df[:5]['count'].to_list()
             /(num_rows_20 - df_20.LanguageWorkedWith.isnull().sum()))*100,
            color = "green")
axes[4].tick_params('x', labelrotation=90)
axes[4].set_title("2020")
axes[4].set_ylim([0, 75])
axes[4].grid();
# -

# From this plot it's clear that JavaScript and SQL are a consistent presence in the lives of the respondents, and we can also see how Python seems to gain some traction.
# Now let's look at the **languages people want to use**:

# Extract columns with data about the language/technology wanted
# NOTE: title of the column containing this information changed from 2016 to 2020
lang_wanted_16 = df_16.tech_want.value_counts() #2016
lang_wanted_17 = df_17.WantWorkLanguage.value_counts() #2017
lang_wanted_18 = df_18.LanguageDesireNextYear.value_counts() #2018
lang_wanted_19 = df_19.LanguageDesireNextYear.value_counts() #2019
lang_wanted_20 = df_20.LanguageDesireNextYear.value_counts() #2020

# Reset data frames
lw_16 = lang_wanted_16.reset_index()
lw_17 = lang_wanted_17.reset_index()
lw_18 = lang_wanted_18.reset_index()
lw_19 = lang_wanted_19.reset_index()
lw_20 = lang_wanted_20.reset_index()

# Rename columns
lw_16.rename(columns={'index': 'language', 'tech_want': 'count'}, inplace=True)
lw_17.rename(columns={'index': 'language', 'WantWorkLanguage': 'count'}, inplace=True)
lw_18.rename(columns={'index': 'language', 'LanguageDesireNextYear': 'count'}, inplace=True)
lw_19.rename(columns={'index': 'language', 'LanguageDesireNextYear': 'count'}, inplace=True)
lw_20.rename(columns={'index': 'language', 'LanguageDesireNextYear': 'count'}, inplace=True)

# Parse, separate and count the content of the columns
lw_16_df, wanted_languages_16 = total_count(lw_16, 'language', 'count', '; ') # NOTE: separator for 2016 requires a space
lw_17_df, wanted_languages_17 = total_count(lw_17, 'language', 'count', '; ') # NOTE: separator for 2017 requires a space
lw_18_df, wanted_languages_18 = total_count(lw_18, 'language', 'count', ';')
lw_19_df, wanted_languages_19 = total_count(lw_19, 'language', 'count', ';')
lw_20_df, wanted_languages_20 = total_count(lw_20, 'language', 'count', ';')

# +
# Bar chart of the proportion of the top 5 languages/combinations for the individuals in count_vals
fig, axes = plt.subplots(1, 5)
fig.suptitle("Languages Wanted")
fig.set_figheight(10)
fig.set_figwidth(15)

# Percentages are calculated with respect to the non-null answers.
# BUGFIX: the denominators previously reused the *used*-language columns
# (tech_do / HaveWorkedLanguage / LanguageWorkedWith); they now use the
# "wanted" columns whose counts are actually being plotted.
axes[0].bar(lw_16_df[:5]['language'].to_list(),
            (lw_16_df[:5]['count'].to_list()
             /(num_rows_16 - df_16.tech_want.isnull().sum()))*100,
            color = "red")
axes[0].tick_params('x', labelrotation=90)
axes[0].set_title("2016")
axes[0].set_ylim([0, 60])
axes[0].grid()

axes[1].bar(lw_17_df[:5]['language'].to_list(),
            (lw_17_df[:5]['count'].to_list()
             /(num_rows_17 - df_17.WantWorkLanguage.isnull().sum()))*100)
axes[1].tick_params('x', labelrotation=90)
axes[1].set_title("2017")
# BUGFIX: this panel had ylim [0, 75] while all others use [0, 60].
axes[1].set_ylim([0, 60])
axes[1].grid()

axes[2].bar(lw_18_df[:5]['language'].to_list(),
            (lw_18_df[:5]['count'].to_list()
             /(num_rows_18 - df_18.LanguageDesireNextYear.isnull().sum()))*100,
            color = "orange")
axes[2].tick_params('x', labelrotation=90)
axes[2].set_title("2018")
axes[2].set_ylim([0, 60])
axes[2].grid()

axes[3].bar(lw_19_df[:5]['language'].to_list(),
            (lw_19_df[:5]['count'].to_list()
             /(num_rows_19 - df_19.LanguageDesireNextYear.isnull().sum()))*100,
            color = "purple")
axes[3].tick_params('x', labelrotation=90)
axes[3].set_title("2019")
axes[3].set_ylim([0, 60])
axes[3].grid()

axes[4].bar(lw_20_df[:5]['language'].to_list(),
            (lw_20_df[:5]['count'].to_list()
             /(num_rows_20 - df_20.LanguageDesireNextYear.isnull().sum()))*100,
            color = "green")
axes[4].tick_params('x', labelrotation=90)
axes[4].set_title("2020")
axes[4].set_ylim([0, 60])
axes[4].grid();
# -

# We can see a confirmation here of the fact that Python has been growing in use and benefits from a consistent popularity.

# #### 3.b - Demographic/Non-Professional Characteristics
#
# For this part of the analysis I will consider also the datasets for 2013 to 2015.
# Let's start with a look at the **Country** of the respondent

# Extract columns with data about the Country
# NOTE: title of the column containing Country information changed (slightly) from 2013 to 2020
count_vals_13 = df_13['What Country or Region do you live in?'].value_counts() #2013
count_vals_14 = df_14['What Country do you live in?'].value_counts() #2014
count_vals_15 = df_15.Country.value_counts() #2015
count_vals_16 = df_16.country.value_counts() #2016
count_vals_17 = df_17.Country.value_counts() #2017
count_vals_18 = df_18.Country.value_counts() #2018
count_vals_19 = df_19.Country.value_counts() #2019
count_vals_20 = df_20.Country.value_counts() #2020

# +
# Bar chart of the first 10 countries in count_vals
# Split in two figures to improve readability of labels
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
fig.suptitle("Country (1/2)")
fig.set_figheight(9)
fig.set_figwidth(14)

# Percentages are calculated with respect to the non-null answers
(count_vals_13[:10]/(num_rows_13 - df_13['What Country or Region do you live in?'].isnull().sum())*100).plot(ax=ax1, kind="bar")
ax1.set_title("2013")
ax1.set_ylim([0, 35])
ax1.grid()

(count_vals_14[:10]/(num_rows_14 - df_14['What Country do you live in?'].isnull().sum())*100).plot(ax=ax2, kind="bar")
ax2.set_title("2014")
ax2.set_ylim([0, 35])
ax2.grid()

(count_vals_15[:10]/(num_rows_15 - df_15.Country.isnull().sum())*100).plot(ax=ax3, kind="bar")
ax3.set_title("2015")
ax3.set_ylim([0, 35])
ax3.grid()

(count_vals_16[:10]/(num_rows_16 - df_16.country.isnull().sum())*100).plot(ax=ax4, kind="bar");
ax4.set_title("2016")
ax4.set_ylim([0, 35])
ax4.grid();

# +
# Second set of four years, for readability of labels
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
fig.suptitle("Country (2/2)")
fig.set_figheight(9)
fig.set_figwidth(14)

# Percentages are calculated with respect to the non-null answers
(count_vals_17[:10]/(num_rows_17 - df_17.Country.isnull().sum())*100).plot(ax=ax1, kind="bar")
ax1.set_title("2017")
ax1.set_ylim([0, 35])
ax1.grid()

(count_vals_18[:10]/(num_rows_18 - df_18.Country.isnull().sum())*100).plot(ax=ax2, kind="bar")
ax2.set_title("2018")
ax2.set_ylim([0, 35])
ax2.grid()

(count_vals_19[:10]/(num_rows_19 - df_19.Country.isnull().sum())*100).plot(ax=ax3, kind="bar");
ax3.set_title("2019")
ax3.set_ylim([0, 35])
ax3.grid();

(count_vals_20[:10]/(num_rows_20 - df_20.Country.isnull().sum())*100).plot(ax=ax4, kind="bar");
ax4.set_title("2020")
ax4.set_ylim([0, 35])
ax4.grid();
# -

# Now let's take a look at information about **gender**.
#
# Note that the question about gender has changed through the years giving the respondent more options to better self-identify. This will be visible in the next plots.

# Extract columns with data about gender
# NOTE: title of the column containing gender information changed (slightly) from 2014 to 2020.
# Also 2013 does not include this question
gend_vals_14 = df_14['What is your gender?'].value_counts() #2014
gend_vals_15 = df_15.Gender.value_counts() #2015
gend_vals_16 = df_16.gender.value_counts() #2016
gend_vals_17 = df_17.Gender.value_counts() #2017
gend_vals_18 = df_18.Gender.value_counts() #2018
gend_vals_19 = df_19.Gender.value_counts() #2019
gend_vals_20 = df_20.Gender.value_counts() #2020

# +
# Bar chart of the proportion of the top 5 values for the individuals in gend_vals
# Splitting into two figures to improve readability of labels
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
fig.suptitle("Gender (1/2)")
fig.set_figheight(9)
fig.set_figwidth(14)

# Percentages are calculated with respect to the non-null answers
(gend_vals_14[:5]/(num_rows_14 - df_14['What is your gender?'].isnull().sum())*100).plot(ax=ax1, kind="bar")
ax1.set_title("2014")
ax1.set_ylim([0, 95])
ax1.grid()

(gend_vals_15[:5]/(num_rows_15 - df_15.Gender.isnull().sum())*100).plot(ax=ax2, kind="bar")
ax2.set_title("2015")
ax2.set_ylim([0, 95])
ax2.grid()

(gend_vals_16[:5]/(num_rows_16 - df_16.gender.isnull().sum())*100).plot(ax=ax3, kind="bar")
ax3.set_title("2016")
ax3.set_ylim([0, 95])
ax3.grid()

(gend_vals_17[:5]/(num_rows_17 - df_17.Gender.isnull().sum())*100).plot(ax=ax4, kind="bar")
ax4.set_title("2017")
ax4.set_ylim([0, 95])
ax4.grid()

# +
# Splitting into two figures to improve readability of labels
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
fig.suptitle("Gender (2/2)")
fig.set_figheight(9)
fig.set_figwidth(10.5)

# Percentages are calculated with respect to the non-null answers
(gend_vals_18[:5]/(num_rows_18 - df_18.Gender.isnull().sum())*100).plot(ax=ax1, kind="bar");
ax1.set_title("2018")
ax1.set_ylim([0, 95])
ax1.grid();

(gend_vals_19[:5]/(num_rows_19 - df_19.Gender.isnull().sum())*100).plot(ax=ax2, kind="bar")
ax2.set_title("2019")
ax2.set_ylim([0, 95])
ax2.grid()

(gend_vals_20[:5]/(num_rows_20 - df_20.Gender.isnull().sum())*100).plot(ax=ax3, kind="bar");
ax3.set_title("2020")
ax3.set_ylim([0, 95])
ax3.grid();
# -

# The fact that the majority of respondents are men is clearly visible here. For the sake of simplicity we can focus on the Male/Female answer, aggregate data and show percentages over the years.
# +
# Aggregate the Male/Female percentages (over non-null answers) per year.
# NOTE(review): positions 0 and 1 of each value_counts() are assumed to be
# the "Male" and "Female" labels (value_counts sorts by frequency) — the
# labels themselves differ across survey years, so positional access is
# used here; confirm against the actual data if the surveys change.
# BUGFIX: use .iloc for positional access — `series[0]` on a string-indexed
# Series relies on deprecated positional-fallback indexing in pandas.
perc_male = []
perc_female = []

perc_male.append((gend_vals_14.iloc[0]/(num_rows_14 - df_14['What is your gender?'].isnull().sum()))*100)
perc_male.append((gend_vals_15.iloc[0]/(num_rows_15 - df_15.Gender.isnull().sum()))*100)
perc_male.append((gend_vals_16.iloc[0]/(num_rows_16 - df_16.gender.isnull().sum()))*100)
perc_male.append((gend_vals_17.iloc[0]/(num_rows_17 - df_17.Gender.isnull().sum()))*100)
perc_male.append((gend_vals_18.iloc[0]/(num_rows_18 - df_18.Gender.isnull().sum()))*100)
perc_male.append((gend_vals_19.iloc[0]/(num_rows_19 - df_19.Gender.isnull().sum()))*100)
perc_male.append((gend_vals_20.iloc[0]/(num_rows_20 - df_20.Gender.isnull().sum()))*100)

perc_female.append((gend_vals_14.iloc[1]/(num_rows_14 - df_14['What is your gender?'].isnull().sum()))*100)
perc_female.append((gend_vals_15.iloc[1]/(num_rows_15 - df_15.Gender.isnull().sum()))*100)
perc_female.append((gend_vals_16.iloc[1]/(num_rows_16 - df_16.gender.isnull().sum()))*100)
perc_female.append((gend_vals_17.iloc[1]/(num_rows_17 - df_17.Gender.isnull().sum()))*100)
perc_female.append((gend_vals_18.iloc[1]/(num_rows_18 - df_18.Gender.isnull().sum()))*100)
perc_female.append((gend_vals_19.iloc[1]/(num_rows_19 - df_19.Gender.isnull().sum()))*100)
perc_female.append((gend_vals_20.iloc[1]/(num_rows_20 - df_20.Gender.isnull().sum()))*100)
# -

df_gend_perc = pd.DataFrame({'Male': perc_male,
                             'Female': perc_female},
                            index=[2014, 2015, 2016, 2017, 2018, 2019, 2020])
df_gend_perc.head(7)

# Plot
df_gend_perc.plot(grid=True, figsize = (10,7), xlabel='Year', ylabel='Percentage', title='Gender of the Stack Overflow survey respondents');

# Judging from the plot there seem to be trends in the data, and they will be further analysed in the Modeling section

# +
# As an additional point, note that 2016 included a specific "women_on_teams" column
wot_16 = df_16.women_on_team.value_counts()
(wot_16*100/(num_rows_16 - df_16.women_on_team.isnull().sum())).plot(kind="bar", grid=True, figsize = (10,7), title='% of Women in a team (2016)');
# -

# Finally, let's take a look at **ethnicity**

# Extract columns with data about ethnicity
# NOTE: title of the column containing ethnicity information changed from 2017 to 2020.
# NOTE(review): only 2017-2020 are extracted below; the original comment said
# "2013/17 do not include information about ethnicity", but 2017 (Race) is
# clearly present — presumably 2013-2016 were meant. Verify against the raw data.
ethn_vals_17 = df_17.Race.value_counts() #2017
ethn_vals_18 = df_18.RaceEthnicity.value_counts() #2018
ethn_vals_19 = df_19.Ethnicity.value_counts() #2019
ethn_vals_20 = df_20.Ethnicity.value_counts() #2020

# +
# Bar chart of the proportion of the top 5 values for the individuals in ethn_vals
# (shown as fractions of the non-null answers, unlike the percentage plots above)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
fig.suptitle("Ethnicity")
fig.set_figheight(10)
fig.set_figwidth(15)

# Proportions are calculated with respect to the non-null answers
(ethn_vals_17[:5]/(num_rows_17 - df_17.Race.isnull().sum())).plot(ax=ax1, kind="bar")
ax1.set_title("2017")
ax1.set_ylim([0, 0.80])
ax1.grid()

(ethn_vals_18[:5]/(num_rows_18 - df_18.RaceEthnicity.isnull().sum())).plot(ax=ax2, kind="bar")
ax2.set_title("2018")
ax2.set_ylim([0, 0.80])
ax2.grid()

(ethn_vals_19[:5]/(num_rows_19 - df_19.Ethnicity.isnull().sum())).plot(ax=ax3, kind="bar")
ax3.set_title("2019")
ax3.set_ylim([0, 0.80])
ax3.grid()

(ethn_vals_20[:5]/(num_rows_20 - df_20.Ethnicity.isnull().sum())).plot(ax=ax4, kind="bar")
ax4.set_title("2020")
ax4.set_ylim([0, 0.80])
ax4.grid()
# -

# For this category too, there is a clear majority identifiable in the answers.

# ### 4 - Modeling

# The level of unbalance identifiable in the demographic data led me to try to provide some forecast, to see when, in the foreseeable future, things might be subject to change. From a **data science process** perspective, this section will cover mostly the _model_ and _visualize_ steps.
#
# I focused on the **gender** analysis, and given the behaviour visible in the plots, I opted for a simple linear fit.
# +
# First-order polynomial (least-squares line) fitted to the yearly percentages.
# np.polyfit with full=True also returns the residual sum of squares, which is
# used below for the error metrics.
coefficients_male, residuals_male, _, _, _ = np.polyfit(
    df_gend_perc.index.array, df_gend_perc.Male, 1, full=True)
coefficients_female, residuals_female, _, _, _ = np.polyfit(
    df_gend_perc.index.array, df_gend_perc.Female, 1, full=True)

# Accuracy of the linear predictors:
#   mse   = mean square error (residual sum of squares / number of points)
#   nrmse = root mean square error normalised by the observed range
mse_male = residuals_male[0] / len(df_gend_perc.index)
nrmse_male = np.sqrt(mse_male) / (df_gend_perc.Male.max() - df_gend_perc.Male.min())
mse_female = residuals_female[0] / len(df_gend_perc.index)
nrmse_female = np.sqrt(mse_female) / (df_gend_perc.Female.max() - df_gend_perc.Female.min())

print('Slope - Male: ' + str(coefficients_male[0]) + '; Female: ' + str(coefficients_female[0]))
print('NRMSE - Male: ' + str(nrmse_male) + '; Female: ' + str(nrmse_female))
# -

# Judging from the NRMSE, the linear predictors are not bad, and so I used them and extended the trend lines into the future.

# +
# Plot, including trendlines
ax = df_gend_perc.plot(grid=True, figsize=(10, 7), xlabel='Year', ylabel='Percentage',
                       title='Gender of the Stack Overflow survey respondents');

# Evaluate the fitted lines on the observed years.
years = [x for x in df_gend_perc.index.array]
trend_male = [coefficients_male[0] * x + coefficients_male[1] for x in years]
trend_female = [coefficients_female[0] * x + coefficients_female[1] for x in years]

ax.plot(years, trend_male, '--')
ax.plot(years, trend_female, '--')
ax.legend(['Male', 'Female', 'Male trend', 'Female trend']);

# +
# Plot, with extended trendlines
ax = df_gend_perc.plot(grid=True, figsize=(10, 7), xlabel='Year', ylabel='Percentage',
                       title='Gender of the Stack Overflow survey respondents - extended trendlines');

# Extend predictions towards the year where the male trend would reach 50%
# (an even distribution), and add 5 more years.
# FIX: the original appended to the previous cell's lists, so re-running this
# cell kept growing them (hidden-state bug). Build new extended lists instead,
# making the cell idempotent under Restart & Run All.
x_ext = int((50 - coefficients_male[1]) / coefficients_male[0]) + 5
years_ext = years + [x_ext]
trend_male_ext = trend_male + [coefficients_male[0] * x_ext + coefficients_male[1]]
trend_female_ext = trend_female + [coefficients_female[0] * x_ext + coefficients_female[1]]

ax.plot(years_ext, trend_male_ext, '--')
ax.plot(years_ext, trend_female_ext, '--')
# FIX: ax.grid(b=True) used the `b` keyword, deprecated in Matplotlib 3.5 and
# removed in 3.6; the positional form behaves identically on all versions.
ax.grid(True)
ax.legend(['Male', 'Female', 'Male trend', 'Female trend']);
# -

# ### 5 - Evaluation
#
# The intent of the analysis was to look for patterns in the characteristics of a reference population of SW developers and eventually foresee an evolution based on them.
#
# Of all the categories provided through the StackOverflow data survey, I found the gender distribution of the respondents most striking because:
#
# * A clear pattern is visible there, with a strong majority of the respondents being men;
# * The data can be well approximated with a trend line, showing that a rate of change is present that moves towards a more even distribution;
# * Extending these trends into the future shows that, at the current pace, the reaching of a state where genders are equally distributed within the workforce would require more or less another century.
#
# The last point particularly seems at odds with the reputation of the SW world to be one where things can change quickly.

# ### 6 - Deployment
#
# Besides this notebook, I published some of the findings in a **Medium story** available [here](https://michelangelo-russo.medium.com/do-things-really-change-in-software-2d5f2c7c9244).
#
# The story doesn't cover all the findings and analyses described in the notebook: I decided for it to focus on some of the things that I thought could be more easily expected (like the distribution of languages used/wanted by developers) and then on the findings about the gender distribution.
#
StackOverflow_survey_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Python sample code: fetch market-operation records from the KOSPO open API.
# FIX: the original used the Python 2 import `from urllib import urlencode,
# quote_plus` (an ImportError on the Python 3 kernel declared above) and never
# imported Request/urlopen at all — the commented-out import pointed at
# urllib3, which does not export them. Use the Python 3 urllib submodules.
from urllib.parse import urlencode, quote_plus
from urllib.request import Request, urlopen

url = 'http://dataopen.kospo.co.kr/openApi/Trade/EteMarketOperList'

# Query string for the API call.
# NOTE(security): do not hardcode a real service key in a shared notebook;
# prefer loading it from an environment variable.
queryParams = '?' + urlencode({
    quote_plus('ServiceKey'): '<KEY>2BquBOM38h8g%3D%3D',
    quote_plus('strSdate'): '20170101',   # start date (YYYYMMDD)
    quote_plus('strEdate'): '20171231',   # end date (YYYYMMDD)
    quote_plus('numOfRows'): '10',        # page size
    quote_plus('pageNo'): '1',            # page number
})

# GET is already the default for a body-less Request; the original forced it
# via the legacy `get_method` hack — `method='GET'` is the Python 3 way.
request = Request(url + queryParams, method='GET')
response_body = urlopen(request).read()
print(response_body)
# -
etc/supply_api_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## _*Tutorial Algorithm*_
#
# This notebook is part of the simple example of an algorithm for `Qiskit Aqua`. This sample, called `Evolution Fidelity`, illustrates how to implement an algorithm and what steps to take to configure and run it. The implementation of the algorithm itself is located in the `evolutionfidelity` directory under `examples`.
#
# **Note: This notebook explicitly registers the example algorithm. If instead the register_algorithm is commented out and the implementation folder `evolutionfidelity` is moved/copied under `qiskit_aqua`, then the algorithm will be automatically discovered and explicit registration is not needed**.
#
# Assuming the above is done, we can easily get an instance of the algorithm as well as a random initial state to be used for checking quantum evolution fidelity, as follows.

# +
import numpy as np
from qiskit_aqua.operator import Operator
from qiskit_aqua import get_algorithm_instance
from qiskit_aqua import get_initial_state_instance
from qiskit_aqua import register_algorithm
from evolutionfidelity import EvolutionFidelity

# Uncomment for a reproducible random operator:
# np.random.seed(2)
num_qubits = 2

# Build a random real symmetric matrix (temp + temp.T) and wrap it as the
# qubit operator whose evolution fidelity will be checked.
temp = np.random.random((2 ** num_qubits, 2 ** num_qubits))
qubitOp = Operator(matrix=temp + temp.T)

# While the algorithm can be automatically discovered if placed in an
# appropriate folder, we can manually register it like this. If the sample
# algorithm folder was copied/moved to qiskit_aqua then the
# line below can be commented out, as it will be discovered/registered automatically.
register_algorithm(EvolutionFidelity)

# Get an instance of the registered EvolutionFidelity algorithm and set up
# its quantum backend.
ef = get_algorithm_instance('EvolutionFidelity')
ef.setup_quantum_backend()

# Random initial state on `num_qubits` qubits used for the fidelity check.
state_in = get_initial_state_instance('CUSTOM')
state_in.init_args(num_qubits, state='random')
# -

# With the necessary pieces in place, we can then change the `expansion_order` and run the algorithm to see how the quantum evolution fidelity is affected by the different orders.

# +
import math

# English ordinal suffix helper: 1 -> '1st', 2 -> '2nd', 3 -> '3rd', 11 -> '11th', ...
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])

# Run the algorithm with Suzuki expansions of increasing order and report the
# resulting evolution fidelity ('score') for each.
for expansion_order in [1, 2, 3, 4]:
    ef.init_args(
        qubitOp,
        state_in,
        expansion_order=expansion_order
    )
    print('The evolution fidelity under {} order suzuki expansion is {}.'.format(
        ordinal(expansion_order),
        ef.run()['score']
    ))
aqua/evolution_fidelity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### 프로젝트 생성 # !scrapy startproject naver_article # #### items.py # + # %%writefile naver_article/naver_article/items.py import scrapy class NaverArticleItem(scrapy.Item): title = scrapy.Field() date = scrapy.Field() press = scrapy.Field() content = scrapy.Field() category = scrapy.Field() link = scrapy.Field() photo_url = scrapy.Field() # - # #### spider.py # + # %%writefile naver_article/naver_article/spiders/spider.py import scrapy import datetime from naver_article.items import NaverArticleItem from selenium import webdriver class ArticleSpider(scrapy.Spider): name = 'NaverArticle' def __init__(self, keyword="자살", start_date="2015.01.01", end_date="2018.12.31", **kwargs): self.start_urls = "https://search.naver.com/search.naver?where=news&sm=tab_jum&query={}".format(keyword) self.search_press_ls = [ # 종합지 "경향신문", "국민일보", "동아일보", "문화일보", "서울신문", "세계일보", "조선일보", "중앙일보", "한겨레", "한국일보", # 방송/통신사 "JTBC", "KBS", "MBC", "MBN", "SBS CNBC", "SBS", "TV조선","YTN", "뉴스1", "뉴시스", "연합뉴스", "연합뉴스TV", "채널A", "한국경제TV", # 경제지 "매일경제", "머니투데이", "서울경제", "아시아경제", "이데일리", "조선비즈", "조세일보", "파이낸셜뉴스", "한국경제", "헤럴드경제", # 인터넷/IT지 "ZDNet Korea", "노컷뉴스", "데일리안", "디지털데일리", "디지털타임스", "머니S", "미디어오늘", "블로터", "아이뉴스24", "오마이뉴스", "전자신문", "프레시안", # 매거진 "뉴스위크", "매경이코노미", "시사IN", "시사저널", "신동아", "월간 산", "이코노미스트", "주간경향", "주간동아", "주간조선", "중앙SUNDAY", "한겨레21", "한경비즈니스", # 전문지/포토 "기자협회보", "뉴스타파", "동아사이언스", "여성신문", "일다", "참세상", "코리아헤럴드", "코메디닷컴", "헬스조선", ] self.start_date = datetime.datetime.strptime(start_date, "%Y.%m.%d") self.end_date = datetime.datetime.strptime(end_date, "%Y.%m.%d") super().__init__(**kwargs) ## 사용하는 함수들 ## 네이버 뉴스 신문사별 id 가져오기: {"경향신문" : "1032"} def get_press_dict(self, driver): driver.find_element_by_xpath('//*[@id="news_popup"]').click() keys = 
[element.get_attribute("title") for element in driver.find_elements_by_css_selector('#order_cat li label')] values = [element.get_attribute("value") for element in driver.find_elements_by_css_selector('#order_cat li input')] driver.find_element_by_xpath('//*[@id="news_popup"]').click() return {key:value for key, value in zip(keys, values)} ## 네이버 뉴스 검색할 신문사 선택하기 def set_search_press(self, driver): press_dict = self.get_press_dict(driver) driver.find_element_by_xpath('//*[@id="news_popup"]').click() ## 선택되어있는거 초기화 categorys = driver.find_elements_by_css_selector('#order_cat .viewtit input') for category in categorys: if category.get_attribute("checked") != "true": category.click() category.click() ## 검색할 신문사 선택 for press in self.search_press_ls: driver.find_element_by_xpath('//*[@id="ca_{}"]'.format(press_dict[press])).click() driver.find_element_by_xpath('//*[@id="_nx_option_media"]/div[2]/div[3]/button[1]').click() ## 네이버 뉴스 검색 기간 설정하기(왜이렇게 입력이 안됨...) def set_search_date(self, driver, start_date, end_date): driver.find_element_by_xpath('//*[@id="snb"]/div/ul/li[2]').click() while True: driver.find_element_by_xpath('//*[@id="news_input_period_begin"]').clear() driver.find_element_by_xpath('//*[@id="news_input_period_begin"]').send_keys(start_date) input_start_date = driver.find_element_by_xpath('//*[@id="news_input_period_begin"]').get_attribute("value") if start_date == input_start_date: break while True: driver.find_element_by_xpath('//*[@id="news_input_period_end"]').clear() driver.find_element_by_xpath('//*[@id="news_input_period_end"]').send_keys(end_date) input_end_date = driver.find_element_by_xpath('//*[@id="news_input_period_end"]').get_attribute("value") if end_date == input_end_date: break driver.find_element_by_xpath('//*[@id="_nx_option_date"]/div[2]/span/button').click() ## 텍스트 다듬어 넣기 def content_clear(self, content): return content.replace("\n", "").replace("\t", "").replace("\xa0", "").strip() ## 스파이더 시작 def start_requests(self): url = 
self.start_urls yield scrapy.Request(url, callback=self.parse) def parse(self, response): options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080') options.add_argument("disable-gpu") options.add_argument("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36") driver = webdriver.Chrome(options=options) driver.get(response.url) driver.find_element_by_xpath('//*[@id="_search_option_btn"]').click() driver.find_element_by_xpath('//*[@id="main_pack"]/div/div[1]/div[3]/ul/li[2]/a').click() self.set_search_press(driver) temp_start_date = self.start_date temp_end_date = temp_start_date + datetime.timedelta(days=9) while True: if temp_end_date >= self.end_date: temp_end_date = self.end_date self.set_search_date(driver, temp_start_date.strftime("%Y.%m.%d"), temp_end_date.strftime("%Y.%m.%d")) try: while True: links = [element.get_attribute("href") for element in driver.find_elements_by_xpath('//*[@id="main_pack"]/div/ul/li/dl/dd/a')] for link in links: yield scrapy.Request(link, callback=self.parse_page_contents, dont_filter=True) ## 다음페이지 클릭 driver.find_element_by_css_selector('#main_pack > div > div.paging > a.next').click() ## 다음 페이지가 없으면 에러남 except: pass if temp_end_date == self.end_date: driver.quit() break temp_start_date += datetime.timedelta(days=10) temp_end_date += datetime.timedelta(days=10) def parse_page_contents(self, response): if "entertain" in response.url: title = self.content_clear(response.xpath('//*[@id="content"]/div[1]/div/h2/text()').extract()[0]) category = "TV연예" press = response.xpath('//*[@id="content"]/div[1]/div/div[1]/a/img/@alt').extract()[0] try: date = response.xpath('//*[@id="main_content"]/div[1]/div[3]/div/span/text()').extract()[0].replace("오전", "AM").replace("오후", "PM") date = datetime.datetime.strptime(date, "%Y.%m.%d. 
%p %I:%M") except: date = response.xpath('//*[@id="content"]/div[1]/div/div[2]/span/em/text()').extract()[0].replace("오전", "AM").replace("오후", "PM") date = datetime.datetime.strptime(date, "%Y.%m.%d. %p %I:%M") content = [cont.strip() for cont in response.xpath('//*[@id="articeBody"]/text()').extract()] content = self.content_clear(" ".join(content)) photo_url = response.xpath('//*[@class="end_photo_org"]/img/@src').extract() else: title = self.content_clear(response.xpath('//*[@id="articleTitle"]/text()').extract()[0]) try: category = response.xpath('//*[@id="articleBody"]/div[2]/a/em/text()').extract()[0] except: category = "-" press = response.xpath('//*[@id="main_content"]/div[1]/div[1]/a/img/@title').extract()[0] try: date = response.xpath('//*[@id="main_content"]/div[1]/div[3]/div/span/text()').extract()[0].replace("오전", "AM").replace("오후", "PM") date = datetime.datetime.strptime(date, "%Y.%m.%d. %p %I:%M") except: date = response.xpath('//*[@id="main_content"]/div[1]/div[3]/div/span[2]/text()').extract()[0].replace("오전", "AM").replace("오후", "PM") date = datetime.datetime.strptime(date, "%Y.%m.%d. 
%p %I:%M") content = [cont.strip() for cont in response.xpath('//*[@id="articleBodyContents"]/text()').extract()] content = self.content_clear(" ".join(content)) photo_url = response.xpath('//*[@class="end_photo_org"]/img/@src').extract() item = NaverArticleItem() item["title"] = title item["link"] = response.url item["category"] = category item["press"] = press item["date"] = date item["content"] = content item["photo_url"] = photo_url yield item # - # #### robots.txt 설정 무시 # !sed -i .bak 's/ROBOTSTXT_OBEY = True/ROBOTSTXT_OBEY = False/' naver_article/naver_article/settings.py # #### 실행 파일 제작 # %%writefile run.sh # cd naver_article/ scrapy crawl NaverArticle -o naver_article.csv -a keyword="자살" -a start_date="2008.01.01" -a end_date="2014.12.31" # #### 실행 # !source run.sh df = pd.read_csv("naver_article/naver_article.csv") df.tail() ## 중복 기사 제거 print("중복 기사 제거 이전: {}개".format(len(df))) df = df.drop_duplicates(['content'], keep='first') print("중복 기사 제거 이후: {}개".format(len(df))) 1
Naver_Article_Scrapy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # + import pandas as pd from IPython.display import display, HTML pd.set_option('display.max_rows', None) from toy_recommend import Toy_Recommend r = Toy_Recommend() # - def display_recs(recs): for artist in recs: print('5 Most Similar and Novel Artists to ' + recs[artist]['name']) display(HTML(recs[artist]['recs'].to_html())) # Bach display_recs(r.get_recs(['5aIqB5nVVvmFsvSdExz408'])) # Drake, Bad Bunny, The Weeknd display_recs(r.get_recs(['3TVXtAsR1Inumwj472S9r4', '4q3ewBCX7sLwd24euuV69X', '1Xyo4u8uXC1ZmMpatF05PJ'])) # <NAME>, <NAME>, BTS display_recs(r.get_recs(['1uNFoZAHBGtllmzznpCI3s', '06HL4z0CvFAxyc27GXpf02', '3Nrfpe0tUJi4K4DXYWgMUX'])) # System Of A Down, Nirvana, Pink Floyd display_recs(r.get_recs(['5eAWCfyUhZtHHtBdNk56l1', '6olE6TJLqED3rqDCT0FyPh', '0k17h0D3J5VfsdmQ1iZtE9'])) # 2Pac, The Notorious B.I.G., Dr. Dre display_recs(r.get_recs(['1ZwdS5xdxEREPySFridCfh', '5me0Irg2ANcsgc93uaYrpb', '6DPYiyq5kWVQS4RGwxzPC7']))
toy_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''venv'': venv)' # metadata: # interpreter: # hash: 09fe6ddd8fa8aae826e541431acd955c570f97d9955535e4d811cff13e7bd1de # name: python3 # --- # # Test SecondOrderElec.plot # Setup import numpy as np try: from SecondOrderElec.plot import plot_bode, plot_pzmap, plot_time from SecondOrderElec.core import BP except ImportError: import sys sys.path.append('../.') from SecondOrderElec.plot import plot_bode, plot_pzmap, plot_time from SecondOrderElec.core import BP # ### Let's plot time related things ! # --- # setup x = np.linspace(-5,5,100) fx = np.sin(x) + np.cos(x*50)*0.3 + np.cos(x*200)*0.2 # noisy sinusoidal curve # let's plot plot_time(x,fx) # ### Let's plot some frequency response ! # --- w = np.logspace(0.1,5,10000) filter_instance = BP(1,0.5,4000) # Bandpass filter with Tm=1, m=0.5, w0=4000 rad/s t,s = filter_instance.freqresp(w) plot_bode(t,s) # ### Let's plot somes poles and zeros # --- # + poles, zeros = np.array([complex(0.5,1), complex(2,1.5), complex(0.2,-12), complex(12.2,-4), complex(7,-2)]),np.array([complex(0.9,5)]) plot_pzmap(poles, zeros)
exemples/plot_tools_exemples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Gaussian mixture model random sample generation <br> # Reference : <br> # https://datachemeng.com/post-3315/ # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # - # Load dataset df = pd.read_csv('glass.csv') df.head() df = df.drop(df.columns[[0,1,10]], axis=1) print(df.shape) df.head() # + # See the distributions of variables start = df.columns.get_loc('Na') # column of 'cylinders' end = df.columns.get_loc('Fe') + 1 # column of 'acceleration' fig = plt.figure(figsize=(10, 4)) sns.boxplot(data=df.iloc[:, start:end]) plt.xlabel('variables') plt.ylabel('values') plt.show() # - # Remove outliers if necessary df = df[df['K'] < 5.0] df = df[df['Ca'] < 10.0] print(df.shape) # ### Standardization # + # standardize df df_std = (df - df.mean(axis=0)) / df.std(axis=0) print(df_std.head()) plt.figure(figsize=(4,2)) plt.hist(df_std.iloc[:, 2]) # see INDUS just for example plt.show() # + # Just another method from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(df) scaler.transform(df) df_std2 = pd.DataFrame(scaler.transform(df), columns=df.columns) print(df_std2.head()) plt.figure(figsize=(4,2)) plt.hist(df_std2.iloc[:, 2]) plt.show() # - # ### Bayesian information criterion (BIC) # Check BIC to choose the best covariavce type max_number_of_components = 10 covariance_types = ['full', 'diag', 'tied', 'spherical'] bic_values = np.empty([max_number_of_components, len(covariance_types)]) bic_values.shape # + from sklearn.mixture import GaussianMixture # Grid search using BIC for covariance_type_index, covariance_type in enumerate(covariance_types): for number_of_components in range(max_number_of_components): gmm_model = GaussianMixture(n_components = 
number_of_components + 1, covariance_type = covariance_type) gmm_model.fit(df_std) bic_values[number_of_components, covariance_type_index] = gmm_model.bic(df_std) # + # Plot plt.rcParams["font.size"] = 12 plt.figure() plt.plot(bic_values[:, 0], 'b-', label='full') plt.plot(bic_values[:, 1], 'g-', label='diag') plt.plot(bic_values[:, 2], 'k-', label='tied') plt.plot(bic_values[:, 3], 'r-', label='spherical') plt.xlabel('Number of components') plt.ylabel('BIC values') plt.legend(bbox_to_anchor=(0.88, 0.9, 0.5, 0.001), borderaxespad=0., ) plt.show() # + # Optimal parameters optimal_index = np.where(bic_values == bic_values.min()) optimal_number_of_components = optimal_index[0][0] + 1 optimal_covariance_type = covariance_types[optimal_index[1][0]] print(optimal_index) print(optimal_number_of_components) print(optimal_covariance_type) # - # GMM gmm = GaussianMixture(n_components = optimal_number_of_components, covariance_type = optimal_covariance_type) gmm.fit(df_std) # + # Mean and covariance means = gmm.means_ if gmm.covariance_type == 'full': all_covariances = gmm.covariances_ elif gmm.covariance_type == 'diag': all_covariances = np.empty( [gmm.n_components, gmm.covariances_.shape[1], gmm.covariances_.shape[1]]) for component_number in range(gmm.n_components): all_covariances[component_number, :, :] = np.diag(gmm.covariances_[component_number, :]) elif gmm.covariance_type == 'tied': all_covariances = np.tile(gmm.covariances_, (gmm.n_components, 1, 1)) elif gmm.covariance_type == 'spherical': all_covariances = np.empty([gmm.n_components, gmm.means_.shape[1], gmm.means_.shape[1]]) for component_number in range(gmm.n_components): all_covariances[component_number, :, :] = np.diag( gmm.covariances_[component_number] * np.ones(gmm.means_.shape[1])) # - # Input the size of random variables N = input('Input number of random variables for simulation : ') # simulation sample size like 10000 number_of_samples_generated = int(N) # + # Sample generation all_samples_generated = 
np.zeros([0, df.shape[1]]) for component in range(gmm.n_components): generated_samples = np.random.multivariate_normal(means[component, :], all_covariances[component, :, :], \ int(np.ceil(number_of_samples_generated * gmm.weights_[component]))) all_samples_generated = np.r_[all_samples_generated, generated_samples] all_samples_generated = pd.DataFrame(all_samples_generated, columns=df.columns) all_samples_generated = all_samples_generated * df.std(axis=0, ddof=1) + df.mean(axis=0) # - all_samples_generated.head() # Save all_samples_generated.to_csv('generated_samples_GMM.csv', index = None) # + # Draw scatter plots of samples generated for reverse analysis horz = 5 # vertical number of graph vert = 5 # horizontal number of graph graph_num = horz * vert # maximum number of graphs axes = list() print('Samples generated for reverse analysis') fig = plt.figure(figsize=(15, 15)) for i in range(0, end-start): axes.append(fig.add_subplot(vert, horz, i+1)) for j in range(start, end) : x_sample = all_samples_generated.iloc[:, start] y_sample = all_samples_generated.iloc[:, i] x_train = df.iloc[:, start] y_train = df.iloc[:, i] axes[i].scatter(x_sample, y_sample, c='g', marker = '.', alpha = 0.1) axes[i].scatter(x_train, y_train, c='red', marker = 'x', alpha = 0.2) axes[i].set_xlabel(df.columns[start], size = 12) axes[i].set_ylabel(df.columns[i], size = 12) plt.subplots_adjust(wspace=0.5, hspace=0.4) plt.show() # - # Check the total amount of elements ttl = all_samples_generated.sum(axis = 1) plt.hist(ttl, 50, alpha = 0.5) plt.show() # Check values of generated samples ==> negative velues for elements are found df_recall = pd.read_csv('generated_samples_GMM.csv') print(df_recall.shape) df_recall.head() fig = plt.figure(figsize=(10, 4)) sns.boxplot(data=df_recall) plt.hlines(y=0.0, xmin=-0.5, xmax=7.5, colors='r', linewidths=2, linestyles='dashed') plt.xlabel('element') plt.ylabel('content (%)') plt.show() # Remove negative values df_recall = df_recall[df_recall > 0.0] 
print(df_recall.shape) df_recall.head() # Negative values are small so let's use zero instead df_recall.fillna(0, inplace=True) print(df_recall.shape) df_recall.head() # See the distrubution fig = plt.figure(figsize=(10, 4)) sns.boxplot(data=df_recall) plt.hlines(y=0.0, xmin=-0.5, xmax=7.5, colors='r', linewidths=2, linestyles='dashed') plt.xlabel('element') plt.ylabel('content (%)') plt.show() # + # Adjust total amount of elements is 100% df_adj = df_recall.apply(lambda x: 100 * x / np.sum(x), axis=1) print(df_adj.shape) df_adj.head() # + # Draw scatter plots of samples generated for reverse analysis horz = 5 # vertical number of graph vert = 5 # horizontal number of graph graph_num = horz * vert # maximum number of graphs axes = list() #print('Samples generated for reverse analysis') fig = plt.figure(figsize=(15, 15)) for i in range(0, end-start): axes.append(fig.add_subplot(vert, horz, i+1)) for j in range(start, end) : x_sample = df_adj.iloc[:, start] y_sample = df_adj.iloc[:, i] x_train = df.iloc[:, start] y_train = df.iloc[:, i] axes[i].scatter(x_sample, y_sample, c='g', marker = '.', alpha = 0.1) axes[i].scatter(x_train, y_train, c='red', marker = 'x', alpha = 0.5) axes[i].set_xlabel(df.columns[start], size = 12) axes[i].set_ylabel(df.columns[i], size = 12) plt.subplots_adjust(wspace=0.5, hspace=0.4) plt.show() # - # Sanity check adjustment result test = np.sum(df_adj, axis=1) print(test[0:2]) print(test[998:1000]) df_adj.to_csv('generated_samples_GMM_adj.csv', index = None)
[4]_Random_sample_generation_for_backward_prediction/GMM_random_sample_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dask # # Dask is a flexible library for parallel computing in Python. # # Dask is composed of two parts: # 1. Dynamic task scheduling optimized for computation. # 2. “Big Data” collections like parallel arrays, dataframes, and lists. # # ![Dask overview](https://docs.dask.org/en/latest/_images/dask-overview.svg) # Image source: Dask documentation. # # Task graphs # # Suppose we want to perform a simple computation: increment numbers 1 and 2 and add up results. # $$(1+1) + (2+1)$$ # # To make execution times more visible, assume that every operation takes a second. # + from time import sleep def inc(x): sleep(1) return x + 1 def add(x, y): sleep(1) return x + y # - # Sequential execution of the 3 operations will take 3 seconds. # %%time x = inc(1) y = inc(2) z = add(x, y) z # Notice that first two operations (`inc(1)` and `inc(2)`) are independent and can be executed in parallel, while the last operation depends on the results of the first two. # # If we wrap our funcitons in `dask.delayed` and then call them, instead of computation we will get a `Delayed` object, which is **task graph**. Execution is instant, because no actual computation is performed at this stage. # + from dask import delayed delayed_inc = delayed(inc) delayed_add = delayed(add) # - # %%time x = delayed_inc(1) y = delayed_inc(2) z = delayed_add(x, y) z z.visualize() # We call `Delayed.compute()` method to execute the task graph and obtain result. How long do you think it would take in each case? # # Notice the time overhead with processes compared to threads. 
# %%time z.compute(scheduler='synchronous') # %%time z.compute(scheduler='threads', num_workers=2) # %%time z.compute(scheduler='processes', num_workers=2) # ## Download and unzip census tracts shapefiles # # `dask.delayed` can be used as *decorator*. This is just a more compact way to write wrapping code. # # ```python # @delayed # def fun(): # return 1 # ``` # is equivalent to # ```python # def fun(): # return 1 # fun = delayed(fun) # ``` # # Here we repeat the process of downloading and unzipping census state tracts, but this time we delegate parallelization and scheduling to dask. # + from dask import compute, delayed from tools import download_file, unzip, tracts_state_00_aa import threading @delayed def download_state_tracts(state_code): print(threading.current_thread().name, 'start', state_code) url = f'https://www2.census.gov/geo/tiger/GENZ2019/shp/cb_2019_{state_code}_tract_500k.zip' f = download_file(url, f'data/tracts/{state_code}', overwrite=True, verbose=False) return state_code, f @delayed def unzip_state_tracts(state_code_and_filepath): state_code, filepath = state_code_and_filepath unzip(filepath, f'data/tracts/{state_code}', overwrite=True, verbose=False) # - tasks = [] # short list for demonstration state_codes = list(tracts_state_00_aa.keys())[:10] for sc in state_codes: code_and_path = download_state_tracts(sc) z = unzip_state_tracts(code_and_path) tasks.append(z) # %%time _ = compute(tasks, scheduler='synchronous') # %%time _ = compute(tasks, scheduler='threads', num_workers=10) # # Dask arrays # # Have the same interface as NumPy arrays, but are internally represented as multiple chunks. Computations on chunks are turned into a task graph and performed in parallel where possible. # # NumPy is already using very performant low level code for linear algebra, but all data needs to be in memory. Dask relaxes this constraint. # # ![Dask array](https://docs.dask.org/en/latest/_images/dask-array-black-text.svg) # Image source: Dask documentation. 
# ## Example: linear regression estimation # # Suppose we want to estimate a linear regression model using ordinary least squares ([wikipedia](https://en.wikipedia.org/wiki/Ordinary_least_squares)). # # The model is given by the following eqation. # $$y = \beta_1 x_1 + \beta_2 x_2 + ... + \beta_k x_k + e$$ # # In matrix notation: # $$y = X \beta + e$$ # where $y$ and $e$ are vectors of length $n$, $X$ is $n$-by-$k$ matrix, and $\beta$ is vector of length $k$. # # OLS estimates $\hat{\beta}$ can be calculated as # $$\hat{\beta} = (X'X)^{-1}X'y$$ # # First let's generate some random data with $\beta = [1, 2, ..., k]$ and store it to disk. # + from pathlib import Path import numpy as np import dask.array as da np.random.seed(0) def generate_data(n, k): b = np.arange(1, k+1) x = np.random.rand(n, k) y = x.dot(b) + np.random.rand(n) Path('data/arr').mkdir(parents=True, exist_ok=True) # single files for numpy np.save('data/arr/x.npy', x) np.save('data/arr/y.npy', y) # directories for dask arrays da.to_npy_stack('data/arr/x/', da.from_array(x)) da.to_npy_stack('data/arr/y/', da.from_array(y)) generate_data(1_000_000, 50) # - # Estimate OLS with NumPy. # + import numpy as np def ols_numpy(): x = np.load('data/arr/x.npy') y = np.load('data/arr/y.npy') xpxi = np.linalg.inv(x.T.dot(x)) xpy = x.T.dot(y) b_hat = xpxi.dot(xpy) return b_hat # - # %%time bh = ols_numpy() bh[:5] # Delayed version is created if we replace `numpy.array` with `dask.array`. # + import dask.array as da def ols_dask(): x = da.from_npy_stack('data/arr/x') y = da.from_npy_stack('data/arr/y') xpxi = da.linalg.inv(x.T.dot(x)) xpy = x.T.dot(y) b_hat = xpxi.dot(xpy) return b_hat tasks = ols_dask() tasks # - # Under the hood, the task graph is rather complicated. tasks.visualize('tasks.pdf') # %%time bh = tasks.compute() bh[:5] # Try changing sizes of input arrays, schedulers and number of workers. # # Dask dataframes # # Similarly to arrays, dask dataframes are chunked for parallel processing. 
# ## Example revisited: employment by year # # We can write code as if we worked with the whole data in memory. # + import dask.dataframe as dd from tools import ResourceMonitor from time import sleep df = dd.read_csv('data/synig/*.csv', usecols=['YEAR', 'EMPLOYEES']) res = df.groupby('YEAR')['EMPLOYEES'].agg(['size', 'sum', 'mean']) # - res.visualize('tasks.pdf') mon = ResourceMonitor(interval=0.3) mon.start() sleep(0.5) r = res.compute() display(r) sleep(0.5) mon.stop() mon.plot() # ## Example revisited: size vs age # # Again, the code is simple. But this may take a while on limited hardware. # + import dask.dataframe as dd from tools import ResourceMonitor from time import sleep df = dd.read_csv('data/synig/*.csv', usecols=['YEAR', 'ABI', 'EMPLOYEES']) fy = df.groupby('ABI')['YEAR'].min().to_frame('FIRST_YEAR') df = df.merge(fy, 'left', 'ABI') df['AGE'] = df['YEAR'] - df['FIRST_YEAR'] res = df.groupby('AGE')['EMPLOYEES'].mean() # - res.visualize('tasks.pdf') mon = ResourceMonitor() mon.start() sleep(0.5) r = res.compute() display(r) sleep(0.5) mon.stop() mon.plot() # # Trade-offs # # | | in memory | split-apply-combine | dask | # |-----------------|:--------:|:--------:|:--------:| # | Code complexity | low | medium/high | low-high | # | Running time | fast | medium | medium | # | Memory usage | high | varies | varies | # # # Dask is a powerful tool that is easy to start with, because it mimics interface of `numpy` and `pandas`. But here are some things to keep in mind. # - You will need to learn how Dask works to approach more difficult computations. # - Some operations in Dask are slow. Some are not available (eg. `df.sort_values()` remains unimplemented for [5 years](https://github.com/dask/dask/issues/958)). Some work slightly differently than in pandas (`df.drop_duplicates(keep='first')`). # - Debugging of distributed algorithms is more difficult. # - Time and memory usage will depend on number of threads, processes, worker memory limits and chunk sizes. 
These may need to be tuned to your hardware. # # Learn more # # [Dask homepage](https://dask.org/) # [Docs](https://docs.dask.org/en/latest/) # An excellent interactive tutorial from Dask creators: # [How to learn Dask in 2021](https://coiled.io/blog/how-to-learn-dask-in-2021)
dask.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.4 64-bit
#     name: python36464bitc2077ed07ea84d23aa5b518d224882ab
# ---

# ### Importante:
#
# El primer paso para poder responder a la pregunta:
#
# ¿Cuánto de buenos son los resultados de las métricas de tu modelo? (mae,rmse,...)
#
# Necesitas tener unas métricas con las que poder compararlas. Para ello, debes entrenar el modelo más sencillo (regresión/clasificación) para poder hacerlo. Este modelo se denomina "baseline". Con las métricas de este modelo ya puedes realizar una comparación y saber si el siguiente modelo da mejores o peores resultados.
#

# +
import numpy as np
import sklearn.metrics as metrics
# Linear Regression serves as the baseline model
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import seaborn as sns


def regression_results(y_true, y_pred):
    """Print a summary of common regression metrics.

    Parameters
    ----------
    y_true : array-like
        Ground-truth target values.
    y_pred : array-like
        Predicted target values, same shape as ``y_true``.
    """
    explained_variance = metrics.explained_variance_score(y_true, y_pred)
    mean_absolute_error = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    # RMSLE is only defined for non-negative values; it is typically used when
    # the target has been log-transformed (because its raw values are very large).
    if (y_true >= 0).all() and (y_pred >= 0).all():
        mean_squared_log_error = metrics.mean_squared_log_error(y_true, y_pred)
        print('mean_squared_log_error: ', round(mean_squared_log_error, 4))
    median_absolute_error = metrics.median_absolute_error(y_true, y_pred)
    r2 = metrics.r2_score(y_true, y_pred)
    # The closer explained variance is to 1, the more of the target's
    # variability the model has captured.
    print('explained_variance: ', round(explained_variance, 4))
    print('r2: ', round(r2, 4))
    print('MAE: ', round(mean_absolute_error, 4))
    # bug fix: median_absolute_error was computed but never reported
    print('median_absolute_error: ', round(median_absolute_error, 4))
    print('MSE: ', round(mse, 4))
    print('RMSE: ', round(np.sqrt(mse), 4))
# -

# Fix the seed so the synthetic data (and therefore the metrics) are reproducible.
np.random.seed(42)

X = 4 * np.random.rand(1000, 1)  # already shaped (1000, 1); no reshape needed
y = np.random.normal(2 * X + 2, 20)

plt.scatter(X, y, color='b')

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

plt.scatter(X_train, y_train, color='b')
# -

linear_model = LinearRegression()
linear_model.fit(X_train, y_train)

y_pred = linear_model.predict(X)
y_pred_train = linear_model.predict(X_train)
y_pred_test = linear_model.predict(X_test)

regression_results(y_true=y_train, y_pred=y_pred_train)

# + tags=[]
regression_results(y_true=y_test, y_pred=y_pred_test)
# If r2 is lower in one model than in another, its error metrics should be worse as well.
# -

# r2 ranges from -inf to 1: a negative value means the model is worse than
# predicting the mean, while values between 0 and 1 measure the model's fit.

linear_model.score(X, y)  # the highest score marks the best model among several candidates

# `sns.distplot` is deprecated (and removed in seaborn >= 0.14);
# `sns.histplot` is the supported replacement for these residual histograms.
sns.histplot(y_test - y_pred_test, bins=50, edgecolor="black", linewidth=1, color='blue')

sns.histplot(y_train - y_pred_train, bins=50, edgecolor="black", linewidth=1, color='blue')

sns.histplot(y - y_pred, bins=50, edgecolor="black", linewidth=1, color='blue')
week9_ML_svm_poly_norm/day3_cicd_encoding_metrics_knn_svm_nlr/regression_metrics/metrics_with_linear_regression_exp2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import sys sys.path.append("../") import time from linear_classifier import softmax_loss_noloop from linear_classifier import softmax_loss_loop from utils import data_utils import numpy as np import os print os.getcwd() # + cifar10_dir = "../datasets/cifar-10-batches-py" X_train, y_train, X_test, y_test = data_utils.load_CIFAR10(cifar10_dir) # As a sanity check, we print out the size of the training and test data. print('Training data shape: ', X_train.shape) print('Training labels shape: ', y_train.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) # - num_tests = X_test.shape[0] X_dev = X_test.reshape((num_tests, -1)) y_dev = y_test W = np.random.randn(3072, 10) * 0.0001 # + tic = time.time() loss_naive, grad_naive = softmax_loss_loop(W, X_dev, y_dev, 0.000005) toc = time.time() print('naive loss: %e computed in %fs' % (loss_naive, toc - tic)) tic = time.time() loss_vectorized, grad_vectorized = softmax_loss_noloop(W, X_dev, y_dev, 0.000005) toc = time.time() print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)) # + # softmax_loss_noloop?? # + # softmax_loss_loop?? # -
lesson3_vector_speed/.ipynb_checkpoints/1.vector_speed-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="EHUwSNfz7l28" colab_type="code" outputId="89652ef4-bb39-45af-cd15-3b699aba0cf9" colab={"base_uri": "https://localhost:8080/", "height": 141} # !git clone https://github.com/cdli-gh/Unsupervised-NMT-for-Sumerian-English.git # + id="QvX5TjZy71bf" colab_type="code" outputId="9b86dda6-1995-4a33-9292-27212206152a" colab={"base_uri": "https://localhost:8080/", "height": 34} # %cd Unsupervised-NMT-for-Sumerian-English/Dataset/Cleaned_Data # + id="zDj7Ekz-Wi2Q" colab_type="code" outputId="9da0fbed-e8f0-4791-af17-f7b0d2c62a96" colab={"base_uri": "https://localhost:8080/", "height": 102} # !pip install tokenizers # + id="Sk7UJ6NrX-DZ" colab_type="code" colab={} import torch # + [markdown] id="J0KBTsRmNpzU" colab_type="text" # ## CLTK # + id="W12-j4ydNr43" colab_type="code" colab={} # !pip install cltk # + id="d4wUe9n-Nr2z" colab_type="code" colab={} import os from cltk.tokenize.word import WordTokenizer from cltk.tokenize.line import LineTokenizer # + id="arMq76jhW0-U" colab_type="code" colab={} line_tokenizer = LineTokenizer('akkadian') # + id="FM0AlCb3XCVF" colab_type="code" colab={} with open('sumerian_pll.txt') as f: lines = f.read() # + id="m6pCdKroUSuM" colab_type="code" colab={} lines = line_tokenizer.tokenize(lines) # + id="5upzl_5xOil5" colab_type="code" colab={} word_tokenizer = WordTokenizer('akkadian') # + id="GuVZQ01kOij6" colab_type="code" outputId="2f5ab693-ca34-4304-db40-f27f2dae3562" colab={"base_uri": "https://localhost:8080/", "height": 550} for text in lines[70:100]: print(f'Original: {text}, Tokenized: {word_tokenizer.tokenize(text)}') # + id="-tPpBk1rXk4v" colab_type="code" colab={} # + [markdown] id="dFtarbuzW-NL" colab_type="text" # ## BBPE # + id="f0S12jAOWwR1" colab_type="code" outputId="7f10c925-2b23-45c3-97eb-2e489695e648" colab={"base_uri": 
"https://localhost:8080/", "height": 34} from tokenizers import ByteLevelBPETokenizer tokeniser_ep = ByteLevelBPETokenizer() tokeniser_ep.train(['english_pll.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) tokeniser_sp = ByteLevelBPETokenizer() tokeniser_sp.train(['sumerian_pll.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) vocab_size_eng = tokeniser_ep.get_vocab_size() vocab_size_sum = tokeniser_sp.get_vocab_size() print(vocab_size_eng, vocab_size_sum) # + id="kLO9TooOXyPq" colab_type="code" outputId="49e83213-946f-4aa3-d04c-d8acd0d8d83a" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sp.decode([vocab_size_sum-1]) # + id="FuH-nX1wX6_G" colab_type="code" outputId="7f0d4860-e110-49c9-bf96-ccec92d8f5c0" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_ep.decode([vocab_size_eng-1]) # + id="9RdF2abjBKr7" colab_type="code" colab={} vocab_sp = tokeniser_sp.get_vocab() vocab_ep = tokeniser_ep.get_vocab() # + id="gKrckIUsBNtL" colab_type="code" outputId="384abe68-4e87-42e1-b2da-c0593b9b101d" colab={"base_uri": "https://localhost:8080/", "height": 51} print(len(list(vocab_sp.keys()))) print(len(list(vocab_ep.keys()))) # + id="fCXHZpu4YSIN" colab_type="code" outputId="dc8e445a-ce59-45df-a934-39d9753bc1a1" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm = ByteLevelBPETokenizer() tokeniser_sm.train(['sumerian_mono.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) vocab_size_sm = tokeniser_sm.get_vocab_size() print(vocab_size_sm) # + id="0T178Jz_YodP" colab_type="code" outputId="7b5cfd96-3d89-41f6-f02e-cc5f5648bce4" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm.decode([vocab_size_sm-1]) # + id="vMaD7wVIAQ1T" colab_type="code" colab={} vocab_sm = tokeniser_sm.get_vocab() # + id="bPfDpeEHBVkk" colab_type="code" outputId="ad4b6078-7bfb-400a-acb3-2ea05f8aa0e6" colab={"base_uri": "https://localhost:8080/", "height": 34} print(len(list(vocab_sm.keys()))) # + id="L3NEzUQRQDl7" 
colab_type="code" colab={} # + [markdown] id="F-vkpAknfFKN" colab_type="text" # ## BPE # + id="DOu4qYZ4fG_7" colab_type="code" outputId="e99a7a0e-5138-42e3-8a25-e5fa21e61ee6" colab={"base_uri": "https://localhost:8080/", "height": 34} from tokenizers import CharBPETokenizer tokeniser_ep_2 = CharBPETokenizer() tokeniser_ep_2.train(['english_pll.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) tokeniser_sp_2 = CharBPETokenizer() tokeniser_sp_2.train(['sumerian_pll.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) vocab_size_en_2 = tokeniser_ep_2.get_vocab_size() vocab_size_sm_2 = tokeniser_sp_2.get_vocab_size() print(vocab_size_en_2, vocab_size_sm_2) # + id="BA-UQgTkfG9Q" colab_type="code" outputId="8fc4b00d-6b62-417d-8bea-0979882bc0f9" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sp_2.decode([vocab_size_sm_2 - 1]) # + id="Xj4TNVSQfG0F" colab_type="code" outputId="128d4450-b8f0-4c16-a8af-4e801469027f" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_ep_2.decode([vocab_size_en_2 - 1]) # + id="--xJOU6LAaG9" colab_type="code" colab={} vocab_sp_2 = tokeniser_sp_2.get_vocab() vocab_ep_2 = tokeniser_ep_2.get_vocab() # + id="y-rMbtWKAt1r" colab_type="code" outputId="72baed72-86f6-4d21-ab1c-ecbf67d99b70" colab={"base_uri": "https://localhost:8080/", "height": 51} print(len(list(vocab_sp_2.keys()))) print(len(list(vocab_ep_2.keys()))) # + id="kQYER_-3958f" colab_type="code" outputId="6ebb562c-8e5f-471b-bb5c-4a44faaefa14" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm_2 = CharBPETokenizer() tokeniser_sm_2.train(['sumerian_mono.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) vocab_size_sm = tokeniser_sm_2.get_vocab_size() print(vocab_size_sm) # + id="md1F7WAO-H8g" colab_type="code" outputId="e7fb1cb6-8776-4f92-df55-dbb2606662fb" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm_2.decode([vocab_size_sm - 1]) # + id="UmcyoKunADIf" colab_type="code" colab={} vocab_sm_2 = 
tokeniser_sm_2.get_vocab() # + id="-ccnefBzAoAB" colab_type="code" outputId="fff9a0d6-b447-4e8f-e139-acfce80f7d64" colab={"base_uri": "https://localhost:8080/", "height": 34} print(len(list(vocab_sm_2.keys()))) # + id="HGlL8Uk2L2uq" colab_type="code" colab={} # + [markdown] id="jQZ6-yxJMxo3" colab_type="text" # ## BERT WordPiece # + colab_type="code" outputId="594245a0-d6e3-4399-8b41-5aa3facee456" id="EULNINE1NbR0" colab={"base_uri": "https://localhost:8080/", "height": 34} from tokenizers import BertWordPieceTokenizer tokeniser_ep_3 = BertWordPieceTokenizer() tokeniser_ep_3.train(['english_pll.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) tokeniser_sp_3 = BertWordPieceTokenizer() tokeniser_sp_3.train(['sumerian_pll.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) vocab_size_en_3 = tokeniser_ep_3.get_vocab_size() vocab_size_sm_3 = tokeniser_sp_3.get_vocab_size() print(vocab_size_en_3, vocab_size_sm_3) # + colab_type="code" outputId="44f9ebe0-c488-42d7-d7bb-87570794ebf8" id="bOi-3kfRNbSG" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sp_3.decode([vocab_size_sm_3 - 1]) # + colab_type="code" outputId="2dd9eaf1-b19c-490b-c291-98da78f3813b" id="5dcIBQDSNbSO" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_ep_3.decode([vocab_size_en_3 - 1]) # + colab_type="code" id="X1ny58J0NbST" colab={} vocab_sp_3 = tokeniser_sp_3.get_vocab() vocab_ep_3 = tokeniser_ep_3.get_vocab() # + colab_type="code" outputId="49e66eb2-6abd-4630-80a8-2a8a9b2f60c1" id="WavAPEE8NbSZ" colab={"base_uri": "https://localhost:8080/", "height": 51} print(len(list(vocab_sp_3.keys()))) print(len(list(vocab_ep_3.keys()))) # + colab_type="code" outputId="cd6a381c-f67b-418b-df57-9b81b447a7f2" id="_gfGZ-TFNbSd" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm_3 = BertWordPieceTokenizer() tokeniser_sm_3.train(['sumerian_mono.txt'], special_tokens= ['<sos>', '<eos>', '<pad>']) vocab_size_sm = tokeniser_sm_3.get_vocab_size() 
print(vocab_size_sm) # + colab_type="code" outputId="acaecd57-b496-4f76-d716-38777442dbf1" id="NSblFpyfNbSg" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm_3.decode([vocab_size_sm - 1]) # + colab_type="code" id="xM83-HOhNbSj" colab={} vocab_sm_3 = tokeniser_sm_3.get_vocab() # + colab_type="code" outputId="83b00d0e-6fcd-4240-a4e3-0bed0da30a31" id="uzbuVX8CNbSm" colab={"base_uri": "https://localhost:8080/", "height": 34} print(len(list(vocab_sm_3.keys()))) # + colab_type="code" id="ecw5DptuNbSp" colab={} # + [markdown] id="UaQ1bAmxchAf" colab_type="text" # ## White Space # + id="SYaZAok0cgLs" colab_type="code" colab={} import nltk # + id="1DTSYvLFdhNv" colab_type="code" colab={} from nltk.tokenize import WhitespaceTokenizer # + id="mGl1NxBfd9Mv" colab_type="code" colab={} with open('sumerian_pll.txt') as f: sum_text = f.read() with open('english_pll.txt') as f: eng_text = f.read() # + id="lI8H6usYdh3l" colab_type="code" colab={} tokenised_smp = WhitespaceTokenizer().tokenize(sum_text) # + id="X0HZOHTae2F7" colab_type="code" colab={} tokenised_enp = WhitespaceTokenizer().tokenize(eng_text) # + id="Ba4nDp4Ld0Wc" colab_type="code" outputId="b07976fb-d184-442b-c2d5-c711249819ad" colab={"base_uri": "https://localhost:8080/", "height": 34} len(tokenised_smp) # + id="6RNvnU6aeKCw" colab_type="code" colab={} smp_vocab = [] for word in tokenised_smp: if word not in smp_vocab: smp_vocab.append(word) # + id="1eTIiupve5VX" colab_type="code" colab={} enp_vocab = [] for word in tokenised_enp: if word not in enp_vocab: enp_vocab.append(word) # + id="b8Xb3mHmeh7P" colab_type="code" outputId="94eb16db-0c28-443b-c9b3-0e7f36e2795e" colab={"base_uri": "https://localhost:8080/", "height": 34} len(smp_vocab) # + id="Zktgl591e9ly" colab_type="code" outputId="1b0b33e2-9af4-411e-eee1-a0227cf5b543" colab={"base_uri": "https://localhost:8080/", "height": 34} len(enp_vocab) # + id="JLA93WewBduJ" colab_type="code" colab={} # + [markdown] id="0W_p_MfvBos7" 
colab_type="text" # ## Saving vocabulary # + id="2IX0wEPLBoJy" colab_type="code" outputId="ca00fc95-f8fe-49d4-b90f-f3217e50d14c" colab={"base_uri": "https://localhost:8080/", "height": 34} # %cd ../../Tokenizers/ # + id="kcHBbS6XOgQ1" colab_type="code" outputId="22088ac3-19cd-4a2e-d63d-e0cf30eea08a" colab={"base_uri": "https://localhost:8080/", "height": 68} # !mkdir BBPE # !mkdir BPE # !mkdir BertWordPiece # + id="vkqmio6UDFIY" colab_type="code" colab={} import json # + id="3SUMPOi2L50u" colab_type="code" outputId="d4ece780-d457-48b8-db5c-9b57cb913294" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sp.save('./BBPE', "sumerian_pll") tokeniser_ep.save('./BBPE', "english_pll") # + id="U24AZCGgL_FM" colab_type="code" outputId="548bc8f4-d43e-4bee-eeb6-bcdfbaabc561" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm.save('./BBPE', "sumerian_mono") # + id="3RnA6dkCL9es" colab_type="code" outputId="a27e6fb4-ab1d-498f-d4d0-605b5f8dc0be" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sp_2.save('./BPE', "sumerian_pll") tokeniser_ep_2.save('./BPE', "english_pll") # + id="YXPyGogOL3-b" colab_type="code" outputId="9baf1848-0035-4389-95ee-fe3387b75e6c" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm_2.save('./BPE', "sumerian_mono") # + id="yk0ueYb-L4aG" colab_type="code" outputId="f2ee8fee-cd88-422b-fd4d-ef828ee80c71" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sp_3.save('./BertWordPiece', "sumerian_pll") tokeniser_ep_3.save('./BertWordPiece', "english_pll") # + id="3EyOJ7EBL4zX" colab_type="code" outputId="cd71a012-b89f-4e8c-976d-105c77d6c3d5" colab={"base_uri": "https://localhost:8080/", "height": 34} tokeniser_sm_3.save('./BertWordPiece', "sumerian_mono") # + id="iec3NEbvJXYs" colab_type="code" colab={} # + id="K_IwKcgdTRhK" colab_type="code" colab={} # + [markdown] id="dyfCOgjxTTB-" colab_type="text" # ## Testing # + [markdown] id="BCNtz9rLTeQs" 
colab_type="text" # ### BBPE # + id="locUnxlIQDYT" colab_type="code" outputId="25dc0cbe-b6ed-4a64-af51-a5295c0cba88" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm.encode('lu2 ki-inim-ma-me') print(encc.ids) print(encc.tokens) # + id="Is_Ze_bAQTSX" colab_type="code" outputId="c5bf3a43-4c57-4645-dfc1-28ea52511f3d" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sp.encode('lu2 ki-inim-ma-me') print(encc.ids) print(encc.tokens) # + id="DpEp4g2tQn16" colab_type="code" outputId="4c7d9c9c-c171-42ec-b1f2-0ceb8c8a3837" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm.encode('2(asz) sze lid2-ga') print(encc.ids) print(encc.tokens) # + id="MyhAyLG0TBGE" colab_type="code" outputId="9c497fc4-cadc-4f58-9b3d-639da80f2bc8" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm.encode('3(asz) kur2 |LAGABx(HA.A)|') print(encc.ids) print(encc.tokens) # + id="ALCItsaiThUR" colab_type="code" colab={} # + [markdown] id="cWrrOUHYTkfG" colab_type="text" # ### BPE # + colab_type="code" id="s6aJ-KvCTjFX" outputId="cf1cb706-1abe-49c8-d034-bae0b85f3c5b" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm_2.encode('lu2 ki-inim-ma-me') print(encc.ids) print(encc.tokens) # + colab_type="code" id="-h14oF0wTjFu" outputId="2b0caa87-9f85-4861-852b-e6ea4a259d8e" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sp_2.encode('lu2 ki-inim-ma-me') print(encc.ids) print(encc.tokens) # + colab_type="code" id="zZ1qyym6TjF4" outputId="2a3514a5-6dc9-4d7d-c87c-e1966416edaf" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm_2.encode('2(asz) sze lid2-ga') print(encc.ids) print(encc.tokens) # + colab_type="code" id="zZNV-CdUTjF_" outputId="0bf70965-ffd6-449c-b12e-9dd5962ca2b9" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm_2.encode('3(asz) kur2 |LAGABx(HA.A)|') print(encc.ids) 
print(encc.tokens) # + id="_KjJzUv8T4XI" colab_type="code" colab={} # + [markdown] id="zWg3WvXQT5BX" colab_type="text" # ### BERT WordPiece # + colab_type="code" id="fkA7Ur8iT51j" outputId="ea799cfb-2d8e-49b8-d9ca-99d63f06d4ee" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm_3.encode('lu2 ki-inim-ma-me') print(encc.ids) print(encc.tokens) # + colab_type="code" id="lx9pJkgZT52D" outputId="749311bb-6170-44c6-bdd3-6428c9712446" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sp_3.encode('lu2 ki-inim-ma-me') print(encc.ids) print(encc.tokens) # + colab_type="code" id="GHCRQ2i6T52M" outputId="1cceaf31-c028-4e9d-859d-1751623d5efb" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm_3.encode('2(asz) sze lid2-ga') print(encc.ids) print(encc.tokens) # + colab_type="code" id="JtMgTHAQT52T" outputId="38e2194d-f6dd-495a-acca-3ac107a524d7" colab={"base_uri": "https://localhost:8080/", "height": 51} encc = tokeniser_sm_3.encode('3(asz) kur2 |LAGABx(HA.A)|') print(encc.ids) print(encc.tokens) # + id="kjjPfmL8UMGI" colab_type="code" colab={} # + [markdown] id="VV0lN03RVvsE" colab_type="text" # ## Additional # + id="oGONvYmIVxWW" colab_type="code" outputId="a70bf439-8ba2-4883-af99-b4fcf74a5b11" colab={"base_uri": "https://localhost:8080/", "height": 372} # !zip -r ../Tokenizers.zip ./Tokenizers # + id="wuzrctklWKjB" colab_type="code" colab={} from google.colab import files files.download("../Tokenizers.zip") # + id="Gge0zepsWSJR" colab_type="code" colab={}
Notebooks/DataPreperation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- BRANCH='main' # + """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell # install NeMo # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp] # + from nemo.collections import nlp as nemo_nlp from nemo.utils.exp_manager import exp_manager import os import wget import torch import pytorch_lightning as pl from omegaconf import OmegaConf # - # In the era of super large language models, the traditional "pre-train, fine-tune" procedure is replaced by "pre-train, prompt, and predict" method as shown in the [survey paper](https://arxiv.org/pdf/2107.13586.pdf). 
The prompt method is versatile enough to support all kinds of NLP tasks as shown in the following table: # # <table> # <thead> # <tr> # <th>Type</th> # <th>Task</th> # <th>Input ([X])</th> # <th>Template</th> # <th>Answer([Y])</th> # </tr> # </thead> # <tbody> # <tr> # <td rowspan=3>Text CLS</td> # <td>Sentiment</td> # <td>I love this movie.</td> # <td>[X] The movie is [Y]</td> # <td>great<br>fantastic<br>...</td> # </tr> # <tr> # <td>Topics</td> # <td>He prompted the LM.</td> # <td>[X] The text is about [Y]</td> # <td>sports<br>science<br>...</td> # </tr> # <tr> # <td>Intention</td> # <td>What is taxi fare to Denver?</td> # <td>[X] The question is about [Y]</td> # <td>quantity<br>city<br>...</td> # </tr> # <tr> # <td rowspan=1>Text-span CLS</td> # <td>Aspect<br>Sentiment</td> # <td>Poor service but good food.</td> # <td>[X] What about service? [Y]</td> # <td>Bad<br>Terrible<br>...</td> # </tr> # <tr> # <td rowspan=1>Text-pair CLS</td> # <td>NLI</td> # <td>[X1]: An old man with ...<br>[X2]: A man walks ...</td> # <td>Hypothesis: [X1], Premise: [X2], Answer: [Y]</td> # <td>Contradiction<br>Entailment<br>...</td> # </tr> # <tr> # <td rowspan=1>Tagging</td> # <td>NER</td> # <td>[X1]: Mike went to Paris.<br>[X2]: Paris</td> # <td>[X1] [X2] is a [Y]</td> # <td>Yes<br>No<br>...</td> # </tr> # <tr> # <td rowspan=2>Text Generation</td> # <td>Summarization</td> # <td>Las Vegas police ...</td> # <td>[X] TL;DR: [Y]</td> # <td>The victim ...<br>A woman ...<br>...</td> # </tr> # <tr> # <td>Translation</td> # <td>Je vous aime.</td> # <td>French [X] English: [Y]</td> # <td>I love you.<br>I fancy you.<br>...</td> # </tr> # </tbody> # </table> # # In this tutorial, we are going to describe how to use [P-Tuning method](https://arxiv.org/pdf/2103.10385.pdf) , which is one of the prompt engineering methods, to find good prompts for large GPT models. We show it can solve multiple downstream NLP tasks with good performance. 
P-Tuning leverages few continuous free parameters to serve as prompts fed as the input to the pre-trained language models. Freezing the large language model weights, P-Tuning model can be trained efficiently while delivering stats of art performance. # # Large Language Model can be trained with [NeMo Megatron](https://github.com/NVIDIA/NeMo/tree/main/examples/nlp/language_modeling), up to multi-billion parameters. In this notebook, we will use the pre-trained 344M GPT model released from NGC. # # # Task Description # P-Tuning method can be applied to solve various NLP tasks. Without losing generality, in this notebook, we are going to use P-Tuning method to solve two NLP tasks: **Sentiment Analysis** task and **Question and Answer** task. # # **Sentiment Analysis** task is also known as opinion mining or emotion AI. It is a sub-field of NLP that tries to identify and extract opinions within a given text across blogs, reviews, social media, forums, news etc. # # For instance, **given sentences from news title, is it a good or bad news?**<br> # # **Question and Answer** task is to find the answer to a question given the context text. # # For instance, # ``` # Context: # <NAME>-Carter (/biːˈjɒnseɪ/ bee-YON-say) (born September 4, 1981) is an American singer, songwriter, record producer and actress. Born and raised in Houston, Texas, she performed in various singing and dancing competitions as a child, and rose to fame in the late 1990s as lead singer of R&B girl-group Destiny\'s Child. Managed by her father, <NAME>, the group became one of the world\'s best-selling girl groups of all time. Their hiatus saw the release of Beyoncé\'s debut album, Dangerously in Love (2003), which established her as a solo artist worldwide, earned five Grammy Awards and featured the Billboard Hot 100 number-one singles "Crazy in Love" and "Baby Boy". # Question: # How many Grammy awards did Beyoncé win for her first solo album? 
# ``` # # # Dataset # We will use [Financial PhraseBank dataset](https://huggingface.co/datasets/financial_phrasebank) for sentiment analysis task and [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) for question and answer task. # # The [Financial PhraseBank dataset](https://huggingface.co/datasets/financial_phrasebank) contains the sentiments for financial news headlines from the perspective of a retail investor. Further details about the dataset can be found in: Malo, P., Sinha, A., Takala, P., Korhonen, P. and <NAME>. (2014): “Good debt or bad debt: Detecting semantic orientations in economic texts.” Journal of the American Society for Information Science and Technology. # # Here's an example of what an annotated abstract from the corpus looks like: # # ``` # <NAME> Financial - Shares in Cargotec fell sharply in early afternoon trade after the cargo handling group posted a surprise drop in April-June profits , which overshadowed the large number of new orders received during the three months .@negative # LONDON MarketWatch -- Share prices ended lower in London Monday as a rebound in bank stocks failed to offset broader weakness for the FTSE 100 .@negative # Operating profit fell to EUR 35.4 mn from EUR 68.8 mn in 2007 , including vessel sales gain of EUR 12.3 mn .@negative # Sales in Finland decreased by 10.5 % in January , while sales outside Finland dropped by 17 % .@negative # ``` # # The [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. # # Let's download the dataset. DATA_DIR = "DATA_DIR" os.makedirs(DATA_DIR, exist_ok=True) # ## Downloading Financial Phrase Bank Dataset # # The datase is collected by Malo et al. 
2014, and can be downloaded from this [link](https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip). The zip file for the Financial Phrase Bank Dataset has been provided for ease of download and use. # !wget https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip # !unzip FinancialPhraseBank-v10.zip -d {DATA_DIR} # + # If you want to see more examples, you can explore the text of the corpus using the file browser to the left, or open files directly, for example typing a command like the following in a code-cell: # ! head -1 $DATA_DIR/FinancialPhraseBank-v1.0/Sentences_50Agree.txt # - # ## Download the SQuAD dataset # # Download a copy of the dataset (distributed under the CC BY-SA 4.0 license): # !wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json # !mv train-v2.0.json {DATA_DIR} # ## Pre-process Financial Phrase Bank Dataset # # In this pre-process step, we are going to convert the downloaded dataset into the format that can be used for P-Tuning dataloader. The data is split into 10 folds so we can do 10-fold cross validation. In this notebook, we will use the first fold. 
# +
import json
import random

random.seed(1234)

files = ['Sentences_50Agree.txt', 'Sentences_66Agree.txt', 'Sentences_75Agree.txt', 'Sentences_AllAgree.txt']
base_dir = DATA_DIR + '/FinancialPhraseBank-v1.0/'
files = [base_dir + f for f in files]

alllines = []
for fn in files:
    # The Financial PhraseBank files are Latin-1 encoded, not UTF-8.
    with open(fn, 'r', encoding="ISO-8859-1") as f:
        alllines.extend(f.readlines())
random.shuffle(alllines)

fold = 10
fold_size = len(alllines) // fold
# Derive the fold boundaries from the actual corpus size instead of the
# previously hard-coded range(0, 14780, 1478), which silently broke whenever
# the number of input lines changed.
chunks = [alllines[i * fold_size:(i + 1) * fold_size] for i in range(fold)]


def gen_file(data, fold_id, split_type):
    """Write one split ('train'/'validation'/'test') of one fold as JSON lines.

    Each output line is a JSON object with keys "sentence", "label" and
    "prompt_tag", the format expected by the P-Tuning dataloader.
    """
    filename = "{}/{}_{}.txt".format(base_dir, split_type, fold_id)
    with open(filename, 'w') as f:
        for line in data:
            # The label follows the last '@'; rsplit protects against '@'
            # characters occurring inside the sentence itself.
            sentence, label = line.rsplit('@', 1)
            obj = {
                'sentence': sentence.strip(),
                'label': label.strip(),
                'prompt_tag': 'sentiment-task',
            }
            f.write(json.dumps(obj) + '\n')


def gen_fold(fold_number):
    """Generate the train/validation/test files for one cross-validation fold."""
    test_id = fold_number % fold
    val_id = (fold_number + fold - 1) % fold
    gen_file(chunks[test_id], fold_number, 'test')
    gen_file(chunks[val_id], fold_number, 'validation')
    # All remaining chunks form the training set.
    train_set = []
    for idd in range(fold):
        if idd not in (test_id, val_id):
            train_set += chunks[idd]
    gen_file(train_set, fold_number, 'train')


gen_fold(0)
# -

# The data is converted to a JSON-lines file. Each line has three keys: "sentence", "label" and "prompt_tag".
# Here are the first two lines of converted data:

# !head -n 2 $DATA_DIR/FinancialPhraseBank-v1.0/train_0.txt

# ### Preprocess SQuAD Dataset
#

# +
file_name = DATA_DIR + '/train-v2.0.json'
with open(file_name, 'r') as f:
    data_obj = json.load(f)
articles = data_obj['data']

# hold out 40 articles each for validation and test; the rest are training data
test_len = 40
validation_len = 40
train_len = len(articles) - test_len - validation_len

train_records = []
validation_records = []
test_records = []

def get_records(sub_articles, records):
    """Flatten a list of SQuAD articles into P-Tuning JSON record strings.

    Each question/answer pair becomes one JSON string with keys "question",
    "context", "label" and "prompt_tag". Unanswerable questions get the
    label 'NA'; answerable ones use the first annotated answer text.
    Records are appended to the ``records`` list in place.
    """
    for article in sub_articles:
        paragraphs = article['paragraphs']
        for paragraph in paragraphs:
            qas = paragraph['qas']
            context = paragraph['context'].strip()
            for qa in qas:
                record = {}
                record['question'] = qa['question'].strip()
                record['context'] = context
                if qa['is_impossible']:
                    record['label'] = 'NA'
                else:
                    record['label'] = qa['answers'][0]['text'].strip()
                record['prompt_tag'] = 'qa-task'
                records.append(json.dumps(record))

get_records(articles[:train_len], train_records)
get_records(articles[train_len:train_len + validation_len], validation_records)
get_records(articles[train_len + validation_len:], test_records)

random.shuffle(train_records)
random.shuffle(validation_records)
random.shuffle(test_records)

# bug fix: the output directory must be built from the DATA_DIR variable;
# the previous literal string "DATA_DIR/squad" created a folder literally
# named "DATA_DIR" in the working directory
squad_dir = DATA_DIR + "/squad"
os.makedirs(squad_dir, exist_ok=True)

with open(squad_dir + '/train.txt', 'w') as f:
    f.write("\n".join(train_records))

with open(squad_dir + '/validation.txt', 'w') as f:
    f.write("\n".join(validation_records))

with open(squad_dir + '/test.txt', 'w') as f:
    f.write("\n".join(test_records))
# -

# The data is converted to a JSON lines file. Each line has four keys: "question", "context", "label" and "prompt_tag".

# Here are the first two lines of converted data:

# !head -n 2 {squad_dir}/train.txt

# ### Combine the two datasets
#
# The P-tune model includes a prompt encoder which is used to generate virtual tokens. Its output can be conditioned on the task tags so the P-tune model supports multiple tasks simultaneously.
# We are going to mix the Financial Phrase Bank dataset and the SQuAD dataset together.

mix_data_dir = f"{DATA_DIR}/mix"
os.makedirs(mix_data_dir, exist_ok=True)

# !cat $DATA_DIR/FinancialPhraseBank-v1.0/train_0.txt {squad_dir}/train.txt | shuf > {mix_data_dir}/train.txt

# !cat $DATA_DIR/FinancialPhraseBank-v1.0/validation_0.txt {squad_dir}/validation.txt | shuf > {mix_data_dir}/validation.txt

# !cat $DATA_DIR/FinancialPhraseBank-v1.0/test_0.txt {squad_dir}/test.txt | shuf > {mix_data_dir}/test.txt

# Here are the first two lines of converted data:

# !head -n 2 {mix_data_dir}/train.txt

# ## Convert the Megatron-LM Weights to Nemo file
#
# The P-Tuning method works best with large GPT language models. From our experience, models of size 5B or above give good performance. If you already have a large GPT model ready, skip this section.
#
# In this example, we will use the pretrained 344M NeMo Megatron GPT model from [Megatron-LM project](https://github.com/NVIDIA/Megatron-LM). To load it in NeMo Megatron, we first need to convert the Megatron-LM checkpoint to the `.nemo` file. Let's download the pretrained model weights and vocabulary file.
# # # + import pathlib gpt_file = 'megatron_lm_345m_v0.0.zip' vocab_file = 'gpt2-vocab.json' merge_file = 'gpt2-merge.txt' checkpoint_filename = 'model_optim_rng.pt' if not pathlib.Path(gpt_file).exists(): # !wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O $gpt_file # !unzip -f $gpt_file # !wget https://s3.amazonaws.com/models.huggingface.co/bert/$vocab_file -O $vocab_file # !wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt -O $merge_file # + WORK_DIR = "WORK_DIR" os.makedirs(WORK_DIR, exist_ok=True) # Prepare the model parameters # download the model's configuration file config_dir = WORK_DIR + '/configs/' MODEL_CONFIG = "megatron_gpt_config.yaml" os.makedirs(config_dir, exist_ok=True) if not os.path.exists(config_dir + MODEL_CONFIG): print('Downloading config file...') wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/language_modeling/conf/' + MODEL_CONFIG, config_dir) else: print ('config file is already exists') # - # this line will print the entire config of the model config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}' print(config_path) config = OmegaConf.load(config_path) config.model.num_layers = 24 config.model.hidden_size = 1024 config.model.ffn_hidden_size = 4096 config.model.num_attention_heads = 16 config.model.tokenizer.vocab_file = vocab_file config.model.tokenizer.merge_file = merge_file config.model.tensor_model_parallel_size = 1 config.model.data.data_prefix = '' config.model.max_position_embeddings = 1024 config.model.data.seq_length = 1024 config.model.encoder_seq_length = 1024 config.cfg = {} config.cfg.cfg = config.model with open('hparams.yaml', 'w') as f: f.write(OmegaConf.to_yaml(config.cfg)) import os PWD = os.getcwd() wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py') # !python -m torch.distributed.run --nproc_per_node=1 
megatron_lm_ckpt_to_nemo.py --checkpoint_folder=$PWD/release/mp_rank_00/ --checkpoint_name=$checkpoint_filename --hparams_file=$PWD/hparams.yaml --nemo_file_path=$PWD/gpt_344m.nemo --model_type=gpt --tensor_model_parallel_size=1 # # Model configuration # # Our P-Tuning text classification model is comprised of the pretrained GPT LM model followed by a prompt encoder layer. # # The model is defined in a config file which declares multiple important sections. They are: # - **model**: All arguments that are related to the Model - language model, token classifier, optimizer and schedulers, datasets and any other related information # # - **trainer**: Any argument to be passed to PyTorch Lightning MODEL_CONFIG = "megatron_ptune_gpt.yaml" # download the model's configuration file config_dir = WORK_DIR + '/configs/' os.makedirs(config_dir, exist_ok=True) if not os.path.exists(config_dir + MODEL_CONFIG): print('Downloading config file...') wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/language_modeling/conf/' + MODEL_CONFIG, config_dir) else: print ('config file is already exists') # this line will print the entire config of the model config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}' print(config_path) config = OmegaConf.load(config_path) # Note: these are small batch-sizes - increase as appropriate to available GPU capacity config.model.data.train_ds.batch_size=8 config.model.data.validation_ds.batch_size=8 # # Model Training # ## Setting up Data within the config # # Among other things, the config file contains dictionaries called train_ds, validation_ds and test_ds. These are configurations used to setup the Dataset and DataLoaders of the corresponding config. 
# # + # in this tutorial train and dev datasets are located in the same folder, so it is enough to add the path of the data directory to the config config.model.data.train_ds.file_path = DATA_DIR+'/mix/train.txt' config.model.data.validation_ds.file_path = DATA_DIR+'/mix/validation.txt' config.model.data.test_ds.file_path = DATA_DIR+'/mix/test.txt' # if you want to decrease the size of your datasets, uncomment the lines below: # NUM_SAMPLES = 1000 # config.model.data.train_ds.num_samples = NUM_SAMPLES # config.model.data.validation_ds.num_samples = NUM_SAMPLES # - # ## Add the Data Processors to Generate the Prompts # # To customize different prompts for different tasks, we can configure the `TemplateProcessor` to define the template for the prompt input. The curley brackets defines the variables in the templte string. `{v0}`, `{v1}`, `{v2}` indicates the virtual token of length `prompt_encoder.template[0]`, `prompt_encoder.template[1]` and `prompot_encoder.template[2]`. The other variables `{var}` refers to the variables in the data record. For example. # # Given the data record, **{"sentence1": "And he said, Mama, I'm home.", "sentence2": "He didn't say a word."}** and template list [3, 3, 3], # the template string **{v0} Hypothesis: [sentence1], {v1} Premise: [sentence2] {v2} Answer:** will be translated into **<span style="color:red">VVV</span> Hypothesis: And he said, Mama, I'm home.<span style="color:red">VVV</span> Premise: He didn't say a word.<span style="color:red">VVV</span> Answer:**, where <span style="color:red">VVV</span> is virtual token of space 3. 
#

# Let's configure the proper template for the two datasets we prepared:

config.model.task_processors = [
    {
        "taskname": "qa-task",
        "template": "{v0} Context: {context}{v1} Question: {question}?{v2} Answer:",
        # bug fix: the QA records built earlier have no "content" field —
        # the field to truncate when the input is too long is "context"
        "limit_length_field": "context",
    },
    {
        "taskname": "sentiment-task",
        "template": "{v0}{v1} Sentence: {sentence}{v2} Sentiment:",
        "limit_length_field": "sentence",
    },
]

# Note each `task_processors` item has 3 fields. Besides the `template` string, the `taskname` refers to the `prompt_tag` in the data record. The `limit_length_field` specifies which field in the data is going to be cut if the length of the input exceeds the maximum sequence length of the model.
#
# Register the data processors with the `register_taskdata_processor` method:

# +
from nemo.collections.nlp.data.glue_benchmark.gpt_ptune_dataset import register_taskdata_processor, TemplateProcessor

# build one TemplateProcessor per task and register it under its task tag
for processor_config in config.model.task_processors:
    processor = TemplateProcessor(
        template=processor_config.template,
        limit_length_field=processor_config.limit_length_field
    )
    register_taskdata_processor(processor_config.taskname, processor)
# -

print(OmegaConf.to_yaml(config))

# ## Building the PyTorch Lightning Trainer
#
# NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem.
# # Let's first instantiate a Trainer object print("Trainer config - \n") print(OmegaConf.to_yaml(config.trainer)) # + from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin # lets modify some trainer configs # checks if we have GPU available and uses it cuda = 1 if torch.cuda.is_available() else 0 config.trainer.gpus = cuda config.trainer.max_epochs = 100 config.trainer.val_check_interval=95230 # for PyTorch Native AMP set precision=16 config.trainer.precision = 16 if torch.cuda.is_available() else 32 # remove distributed training flags config.trainer.accelerator = None trainer = pl.Trainer(plugins=[NLPDDPPlugin()], **config.trainer) # - # ## Setting up a NeMo Experiment # # NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it: # + exp_dir = exp_manager(trainer, config.get("exp_manager", None)) os.makedirs(WORK_DIR, exist_ok=True) # the exp_dir provides a path to the current experiment for easy access exp_dir = str(exp_dir) exp_dir # - # We will use the converted `.nemo` file as our LM model. # add the specified above model parameters to the config # config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL config.model.language_model.nemo_file = 'gpt_344m.nemo' config.model.tensor_model_parallel_size = 1 config.exp_manager.checkpoint_callback_params.save_top_k = 1 # Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders we'll be prepared for training and evaluation. from nemo.collections.nlp.models.language_modeling.megatron_ptune_gpt_model import MegatronGPTPTuneModel model_ptune = MegatronGPTPTuneModel(cfg=config.model, trainer=trainer) # ## Monitoring training progress # Optionally, you can create a Tensorboard visualization to monitor training progress. 
# If you're not using Colab, refer to [https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks](https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks) if you're facing issues with running the cell below. # + try: from google import colab COLAB_ENV = True except (ImportError, ModuleNotFoundError): COLAB_ENV = False # Load the TensorBoard notebook extension if COLAB_ENV: # %load_ext tensorboard # %tensorboard --logdir {exp_dir} else: print("To use tensorboard, please use this notebook in a Google Colab environment.") # - # start model training trainer.fit(model_ptune) # # Inference # # To see how the model performs, we can run model in the inference mode # + # let's first create a subset of our dev data query_examples = [ ] results = model_ptune.cuda().ptune_inference(queries=query_examples, batch_size=1, decode_token_len=15) print('The prediction results of some sample queries with the trained model:') for query, result in zip(query_examples, results): print(f'Query : {query}') print(f'Predicted label: {result}') # - # ## Training Script # # If you have NeMo installed locally, you can also train the model with `examples/nlp/text_classification/ptune_text_classification.py`. # # To run training script, use: # ``` # python examples/nlp/language_modeling/megatron_gpt_ptune.py \ # trainer.gpus=1 \ # model.tensor_model_parallel_size=1 \ # model.language_model.nemo_file=gpt_344m.nemo \ # model.train_ds.file_path=TRAIN_FILE \ # model.prompt_encoder.template=[3,3,3] \ # model.train_ds.batch_size=8 \ # model.validation_ds.file_path=VAL_FILE \ # model.test_ds.file_path=TEST_FILE \ # ``` #
tutorials/nlp/PTune_multiple_NLP_tasks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Integrating Functional Data # # So far most of our work has been examining anatomical images - the reason being is that it provides a nice visual way of exploring the effects of data manipulation and visualization is easy. In practice, you will most likely not analyze anatomical data using <code>nilearn</code> since there are other tools that are better suited for that kind of analysis (freesurfer, connectome-workbench, mindboggle, etc...). # # In this notebook we'll finally start working with functional MR data - the modality of interest in this workshop. First we'll cover some basics about how the data is organized (similar to T1s but slightly more complex), and then how we can integrate our anatomical and functional data together using tools provided by <code>nilearn</code> # Functional data consists of full 3D brain volumes that are *sampled* at multiple time points. Therefore you have a sequence of 3D brain volumes, stepping through sequences is stepping through time and therefore time is our 4th dimension! Here's a visualization to make this concept more clear: # <img src="./static/images/4D_array.png" alt="Drawing" align="middle" width="500px"/> # Each index along the 4th dimensions (called TR for "Repetition Time", or Sample) is a full 3D scan of the brain. Pulling out volumes from 4-dimensional images is similar to that of 3-dimensional images except you're now dealing with: # # # <code> img.slicer[x,y,z,time] </code>! # # Let's try a couple of examples to familiarize ourselves with dealing with 4D images. But first, let's pull some functional data using PyBIDS! 
import os import matplotlib.pyplot as plt #to enable plotting within notebook from nilearn import image as img from nilearn import plotting as plot from bids.layout import BIDSLayout import numpy as np # %matplotlib inline fmriprep_dir = '../data/ds000030/derivatives/fmriprep/' layout=BIDSLayout(fmriprep_dir, validate=False) T1w_files = layout.get(subject='10788', datatype='anat', suffix='preproc') brainmask_files = layout.get(subject='10788', datatype='anat', suffix='brainmask') func_files = layout.get(subject='10788', datatype='func', suffix='preproc') func_mask_files = layout.get(subject='10788', datatype='func', suffix='brainmask') # We'll be using functional files in MNI space rather than T1w space. Recall, that MNI space data is data that was been warped into standard space. These are the files you would typically use for a group-level functional imaging analysis! func_mni = func_files[1].path func_mni_img = img.load_img(func_mni) # First, take a look at the shape of the functional image: func_mni_img.shape # Notice that the Functional MR scan contains *4 dimensions*. This is in the form of $(x,y,z,t)$, where $t$ is time. # We can use slicer as usual where instead of using 3 dimensions we use 4. # # For example: # # <code> func.slicer[x,y,z] </code> # # vs. # # <code> func.slicer[x,y,z,t] </code> # #Pull the 5th TR func_vol5 = func_mni_img.slicer[:,:,:,4] plot.plot_epi(func_vol5) # ## What fMRI actually represents # We've represented fMRI as a snapshot of MR signal over multiple timepoints. This is a useful way of understanding the organization of fMRI, however it isn't typically how we think about the data when we analyze fMRI data. fMRI is typically thought of as **time-series** data. We can think of each voxel (x,y,z coordinate) as having a time-series of length T. The length T represents the number of volumes/timepoints in the data. 
# Let's pick an example voxel and examine its time-series using <code>func_mni_img.slicer</code>:

func_mni_img.shape

# Pick one voxel at coordinate (59, 45, 30). The slicer uses half-open ranges,
# so [59:60, 45:46, 30:31] selects exactly that voxel across all timepoints.
# get_fdata() replaces get_data(), which was deprecated and removed in nibabel >= 5.
single_vox = func_mni_img.slicer[59:60,45:46,30:31,:].get_fdata()
single_vox.shape

# As you can see here, we pulled one voxel that contains 152 timepoints. For plotting purposes a 4-dimensional array is difficult to deal with, so we'll flatten it to 1 dimension (time) for convenience:

single_vox = single_vox.flatten()
single_vox.shape

# Here we've pulled out a voxel at a specific coordinate at every single time-point. This voxel has a single value for each timepoint and therefore is a time-series. We can visualize this time-series signal by using a standard python plotting library. We won't go into too much detail about python plotting, the intuition about what the data looks like is what is most important:

import matplotlib.pyplot as plt
plt.plot(np.arange(0,single_vox.shape[0]),single_vox,'k')
plt.xlabel('Timepoint')
plt.ylabel('Signal Value')

# ## Resampling

# Recall from our introductory exploration of neuroimaging data:
#
# - T1 images are typically composed of voxels that are 1x1x1 in dimension
# - Functional images are typically composed of voxels that are 4x4x4 in dimension
#
# If we'd like to overlay our functional on top of our T1 (for visualization purposes, or analyses), then we need to match the size of the voxels!
#
# Think of this like trying to overlay a 10x10 JPEG and a 20x20 JPEG on top of each other. To get perfect overlay we need to resize (or more accurately *resample*) our JPEGs to match!
#
# **Note**:
# Resampling is a method of interpolating in between data-points. When we stretch an image we need to figure out what goes in the spaces that are created via stretching - resampling does just that. In fact, resizing any type of image is actually just resampling to new dimensions.

# Let's resample some MRI data using nilearn.
# # **Goal**: Match the dimensions of the structural image to that of the functional image #Files we'll be using (Notice that we're using _space-MNI..._ which means they are normalized brains) T1_mni = T1w_files[1].path T1_mni_img = img.load_img(T1_mni) # Let's take a look at the sizes of both our functional and structural files: print(T1_mni_img.shape) print(func_mni_img.shape) # Resampling in nilearn is as easy as telling it which image you want to sample and what the target image is. # Structure of function: # # img.resample_to_img(source_img,target_img,interpolation) # - source_img = the image you want to sample # - target_img = the image you wish to *resample to* # - interpolation = the method of interpolation # # A note on **interpolation** # # nilearn supports 3 types of interpolation, the one you'll use depends on the type of data you're resampling! # 1. **continuous** - Interpolate but maintain some edge features. Ideal for structural images where edges are well-defined. Uses $3^\text{rd}$-order spline interpolation. # 2. **linear (default)** - Interpolate uses a combination of neighbouring voxels - will blur. Uses trilinear interpolation. # 3. **nearest** - matches value of closest voxel (majority vote from neighbours). This is ideal for masks which are binary since it will preserve the 0's and 1's and will not produce in-between values (ex: 0.342). Also ideal for numeric labels where values are 0,1,2,3... (parcellations). Uses nearest-neighbours interpolation with majority vote. # #Try playing around with methods of interpolation #options: 'linear','continuous','nearest' resamp_t1 = img.resample_to_img(source_img=T1_mni_img,target_img=func_mni_img,interpolation='continuous') print(resamp_t1.shape) print(func_mni_img.shape) plot.plot_anat(resamp_t1) import matplotlib.animation from IPython.display import HTML # + # %%capture # %matplotlib inline #Resample the T1 to the size of the functional image! 
resamp_t1 = img.resample_to_img(source_img=T1_mni_img, target_img=func_mni_img, interpolation='continuous') fig, ax = plt.subplots() def animate(image): plot.plot_anat(image, figure=fig, cut_coords=(0,0,0)) ax.set_facecolor('black') ani = matplotlib.animation.FuncAnimation(fig, animate, frames=[resamp_t1, T1_mni_img]) #change the frames to look at the functional mask over the resampled T1 # ani = matplotlib.animation.FuncAnimation(fig, animate, frames=[resamp_t1, func]) # - # Display animation HTML(ani.to_jshtml()) # ## **Exercise** # # Using **Native** T1 and **T1w** resting state functional do the following: # 1. Resample the native T1 image to resting state size # 2. Replace the brain in the T1 image with the first frame of the resting state brain func_files[0] func_mask_files # + #Files we'll need ####STRUCTURAL FILES #T1 image ex_t1 = img.load_img(T1w_files[0].path) #mask file ex_t1_bm = img.load_img(brainmask_files[0].path) ####FUNCTIONAL FILES #This is the pre-processed resting state data that hasn't been standardized ex_func = img.load_img(func_files[1].path) #This is the associated mask for the resting state image. ex_func_bm = img.load_img(func_mask_files[1].path) # - # The first step we need to do is to make sure the dimensions for our T1 image and resting state image match each other: #Resample the T1 to the size of the functional image! resamp_t1 = img.resample_to_img(source_img=ex_t1, target_img=ex_func, interpolation='continuous') plot.plot_anat(resamp_t1) print(resamp_t1.shape) # Next we want to make sure that the brain mask for the T1 is also the same dimensions as the functional image. This is exactly the same as above, except we use the brain mask as the source. # # What kind of interpolation should we use for masks? resamp_bm = img.resample_to_img(source_img=ex_t1_bm, target_img=ex_func,interpolation='nearest') plot.plot_anat(resamp_bm) print(resamp_bm.shape) # Once we've resampled both our T1 and our brain mask. 
We now want to remove the brain from the T1 image so that we can replace it with the funtional image instead. Remember to do this we need to: # # 1. Invert the T1 mask # 2. Apply the inverted mask to the brain inverted_bm_t1 = img.math_img('1-a',a=resamp_bm) plot.plot_anat(inverted_bm_t1) # Now apply the mask: resamp_t1_nobrain = img.math_img('a*b',a=resamp_t1,b=inverted_bm_t1) plot.plot_anat(resamp_t1_nobrain) # We now have a skull missing the structural T1 brain. The final steps is to stick in the brain from the functional image into the now brainless head. First we need to remove the surrounding signal from the functional image. # # Since a functional image is 4-Dimensional, we'll need to pull the first volume to work with. This is because the structural image is 3-dimensional and operations will fail if we try to mix 3D and 4D data. #Let's visualize the first volume of the functional image: first_vol = ex_func.slicer[:,:,:,0] plot.plot_epi(first_vol) # As shown in the figure above, the image has some "signal" outside of the brain. In order to place this within the now brainless head we made earlier, we need to mask out the functional MR data as well! masked_func = img.math_img('a*b', a=first_vol, b=ex_func_bm) plot.plot_epi(masked_func) # The final step is to stick this data into the head of the T1 data. Since the hole in the T1 data is represented as $0$'s. We can add the two images together to place the functional data into the void: #Now overlay the functional image on top of the anatomical combined_img = img.math_img('a+b',a=resamp_t1_nobrain,b=masked_func) plot.plot_anat(combined_img) # *** # In this section we explored functional MR imaging. Specifically we covered: # # 1. How the data in a fMRI scan is organized - with the additional dimension of timepoints # 2. How we can integrate functional MR images to our structural image using resampling # 3. 
How we can just as easily manipulate functional images using <code>nilearn</code> # # Now that we've covered all the basics, it's time to start working on data processing using the tools that we've picked up.
code/04-integrating_functional_data_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Udacity DSND Project 1: Blog Post # ### Table of Contents # # - Project overview # - Business understanding # - Data understanding # - Data preparation # - Data modelling and results evaluation # - Conclusions # # Project overview # # This is the Blog Post Project for the Data Scientist ND. We will be applying CRISP-DM approach to the TMDb movie dataset. This dataset contains information about 10,000 movies regarding the following: budget, revenue, user rating, vote, genre and release year. It covers the period from 1960 to 2015. # # The libraries used in this project include pandas, numpy, seaborn, matplotlib. # # ## Business Understanding # We will try to answer the following three questions: # # - Which genres are the most popular? # - Which genres are associated with the highest revenues? # - What movie properties have highest impact on revenue? # import packages import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # + # Load data and display few rows. movies = pd.read_csv('tmdb-movies.csv') movies.head(10) # - # ## Data understanding # To understand the data, we need to walkthrough the dataset #Get modies information movies.info() movies.shape movies.describe() # **From the above results, we observe the following** # # - Columns count: 21 # - Rows count: 10866 # # ## Data preparation # The following steps are applied to prepare and clean the data, inspired by what was presented in the classroom: # - Delete columns that will not be used in the analysis. # - Delete duplicate values. # - Delete missing values and zero values in budget and revenue columns. # - split the multivalue data in the genre column. 
# drop unnecessary columns that are not used in this analysis
movies.drop(['homepage', 'tagline', 'overview', 'production_companies', 'release_date',
             'vote_count', 'keywords', 'imdb_id', 'budget_adj', 'revenue_adj', 'cast'],
            axis=1, inplace=True)
movies.head()

movies.isnull().sum()

# From the above results, we observe that the genres column has 23 missing values that will be deleted.
movies.dropna(inplace=True)
movies.isnull().sum()

# #### Next, we will use dropna to remove zero values. For that, we first need to convert zero
# values to null values. Zero values in this dataset indicate fields that were initialised but
# never given an actual value. Keeping zero-budget / zero-revenue movies would negatively affect
# further analysis of the data, so these rows must be dropped.
# BUGFIX: the np.NAN alias was removed in NumPy 2.0; the lowercase np.nan is the supported spelling.
movies['budget'] = movies['budget'].replace(0, np.nan)
movies['revenue'] = movies['revenue'].replace(0, np.nan)
movies.dropna(inplace=True)

# check
movies.isnull().sum()

# **Check for duplicate values**
movies.duplicated().sum()

# **Drop them**
movies.drop_duplicates(inplace=True)
# check
movies.duplicated().sum()

# Prepare the genres column: keep only the primary (first-listed) genre of each movie.
# BUGFIX: the original assigned the whole str.split(expand=True) DataFrame to the single
# 'genres' column, which raises "Cannot set a DataFrame with multiple columns to the single
# column" on modern pandas. Selecting column 0 makes the "primary genre" intent explicit.
movies['genres'] = movies['genres'].str.split("|", expand=True)[0]
movies.head()
movies.info()

# ## Data modelling and results evaluation
# ### Question 1 - Which genres are the most popular?

# count the number of movies per (primary) genre
number_movies = movies.groupby('genres').release_year.count()
number_movies

# Visualise the results
chart = number_movies.plot.barh(figsize=(6, 6))
chart.set(title='Most Popular Genres')
chart.set_ylabel('Genre')
chart.set_xlabel('Number of movies');

# - The most popular genres are Drama, Comedy and Action whereas the least popular are TV movies, Western, War and History.

# ### Question 2 - Which genres are associated with the highest revenues?
# Highest single-movie revenue per (primary) genre.
# NOTE: .max() reports the top-grossing movie of each genre, not the genre's total revenue.
top_revenue_genres = movies.groupby('genres').revenue.max()
top_revenue_genres

# Visualise the results
highest_revenue = top_revenue_genres.plot.pie(fontsize=11, figsize=(10, 8))
highest_revenue.set(title='Movie revenues per genre');

# Highest revenues are associated with Action and Drama, which is not surprising as they are among the most popular as well.

# ### Question 3 - What movie properties have highest impact on revenue?

# Find genres with above average revenue
revenue_ave = movies['revenue'].mean()
revenue_ave

higher_revenue = movies['revenue'] > revenue_ave
lower_revenue = movies['revenue'] <= revenue_ave
higher_revenue.value_counts()

# +
# Create a function to make code related to plots more modular
def my_plot(title, x, y, legend):
    """Label the current matplotlib axes: `x`/`y` are the x- and y-axis labels;
    `legend` toggles drawing the legend."""
    plt.title(title)
    plt.ylabel(y)
    plt.xlabel(x)
    if legend:  # idiomatic truth test instead of `== True`
        plt.legend()

# Check if the release year has an impact on the average revenue
movies.groupby('release_year')['revenue'].mean().plot(kind='line');
# BUGFIX: the labels were passed swapped ('Revenue' as x, 'Year' as y), while the line
# chart actually has the year on the x-axis and the average revenue on the y-axis.
my_plot('Revenue Average per Year', 'Year', 'Revenue', False);
# -

# Although the curve is noisy, we can see that movie revenues get higher as the years pass. This may indicate more interest in movies in recent years.

# Check if the movie rating has an impact on revenues
movies.vote_average[lower_revenue].hist(alpha=0.7, color='maroon', label='Lower revenue')
movies.vote_average[higher_revenue].hist(color='lightblue', label='Higher revenue')
# BUGFIX: the y-axis of a histogram is a count of movies, not a revenue amount.
my_plot('Revenue per Rating', 'Rating', 'Number of movies', True);

# Obviously, movies with lower revenues are associated with lower ratings.

# ## Conclusions
# To sum up, great business insights can be taken from the TMDb movies dataset. It was interesting to learn that the most popular genre is Drama while the least popular is TV movies. Action movies have the highest revenues. We noticed that movie revenues got higher in recent years and that movies with low ratings negatively influence the revenue.
TMDb_movie.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First Neurons import numpy as np import tensorflow as tf # ** Set Random Seeds for same results ** np.random.seed(101) tf.set_random_seed(101) # ** Data Setup ** # Setting Up some Random Data for Demonstration Purposes rand_a = np.random.uniform(0,100,(5,5)) rand_a rand_b = np.random.uniform(0,100,(5,1)) rand_b # CONFIRM SAME RANDOM NUMBERS (EXECUTE SEED IN SAME CELL!) Watch video for explanation np.random.seed(101) rand_a = np.random.uniform(0,100,(5,5)) rand_b = np.random.uniform(0,100,(5,1)) # ### Placeholders a = tf.placeholder(tf.float32) b = tf.placeholder(tf.float32) # ### Operations add_op = a+b # tf.add(a,b) mult_op = a*b #tf.multiply(a,b) # ### Running Sessions to create Graphs with Feed Dictionaries with tf.Session() as sess: add_result = sess.run(add_op,feed_dict={a:rand_a,b:rand_b}) print(add_result) print('\n') mult_result = sess.run(mult_op,feed_dict={a:rand_a,b:rand_b}) print(mult_result) # ________________________ # # ________________________ # ## Example Neural Network n_features = 10 n_dense_neurons = 3 # Placeholder for x x = tf.placeholder(tf.float32,(None,n_features)) # + # Variables for w and b b = tf.Variable(tf.zeros([n_dense_neurons])) W = tf.Variable(tf.random_normal([n_features,n_dense_neurons])) # - # ** Operation Activation Function ** xW = tf.matmul(x,W) z = tf.add(xW,b) # tf.nn.relu() or tf.tanh() a = tf.sigmoid(z) # ** Variable Intializer! ** init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) layer_out = sess.run(a,feed_dict={x : np.random.random([1,n_features])}) print(layer_out) # We still need to finish off this process with optimization! Let's learn how to do this next. 
# _____
# ## Full Network Example
#
# Let's work on a regression example: we are trying to solve a very simple equation,
#
#     y = mx + b
#
# y will be the y_labels and x is the x_data. We are trying to figure out the slope and the intercept for the line that best fits our data!

# BUGFIX: matplotlib was never imported in this notebook, but `plt` is used below.
import matplotlib.pyplot as plt

# ### Artificial Data (Some Made Up Regression Data)

x_data = np.linspace(0,10,10) + np.random.uniform(-1.5,1.5,10)
x_data

y_label = np.linspace(0,10,10) + np.random.uniform(-1.5,1.5,10)

plt.plot(x_data,y_label,'*')

# ** Variables **

# Example of drawing random initial values for the slope/intercept
np.random.rand(2)

m = tf.Variable(0.39)
b = tf.Variable(0.2)

# ### Cost Function

# +
error = 0

for x,y in zip(x_data,y_label):
    y_hat = m*x + b  # Our predicted value
    # The cost we want to minimize (we'll need to use an optimization function for the minimization!)
    error += (y-y_hat)**2
# -

# ### Optimizer

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(error)

# ### Initialize Variables

init = tf.global_variables_initializer()

# ### Create Session and Run!

with tf.Session() as sess:
    sess.run(init)
    epochs = 11
    for i in range(epochs):
        sess.run(train)
    # Fetch Back Results
    final_slope , final_intercept = sess.run([m,b])

final_slope

final_intercept

# ### Evaluate Results

# +
x_test = np.linspace(-1,11,10)

# y = mx + b with the learned parameters
y_pred_plot = final_slope*x_test + final_intercept

plt.plot(x_test,y_pred_plot,'r')
plt.plot(x_data,y_label,'*')
# -

# # Great Job!
03-TF-Neural-Network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pyspark.sql import SparkSession spark = SparkSession.builder.appName('dataframe').getOrCreate() df = spark.read.csv('Grouping.csv',header = True,inferSchema = True) df.show() # ### Group by df.groupBy('Depertment').sum().show() df.groupBy('Depertment').min().show() df.groupBy('Depertment').avg().show()
PySpark/Pyspark 5 Grouping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_gluonts-multimodel
#     language: python
#     name: conda_gluonts-multimodel
# ---

# ---
#
# # Contents
#
# 7. [How to Build the Custom Sagemaker Container for Model Deployment](#7.-How-to-Build-the-Custom-Sagemaker-Container-for-Model-Deployment)
# 8. [How to Deploy Models as Sagemaker Multi Model Endpoint and Invoke the Endpoint](#8.-How-to-Deploy-Models-as-Sagemaker-Multi-Model-Endpoint-and-Invoke-the-Endpoint)
# 9. [How to Do Batch Transform in the Multi Model Server Framework](#9.-How-to-Do-Batch-Transform-in-the-Multi-Model-Server-Framework)
# 10. [Clean up the resources](#10.-Clean-up-the-resources)
# 11. [Conclusion](#11.-Conclusion)
#
# ---

# # 7. How to Build the Custom Sagemaker Container for Model Deployment

# Inspired by [an example of bringing your own container for deployment to a multi-model endpoint.](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/multi_model_bring_your_own), here we use the [Multi Model Server](https://github.com/awslabs/multi-model-server) framework and the [SageMaker Inference Toolkit](https://github.com/aws/sagemaker-inference-toolkit) for hosting the multiple forecasting models at the same time using one endpoint:
#
# - Multi Model Server (MMS) is an open source framework for serving machine learning models. MMS supports a pluggable custom backend handler where you can implement your own algorithm. It provides the HTTP frontend and model management capabilities required by multi-model endpoints to host multiple models within a single container, load models into and unload models out of the container dynamically, and perform inference on a specified loaded model. MMS supports [various settings](https://github.com/awslabs/multi-model-server/blob/master/docker/advanced_settings.md#description-of-config-file-settings) for the frontend server it starts.
# - SageMaker Inference Toolkit
#   [SageMaker Inference Toolkit](https://github.com/aws/sagemaker-inference-toolkit) is a library that bootstraps MMS in a way that is compatible with SageMaker multi-model endpoints, while still allowing you to tweak important performance parameters, such as the number of workers per model.
#
# In this way, we can compare all the model forecasts in real time more efficiently, and can save the cost of creating multiple endpoints.
#
# The main steps for building the custom Sagemaker container are described below.

# ## Step 1: Import libraries
# Before beginning, first import all the Python modules needed.

# +
import boto3
import jsonlines
import json
import time
from sagemaker import get_execution_role
from time import gmtime, strftime
# -

# ## Step 2: Define model handler
# The code snippet __`container/model_handler.py`__ below shows how we define a custom handler that supports loading and inference for the GluonTS models.
# - The `initialize` method will be called when a model is loaded into memory. In this example, it loads the model artifacts at `model_dir` into the GluonTS Predictor class.
#
# - The `handle` method will be called when invoking the model. In this example, it validates the input payload and then forwards the input to the GluonTS Predictor class, returning the output. This handler class is instantiated for every model loaded into the container, so state in the handler is not shared across models.

# !cat container/model_handler.py

# ### Step 3: Unit testing for the model handler
# Before we build the custom docker container, it is a good habit to do some unit testing (__`container/test_model_handler.py`__) as below.

# + language="bash"
#
# cd container
# pytest -v test_model_handler.py
# -

# ## Step 4: Define Docker Entrypoint
# The inference container in this example uses the Inference Toolkit to start MMS, which can be seen in the __`container/dockerd-entrypoint.py`__ file as below.

# !cat container/dockerd-entrypoint.py

# ## Step 5: Building and registering a container
#
# The shell script below will first build a custom Docker image which uses MMS as the front end (configured through SageMaker Inference Toolkit in `container/dockerd-entrypoint.py`), and `container/model_handler.py` shown above as the backend handler. It will then upload the image to an ECR repository in your account. `This step may take a while when running for the first time.`

# + language="bash"
#
# # The name of our algorithm
# algorithm_name=demo-sagemaker-multimodel-gluonts
#
# cd container
#
# account=$(aws sts get-caller-identity --query Account --output text)
#
# # Get the region defined in the current configuration (default to us-west-2 if none defined)
# region=$(aws configure get region)
# region=${region:-us-west-2}
#
# fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
#
# # If the repository doesn't exist in ECR, create it.
# aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
#
# if [ $? -ne 0 ]
# then
#     aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
# fi
#
# # Get the login command from ECR and execute it directly
# $(aws ecr get-login --region ${region} --no-include-email)
#
# # Build the docker image locally with the image name and then push it to ECR
# # with the full name.
#
# docker build -q -t ${algorithm_name} .
# docker tag ${algorithm_name} ${fullname}
#
# docker push ${fullname}
# -

# # 8. How to Deploy Models as Sagemaker Multi Model Endpoint and Invoke the Endpoint
#
# After building and registering the custom Sagemaker container, we can start to deploy models as a Sagemaker multi-model endpoint and invoke the endpoint. The main steps are outlined below:

# ## Step 1: Set up the environment
# First, we need to define the S3 bucket and prefix of the model artifacts that will be invoked by the multi-model endpoint. We also need to define the IAM role that will give SageMaker access to the model artifacts and ECR image that was created above.

# +
sm_client = boto3.client(service_name='sagemaker')
runtime_sm_client = boto3.client(service_name='sagemaker-runtime')

account_id = boto3.client('sts').get_caller_identity()['Account']
region = boto3.Session().region_name

bucket = 'sagemaker-{}-{}'.format(region, account_id)
prefix = 'demo-multimodel-gluonts-endpoint'

role = get_execution_role()

models_dir = "models"
# -

# ## Step 2: Create a multi-model endpoint
# ### Step 2-1: Import models into hosting
# When creating the Model entity for multi-model endpoints, the container's `ModelDataUrl` is the S3 prefix where the model artifacts that are invokable by the endpoint are located. The rest of the S3 path will be specified when invoking the model.
#
# The `Mode` of container is specified as `MultiModel` to signify that the container will host multiple models.
# + model_name = 'DEMO-MultiModelGluonTSModel' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) model_url = 'https://s3-{}.amazonaws.com/{}/{}/{}/'.format(region, bucket, prefix, models_dir) container = '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account_id, region, 'demo-sagemaker-multimodel-gluonts') print('Model name: ' + model_name) print('Model data Url: ' + model_url) print('Container image: ' + container) container = { 'Image': container, 'ModelDataUrl': model_url, 'Mode': 'MultiModel' } create_model_response = sm_client.create_model( ModelName = model_name, ExecutionRoleArn = role, Containers = [container]) print("Model Arn: " + create_model_response['ModelArn']) # - # ### Step 2-2: Create endpoint configuration # Endpoint config creation works the same way it does as single model endpoints. # + endpoint_config_name = 'DEMO-MultiModelGluonTSEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print('Endpoint config name: ' + endpoint_config_name) create_endpoint_config_response = sm_client.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType': 'ml.m5.xlarge', 'InitialInstanceCount': 2, 'InitialVariantWeight': 1, 'ModelName': model_name, 'VariantName': 'AllTraffic'}]) print("Endpoint config Arn: " + create_endpoint_config_response['EndpointConfigArn']) # - # ### Step 2-3: Create the multi model endpoint # Similarly, endpoint creation works the same way as for single model endpoints. 
# + endpoint_name = 'DEMO-MultiModelGluonTSEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print('Endpoint name: ' + endpoint_name) create_endpoint_response = sm_client.create_endpoint( EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name) print('Endpoint Arn: ' + create_endpoint_response['EndpointArn']) resp = sm_client.describe_endpoint(EndpointName=endpoint_name) status = resp['EndpointStatus'] print("Endpoint Status: " + status) print('Waiting for {} endpoint to be in service...'.format(endpoint_name)) waiter = sm_client.get_waiter('endpoint_in_service') waiter.wait(EndpointName=endpoint_name) # - # ## Step 4: Invoke models # Now we invoke the models that we uploaded to S3 previously. The first invocation of a model may be slow, since behind the scenes, SageMaker is downloading the model artifacts from S3 to the instance and loading it into the container. # ### Invoke the Mean Model # First we will prepare two time series as the payload to invoke the model, then call InvokeEndpoint to invoke the Mean model to forecast. The `TargetModel` field is concatenated with the S3 prefix specified in `ModelDataUrl` when creating the model, to generate the location of the model in S3. 
# + def read_data(file_path): data = [] with jsonlines.open(file_path) as reader: for obj in reader: data.append(obj) return data payload_jsonline = read_data('data/test.json') # - n_time_series = 2 # select 2 time series for quick response payload_list = [] for p in payload_jsonline[:n_time_series]: payload_list.append(json.dumps(p)) payload = '\n'.join(payload_list) # + # %%time response = runtime_sm_client.invoke_endpoint( EndpointName=endpoint_name, ContentType='application/json', TargetModel='MeanPredictor.tar.gz', # this is the rest of the S3 path where the model artifacts are located Body=payload) print(response['Body'].read().decode("utf-8"), sep = '\n') # - # When we invoke the same models a __`2nd`__ time, it is already downloaded to the instance and loaded in the container, so __`inference is faster`__. # + # %%time response = runtime_sm_client.invoke_endpoint( EndpointName=endpoint_name, ContentType='application/json', TargetModel='MeanPredictor.tar.gz', Body=payload) print(response['Body'].read().decode("utf-8"), sep = '\n') # - # ### Invoke other models # Exercising the power of a multi-model endpoint, we can specify different models (e.g., DeepAREstimator.tar.gz) as `TargetModel` and perform inference on it using the same endpoint. 
# +
# %%time
response = runtime_sm_client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType='application/json',
    TargetModel='DeepAREstimator.tar.gz',
    Body=payload)

print(response['Body'].read().decode("utf-8"), sep = '\n')

# +
# %%time
response = runtime_sm_client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType='application/json',
    TargetModel='RForecastPredictor.tar.gz',
    Body=payload)

print(response['Body'].read().decode("utf-8"), sep = '\n')

# +
# %%time
response = runtime_sm_client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType='application/json',
    TargetModel='ProphetPredictor.tar.gz',
    Body=payload)

print(response['Body'].read().decode("utf-8"), sep = '\n')

# +
# %%time
response = runtime_sm_client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType='application/json',
    TargetModel='SeasonalNaivePredictor.tar.gz',
    Body=payload)

print(response['Body'].read().decode("utf-8"), sep = '\n')
# -

# # 9. How to Do Batch Transform in the Multi Model Server Framework
# The MMS does not support batch transform directly. To perform batch transform, we need to create the models separately in Sagemaker and run a batch transform job for each model one by one. Below is an example of how to do batch transform for one model.

# ## Step 1: Create the Sagemaker model from the model artifact.

# +
from time import gmtime, strftime

model = 'RForecastPredictor'

model_name_bt = 'DEMO-GluonTSModel-{}-'.format(model) + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
model_url = 'https://s3-{}.amazonaws.com/{}/{}/{}/{}.tar.gz'.format(region, bucket, prefix, models_dir, model)
container = '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account_id, region, 'demo-sagemaker-multimodel-gluonts')

print('Model name: ' + model_name_bt)
print('Model data Url: ' + model_url)
print('Container image: ' + container)

# Single-model mode: the ModelDataUrl points directly at one model artifact
container = {
    'Image': container,
    'ModelDataUrl': model_url,
    'Mode': 'SingleModel'
}

create_model_response = sm_client.create_model(
    ModelName = model_name_bt,
    ExecutionRoleArn = role,
    Containers = [container])

print("Model Arn: " + create_model_response['ModelArn'])
# -

# ## Step 2: Start the Batch Transform Job Using the Model Created above

# +
test_data_s3_path = "s3://{}/{}/data/test.json".format(bucket, prefix)

transform_job_name = 'DEMO-GluonTS-{}-BT-'.format(model) + strftime("%Y-%m-%d-%H-%M-%S", gmtime())

# SplitType='Line' feeds the JSON-lines test file to the container one record at a time
transform_input = {
    'DataSource': {
        'S3DataSource': {
            'S3DataType': 'S3Prefix',
            'S3Uri': test_data_s3_path
        }
    },
    'ContentType': 'application/json',
    'CompressionType': 'None',
    'SplitType': 'Line'
}

transform_output = {
    'S3OutputPath': 's3://{}/{}/inference-results/{}'.format(bucket,prefix, model),
}

transform_resources = {
    'InstanceType': 'ml.m5.xlarge',
    'InstanceCount': 1
}

sm_client.create_transform_job(TransformJobName = transform_job_name,
                               ModelName = model_name_bt,
                               BatchStrategy='SingleRecord',
                               TransformInput = transform_input,
                               TransformOutput = transform_output,
                               TransformResources = transform_resources
                              )
# -

# ## Step 3: Check the Batch Transform Job Status

# +
print ('JobStatus')
print('----------')

from time import sleep

describe_response = sm_client.describe_transform_job(TransformJobName = transform_job_name)
job_run_status = describe_response['TransformJobStatus']
print (job_run_status)

# Poll every 30 seconds until the job reaches a terminal state
while job_run_status not in ('Failed', 'Completed', 'Stopped'):
    describe_response = sm_client.describe_transform_job(TransformJobName = transform_job_name)
    job_run_status = describe_response['TransformJobStatus']
    print (job_run_status)
    sleep(30)
# -

# ## Step 4: Inspect Batch Transform Results

s3_client = boto3.client('s3')
s3_client.download_file(Filename='data/test.json.out',
                        Bucket=bucket,
                        Key='{}/inference-results/{}/test.json.out'.format(prefix, model))

test_out_jsonline = read_data('data/test.json.out')
print(test_out_jsonline[:2])

# # 10. Clean up the resources

# ## (Optional) Delete the hosting resources

sm_client.delete_endpoint(EndpointName=endpoint_name)
sm_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sm_client.delete_model(ModelName=model_name)
sm_client.delete_model(ModelName=model_name_bt)

# # 11. Conclusion
# Time series data is a highly valuable data source for various businesses, and the ability to forecast such data is critical to making optimal and accurate business decisions. Instead of using AWS built-in services or algorithms, this tutorial has demonstrated how to use AWS Sagemaker to build your own custom algorithm to do forecasting, and deploy multiple forecast models into one Sagemaker endpoint. This will help businesses compare state-of-the-art algorithms more efficiently and effectively, and enable the possibility to make smarter decisions based on the forecasts.
#
# We have covered other use cases related to time series data as well; you can find other topics below:
#
# - Forecast air pollution with SageMaker processing and the AWS Open Data Registry by <NAME>
# - Automate sales projections with Amazon Forecast, QuickSight and AWS Lambda by Y<NAME>
# - Detect DDoS Attacks with Kinesis Data Streams and SageMaker Isolation Forest by <NAME>
2_Predict_electricity_demand_with_the_GluonTS_and_SageMaker_custom_containers/02_deploy_gluonts_forecast_models_as_multi_model_endpoints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ![CMCC](http://cmcc.ufabc.edu.br/images/logo_site.jpg) # # # Lab 1: Revisão de Conceitos Básicos # # #### Nesse notebook revisaremos os conceitos básicos da linguagem de programação Python, Algebra Linear, o uso da biblioteca [NumPy](http://www.numpy.org/) e conceitos de Programação Funcional. # # #### A primeira parte abordará as diferenças de sintaxe da linguagem Python em relação ao Java e ao C/C++ e formas de otimizar o código. A segunda parte mostrará, os conceitos básicos da biblioteca Numpy e como utilizá-la para aplicar conceitos de Algebra Linear. Finalmente, a terceira parte mostrará os conceitos de Programação Funcional como *Expressões Lambda* e *Funções de Ordem Alta*. # # #### Para navegar pelo notebook execute cada uma das células utilizando a tecla de atalho SHIFT-ENTER, isso executará as intruções da célula *In* mostrando o resultado na célula *Out* correspondente. Todas as variáveis criadas em uma célula podem ser acessadas em todas as células subsequentes. # # #### As células-exercícios iniciam com o comentário `# EXERCICIO` e os códigos a serem completados estão marcados pelos comentários `<COMPLETAR>`. # # #### ** Nesse notebook: ** # #### *Parte 1:* Python # #### *Parte 2:* NumPy # #### *Parte 3:* Programação Funcional # ### **Parte 1: Python** # #### ** (1a) Declarações de Variáveis ** # # #### O tipo de variáveis no Python é definido dinâmicamente pelo interpretador. Não existe necessidade em declarar. 
# + x = 10 # x é um inteiro print type(x) x = 1.3 # x é um ponto flutuante print type(x) x = "Ola" # x é uma string print type(x) x = [1, 5, 10] # x é uma lista print type(x) # - # #### ** (1b) Indentações ** # # No Python a indentação faz o papel das chaves para determinar o bloco de comandos de uma função, condicional ou laço de repetição. O início de um bloco é definido pelo caractere **":"**. # + x = 10 for i in range(20): # Início da repetição For x = x + 1 if x%2 == 0: # Instrução se condição verdadeira x = x + 1 else: # Instrução se a condição for falsa x = x + 2 # Fim do bloco de repetição For print x # Isso está fora do for! Boa Ideia! # - # #### ** (1c) Funções ** # # As funções no Python podem receber quantas entradas necessárias e retornar múltiplas saídas. A declaração da função é precedida pela palavra-chave `def`. # + def Soma(x,y): return x+y def Mult(x,y): return x*y def SomaMult(x,y): return x+y, x*y # múltiplas saídas separadas por vírgula print Soma(10,2), Mult(10,2), SomaMult(10,2) # O retorno de múltiplas saídas podem ser atribuídas diretamente para múltiplas variáveis w,z = SomaMult(10,2) print w, z # - # #### ** (1d) Tipos Especiais ** # # #### Além dos tipos básicos de variáveis o Python possui os tipos lista (`list`), tupla (`tuple`) e dicionários (`dict`). # # #### As listas e tuplas são agregadores de valores e podem agrupar valores não necessariamente dos mesmos tipos. A diferença entre os dois é que listas são mutáveis (podem ser alterados) e tuplas são imutáveis. Elas são indexadas a partir do índice *0*. # # #### Os dicionários são arranjos associativos que permite associar uma chave (de qualquer tipo) a um valor (também de qualquer tipo). # + lista = [1, 2, True, "palavra"] tupla = (1, 2, True, "palavra") lista[1] = 3.0 print lista # - tupla[1] = 3.0 # Vai dar erro! 
# + # range(n) gera uma lista de valores entre 0 e n-1 # len(lista) retorna o tamanho de uma lista def DobraValores(lista): for i in range(len(lista)): lista[i] = lista[i]*2 return lista lista = [1,2,3,4] lista2 = DobraValores(lista) print lista, lista2 # As listas são passadas como referência para as funções # + dicionario = { "Ana":12, "Joao":13, "Jose":17 } # declaração inicial do dicionário, pode ser {} para dic. vazio print dicionario["Ana"] # acesso ao elemento pela chave entre colchetes dicionario["Maria"] = 11 # podemos alterar ou inserir um novo elemento print dicionario print "As chaves do dicionário são: ", dicionario.keys() print "Os valores do dicionário são: ", dicionario.values() # - # #### ** (1e) Iteradores ** # # #### Para iterar por uma lista, tupla ou dicionário podemos utilizar a palavra-chave `in`. Em conjunto com a instrução `for` ela percorre cada elemento iterativamente. Essa palavra-chave também pode ser utilizada para verificar se um elemento está contido na lista. # + lista = range(10) # gera a lista [0,..,9] print 8 in lista, 12 in lista for x in lista: print x # - # #### ** (1f) Geradores e List Comprehension ** # # #### O Python permite uma sintaxe mais enxuta (e otimizada) para gerar uma nova lista de acordo com alguma regra específica: # # #### `[ funcao(x) for x in listaGeradora ]` # + # Jeito tradicional, mas não otimizado listaOriginal = [1,2,3,4,5,6,7,8,9] listaQuadrada = [] for x in listaOriginal: listaQuadrada.append(x*x) print listaQuadrada # Através do List Comprehension listaQuadrada = [ x*x for x in listaOriginal ] print listaQuadrada # - # #### Quando precisamos trabalhar com listas muito grandes, mas sem a necessidade de acessar os elementos aleatóriamente, podemos utilizar os geradores. # # #### Um gerador define a instrução para gerar uma sequência, calculando cada elemento da sequência conforme requisitado. 
# + listaQuadrada = ( x*x for x in listaOriginal ) print listaQuadrada # os elementos ainda não foram calculados for x in listaQuadrada: print x # a cada iteração apenas o próximo elemento é calculado, a lista não existe na memória # - # #### ** (1g) Arquivos ** # # #### A leitura de arquivos é feita pelo comando `open()` que gera um apontador para arquivo, podemos utilizar o laço for para ler cada linha do arquivo iterativamente como uma string. # + import os.path caminho = os.path.join('Data','Aula01') # garante o uso correto de / ou \\ para diretórios arquivo = os.path.join(caminho,'exemplo.txt') f = open(arquivo) for linha in f: print linha f.close() # - # ### **Parte 2: Numpy** # #### [NumPy](http://docs.scipy.org/doc/numpy/reference/) é uma biblioteca do Python para trabalhar com arrays. Essa biblioteca provê abstrações para utilizar arrays como vetores e matrizes. Ela é otimizada para ser rápida e eficiente em relação ao uso de memória. O tipo básico do NumPy é o [ndarray](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html), que é uma array multidimensional de tamanho fixo que contém elementos de um tipo apenas. # #### ** (2a) Multiplicação por Escalar ** # # #### Para esse exercício, crie uma `ndarray` contendo os elementos \[1, 2, 3\] e multiplique essa array por 5. Use o comando [np.array()](http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html) para criar a array. Note que um dos possíveis parâmetros para essa função é ums lista do Python. A multiplicação escalar pode ser feita utilizando o operador `*`. # # #### Note que se você criar uma array partindo de uma lista do Python, você obterá uma array unidimensional, que é equivalente a um vetor. 
# Como convenção importaremos a biblioteca numpy como np import numpy as np # EXERCICIO # Crie uma array numpy com os valores 1, 2, 3 arraySimples = <COMPLETAR> # Faça o produto escalar multiplicando a array por 5 vezesCinco = <COMPLETAR> print arraySimples print vezesCinco # TESTE do exercício (2a) assert np.all(vezesCinco == [5, 10, 15]), 'valor incorreto para vezesCinco' print "Correto!" # #### ** (2b) Multiplicação elemento-a-elemento e produto interno ** # # #### A multiplicação elemento-a-elemento é calculada como: $$ \mathbf{x} \odot \mathbf{y} = \begin{bmatrix} x_1 y_1 \\\ x_2 y_2 \\\ \vdots \\\ x_n y_n \end{bmatrix} $$ # # #### E o do produto interno de dois vetores de mesmo tamanho $ n $: $$ \mathbf{w} \cdot \mathbf{x} = \sum_{i=1}^n w_i x_i $$ # # #### Em alguns livros você também vê $ \mathbf{w} \cdot \mathbf{x} $ escrito como $ \mathbf{w}^\top \mathbf{x} $ # # #### O tipo Numpy Array suporta essas duas operações, ao utilizar o operador `*` para multiplicar dois vetores ou matrizes, ele executará a multiplicação elemento-a-elemento. Para realizar o produto interno você pode utilizar tanto a função `np.dot()` ou `ndarray.dot()`. Ex.: dados os vetores $x$ e $y$ pode realizar a operação como `np.dot(x,y)` ou `x.dot(y)`. 
# # + # EXERCICIO # A função np.arange(inicio,fim,passo) cria uma lista iniciando em inicio, terminando antes do fim seguindo passo u = np.arange(0, 5, .5) # np.array([0,0.5,1.0,...,4.5]) v = np.arange(5, 10, .5) elementoAelemento = <COMPLETAR> prodInterno = <COMPLETAR> print 'u: {0}'.format(u) print 'v: {0}'.format(v) print '\nelementoAelemento\n{0}'.format(elementoAelemento) print '\nprodInterno\n{0}'.format(prodInterno) # - # TESTE do exercício (2b) assert np.all(elementoAelemento == [ 0., 2.75, 6., 9.75, 14., 18.75, 24., 29.75, 36., 42.75]), "Valores incorretos para elementoAelemento" print "Primeiro teste OK" assert prodInterno==183.75, "Valor incorreto para prodInterno" print "Segundo teste OK" # #### ** (2c) Multiplicação de Matriz ** # # #### A multiplicação de matriz é definida por: # #### $$ [\mathbf{X} \mathbf{Y}]_{i,j} = \sum_{r=1}^n \mathbf{X}_{i,r} \mathbf{Y}_{r,j} $$ # #### Note que o número de colunas da primeira matriz deve ser o mesmo do número de linhas da segunda matriz, representada por $ n $ # # #### No Numpy utilizamo `np.matrix()` quando queremos criar uma matriz a partir de listas do Python. Com esse tipo podemos utilizar o operador `*` para multiplicação de matrizes, `np.multiply()` para multiplicação elemento-a-elemento, `np.matrix.transpose()` ou `.T` para calcular a transposta e `np.linalg.inv()` para calcular a inversa de uma matriz quadrada. 
# + # EXERCICIO from numpy.linalg import inv # agora podemos utilizar o comando inv() sem preceder com np.linalg # Criar uma matriz com listas de listas A = np.matrix([[1,2,3,4],[5,6,7,8]]) print 'A:\n{0}'.format(A) # Imprima a matriz transposta print '\nA transposta:\n{0}'.format(<COMPLETAR>) # Multiplique A por sua Transposta AAt = <COMPLETAR> print '\nAAt:\n{0}'.format(AAt) # Inverta AAt com o comando inv() AAtInv = <COMPLETAR> print '\nAAtInv:\n{0}'.format(AAtInv) # Mostre que a matriz vezes sua inversa é a identidade # .round(n) arredonda os valores para n casas decimais print '\nAAtInv * AAt:\n{0}'.format((<COMPLETAR>).round(4)) # - # TESTE do exercício (2c) assert np.all(AAt == np.matrix([[30, 70], [70, 174]])), "Valores incorretos para AAt" print "Primeiro teste OK" assert np.allclose(AAtInv, np.matrix([[0.54375, -0.21875], [-0.21875, 0.09375]])), "Valor incorreto para AAtInv" print "Segundo teste OK" # #### ** (2d) Slices ** # # #### Nos vetores e matrizes do Numpy podemos selecionar sub-conjuntos de valores durante a indexação. Ex.: # # #### `v[:10]` seleciona os 10 primeiros elementos # #### `v[2:]` seleciona os elementos da terceira posição em diante # #### `v[-5:]` retorna os 5 últimos elementos # #### `v[:-5]` retorna os elementos do começo até o ultimo-5 # #### `v[1:3]` retorna os elementos 1 e 2 # + # EXERCICIO atributos = np.array([1, 2, 3, 4]) print 'atributos:\n{0}'.format(atributos) # Crie uma array com os 3 últimos elementos de atributos ultTres = <COMPLETAR> print '\nÚltimos três:\n{0}'.format(ultTres) # - # TEST do exercício (2d) assert np.all(ultTres == [2, 3, 4]), "Valores incorretos para ultTres" print "Teste OK" # ### ** Parte 3: Programação Funcional ** # #### ** (3a) Funções Anônimas (Lambda) ** # # #### Uma função/expressão lambda é utilizada para definir funções simples com apenas uma instrução. Para isso basta usar a instrução `lambda` seguido da lista de parâmetros de entrada, precedidos por `:` e a expressão a ser executada. 
Por exempo, `lambda x, y: x + y` é uma função anônima que calcula a soma de dois valores. # # #### Expressões lambda geram uma função quando interpretadas pelo Python. Elas são úteis quando precisamos aplicar uma função simples em diversos elementos de uma lista. # # #### Para saber mais sobre Lambdas: [Lambda Functions](http://www.secnetix.de/olli/Python/lambda_functions.hawk), [Lambda Tutorial](https://pythonconquerstheuniverse.wordpress.com/2011/08/29/lambda_tutorial/), and [Python Functions](http://www.bogotobogo.com/python/python_functions_lambda.php). # # #### No exercício abaixo crie uma função lambda que multiplique um valor por 10, atribua a variável designada. # + # EXERCICIO # Lembre-se que: "lambda x, y: x + y" cria uma função que adiciona dois valores mult10 = <COMPLETAR> print mult10(5) # Note that the function still shows its name as <lambda> print '\n', mult10 # - assert mult10(10)==100, "Função incorreta" print "Teste OK" # #### As funções lambdas tem restrições em relação a expressão computada. Essa expressão não pode conter `print` ou incremento `+=`, por exemplo. # # #### Além disso, os parâmetros de entrada podem ser de qualquer tipo, incluindo tuplas e listas. # + p1 = (1,3) p2 = (3,7) euclidiana2D = lambda (x0,y0), (x1,y1): np.sqrt(((x0-x1)**2) + ((y0-y1)**2)) # sqrt é a raíz quadrada print euclidiana2D(p1,p2) # - # #### ** (3b) Lógica Funcional ** # # #### No paradigma funcional trabalhamos com os conceitos de dados imutáveis, ou seja, não existe o conceito de variáveis, uma vez que um valor é designado a um nome, esse valor não pode mudar. # + # Lógica não-funcional a = 0 def inc(): global a a = a + 1 # Lógica funcional def incFn(a): return a+1 # - # #### ** (3c) Funções de Alta Ordem ** # # #### Desse modo, o uso de laços (for, while) é desencorajado e substituídos pela recursividade e funções de alta ordem. Uma função de alta ordem é uma função que recebe uma ou mais funções como parâmetro e retorna uma função. 
# + # Função para somar 3 valores def Soma3(a,b,c): return a+b+c # Função que soma apenas dois valores def Soma2(a,b): return a+b # Soma 3 poderia ser criado a partir de Soma2: Soma3Fn = lambda a,b,c: Soma2(Soma2(a,b),c) # - # #### Esse tipo de função ajuda a criar um código declarativo, em que o próprio código auto-explica o que está sendo feito # # #### Um exemplo interessante é a construção de uma função que retorna outra função. # + # Cria uma função que calcula a eq. do segundo grau no formato ax^2 + bx + c def Eq2grau(a,b,c): def f(x): return a*x**2 + b*x + c return f f = Eq2grau(10,2,1) print f(10) # + # EXERCICIO # Escreva uma função Soma(x) que retorna uma função que recebe um valor y e soma ao x. def Soma(x): <COMPLETAR> Soma2 = lambda a,b: Soma(a)(b) Soma3 = lambda a,b,c: Soma(Soma(a)(b))(c) print Soma2(1,3), Soma3(1,2,3) # - assert Soma3(1,2,3)==6, "Erro na função" print "Ok" # #### ** (3d) Map, Reduce, Filter ** # # #### Essas três funções são utilizadas para transformação de listas no paradigma funcional. Essas funções recebem como parâmetro uma função `f` e uma lista `l`. # # #### Map: aplica a função em cada elemento da lista, gerando uma nova lista # # #### Reduce: aplica a função cumulativamente em pares de elementos da lista, retornando um único valor agregado ao final # # #### Filter: gera uma nova lista contendo os elementos da lista `l` em que a aplicação de `f` returna `True` # # #### Para os próximos exercícios vamos utilizar a classe `FuncionalW` para criar uma sintaxe parecida com a que utilizaremos com o Spark. 
# class FuncionalW(object): def __init__(self, data): self.data = data def map(self, function): """Call `map` on the items in `data` using the provided `function`""" return FuncionalW(map(function, self.data)) def reduce(self, function): """Call `reduce` on the items in `data` using the provided `function`""" return reduce(function, self.data) def filter(self, function): """Call `filter` on the items in `data` using the provided `function`""" return FuncionalW(filter(function, self.data)) def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __getattr__(self, name): return getattr(self.data, name) def __getitem__(self, k): return self.data.__getitem__(k) def __repr__(self): return 'FuncionalW({0})'.format(repr(self.data)) def __str__(self): return 'FuncionalW({0})'.format(str(self.data)) # + # Exemplo de Map # Criaremos uma lista lista = FuncionalW(range(10)) # Criar uma função a ser aplicada nessa lista f = lambda x: x*x # Programação Imperativa resultado1 = FuncionalW([]) for x in lista: resultado1.append(f(x)) print "Resultado: {}".format(resultado1) # Funcional print "Resultado usando Map: {}".format(lista.map(f)) # + # Exemplo de Reduce # Criaremos uma lista lista = FuncionalW(range(1,10)) # Criar uma função a ser aplicada nessa lista f = lambda x,y: x*y # Programação Imperativa produtoria = 1 for x in lista: produtoria = f(produtoria,x) print "Resultado: {}".format(produtoria) # Funcional print "Resultado usando Reduce: {}".format(lista.reduce(f)) # + # EXERCICIO dataset = FuncionalW(range(10)) # Multiplique cada elemento por 5 mapResult = <COMPLETAR> # Filtre eliminando os elementos ímpares # No Python "x % 2" é o resultado do resto da divisão de x por 2 filterResult = <COMPLETAR> # Some os elementos reduceResult = <COMPLETAR> print 'mapResult: {0}'.format(mapResult) print '\nfilterResult: {0}'.format(filterResult) print '\nreduceResult: {0}'.format(reduceResult) # + assert mapResult == FuncionalW([0, 5, 
10, 15, 20, 25, 30, 35, 40, 45]),"Valor incorreto para mapResult" print "Teste 1 OK" assert filterResult == FuncionalW([0, 2, 4, 6, 8]), "Valor incorreto para filterResult" print "Teste 2 OK" assert reduceResult == 45, "Valor incorreto para reduceResult" print "Teste 3 OK" # - # #### Para reduzir o tamanho do código e facilitar a leitura, podemos compor as funções em sequência # + dataset = FuncionalW(range(10)) Soma = (dataset .map(lambda x: x*5) .filter(lambda x: x%2==0) .reduce(lambda x,y: x+y) ) print Soma # + # EXERCICIO # split() divide a string em palavras Texto = FuncionalW("Esse texto tem varias palavras cada linha tem palavras escritas Esse texto esta escrito".split()) # Vamos fazer uma contagem da palavra 'palavras' no texto # Crie uma função lambda que recebe duas entradas e retorna se são iguais ou não Igual = <COMPLETAR> # Crie uma função lambda que utiliza a função Igual para detectar se a entrada é igual a palavra 'palavras' DetectaPalavra = <COMPLETAR> # 1) Filtre as palavras iguais a 'palavras' # 2) Mapeie todos os elementos para o valor 1 # 3) Reduza para a somatória contagem = (Texto .<COMPLETAR> .<COMPLETAR> .<COMPLETAR> ) print "Existem {} ocorrências de 'palavras'".format(contagem) # -
Spark/Lab1_Rev_Python_Numpy_AlgeLin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # ## HCW and Tracer mass on shelf - Barkley import cmocean as cmo import matplotlib.pyplot as plt # %matplotlib inline import numpy as np import pandas as pd import seaborn as sns import xarray as xr import canyon_tools.readout_tools as rout import canyon_tools.savitzky_golay as sg sns.set_context('notebook') sns.set_style('whitegrid') # + # Grid, state and tracers datasets of base case grid_file = '/data/kramosmu/results/TracerExperiments/BARKLEY/run01/gridGlob.nc' grid = xr.open_dataset(grid_file) state_file = '/data/kramosmu/results/TracerExperiments/BARKLEY/run01/stateGlob.nc' state = xr.open_dataset(state_file) ptracers_file = '/data/kramosmu/results/TracerExperiments/BARKLEY/run01/ptracersGlob.nc' ptracers = xr.open_dataset(ptracers_file) ptracers_fileNoC = '/data/kramosmu/results/TracerExperiments/BARKLEY/run02/ptracersGlob.nc' ptracersNoC = xr.open_dataset(ptracers_fileNoC) # + fileLWR = ('/data/kramosmu/results/TracerExperiments/BARKLEY/HCW_TrMass_BARKLEY_run01.csv') fileLWRNoC = ('/data/kramosmu/results/TracerExperiments/BARKLEY/HCW_TrMass_BARKLEY_run02.csv') dfLWR = pd.read_csv(fileLWR) dfLWRnoc = pd.read_csv(fileLWRNoC) # + HCW_list = ['HCW01','HCW02','HCW03','HCW04','HCW05','HCW06','HCW07','HCW08'] Mass_list = ['TrMassHCW01','TrMassHCW02','TrMassHCW03','TrMassHCW04', 'TrMassHCW05','TrMassHCW06','TrMassHCW07','TrMassHCW08'] TotMass_list = ['TotTrMass01','TotTrMass02','TotTrMass03','TotTrMass04', 'TotTrMass05','TotTrMass06','TotTrMass07', 'TotTrMass08'] labels = ['Tr1','Tr2','Tr3','Tr4','Tr5','Tr6','Tr7','Tr8'] colours = ['purple','blue','green','gold','orange','red','orchid','teal'] # + fig,ax = plt.subplots(3,1,figsize=(10,8)) for HCW, TrMass, TotTrMass,lab, col in zip(HCW_list,Mass_list,TotMass_list,labels,colours): if 
lab=='Tr7' or lab=='Tr8': # because they are measured in nmol/l instead of mumol/l ax[0].plot(np.arange(0,19,1)/2.0,(dfLWR[HCW])/1E10,color=col,label=lab) ax[1].plot(np.arange(0,19,1)/2.0,(dfLWR[TrMass])/1E16,color=col,label=lab) ax[2].plot(np.arange(0,19,1)/2.0,(dfLWR[TotTrMass])/dfLWR[TotTrMass][0],color=col,label=lab) else: ax[0].plot(np.arange(0,19,1)/2.0,(dfLWR[HCW])/1E10,color=col,label=lab) ax[1].plot(np.arange(0,19,1)/2.0,(dfLWR[TrMass])/1E13,color=col,label=lab) ax[2].plot(np.arange(0,19,1)/2.0,(dfLWR[TotTrMass])/dfLWR[TotTrMass][0],color=col,label=lab) ax[0].set_ylabel('HCW (10$^{10}$ m$^3$)') ax[1].set_ylabel('Tr Mass (10$^{13}$ $\mu$M m$^3$)') ax[2].set_ylabel('Tr mass/ Initial Tr mass') ax[0].set_xlabel('Days') ax[0].set_title('HCW canyon case') ax[1].set_xlabel('Days') ax[1].set_title('Tr mass in HCW canyon case') ax[2].set_xlabel('Days') ax[2].set_title('Tr mass on shelf canyon case') ax[2].legend(loc=0) plt.tight_layout() # + fig,ax = plt.subplots(3,1,figsize=(10,8)) for HCW, TrMass, TotTrMass,lab, col in zip(HCW_list,Mass_list,TotMass_list,labels,colours): if lab=='Tr7' or lab=='Tr8': ax[0].plot(np.arange(0,19,1)/2.0,(dfLWR[HCW]-dfLWRnoc[HCW])/1E10,color=col,label=lab) ax[1].plot(np.arange(0,19,1)/2.0,(dfLWR[TrMass]-dfLWRnoc[TrMass])/1E16,color=col,label=lab) ax[2].plot(np.arange(0,19,1)/2.0,(dfLWR[TotTrMass]-dfLWRnoc[TotTrMass])/1E16,color=col,label=lab) else: ax[0].plot(np.arange(0,19,1)/2.0,(dfLWR[HCW]-dfLWRnoc[HCW])/1E10,color=col,label=lab) ax[1].plot(np.arange(0,19,1)/2.0,(dfLWR[TrMass]-dfLWRnoc[TrMass])/1E13,color=col,label=lab) ax[2].plot(np.arange(0,19,1)/2.0,(dfLWR[TotTrMass]-dfLWRnoc[TotTrMass])/1E13,color=col,label=lab) ax[0].set_ylabel('HCW (10$^{10}$ m$^3$)') ax[1].set_ylabel('Tr Mass in HCW (10$^{13}$ $\mu$M m$^3$)') ax[2].set_ylabel('Tr mass (10$^{13}$ $\mu$M m$^3$)') ax[0].set_xlabel('Days') ax[0].set_title('HCW-HCWnoc ') ax[1].set_xlabel('Days') ax[1].set_title('TrmassHCW - TrMassHCWnoc ') ax[2].set_xlabel('Days') 
ax[2].set_title('(Tr mass - Tr mass noc)/Tr mass noc (t=0) on shelf') ax[0].legend(loc=2) plt.tight_layout() # + HCW_list = ['HCW01','HCW03','HCW04','HCW05','HCW06','HCW07','HCW08'] Mass_list = ['TrMassHCW01','TrMassHCW03','TrMassHCW04', 'TrMassHCW05','TrMassHCW06','TrMassHCW07','TrMassHCW08'] TotMass_list = ['TotTrMass01','TotTrMass03','TotTrMass04', 'TotTrMass05','TotTrMass06','TotTrMass07','TotTrMass08'] labels = ['Tr1','Tr3','Tr4','Tr5','Tr6','Tr7','Tr8'] colours = ['purple','blue','green','gold','orange','red','orchid'] fig,ax = plt.subplots(3,1,figsize=(10,8)) for HCW, TrMass, TotTrMass,lab, col in zip(HCW_list,Mass_list,TotMass_list,labels,colours): if lab=='Tr7' or lab=='Tr8': ax[0].plot(np.arange(0,19,1)/2.0,(dfLWRnoc[HCW])/1E10,color=col,label=lab) ax[1].plot(np.arange(0,19,1)/2.0,(dfLWRnoc[TrMass])/1E16,color=col,label=lab) ax[2].plot(np.arange(0,19,1)/2.0,(dfLWRnoc[TotTrMass])/1E16,color=col,label=lab) else: ax[0].plot(np.arange(0,19,1)/2.0,(dfLWRnoc[HCW])/1E10,color=col,label=lab) ax[1].plot(np.arange(0,19,1)/2.0,(dfLWRnoc[TrMass])/1E13,color=col,label=lab) ax[2].plot(np.arange(0,19,1)/2.0,(dfLWRnoc[TotTrMass])/1E13,color=col,label=lab) ax[0].set_ylabel('HCW (10$^{10}$ m$^3$)') ax[1].set_ylabel('Tr Mass (10$^{13}$ $\mu$M m$^3$)') ax[2].set_ylabel('Tr mass') ax[0].set_xlabel('Days') ax[0].set_title('HCW no canyon case') ax[1].set_xlabel('Days') ax[1].set_title('Tr mass in HCW no canyon case ') ax[2].set_xlabel('Days') ax[2].set_title('Tr mass on shelf no canyon case ') ax[2].legend(loc=0) plt.tight_layout() # + Z = 20 # m, made up but constant for all tracers tau_v = np.empty(7) tracerList = [ptracers.Tr01.isel(T=0,X=180,Y=50,Z=slice(0,50)), ptracers.Tr03.isel(T=0,X=180,Y=50,Z=slice(0,50)), ptracers.Tr04.isel(T=0,X=180,Y=50,Z=slice(0,50)), ptracers.Tr05.isel(T=0,X=180,Y=50,Z=slice(0,50)), ptracers.Tr06.isel(T=0,X=180,Y=50,Z=slice(0,50)), ptracers.Tr07.isel(T=0,X=180,Y=50,Z=slice(0,50))/1000, ptracers.Tr08.isel(T=0,X=180,Y=50,Z=slice(0,50))/1000,] z = 
-1*grid.Z[:] for c,ii in zip(tracerList,range(7)): dzC = (c[30]-c[28])/(z[30]-z[28]) dz2C = (c[30]-(2*c[29])+c[28])/((z[30]-z[29])*(z[29]-z[28])) tau_v[ii] = -Z*dz2C/dzC tau_v_th = np.array([0,0.138,0.108,0.0543,0.0864,1.315,6.05]) time = state.variables['T'] # + fig,ax = plt.subplots(1,1,figsize=(4,4)) for TrMass, tau, lab, col in zip(Mass_list,tau_v,labels,colours): mass_can_eff = dfLWR[TrMass]-dfLWRnoc[TrMass] PhiTr = np.mean(np.array([(mass_can_eff[ii]-mass_can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (8,15)])) PhiTr_std = np.std(np.array([(mass_can_eff[ii]-mass_can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (8,15)])) ax.errorbar(tau, PhiTr, yerr=PhiTr_std, color=col, label=lab, marker='o') ax.legend(loc=0) ax.set_xlabel(r'$\tau_v$ numerical') ax.set_ylabel('Tr mass in HCW anom ($[C/C_0]$m$^{-3}$)') # + fig,ax = plt.subplots(1,1,figsize=(4,4)) for TrMass, tau, lab, col in zip(Mass_list,tau_v_th,labels,colours): mass_can_eff = dfLWR[TrMass]-dfLWRnoc[TrMass] PhiTr = np.mean(np.array([(mass_can_eff[ii]-mass_can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (8,15)])) PhiTr_std = np.std(np.array([(mass_can_eff[ii]-mass_can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (8,15)])) ax.errorbar(tau, PhiTr, yerr=PhiTr_std, color=col, label=lab, marker='o') ax.legend(loc=0) ax.set_xlabel(r'$\tau_v$ analytical') ax.set_ylabel('Tr mass in HCW anom ($[C/C_0]$m$^{-3}$)') # + fig,ax = plt.subplots(1,1,figsize=(4,4)) for TrMass, tau, lab, col in zip(Mass_list,tau_v,labels,colours): mass_can_eff = dfLWR[TrMass] PhiTr = np.mean(np.array([(mass_can_eff[ii]-mass_can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (8,15)])) PhiTr_std = np.std(np.array([(mass_can_eff[ii]-mass_can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (8,15)])) ax.errorbar(tau, PhiTr, yerr=PhiTr_std, color=col, label=lab, marker='o') ax.legend(loc=0) ax.set_xlabel(r'$\tau_v$ numerical') ax.set_ylabel('Tr mass in HCW canyon ($[C/C_0]$m$^{-3}$)') # + # plot_ini_profiles tracerList = 
[ptracers.Tr01.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr01.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr02.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr02.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr03.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr03.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr04.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr04.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr05.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr05.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr06.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr06.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr07.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr07.isel(T=0,X=180,Y=50,Z=29), ptracers.Tr08.isel(T=0,X=180,Y=50,Z=slice(0,50))/ptracers.Tr08.isel(T=0,X=180,Y=50,Z=29),] labels = ['Linear','Salinty','Oxygen','Nitrate','Silicate','Phosphate','Nitrous Acid','Methane'] colours = ['purple','blue','green','gold','orange','red','orchid','teal'] fig,ax = plt.subplots(1,1,figsize=(4,6)) for tracer,col,lab in zip(tracerList,colours,labels): ax.plot(tracer,grid.Z[0:50],color=col,label=lab) ax.set_xlabel(r'$C/C_{SB}$ (t=0)') ax.set_ylabel('Depth (m)') ax.set_title('Initial profiles') ax.legend(loc=0) # -
NutrientProfiles/Parabolic/HCW_and_TracerMassOnShelf_BARKLEY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # name: python385jvsc74a57bd0b3ba2566441a7c06988d0923437866b63cedc61552a5af99d1f4fb67d367b25f # --- # ## MISSON TO MARS # + # Dependencies import pandas as pd import os import requests import time from bs4 import BeautifulSoup from bs4 import BeautifulSoup as bs from splinter import Browser from webdriver_manager.chrome import ChromeDriverManager # - # + ## NASA Mars News # + ## Setup splinter, executable path, and Chrome web driver browser executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) ## URL of page to scrape url = 'https://redplanetscience.com/' browser.visit(url) ## HTML setup NewsHTML = browser.html NewsSoup = BeautifulSoup(NewsHTML, 'html.parser') # + ## Scrape article headline title and paragraph text/blerb NewsTitle = NewsSoup.find('div', class_ = 'col-md-12').find(class_='content_title').text NewsParagraph = NewsSoup.find('div', class_ = 'col-md-12').find(class_='article_teaser_body').text NewsParagraph = str(NewsParagraph) NewsTitle = str(NewsTitle) print(f"News Title: {NewsTitle}") print(f"Paragraph Blerb: '{NewsParagraph}'") browser.quit() # - # ## JPL Mars Space Images # + ## Setup splinter, executable path, and Chrome web driver browser executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) ## URL of page to scrape url = 'https://spaceimages-mars.com/' browser.visit(url) ## HTML setup JPLMarsHTML = browser.html JPLMarsSoup = BeautifulSoup(JPLMarsHTML, 'html.parser') # + ## Scrape website image path JPLMarsImageHTML = JPLMarsSoup.find('div', class_ = 'header').find(class_='floating_text_area') JPLMarsImageSRC = JPLMarsImageHTML.find('a')['href'] ## Create image url by combining base 
URL with returned image path featured_image_url = f"{url}{JPLMarsImageSRC}" print(featured_image_url) browser.quit() # - # ## Mars Facts # + ## URL of page to scrape url = 'https://galaxyfacts-mars.com/' GalaxyTable_DF = pd.read_html(url) GalaxyTable_DF = GalaxyTable_DF[1] #GalaxyTable_DF.head() GalaxyTable_DF.rename(columns = {0:'MARS DESCRIPTION', 1:'VALUE'}, inplace=True) #GalaxyTable_DF = GalaxyTable_DF.set_index('MARS DESCRIPTION') GalaxyTable_DF # - ## Converting dataframe back into HTML and removing line breaks ('\n') htmlTable = GalaxyTable_DF.to_html().replace('\n',"") htmlTable # # ## Mars Hemispheres # + ## Setup splinter, executable path, and Chrome web driver browser executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) ## URL of page to scrape #Hemisphere_url = 'https://marshemispheres.com/' MarsHemis_url = 'https://marshemispheres.com/' browser.visit(MarsHemis_url) ## HTML setup MarsHemisHTML = browser.html MarsHemisSoup = BeautifulSoup(MarsHemisHTML, 'html.parser') ## Returns the HTML code containing ALL page links/div class MarsHemisphereHTML = MarsHemisSoup.find_all('div', {'class' : 'item'}) ## For Loop and Dictionary Setup links = MarsHemisphereHTML Hemisphere_Image_URLS = [] for link in range(len(links)): ## Code that select a SPECIFIC Hemisphere page based on index (Cerberus, Schiaparelli, Syrtis, Valles) NextMarsHemis = MarsHemisphereHTML[link] ## Scrape for the Hemisphere/image titles on homepage to add to dictionary MarsHemisImageTitle = NextMarsHemis.find('h3').text title = MarsHemisImageTitle #print(title) ## WebScraper command to go down the html page list by clicking the specific/current index "h3" link browser.find_by_tag("h3")[link].click() ## HTML Beautiful soup re-initialization for new hemisphere page MarsHemisHTML = browser.html MarsHemisSoup = BeautifulSoup(MarsHemisHTML, 'html.parser') ## Find Hemisphere partial image path HemisphereImageSRC = 
MarsHemisSoup.find('img', class_ = 'wide-image')['src'] #HemisphereImageSRC ## Create full image url by combining base URL with returned image path FullHemisphereImageURL = f"{MarsHemis_url}{HemisphereImageSRC}" img_url = FullHemisphereImageURL #print(img_url) ## Return back to homepage for next loop step browser.back() ## HTML Beautiful soup re-initialization for homepage to setup next loop MarsHemisHTML = browser.html MarsHemisSoup = BeautifulSoup(MarsHemisHTML, 'html.parser') ### Append scraped Title/Image Url data to dictionary Hemisphere_Image_URLS.append({ "Title": title, "IMG URL": img_url }) ## End of page scrape, next loop iteration ## Close webscrape session browser.quit() ## Check/print dictionary scrape results print("Scrape Complete") print() #Hemisphere_Image_URLS for x in Hemisphere_Image_URLS: for key, value in x.items(): print(f"{key} : {value}") # - # ## End # # # # + ## Test of all variables/outputs into dictionary ScrapeData = {} ScrapeData["News Title"] = NewsTitle ScrapeData["News Paragraph"] = NewsParagraph ScrapeData["Featured Image"] = featured_image_url ScrapeData["Mars Hemispheres"] = Hemisphere_Image_URLS ScrapeData["Mars Facts"] = GalaxyTable_DF ScrapeData["Mars Table HMTL"] = htmlTable ScrapeData # + ## --------------------------------------------------------------- <NAME> ------------------------------------------------- ##
Mission_to_Mars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Writing a Text File from Jupyter Notepad # %%writefile myfile.txt Hello this is a text file this is the second line this is the third line # Errors you may receive my file = open('myfile.txt') myfile = open('my file.txt') # In correct syntax and when the file exists myfile = open('myfile.txt') # To locate the path of your text file pwd myfile = open('myfile.txt') # Methods that can be called myfile.read() # If you attempt to read the same file again you will be returned an empty value as the 'marker' from the previous read is at the eof. Therefore you need to reinitialise the count. myfile.read() myfile.seek(0) myfile.read() # You can save the outut of the text file as a variable by doing the following myfile = open('myfile.txt') myfile.read() myfile.seek(0) contents = myfile.read() contents # To output as a single string in a reader friendy format myfile.seek(0) myfile.readlines() # # FILE LOCATIONS # # If you want to open files at another location you parse in the entire file path # # e.g. # # myfile = open ("c:\\Users\UserName\\Folder\myfile.txt") # # myfile = open ("/Users/UserName/Folder/myfile.txt") # # NB In order to avoid errors you must sometimes first close the already open file. 
# myfile.close() # # To avoid errors # with open ('myfile.txt') as my_new_file: contents = my_new_file.read() contents # # READING AND WRITING FILES # Modes # # mode = 'r' is read only # mode = 'w' is write only and WILL overwrite files or create a new file # mode = 'a' is append only # mode = 'r+' is reading and writing # mode = '+w' is writing and reading only and overwrites existing files or creates a new file with open ('myfile.txt', mode='r') as myfile: contents = myfile.read() contents # # Example of a permission error # with open ('myfile.txt', mode='w') as myfile: contents = myfile.read() # %%writefile my_new_file.txt ONE ON FIRST TWO ON SECOND THREE ON THIRD with open ('my_new_file.txt', mode ='r') as f: print(f.read()) # Adding a new line with open ('my_new_file.txt', mode = 'a') as f: f.write('THIS IS A NEW LINE') with open ('my_new_file.txt', mode ='r') as f: print(f.read()) # # Mode = 'w' # with open ('useanyfilenameyouwish.txt', mode ='w') as f: f.write('This Is Something I Created!') with open ('useanyfilenameyouwish.txt', mode ='r') as f: print(f.read()) #
[Self Help] Python/Notes - Learning Resources/The Basics/[Notes] Basic Input Output.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (fd37) # language: python # name: fd37 # --- # ## 0. Load Data dataname = 'abalone' from scipy.io import loadmat import numpy as np import pandas as pd df = pd.read_csv('../datasets/OD/%s/meta_data/%s.original.csv'%(dataname, dataname)) gt = pd.read_csv('../datasets/OD/%s/meta_data/%s.diff.csv'%(dataname, dataname))['ground.truth'] gt_idx = gt.index.values[gt == 'anomaly'] df.head() len(gt_idx) from profiler.core import * # ## 1. Instantiate Engine # * workers : number of processes # * tol : tolerance for differences when creating training data (set to 0 if data is completely clean) # * eps : error bound for inverse covariance estimation (since we use conservative calculation when determining minimum sample size, we recommend to set eps <= 0.01) # * embedtxt: if set to true, differentiate b/w textual data and categorical data, and use word embedding for the former pf = Profiler(workers=2, tol=0.01, eps=0.05, embedtxt=False) # ## 2. Load Data # * name: any name you like # * src: \[FILE; DF; DB (not implemented)\] # * fpath: required if src == FILE # * df: required if src == DF # * check_param: print parameters used for data loading pf.session.load_data(src=DF, df=df, check_param=True) # ### 2.1 Change Data Types of Attributes # * required input: # * a list of attributes # * a list of data types (must match the order of the attributes; can be CATEGORICAL, NUMERIC, TEXT, DATE) # * optional input: # * a list of regular expression extractor # + # pf.session.change_dtypes(['ProviderNumber', 'ZipCode', 'PhoneNumber', 'State', 'EmergencyService','Score', 'Sample'], # [CATEGORICAL, NUMERIC, CATEGORICAL, TEXT, TEXT, NUMERIC, NUMERIC], # [None, None, None, None, None, r'(\d+)%', r'(\d+)\spatients']) # - # ### 2.2. 
Load/Train Embeddings for TEXT # * path: path to saved/to-save embedding folder # * load: set to true -- load saved vec from 'path'; set to false -- train locally # * save: (only for load = False) save trained vectors to 'path' # + #pf.session.load_embedding(save=True, path='data/hospital/', load=True) # - # ## 3. Load Training Data # * multiplier: if set to None, will infer the minimal sample size; otherwise, it will create (# samples) * (# attributes) * (multiplier) training samples pf.session.load_training_data(multiplier = None) # ## 4. Learn Structure # * sparsity: intensity of L1-regularizer in inverse covariance estimation (glasso) # * take_neg: if set to true, consider equal -> equal only autoregress_matrix = pf.session.learn_structure(sparsity=0, infer_order=True) # * score: # * "fit_error": mse for fitting y = B'X + c for each atttribute y # * "training_data_fd_vio_ratio": the higher the score, the more violations of FDs in the training data. (bounded: \[0,1\]) parent_sets = pf.session.get_dependencies(score="fit_error") # ## 5. Visualization pf.session.visualize_covariance() pf.session.visualize_inverse_covariance() pf.session.visualize_autoregression() pf.session.timer.get_stat() from profiler.app.od import * # + # detector = ScikitDetector(pf.session.ds.df, attr=pf.session.ds.dtypes, method="isf", gt_idx=gt_idx, min_neighbors=20) # detector.run_all(parent_sets) # detector.evaluate() # - detector2 = ScikitDetector(pf.session.ds.df, attr=pf.session.ds.dtypes, method="ocsvm", gt_idx=gt_idx, nu=0.1, gamma='auto', min_neighbors=50, knn=False, tol=0.01) detector2.run_all(parent_sets) detector2.evaluate(t=0.1) detector2.view_neighbor_info()
tutorials/Tutorial5_OD_abalone.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import math import cv2 from matplotlib import pyplot as plt import spectrometer_functions as sf import numpy as np # # Taking pictures with your camera test_img = cv2.imread("waiting_for_the_bus.jpg") test_img_rgb = cv2.cvtColor(test_img,cv2.COLOR_BGR2RGB) plt.imshow(test_img_rgb) test_img[1215] test_img_rgb[2500,500] # # Angular FOV of camera # # Measuring the FOV by photographing an object of known width at known distance. width = 150 distance = 114 angle = 2 * math.atan(width / (2 * distance)) print(math.degrees(angle)) # ## Working out pixels per radian fov_image = cv2.imread("window.jpg") height_px , width_px = fov_image.shape radians_per_pixel = angle / width_px plt.imshow(fov_image[500:510,1000:1010]) print(np.matrix(fov_image[500:510,1000:1010,2])) # # Read in a spectrum photo s1 = cv2.imread("spec-ibl-auto.jpg") plt.imshow(s1) b1 = [p[0] for p in s1[1452]] g1 = [p[1] for p in s1[1452]] r1 = [p[2] for p in s1[1452]] wvlb,sb1 = sf.get_spectrum(b1,1e-6,radians_per_pixel) wvlg,sg1 = sf.get_spectrum(g1,1e-6,radians_per_pixel) wvlr,sr1 = sf.get_spectrum(r1,1e-6,radians_per_pixel) plt.plot(wvlb,sb1,'b-') plt.plot(wvlg,sg1,'g-') plt.plot(wvlr,sr1,'r-') sgrey = [float(r) + float(g) + float(b) for r,g,b in zip(sr1,sg1,sb1)] wvlgrey = [w1 for w1,w2,w3 in zip(wvlr,wvlg,wvlb)] plt.plot(wvlgrey,sgrey)
sw-notebook-notes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Improving the chapter 5 classifier. # > Taking the fastbook chapter 5 challenge. # # - toc: true # - badges: true # - comments: true # - categories: [jupyter, exercise] # - image: images/chart-preview.png # + pycharm={"name": "#%%\n"} import fastbook fastbook.setup_book() # + pycharm={"name": "#%%\n"} from fastbook import * from fastai.vision.widgets import * # + pycharm={"name": "#%%\n"} import torch torch.cuda.empty_cache() # + pycharm={"name": "#%%\n"} import pandas as pd import numpy as np # + pycharm={"name": "#%%\n"} from fastai.vision.all import * path = untar_data(URLs.PETS) # + pycharm={"name": "#%%\n"} Path.BASE_PATH = path # + pycharm={"name": "#%%\n"} pets = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(seed=42), get_y=using_attr(RegexLabeller(r'(.+)_\d+.jpg$'), 'name'), item_tfms=Resize(460), batch_tfms=aug_transforms(size=224, min_scale=0.75)) dls = pets.dataloaders(path/"images", num_workers=0) # + pycharm={"name": "#%%\n"} from fastai.callback.fp16 import * learn = cnn_learner(dls, resnet34, metrics=error_rate).to_fp16() # + pycharm={"name": "#%%\n"} learn.lr_find() # + pycharm={"name": "#%%\n"} learn.fine_tune(48, freeze_epochs=3) # + pycharm={"name": "#%%\n"} learn.recorder.plot_loss() # + pycharm={"name": "#%%\n"} learn.recorder.plot_lr_find() # + pycharm={"name": "#%%\n"} learn.recorder.plot_sched()
_notebooks/2021-03-09-Improving-the-chapter-5-classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import time import os import re import copy import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as stats import statsmodels.api as sm import statsmodels.formula.api as smf import math # # ExpW frame = pd.read_csv('ExpW_OpenFace_result_with_expression_gender_race_age.csv') frame.head() print(list(frame.columns)) frame[['AU04_c', 'AU05_c', 'AU07_c', 'AU23_c']].corr() frame[['AU04_r', 'AU05_r', 'AU07_r', 'AU23_r']].corr() # # ExpW expression label: # - "0" "angry" # - "1" "disgust" # - "2" "fear" # - "3" "happy" # - "4" "sad" # - "5" "surprise" # - "6" "neutral" frame.loc[frame['Expression'] == 0, 'Expression'] = 'angry' frame.loc[frame['Expression'] == 1, 'Expression'] = 'disgust' frame.loc[frame['Expression'] == 2, 'Expression'] = 'fear' frame.loc[frame['Expression'] == 3, 'Expression'] = 'happy' frame.loc[frame['Expression'] == 4, 'Expression'] = 'sad' frame.loc[frame['Expression'] == 5, 'Expression'] = 'surprise' frame.loc[frame['Expression'] == 6, 'Expression'] = 'neutral' frame.Expression.value_counts() frame['Angry'] = (frame['Expression'] =='angry').astype(int) frame['Disgust'] = (frame['Expression'] == 'disgust').astype(int) frame['Fear'] = (frame['Expression'] == 'fear').astype(int) frame['Happy'] = (frame['Expression'] =='happy').astype(int) frame['Sad'] = (frame['Expression'] == 'sad').astype(int) frame['Surprise'] = (frame['Expression'] == 'surprise').astype(int) frame['Neutral'] = (frame['Expression'] == 'neutral').astype(int) frame = frame.dropna() frame.shape # # Using the optimal threshold to re-create the AU presence # + AU_intensity_OpenFace_common2 = ['AU01_r', 'AU02_r', 'AU04_r', 'AU05_r', 'AU06_r', 'AU09_r', 'AU10_r', 'AU12_r', 'AU15_r', 'AU17_r', 'AU20_r', 'AU25_r', 
'AU26_r'] optimal_threshold = np.array([4.4, 4.8, 2.4, 3.7, 1.8, 2.8, 0.2, 1.3, 1.3, 5. , 5. , 0.7, 5. ]) for i in range(len(AU_intensity_OpenFace_common2)): frame[AU_intensity_OpenFace_common2[i] + "_c"] = frame[AU_intensity_OpenFace_common2[i]].apply(lambda x: 0 if x <= optimal_threshold[i] else 1) frame.shape # - print(list(frame.columns)) frame['AU4_5_7_23_c'] = frame.AU04_c.apply(int).apply(str) + frame.AU05_c.apply(int).apply(str) + frame.AU07_c.apply(int).apply(str) + frame.AU23_c.apply(int).apply(str) frame.AU4_5_7_23_c.value_counts() frame['AU4_5_7_23_r_c'] = frame.AU04_r_c.apply(str) + frame.AU05_r_c.apply(str) + frame.AU07_c.apply(int).apply(str) + frame.AU23_c.apply(int).apply(str) frame.AU4_5_7_23_r_c.value_counts() # # P(expression|AU1, male) =? P(expression|AU1, female) male_frame = frame.loc[frame['gender_preds'] == 'Male'] female_frame = frame.loc[frame['gender_preds'] == 'Female'] print(male_frame.shape) print(female_frame.shape) # ## Anger # #### First check the simple proportion of male angry vs female angry: print(male_frame.Angry.mean()) print(female_frame.Angry.mean()) print(male_frame.AU04_r_c.mean()) print(female_frame.AU04_r_c.mean()) print(male_frame.AU05_r_c.mean()) print(female_frame.AU05_r_c.mean()) print(male_frame.AU07_c.mean()) print(female_frame.AU07_c.mean()) print(male_frame.AU23_c.mean()) print(female_frame.AU23_c.mean()) print((male_frame.AU4_5_7_23_c=='1111').mean()) print((female_frame.AU4_5_7_23_c=='1111').mean()) print(1-(male_frame.AU4_5_7_23_c=='0000').mean()) print(1-(female_frame.AU4_5_7_23_c=='0000').mean()) # #### Now check for conditional probability, controlled by AU: result_df = pd.concat([pd.crosstab(male_frame.AU04_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU04_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = 
pd.crosstab(frame[frame.AU04_r_c == index].gender_preds, frame[frame.AU04_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU04_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU05_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU05_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU05_r_c == index].gender_preds, frame[frame.AU05_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU05_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = 
pd.concat([pd.crosstab(male_frame.AU07_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU07_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU07_c == index].gender_preds, frame[frame.AU07_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU07_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU23_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU23_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU23_c == index].gender_preds, frame[frame.AU23_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU23_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if 
(expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU4_5_7_23_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU4_5_7_23_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU4_5_7_23_r_c == index].gender_preds, frame[frame.AU4_5_7_23_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU4_5_7_23_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df # + def proportion_calc(intensity_scores, angry, interval_length = 0.05, threshold = 25): min_score = min(intensity_scores) max_score = max(intensity_scores) multiple = math.ceil(max_score / interval_length) intensities = [] proportions = [] for k in range(multiple): interval_floor = k * interval_length interval_ceil = (k+1) * interval_length if intensity_scores[(intensity_scores >= interval_floor) & (intensity_scores < interval_ceil)].shape[0] >= threshold: intensities.append(intensity_scores[(intensity_scores >= interval_floor) & 
(intensity_scores < interval_ceil)].mean()) proportions.append(angry[(intensity_scores >= interval_floor) & (intensity_scores < interval_ceil)].mean()) return (intensities, proportions) intensities_m, proportions_m = proportion_calc(male_frame.AU04_r, male_frame.Angry, 0.035, 30) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU04_r, female_frame.Angry, 0.035, 30) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU4 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU4) for ExpW', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(0, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('ExpW_AU4.pdf') plt.show() # + intensities_m, proportions_m = proportion_calc(male_frame.AU05_r, male_frame.Angry, 0.05, 140) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU05_r, female_frame.Angry, 0.05, 140) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU5 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU5) for ExpW', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(0, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('ExpW_AU5.pdf') plt.show() # + intensities_m, proportions_m = proportion_calc(male_frame.AU07_r, male_frame.Angry, 0.08, 100) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU07_r, female_frame.Angry, 0.08, 100) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, 
c='#089BDF')#,edgecolors='k') plt.xlabel('AU7 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU7) for ExpW', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(0, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('ExpW_AU7.pdf') plt.show() # + intensities_m, proportions_m = proportion_calc(male_frame.AU23_r, male_frame.Angry, 0.02, 110) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU23_r, female_frame.Angry, 0.02, 110) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU23 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU23) for ExpW', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(0 - 0.01, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('ExpW_AU23.pdf') plt.show() # - # To formally show that there is a significant difference between the two logistic lines, we need logistic regression: formula = 'Angry ~ AU04_r + AU05_r + AU07_r + AU23_r + gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) formula = 'Angry ~ (AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) formula = 'Angry ~ I(AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) # # RAF-DB frame = pd.read_csv('RAFDB_OpenFace_result_with_Emotion_and_Gender.csv') print(frame.shape) frame.head() # 1: Surprise # 2: Fear # 3: Disgust # 4: Happiness # 5: Sadness # 6: Anger # 7: Neutral frame['Angry'] = frame.Emotion.apply(lambda x: (x == 6)*1) frame 
print(list(frame.columns)) frame[['AU04_c', 'AU05_c', 'AU07_c', 'AU23_c']].corr() frame[['AU04_r', 'AU05_r', 'AU07_r', 'AU23_r']].corr() frame = frame.rename(columns={'Gender': 'gender_preds'}) frame = frame.dropna() frame.shape frame['AU4_5_7_23_c'] = frame.AU04_c.apply(int).apply(str) + frame.AU05_c.apply(int).apply(str) + frame.AU07_c.apply(int).apply(str) + frame.AU23_c.apply(int).apply(str) frame.AU4_5_7_23_c.value_counts() frame['AU4_5_7_23_r_c'] = frame.AU04_r_c.apply(str) + frame.AU05_r_c.apply(str) + frame.AU07_c.apply(int).apply(str) + frame.AU23_c.apply(int).apply(str) frame.AU4_5_7_23_r_c.value_counts() # # P(expression|AU1, male) =? P(expression|AU1, female) frame.gender_preds = frame.gender_preds.apply(lambda x: 'Male' if x == 0 else 'Female') male_frame = frame.loc[frame['gender_preds'] == 'Male'] female_frame = frame.loc[frame['gender_preds'] == 'Female'] print(male_frame.shape) print(female_frame.shape) # ## Anger # #### First check the simple proportion of male angry vs female angry: print(male_frame.Angry.mean()) print(female_frame.Angry.mean()) print(male_frame.AU04_r_c.mean()) print(female_frame.AU04_r_c.mean()) print(male_frame.AU05_r_c.mean()) print(female_frame.AU05_r_c.mean()) print(male_frame.AU07_c.mean()) print(female_frame.AU07_c.mean()) print(male_frame.AU23_c.mean()) print(female_frame.AU23_c.mean()) print((male_frame.AU4_5_7_23_c=='1111').mean()) print((female_frame.AU4_5_7_23_c=='1111').mean()) print(1-(male_frame.AU4_5_7_23_c=='0000').mean()) print(1-(female_frame.AU4_5_7_23_c=='0000').mean()) # #### Now check for conditional probability, controlled by AU: result_df = pd.concat([pd.crosstab(male_frame.AU04_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU04_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU04_r_c == 
index].gender_preds, frame[frame.AU04_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU04_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU05_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU05_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU05_r_c == index].gender_preds, frame[frame.AU05_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU05_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU07_c, male_frame.Angry, 
normalize = 'index')[1], pd.crosstab(female_frame.AU07_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU07_c == index].gender_preds, frame[frame.AU07_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU07_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU23_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU23_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU23_c == index].gender_preds, frame[frame.AU23_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU23_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = 
(((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU4_5_7_23_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU4_5_7_23_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU4_5_7_23_r_c == index].gender_preds, frame[frame.AU4_5_7_23_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU4_5_7_23_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df # + intensities_m, proportions_m = proportion_calc(male_frame.AU04_r, male_frame.Angry, 0.035, 25) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU04_r, female_frame.Angry, 0.045, 30) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU4 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU4) for RAF-DB', fontsize = 18) 
plt.legend(fontsize = 16) plt.ylim(0, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('RAFDB_AU4.pdf') plt.show() # + intensities_m, proportions_m = proportion_calc(male_frame.AU05_r, male_frame.Angry, 0.03, 20) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU05_r, female_frame.Angry, 0.03, 20) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU5 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU5) for RAF-DB', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(0.005, 0.16) plt.savefig('RAFDB_AU5.pdf') plt.show() # + intensities_m, proportions_m = proportion_calc(male_frame.AU07_r, male_frame.Angry, 0.07, 30) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU07_r, female_frame.Angry, 0.06, 30) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU7 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU7) for RAF-DB', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(-0.01, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('RAFDB_AU7.pdf') plt.show() # + intensities_m, proportions_m = proportion_calc(male_frame.AU23_r, male_frame.Angry, 0.012, 20) plt.scatter(intensities_m, proportions_m, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') intensities_f, proportions_f = proportion_calc(female_frame.AU23_r, female_frame.Angry, 0.015, 20) plt.scatter(intensities_f, proportions_f, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU23 intensity', fontsize = 16) plt.ylabel('Proportion of 
"Angry" Labels', fontsize = 16) plt.title('P(Angry|AU23) for RAF-DB', fontsize = 18) plt.legend(fontsize = 16) plt.ylim(0.005, np.max(proportions_m + proportions_f) + 0.02) plt.savefig('RAFDB_AU23.pdf') plt.show() # - # To formally show that there is a significant difference between the two logistic lines, we need logistic regression: formula = 'Angry ~ AU04_r + AU05_r + AU07_r + AU23_r + gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) formula = 'Angry ~ (AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) formula = 'Angry ~ I(AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) # # AffectNet Manual # + active="" # Emotion categories: # Eleven annotated emotions are provided for images and indexed as follows: # 0: Neutral, 1: Happiness, 2: Sadness, 3: Surprise, 4: Fear, 5: Disgust, 6: Anger, # 7: Contempt, 8: None, 9: Uncertain, 10: No-Face # - frame = pd.read_csv('AffectNet_Manual_OpenFace_result_with_expression_gender_race_age.csv') print(frame.shape) frame.head() frame['Angry'] = (frame.Expression == 6).astype(int) print(list(frame.columns)) frame[['AU04_c', 'AU05_c', 'AU07_c', 'AU23_c']].corr() frame[['AU04_r', 'AU05_r', 'AU07_r', 'AU23_r']].corr() frame = frame.dropna() frame.shape frame['AU4_5_7_23_c'] = frame.AU04_c.apply(int).apply(str) + frame.AU05_c.apply(int).apply(str) + frame.AU07_c.apply(int).apply(str) + frame.AU23_c.apply(int).apply(str) frame.AU4_5_7_23_c.value_counts() frame['AU4_5_7_23_r_c'] = frame.AU04_r_c.apply(str) + frame.AU05_r_c.apply(str) + frame.AU07_c.apply(int).apply(str) + frame.AU23_c.apply(int).apply(str) frame.AU4_5_7_23_r_c.value_counts() # # P(expression|AU1, male) =? 
P(expression|AU1, female) male_frame = frame.loc[frame['gender_preds'] == 'Male'] female_frame = frame.loc[frame['gender_preds'] == 'Female'] print(male_frame.shape) print(female_frame.shape) # ## Anger # #### First check the simple proportion of male angry vs female angry: print(male_frame.Angry.mean()) print(female_frame.Angry.mean()) print(male_frame.AU04_r_c.mean()) print(female_frame.AU04_r_c.mean()) print(male_frame.AU05_r_c.mean()) print(female_frame.AU05_r_c.mean()) print(male_frame.AU07_c.mean()) print(female_frame.AU07_c.mean()) print(male_frame.AU23_c.mean()) print(female_frame.AU23_c.mean()) print((male_frame.AU4_5_7_23_c=='1111').mean()) print((female_frame.AU4_5_7_23_c=='1111').mean()) print(1-(male_frame.AU4_5_7_23_c=='0000').mean()) print(1-(female_frame.AU4_5_7_23_c=='0000').mean()) # #### Now check for conditional probability, controlled by AU: result_df = pd.concat([pd.crosstab(male_frame.AU04_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU04_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU04_r_c == index].gender_preds, frame[frame.AU04_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU04_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] 
# NOTE(review): the original newlines in this region were collapsed by whatever
# produced this file; the cells below are a reconstruction, reformatted as valid
# Python and de-duplicated into helper functions.  The stray fragment
# "= p_value" that opened this region completed a statement begun on the
# preceding (unseen) line; its logic is subsumed by gender_chi_square_by_au().


def gender_chi_square_by_au(frame, male_frame, female_frame, au_col, outcome='Angry'):
    """Compare P(outcome=1 | AU level) between genders, one chi-square test per level.

    For every level of `au_col`, builds a 2x2 gender-by-outcome contingency
    table and runs a hand-rolled 1-df chi-square test of independence.  The
    test is skipped (NaN) whenever any expected cell count is below 5, the
    usual rule of thumb for the validity of the chi-square approximation.

    Returns a DataFrame indexed by AU level with columns
    ['male', 'female', 'diff', 'p-value'].
    """
    result_df = pd.concat(
        [pd.crosstab(male_frame[au_col], male_frame[outcome], normalize='index')[1],
         pd.crosstab(female_frame[au_col], female_frame[outcome], normalize='index')[1]],
        axis=1)
    result_df.columns = ['male', 'female']
    result_df['diff'] = result_df.female - result_df.male

    for level in result_df.index:
        sub = frame[frame[au_col] == level]
        cross_tab = pd.crosstab(sub.gender_preds, sub[outcome], margins=True)
        cross_tab.columns = ["Not Angry", "Angry", "row_totals"]
        cross_tab.index = ["Female", "Male", "col_totals"]
        observed = cross_tab.iloc[0:2, 0:2]  # table without the margin row/column
        expected = pd.DataFrame(
            np.outer(cross_tab["row_totals"][0:2],
                     cross_tab.loc["col_totals"][0:2]) / len(sub),
            columns=["Not Angry", "Angry"], index=["Female", "Male"])
        if (expected >= 5).sum().sum() == 4:  # all four expected counts >= 5
            chi_squared_stat = (((observed - expected) ** 2) / expected).sum().sum()
            p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1)
        else:
            chi_squared_stat = np.nan
            p_value = np.nan
        print(chi_squared_stat)
        result_df.loc[level, "p-value"] = p_value
    return result_df


def plot_angry_proportion(male_frame, female_frame, au_col, m_args, f_args,
                          au_label, dataset, save_name,
                          ylim_bottom=0.005, xlim=None):
    """Scatter P(Angry) against AU intensity for each gender and save as PDF.

    `m_args` / `f_args` are the two numeric arguments forwarded to
    proportion_calc(), which is defined earlier in this notebook (not visible
    in this chunk) -- presumably a bin width and a minimum sample count
    returning (intensities, proportions); TODO confirm against its definition.
    """
    ints_m, props_m = proportion_calc(male_frame[au_col], male_frame.Angry, *m_args)
    plt.scatter(ints_m, props_m, label="Male", marker="p", alpha=0.8, s=30, c='r')
    ints_f, props_f = proportion_calc(female_frame[au_col], female_frame.Angry, *f_args)
    plt.scatter(ints_f, props_f, label="Female", marker="*", alpha=0.8, s=30, c='#089BDF')
    plt.xlabel('%s intensity' % au_label, fontsize=16)
    plt.ylabel('Proportion of "Angry" Labels', fontsize=16)
    plt.title('P(Angry|%s) for %s' % (au_label, dataset), fontsize=18)
    plt.legend(fontsize=16)
    if xlim is not None:
        plt.xlim(*xlim)
    plt.ylim(ylim_bottom, np.max(props_m + props_f) + 0.02)
    plt.savefig(save_name)
    plt.show()


def fit_angry_glms(frame):
    """Logistic regressions testing whether gender shifts the AU -> Angry mapping.

    Three parameterisations: additive gender effect, full AU-by-gender
    interactions, and a single summed-AU score (I(...)) interacted with gender.
    """
    for formula in ('Angry ~ AU04_r + AU05_r + AU07_r + AU23_r + gender_preds',
                    'Angry ~ (AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds',
                    'Angry ~ I(AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds'):
        result = smf.glm(formula=formula, data=frame,
                         family=sm.families.Binomial()).fit()
        print(result.summary())


def add_au_combination_columns(frame):
    """Add '0101'-style signature columns for the AU4/AU5/AU7/AU23 quartet.

    AU4_5_7_23_c   : presence bits for all four AUs.
    AU4_5_7_23_r_c : binned-intensity codes for AU4/AU5, presence bits for
                     AU7/AU23 (mirrors the original cells exactly).
    """
    frame['AU4_5_7_23_c'] = (frame.AU04_c.apply(int).apply(str)
                             + frame.AU05_c.apply(int).apply(str)
                             + frame.AU07_c.apply(int).apply(str)
                             + frame.AU23_c.apply(int).apply(str))
    frame['AU4_5_7_23_r_c'] = (frame.AU04_r_c.apply(str)
                               + frame.AU05_r_c.apply(str)
                               + frame.AU07_c.apply(int).apply(str)
                               + frame.AU23_c.apply(int).apply(str))
    return frame


def print_gender_summaries(male_frame, female_frame):
    """Marginal male/female rates of anger and of each AU activation."""
    for col in ('Angry', 'AU04_r_c', 'AU05_r_c', 'AU07_c', 'AU23_c'):
        print(male_frame[col].mean())
        print(female_frame[col].mean())
    print((male_frame.AU4_5_7_23_c == '1111').mean())
    print((female_frame.AU4_5_7_23_c == '1111').mean())
    print(1 - (male_frame.AU4_5_7_23_c == '0000').mean())
    print(1 - (female_frame.AU4_5_7_23_c == '0000').mean())


# Remaining conditional-probability tables for AffectNet-Manual.
result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU05_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU07_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU23_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU4_5_7_23_r_c')
result_df

# + P(Angry | AU intensity) scatter plots for AffectNet-Manual.
plot_angry_proportion(male_frame, female_frame, 'AU04_r', (0.02, 35), (0.02, 25),
                      'AU4', 'AffectNet-Manual', 'AffectNet_Manual_AU4.pdf',
                      ylim_bottom=0 - 0.01)
plot_angry_proportion(male_frame, female_frame, 'AU05_r', (0.03, 10), (0.045, 12),
                      'AU5', 'AffectNet-Manual', 'AffectNet_Manual_AU5.pdf')
plot_angry_proportion(male_frame, female_frame, 'AU07_r', (0.037, 50), (0.048, 45),
                      'AU7', 'AffectNet-Manual', 'AffectNet_Manual_AU7.pdf')
plot_angry_proportion(male_frame, female_frame, 'AU23_r', (0.04, 30), (0.03, 15),
                      'AU23', 'AffectNet-Manual', 'AffectNet_Manual_AU23.pdf')
# -

# ##### To formally show that there is a significant difference between the
# ##### two logistic lines, we need logistic regression:
fit_angry_glms(frame)

# # AffectNet Automatic
frame = pd.read_csv('AffectNet_Automatic_OpenFace_result_with_expression_gender_race_age.csv')
print(frame.shape)
frame.head()

frame['Angry'] = (frame.Expression == 6).astype(int)  # code 6 == anger in this dataset
print(list(frame.columns))
frame[['AU04_c', 'AU05_c', 'AU07_c', 'AU23_c']].corr()
frame[['AU04_r', 'AU05_r', 'AU07_r', 'AU23_r']].corr()
frame = frame.dropna()
frame.shape

frame = add_au_combination_columns(frame)
frame.AU4_5_7_23_c.value_counts()
frame.AU4_5_7_23_r_c.value_counts()

# # P(expression|AU1, male) =? P(expression|AU1, female)
male_frame = frame.loc[frame['gender_preds'] == 'Male']
female_frame = frame.loc[frame['gender_preds'] == 'Female']
print(male_frame.shape)
print(female_frame.shape)

# ## Anger
# #### First check the simple proportion of male angry vs female angry:
print_gender_summaries(male_frame, female_frame)

# #### Now check for conditional probability, controlled by AU:
result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU04_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU05_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU07_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU23_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU4_5_7_23_r_c')
result_df

# + P(Angry | AU intensity) scatter plots for AffectNet-Automatic.
plot_angry_proportion(male_frame, female_frame, 'AU04_r', (0.02, 35), (0.02, 28),
                      'AU4', 'AffectNet-Automatic', 'AffectNet_Automatic_AU4.pdf',
                      ylim_bottom=0 - 0.01)
plot_angry_proportion(male_frame, female_frame, 'AU05_r', (0.03, 10), (0.045, 12),
                      'AU5', 'AffectNet-Automatic', 'AffectNet_Automatic_AU5.pdf',
                      xlim=(-0.1, 4))
plot_angry_proportion(male_frame, female_frame, 'AU07_r', (0.04, 57), (0.04, 50),
                      'AU7', 'AffectNet-Automatic', 'AffectNet_Automatic_AU7.pdf',
                      ylim_bottom=0.003)
plot_angry_proportion(male_frame, female_frame, 'AU23_r', (0.022, 40), (0.03, 15),
                      'AU23', 'AffectNet-Automatic', 'AffectNet_Automatic_AU23.pdf')
# -

# To formally show that there is a significant difference between the two
# logistic lines, we need logistic regression:
fit_angry_glms(frame)

# # KDEF
# KDEF file-name coding (e.g. AF01ANFL.JPG): chars 1-4 = session/gender/identity,
# chars 5-6 = expression (AN = angry, AF = afraid, DI, HA, NE, SA, SU),
# chars 7-8 = camera angle (S = straight, HL/FL/HR/FR = profiles).
frame = pd.read_csv('KDEF_OpenFace_result_with_Emotion_and_Gender.csv')
print(frame.shape)
frame.head()

frame['Angry'] = frame.ImageName.apply(lambda x: x[4:6] == 'AN').astype(int)
frame = frame[frame.ImageName.apply(lambda x: x[6] == 'S')]  # straight-on shots only
frame.shape
print(list(frame.columns))
frame[['AU04_c', 'AU05_c', 'AU07_c', 'AU23_c']].corr()
frame[['AU04_r', 'AU05_r', 'AU07_r', 'AU23_r']].corr()
frame = frame.rename(columns={'Gender': 'gender_preds'})
frame = frame.dropna()
frame.shape

frame = add_au_combination_columns(frame)
frame.AU4_5_7_23_c.value_counts()
frame.AU4_5_7_23_r_c.value_counts()
# NOTE(review): original newlines in this region were collapsed; the cells
# below are a reconstruction, reformatted as valid Python and de-duplicated.
# Helpers are (re)defined here so this section is self-contained; redefinition
# is harmless if an identical copy already ran earlier in the notebook.


def gender_chi_square_by_au(frame, male_frame, female_frame, au_col, outcome='Angry'):
    """Compare P(outcome=1 | AU level) between genders, one chi-square test per level.

    For every level of `au_col`, builds a 2x2 gender-by-outcome contingency
    table and runs a hand-rolled 1-df chi-square test of independence.  The
    test is skipped (NaN) whenever any expected cell count is below 5, the
    usual rule of thumb for the validity of the chi-square approximation.

    Returns a DataFrame indexed by AU level with columns
    ['male', 'female', 'diff', 'p-value'].
    """
    result_df = pd.concat(
        [pd.crosstab(male_frame[au_col], male_frame[outcome], normalize='index')[1],
         pd.crosstab(female_frame[au_col], female_frame[outcome], normalize='index')[1]],
        axis=1)
    result_df.columns = ['male', 'female']
    result_df['diff'] = result_df.female - result_df.male

    for level in result_df.index:
        sub = frame[frame[au_col] == level]
        cross_tab = pd.crosstab(sub.gender_preds, sub[outcome], margins=True)
        cross_tab.columns = ["Not Angry", "Angry", "row_totals"]
        cross_tab.index = ["Female", "Male", "col_totals"]
        observed = cross_tab.iloc[0:2, 0:2]  # table without the margin row/column
        expected = pd.DataFrame(
            np.outer(cross_tab["row_totals"][0:2],
                     cross_tab.loc["col_totals"][0:2]) / len(sub),
            columns=["Not Angry", "Angry"], index=["Female", "Male"])
        if (expected >= 5).sum().sum() == 4:  # all four expected counts >= 5
            chi_squared_stat = (((observed - expected) ** 2) / expected).sum().sum()
            p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1)
        else:
            chi_squared_stat = np.nan
            p_value = np.nan
        print(chi_squared_stat)
        result_df.loc[level, "p-value"] = p_value
    return result_df


def plot_angry_proportion_by_value(male_frame, female_frame, au_col, au_label,
                                   dataset, min_count=25):
    """Scatter P(Angry) against every raw AU intensity value seen >= min_count times.

    Used for the smaller lab datasets (KDEF, Chicago Face) where the original
    cells enumerated distinct intensity values instead of binning via
    proportion_calc().
    """
    for sub, label, marker, color in ((male_frame, "Male", "p", 'r'),
                                      (female_frame, "Female", "*", '#089BDF')):
        counts = sub[au_col].value_counts()
        values = list(counts[counts >= min_count].index)
        proportions = [sub.loc[sub[au_col] == v].Angry.mean() for v in values]
        plt.scatter(values, proportions, label=label, marker=marker,
                    alpha=0.8, s=30, c=color)
    plt.xlabel('%s intensity' % au_label, fontsize=16)
    plt.ylabel('Proportion of "Angry" Labels', fontsize=16)
    plt.title('P(Angry|%s) for %s' % (au_label, dataset), fontsize=18)
    plt.legend(fontsize=16)
    plt.show()


def fit_angry_glms(frame):
    """Logistic regressions testing whether gender shifts the AU -> Angry mapping."""
    for formula in ('Angry ~ AU04_r + AU05_r + AU07_r + AU23_r + gender_preds',
                    'Angry ~ (AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds',
                    'Angry ~ I(AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds'):
        result = smf.glm(formula=formula, data=frame,
                         family=sm.families.Binomial()).fit()
        print(result.summary())


def add_au_combination_columns(frame):
    """Add '0101'-style signature columns for the AU4/AU5/AU7/AU23 quartet."""
    frame['AU4_5_7_23_c'] = (frame.AU04_c.apply(int).apply(str)
                             + frame.AU05_c.apply(int).apply(str)
                             + frame.AU07_c.apply(int).apply(str)
                             + frame.AU23_c.apply(int).apply(str))
    # _r_c variant: binned-intensity codes for AU4/AU5, presence bits for AU7/AU23.
    frame['AU4_5_7_23_r_c'] = (frame.AU04_r_c.apply(str)
                               + frame.AU05_r_c.apply(str)
                               + frame.AU07_c.apply(int).apply(str)
                               + frame.AU23_c.apply(int).apply(str))
    return frame


def print_gender_summaries(male_frame, female_frame):
    """Marginal male/female rates of anger and of each AU activation."""
    for col in ('Angry', 'AU04_r_c', 'AU05_r_c', 'AU07_c', 'AU23_c'):
        print(male_frame[col].mean())
        print(female_frame[col].mean())
    print((male_frame.AU4_5_7_23_c == '1111').mean())
    print((female_frame.AU4_5_7_23_c == '1111').mean())
    print(1 - (male_frame.AU4_5_7_23_c == '0000').mean())
    print(1 - (female_frame.AU4_5_7_23_c == '0000').mean())


# KDEF encodes gender as 'M'/'F'; normalise to the labels used elsewhere.
frame.gender_preds = frame.gender_preds.apply(lambda x: 'Male' if x == 'M' else 'Female')
male_frame = frame.loc[frame['gender_preds'] == 'Male']
female_frame = frame.loc[frame['gender_preds'] == 'Female']
print(male_frame.shape)
print(female_frame.shape)

# ## Anger
# #### First check the simple proportion of male angry vs female angry:
print_gender_summaries(male_frame, female_frame)

# #### Now check for conditional probability, controlled by AU:
result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU04_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU05_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU05_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU07_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU23_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU4_5_7_23_r_c')
result_df

# + P(Angry | AU intensity) scatter plots for KDEF.
plot_angry_proportion_by_value(male_frame, female_frame, 'AU04_r', 'AU4', 'KDEF')
plot_angry_proportion_by_value(male_frame, female_frame, 'AU05_r', 'AU5', 'KDEF')
plot_angry_proportion_by_value(male_frame, female_frame, 'AU07_r', 'AU7', 'KDEF')
plot_angry_proportion_by_value(male_frame, female_frame, 'AU23_r', 'AU23', 'KDEF')
# -

# To formally show that there is a significant difference between the two
# logistic lines, we need logistic regression:
fit_angry_glms(frame)

# # Chicago Face
frame = pd.read_csv('ChicagoFace_with_AU_cleanedup.csv')
print(frame.shape)
frame.head()
frame.Expression.value_counts()
print(list(frame.columns))
frame[['AU04_c', 'AU05_c', 'AU07_c', 'AU23_c']].corr()
frame[['AU04_r', 'AU05_r', 'AU07_r', 'AU23_r']].corr()
frame = frame.rename(columns={'Gender': 'gender_preds'})

# One indicator column per posed expression.
for emotion in ('Angry', 'Fear', 'Happy', 'Neutral'):
    frame[emotion] = (frame['Expression'] == emotion).astype(int)

frame = frame.dropna()
frame.shape
frame = add_au_combination_columns(frame)
frame.AU4_5_7_23_c.value_counts()
frame.AU4_5_7_23_r_c.value_counts()

# # P(expression|AU1, male) =? P(expression|AU1, female)
frame.gender_preds = frame.gender_preds.apply(lambda x: 'Male' if x == 'M' else 'Female')
male_frame = frame.loc[frame['gender_preds'] == 'Male']
female_frame = frame.loc[frame['gender_preds'] == 'Female']
print(male_frame.shape)
print(female_frame.shape)

# ## Anger
print_gender_summaries(male_frame, female_frame)

# #### Conditional probability, controlled by AU:
result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU04_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU05_r_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU05_c')
result_df

result_df = gender_chi_square_by_au(frame, male_frame, female_frame, 'AU07_c')
result_df
# NOTE(review): the original AU23_c table for Chicago Face begins at the very
# end of this chunk and is truncated mid-statement; it continues on the next
# (unseen) lines and is intentionally not reproduced here.
= ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU23_c == index].gender_preds, frame[frame.AU23_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU23_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df result_df = pd.concat([pd.crosstab(male_frame.AU4_5_7_23_r_c, male_frame.Angry, normalize = 'index')[1], pd.crosstab(female_frame.AU4_5_7_23_r_c, female_frame.Angry, normalize = 'index')[1]], axis=1) result_df.columns = ['male', 'female'] result_df['diff'] = result_df.female - result_df.male for index, row in result_df.iterrows(): cross_tab = pd.crosstab(frame[frame.AU4_5_7_23_r_c == index].gender_preds, frame[frame.AU4_5_7_23_r_c == index].Angry, margins = True) cross_tab.columns = ["Not Angry", "Angry", "row_totals"] cross_tab.index = ["Female", "Male", "col_totals"] observed = cross_tab.iloc[0:2,0:2] # Get table without totals for later use expected = np.outer(cross_tab["row_totals"][0:2], cross_tab.loc["col_totals"][0:2]) / len(frame[frame.AU4_5_7_23_r_c == index]) expected = pd.DataFrame(expected) expected.columns = ["Not Angry", "Angry"] expected.index = ["Female", "Male"] if (expected>=5).sum().sum() == 4: chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, df=1) else: 
chi_squared_stat = np.nan p_value = np.nan print(chi_squared_stat) result_df.loc[index, "p-value"] = p_value result_df # + #AU_values_male = list(male_frame.AU06_r.unique()) AU_values_male = list(male_frame.AU04_r.value_counts()[male_frame.AU04_r.value_counts() >= 25].index) happy_proportion_male = [] for AU_value in AU_values_male: subframe = male_frame.loc[male_frame.AU04_r == AU_value] happy_proportion_male.append(subframe.Angry.mean()) plt.scatter(AU_values_male, happy_proportion_male, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') #AU_values_female = list(female_frame.AU06_r.unique()) AU_values_female = list(female_frame.AU04_r.value_counts()[female_frame.AU04_r.value_counts() >= 25].index) happy_proportion_female = [] for AU_value in AU_values_female: subframe = female_frame.loc[female_frame.AU04_r == AU_value] happy_proportion_female.append(subframe.Angry.mean()) plt.scatter(AU_values_female, happy_proportion_female, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU4 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU4) for CFD', fontsize = 18) plt.legend(fontsize = 16) #plt.xlim(-0.1, 3) #plt.savefig('ExpW_AU6.pdf') plt.show() # + #AU_values_male = list(male_frame.AU06_r.unique()) AU_values_male = list(male_frame.AU05_r.value_counts()[male_frame.AU05_r.value_counts() >= 25].index) happy_proportion_male = [] for AU_value in AU_values_male: subframe = male_frame.loc[male_frame.AU05_r == AU_value] happy_proportion_male.append(subframe.Angry.mean()) plt.scatter(AU_values_male, happy_proportion_male, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') #AU_values_female = list(female_frame.AU06_r.unique()) AU_values_female = list(female_frame.AU05_r.value_counts()[female_frame.AU05_r.value_counts() >= 25].index) happy_proportion_female = [] for AU_value in AU_values_female: subframe = female_frame.loc[female_frame.AU05_r == 
AU_value] happy_proportion_female.append(subframe.Angry.mean()) plt.scatter(AU_values_female, happy_proportion_female, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU5 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU5) for CFD', fontsize = 18) plt.legend(fontsize = 16) #plt.xlim(-0.1, 3) #plt.savefig('ExpW_AU12.pdf') plt.show() # + #AU_values_male = list(male_frame.AU06_r.unique()) AU_values_male = list(male_frame.AU07_r.value_counts()[male_frame.AU07_r.value_counts() >= 25].index) happy_proportion_male = [] for AU_value in AU_values_male: subframe = male_frame.loc[male_frame.AU07_r == AU_value] happy_proportion_male.append(subframe.Angry.mean()) plt.scatter(AU_values_male, happy_proportion_male, label="Male", marker = "p", alpha=0.8, s=30, c='r')#,edgecolors='k') #AU_values_female = list(female_frame.AU06_r.unique()) AU_values_female = list(female_frame.AU07_r.value_counts()[female_frame.AU07_r.value_counts() >= 25].index) happy_proportion_female = [] for AU_value in AU_values_female: subframe = female_frame.loc[female_frame.AU07_r == AU_value] happy_proportion_female.append(subframe.Angry.mean()) plt.scatter(AU_values_female, happy_proportion_female, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU7 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU7) for CFD', fontsize = 18) plt.legend(fontsize = 16) #plt.xlim(-0.1, 3) #plt.savefig('ExpW_AU12.pdf') plt.show() # + #AU_values_male = list(male_frame.AU06_r.unique()) AU_values_male = list(male_frame.AU23_r.value_counts()[male_frame.AU23_r.value_counts() >= 25].index) happy_proportion_male = [] for AU_value in AU_values_male: subframe = male_frame.loc[male_frame.AU23_r == AU_value] happy_proportion_male.append(subframe.Angry.mean()) plt.scatter(AU_values_male, happy_proportion_male, label="Male", marker = "p", 
alpha=0.8, s=30, c='r')#,edgecolors='k') #AU_values_female = list(female_frame.AU06_r.unique()) AU_values_female = list(female_frame.AU23_r.value_counts()[female_frame.AU23_r.value_counts() >= 25].index) happy_proportion_female = [] for AU_value in AU_values_female: subframe = female_frame.loc[female_frame.AU23_r == AU_value] happy_proportion_female.append(subframe.Angry.mean()) plt.scatter(AU_values_female, happy_proportion_female, label="Female", marker = "*", alpha=0.8, s=30, c='#089BDF')#,edgecolors='k') plt.xlabel('AU23 intensity', fontsize = 16) plt.ylabel('Proportion of "Angry" Labels', fontsize = 16) plt.title('P(Angry|AU23) for CFD', fontsize = 18) plt.legend(fontsize = 16) #plt.xlim(-0.1, 3) #plt.savefig('ExpW_AU12.pdf') plt.show() # - # To formally show that there is a significant difference between the two logistic lines, we need logistic regression: formula = 'Angry ~ AU04_r + AU05_r + AU07_r + AU23_r + gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) formula = 'Angry ~ (AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary()) formula = 'Angry ~ I(AU04_r + AU05_r + AU07_r + AU23_r) * gender_preds' model = smf.glm(formula = formula, data=frame, family=sm.families.Binomial()) result = model.fit() print(result.summary())
annotation_bias_evaluation/Appendix Figure 1 and Table 2 - Checking Angry Annotation Gender Bias.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Anaconda 5) # env: # LD_LIBRARY_PATH: /ext/anaconda5/lib # PROJ_LIB: /ext/anaconda-2019.03/share/proj # PYTHONHOME: /ext/anaconda5/lib/python3.5 # PYTHONPATH: /ext/anaconda5/lib/python3.5:/ext/anaconda5/lib/python3.5/site-packages # language: python # metadata: # cocalc: # description: Python/R distribution for data science # priority: -1 # url: https://www.anaconda.com/distribution/ # name: anaconda5 # --- # ## First Last - SymPy import sympy as sp sp.init_printing() x = sp.symbols('x') # ## What is the denominator of the third term of the Taylor expansion for the equation # # $$ \Large \frac{\sin{\left (x \right )}}{x^{4} + 27}$$ # ## Answer the Canvas quiz `SymPyQuiz`
Sympy_QUIZ.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="Ls2D-iRqty8x" # # The Deconfounder in Action # + [markdown] colab_type="text" id="vY0i2JWJuAO7" # In this notebook, we are going to see **the deconfounder in action**. # # We will perform **causal inference** with the deconfounder on a **breast cancer** dataset. # # **Goal:** # To convince all of us that the deconfounder is **easy** to use! # # + [markdown] colab_type="text" id="KwpnNVhL15jK" # The **deconfounder** operates in three steps: # # 1. **Fit** a factor model to the assigned causes; it leads to a candidate substitute confounder. # 2. **Check** the factor model with a predictive check. # 3. **Correct** for the substitute confounder in a causal inference. # # # Let's get started! # # + [markdown] colab_type="text" id="qQx9kYR26Lq4" # # Getting ready to work! 
# + colab_type="code" id="j5s-rHCuZ5qS" outputId="8a62a948-9d74-4d4b-dbdc-11eb4c6301e3" colab={"base_uri": "https://localhost:8080/", "height": 139} # !pip install tensorflow_probability # + colab_type="code" id="AHt4Q4Gch1I6" outputId="f1896dba-4a4d-49c0-aa87-acb32a9624fb" colab={"base_uri": "https://localhost:8080/", "height": 88} # %tensorflow_version 1.x import tensorflow as tf import numpy as np import numpy.random as npr import pandas as pd import tensorflow as tf import tensorflow_probability as tfp import statsmodels.api as sm from tensorflow_probability import edward2 as ed from sklearn.datasets import load_breast_cancer from pandas.plotting import scatter_matrix from scipy import sparse, stats from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score, roc_curve import matplotlib matplotlib.rcParams.update({'font.sans-serif' : 'Helvetica', 'axes.labelsize': 10, 'xtick.labelsize' : 6, 'ytick.labelsize' : 6, 'axes.titlesize' : 10}) import matplotlib.pyplot as plt import seaborn as sns color_names = ["windows blue", "amber", "crimson", "faded green", "dusty purple", "greyish"] colors = sns.xkcd_palette(color_names) sns.set(style="white", palette=sns.xkcd_palette(color_names), color_codes = False) # + colab_type="code" id="CbLrKgh75lmF" outputId="4cf27aaf-7aa9-4a42-a33b-07bd36572f22" colab={"base_uri": "https://localhost:8080/", "height": 207} # !pip show tensorflow # + colab_type="code" id="isEZ89zj5nax" outputId="db91e497-9eb4-42ce-d4c0-89b5d0dce548" colab={"base_uri": "https://localhost:8080/", "height": 187} # !pip show tensorflow_probability # + colab_type="code" id="k-Gkiqs1h2nd" outputId="0895f060-1e5a-4317-d3ee-4eaae8bb69f7" colab={"base_uri": "https://localhost:8080/", "height": 34} # set random seed so everyone gets the same number import random randseed = 123 print("random seed: ", randseed) random.seed(randseed) 
np.random.seed(randseed) tf.set_random_seed(randseed) # + [markdown] colab_type="text" id="GAv0QLoj6SPJ" # ## The scikit-learn breast cancer dataset # + [markdown] colab_type="text" id="t_cTMBQT6po6" # * It is a data set about **breast cancer**. # * We are interested in how tumor properties **affect** cancer diagnosis. # * The **(multiple) causes** are tumor properties, e.g. sizes, compactness, symmetry, texture. # * The **outcome** is tumor diagnosis, whether the breast cancer is diagnosed as malignant or benign. # # # + colab_type="code" id="Y9-3vRsFIXWs" colab={} data = load_breast_cancer() # + colab_type="code" id="5S-p6g7xhi26" outputId="db1619d7-ab4d-4e58-985d-e363b142c0ab" colab={"base_uri": "https://localhost:8080/", "height": 1000} print(data['DESCR']) # + [markdown] colab_type="text" id="NgtgxjBz3APu" # ***For simplicity, we will work with the first 10 features, i.e. the mean radius/texture/perimeter***/.... # + colab_type="code" id="C6NzW5eE3IyO" colab={} num_fea = 10 df = pd.DataFrame(data["data"][:,:num_fea], columns=data["feature_names"][:num_fea]) # + colab_type="code" id="lBAqZawRPC0c" outputId="9d625d3d-da52-4335-b7aa-c2a1a2d102e1" colab={"base_uri": "https://localhost:8080/", "height": 34} df.shape # + colab_type="code" id="r7yIcCQM3JvH" outputId="4334dc03-c26a-4510-92de-5dcaf384f4e6" colab={"base_uri": "https://localhost:8080/", "height": 238} df.head() # + colab_type="code" id="5jHvVdJc4NCl" colab={} dfy = data["target"] # + colab_type="code" id="nlefXx2u4Slb" outputId="255f35cf-6272-4065-b4f9-937dcf386560" colab={"base_uri": "https://localhost:8080/", "height": 119} dfy.shape, dfy[:100] # binary outcomes # + [markdown] colab_type="text" id="m12T5pzK6fql" # ## Preparing the dataset for the deconfounder # + [markdown] colab_type="text" id="gugfSD8m1heM" # ### Only one step of preprocessing needed! 
# # ### We need to get rid of the highly correlated causes # + [markdown] colab_type="text" id="TAqGlnyq5AYQ" # **Why** do we need to get rid of highly correlated causes? # # If two causes are **highly correlated**, a valid substitute confounder will largely **inflate the variance** of causal estimates downstream. # # This phenomenon is **closely related to** the variance inflation phenomenon in linear regression. # # # # # + [markdown] colab_type="text" id="CKHovx1InFLX" # ***A more technical explanation (ignorable)*** # # Think of the extreme case where two causes are perfectly collinear $A_1 = 5A_2$. The only random variable Z that # $$A_1 \perp A_2 | Z,$$ # # $(A_1, A_2)$ **must** be a **deterministic function** of Z. For example, $Z = A_1$ or $Z = A_2$. # # Such a substitute confounder Z **breaks one of the conditions** the deconfounder requires. See "***A note on overlap***" in the theory section of the paper. # # + [markdown] colab_type="text" id="ywFFAar53Vt5" # **How** do we get rid of highly correlated causes? # # * We first make a **scatter plot** of **all pairs** of the causes. # # * It reveals which causes are **highly correlated**. # # * We will **exclude** these highly correlated causes by hand. # # # + colab_type="code" id="P898PgYXvmxH" outputId="5632014f-0d6f-42fd-9e52-22b4864c0abb" colab={"base_uri": "https://localhost:8080/", "height": 936} sns.pairplot(df, size=1.5) # + colab_type="code" id="Shj079RXHecF" colab={} # perimeter and area are highly correlated with radius fea_cols = df.columns[[(not df.columns[i].endswith("perimeter")) \ and (not df.columns[i].endswith("area")) \ for i in range(df.shape[1])]] # + colab_type="code" id="vdUoBjmX1qvh" outputId="9af495d7-cebb-49e5-c199-a37e389f4612" colab={"base_uri": "https://localhost:8080/", "height": 34} dfX = pd.DataFrame(df[fea_cols]) print(dfX.shape, dfy.shape) # + [markdown] colab_type="text" id="1pVQ4OmG7Ljd" # ### How does the dataset look like after preprocessing? 
# + colab_type="code" id="e6YX8vvi1shr" outputId="c2087aed-6012-4b61-d4b0-8554f9778737" colab={"base_uri": "https://localhost:8080/", "height": 221} # The causes dfX.head() # + colab_type="code" id="V-6n4XSWptnK" outputId="1786878e-475b-4230-8103-6905bebc62ce" colab={"base_uri": "https://localhost:8080/", "height": 51} # The outcome dfy[:25] # + [markdown] colab_type="text" id="ty0TPGzn8KAA" # # The dataset is ready. Let's do causal inference with the deconfounder! # + [markdown] colab_type="text" id="pQc-2AYD1xQN" # ## Step 1: Fit a factor model to the assigned causes; it leads to a substitute confounder. # # + [markdown] colab_type="text" id="vZHcTdZv12b5" # ### We start with trying out a random factor model. How about a probabilistic PCA model? # + [markdown] colab_type="text" id="sTTdtDGqvnWY" # The matrix of assigned causes $X$ # # * It has N=569 rows and D=8 columns. # * N is the number of subjects/data points. # * D is the number of causes/data dimension. # + [markdown] colab_type="text" id="FgrUGC0BLzKU" # ### Step 1.1: Some chores first... # + [markdown] colab_type="text" id="hD3RUa1jDaUO" # #### Standardize the data # This step is optional to the deconfounder. # # It only makes finding a good probabilistic PCA model easier. # + colab_type="code" id="n8cq48v59tG-" colab={} # dfX.std() # + colab_type="code" id="7Ps8ev0rw1km" colab={} # standardize the data for PPCA X = np.array((dfX - dfX.mean())/dfX.std()) # + [markdown] colab_type="text" id="ov0ddf38Dl9k" # #### Then holdout some data! # + [markdown] colab_type="text" id="dU57s3fRD9V2" # We will later need to check the factor model with some heldout data. # So let's holdout some now. 
# + colab_type="code" id="-HLHcQaL86cq" colab={} # randomly holdout some entries of X num_datapoints, data_dim = X.shape holdout_portion = 0.2 n_holdout = int(holdout_portion * num_datapoints * data_dim) holdout_row = np.random.randint(num_datapoints, size=n_holdout) holdout_col = np.random.randint(data_dim, size=n_holdout) holdout_mask = (sparse.coo_matrix((np.ones(n_holdout), \ (holdout_row, holdout_col)), \ shape = X.shape)).toarray() holdout_subjects = np.unique(holdout_row) holdout_mask = np.minimum(1, holdout_mask) x_train = np.multiply(1-holdout_mask, X) x_vad = np.multiply(holdout_mask, X) # + [markdown] colab_type="text" id="PEA6ktoDETJo" # ### Step 1.2: We are ready to fit a probabilistic PCA model to x_train. # + [markdown] colab_type="text" id="uZLPxtnJLAMW" # This step of "**fitting** a factor model" involves **inferring latent variables** in probability models. # # We will rely on **Tensorflow Probability**, a library for probabilistic reasoning and statistical analysis in TensorFlow. # # There are many **other probabilistic programming toolboxes** for fitting factor models, e.g. Pyro, Stan. # # Some of the latent variable models can also be fit with **scikit-learn**. # # We are free to use any of these with the deconfounder! # # # + [markdown] colab_type="text" id="twnsBOq7AI6o" # # # **What does a probabilistic PCA model look like?** # # * Probabilistic PCA is a dimensionality reduction technique. It models data with a lower dimensional latent space. # # * We consider the assigned causes of the $n$th subject. We write it as $\mathbf{x}_n$, which is a $D=8$ dimensional vector. # # * The probabilistic PCA assumes the following data generating process for each $\mathbf{x}_n$, $n = 1, ..., N$: # # \begin{equation*} # \mathbf{z}_{n} \stackrel{iid}{\sim} N(\mathbf{0}, \mathbf{I}_K), # \end{equation*} # # \begin{equation*} # \mathbf{x}_n \mid \mathbf{z}_n # \sim N(\mathbf{z}_n\mathbf{W}, \sigma^2\mathbf{I}_D). 
# \end{equation*} # # # * We construct a $K$-dimensional substitute confounder $\mathbf{z}_{n}$ for each subject $n$, $n = 1, ..., N$. # * Each $\mathbf{z}_{n}$ is a $K$-dimensional latent vector, $n = 1, ..., N$. # # # # + colab_type="code" id="PuW5cLY-v6gh" colab={} # we allow both linear and quadratic model # for linear model x_n has mean z_n * W # for quadratic model x_n has mean b + z_n * W + (z_n**2) * W_2 # quadractice model needs to change the checking step accordingly def ppca_model(data_dim, latent_dim, num_datapoints, stddv_datapoints, mask, form="linear"): w = ed.Normal(loc=tf.zeros([latent_dim, data_dim]), scale=tf.ones([latent_dim, data_dim]), name="w") # parameter z = ed.Normal(loc=tf.zeros([num_datapoints, latent_dim]), scale=tf.ones([num_datapoints, latent_dim]), name="z") # local latent variable / substitute confounder if form == "linear": x = ed.Normal(loc=tf.multiply(tf.matmul(z, w), mask), scale=stddv_datapoints * tf.ones([num_datapoints, data_dim]), name="x") # (modeled) data elif form == "quadratic": b = ed.Normal(loc=tf.zeros([1, data_dim]), scale=tf.ones([1, data_dim]), name="b") # intercept w2 = ed.Normal(loc=tf.zeros([latent_dim, data_dim]), scale=tf.ones([latent_dim, data_dim]), name="w2") # quadratic parameter x = ed.Normal(loc=tf.multiply(b + tf.matmul(z, w) + tf.matmul(tf.square(z), w2), mask), scale=stddv_datapoints * tf.ones([num_datapoints, data_dim]), name="x") # (modeled) data return x, (w, z) log_joint = ed.make_log_joint_fn(ppca_model) # + [markdown] colab_type="text" id="bHhvv14CIeCN" # **Let's fit a probabilistic PCA model.** # + colab_type="code" id="FCxTNg-fxFwl" colab={} latent_dim = 2 stddv_datapoints = 0.1 model = ppca_model(data_dim=data_dim, latent_dim=latent_dim, num_datapoints=num_datapoints, stddv_datapoints=stddv_datapoints, mask=1-holdout_mask) # + [markdown] colab_type="text" id="a4IEJ4QhItaD" # The cell below implements **variational inference** for probabilistic PCA in tensorflow probability. 
# # You are free to fit the probabilistic PCA in your favourite ways with your favourite package. # # Note: approximate inference is perfectly fine! # # It is orthogonal to our discussion around the deconfounder. # # Let's **ignore** that for now (and forever). # # + colab_type="code" id="3a3_Bya5xG6M" outputId="393d8b8a-f075-4f2f-b2a7-a454191ef153" colab={"base_uri": "https://localhost:8080/", "height": 438} def variational_model(qb_mean, qb_stddv, qw_mean, qw_stddv, qw2_mean, qw2_stddv, qz_mean, qz_stddv): qb = ed.Normal(loc=qb_mean, scale=qb_stddv, name="qb") qw = ed.Normal(loc=qw_mean, scale=qw_stddv, name="qw") qw2 = ed.Normal(loc=qw2_mean, scale=qw2_stddv, name="qw2") qz = ed.Normal(loc=qz_mean, scale=qz_stddv, name="qz") return qb, qw, qw2, qz log_q = ed.make_log_joint_fn(variational_model) def target(b, w, w2, z): """Unnormalized target density as a function of the parameters.""" return log_joint(data_dim=data_dim, latent_dim=latent_dim, num_datapoints=num_datapoints, stddv_datapoints=stddv_datapoints, mask=1-holdout_mask, w=w, z=z, w2=w2, b=b, x=x_train) def target_q(qb, qw, qw2, qz): return log_q(qb_mean=qb_mean, qb_stddv=qb_stddv, qw_mean=qw_mean, qw_stddv=qw_stddv, qw2_mean=qw2_mean, qw2_stddv=qw2_stddv, qz_mean=qz_mean, qz_stddv=qz_stddv, qw=qw, qz=qz, qw2=qw2, qb=qb) qb_mean = tf.Variable(np.ones([1, data_dim]), dtype=tf.float32) qw_mean = tf.Variable(np.ones([latent_dim, data_dim]), dtype=tf.float32) qw2_mean = tf.Variable(np.ones([latent_dim, data_dim]), dtype=tf.float32) qz_mean = tf.Variable(np.ones([num_datapoints, latent_dim]), dtype=tf.float32) qb_stddv = tf.nn.softplus(tf.Variable(0 * np.ones([1, data_dim]), dtype=tf.float32)) qw_stddv = tf.nn.softplus(tf.Variable(-4 * np.ones([latent_dim, data_dim]), dtype=tf.float32)) qw2_stddv = tf.nn.softplus(tf.Variable(-4 * np.ones([latent_dim, data_dim]), dtype=tf.float32)) qz_stddv = tf.nn.softplus(tf.Variable(-4 * np.ones([num_datapoints, latent_dim]), dtype=tf.float32)) qb, qw, qw2, qz = 
variational_model(qb_mean=qb_mean, qb_stddv=qb_stddv, qw_mean=qw_mean, qw_stddv=qw_stddv, qw2_mean=qw2_mean, qw2_stddv=qw2_stddv, qz_mean=qz_mean, qz_stddv=qz_stddv) energy = target(qb, qw, qw2, qz) entropy = -target_q(qb, qw, qw2, qz) elbo = energy + entropy optimizer = tf.train.AdamOptimizer(learning_rate = 0.05) train = optimizer.minimize(-elbo) init = tf.global_variables_initializer() t = [] num_epochs = 500 with tf.Session() as sess: sess.run(init) for i in range(num_epochs): sess.run(train) if i % 5 == 0: t.append(sess.run([elbo])) b_mean_inferred = sess.run(qb_mean) b_stddv_inferred = sess.run(qb_stddv) w_mean_inferred = sess.run(qw_mean) w_stddv_inferred = sess.run(qw_stddv) w2_mean_inferred = sess.run(qw2_mean) w2_stddv_inferred = sess.run(qw2_stddv) z_mean_inferred = sess.run(qz_mean) z_stddv_inferred = sess.run(qz_stddv) print("Inferred axes:") print(w_mean_inferred) print("Standard Deviation:") print(w_stddv_inferred) plt.plot(range(1, num_epochs, 5), t) plt.show() def replace_latents(b, w, w2, z): def interceptor(rv_constructor, *rv_args, **rv_kwargs): """Replaces the priors with actual values to generate samples from.""" name = rv_kwargs.pop("name") if name == "b": rv_kwargs["value"] = b elif name == "w": rv_kwargs["value"] = w elif name == "w": rv_kwargs["value"] = w2 elif name == "z": rv_kwargs["value"] = z return rv_constructor(*rv_args, **rv_kwargs) return interceptor # + [markdown] colab_type="text" id="ygEgzBbxKcTC" # So we just played some **magic** to **fit the probabilistic PCA** to the matrix of assigned causes $\mathbf{X}$. # # # **The only important thing here is: ** # # We have **inferred** the latent variables $\mathbf{z}_n, n=1, ..., N$ and the parameters $\mathbf{W}$. # # Specifically, we have obtained from this step # # ``` # w_mean_inferred, # w_stddv_inferred, # z_mean_inferred, # z_stddv_inferred. # ``` # # # + [markdown] colab_type="text" id="CJ9ermuG-x9u" # ## Step 2: Check the factor model with a predictive check. 
# + [markdown] colab_type="text" id="EIWWRMoM_A6q" # # Now we are ready to **check** the probabilistic PCA model. # # The checking step is **very important** to the deconfounder. # # Pleeeeeze **always** check the factor model! # + [markdown] colab_type="text" id="JNOOzWK6UiLz" # **How** do we perform the predictive check? # # # 1. We will **generate** some replicated datasets for the heldout entries. # 2. And then **compare** the replicated datasets with the original dataset on the heldout entries. # 3. If they **look similar**, then we are good to go. # # # + [markdown] colab_type="text" id="19rmRCPIQ5-V" # #### Step 2.1: We generate some replicated datasets first. # + [markdown] colab_type="text" id="j02uW2WzQ-0i" # * We will start with generating some **replicated datasets** from the predictive distribution of the assigned causes $X$: # \begin{align} # p(\mathbf{X^{rep}_{n,heldout}} \,|\, \mathbf{X_{n, obs}}) = # \int p(\mathbf{X_{n, heldout}} \,|\, \mathbf{z}_n) p(\mathbf{z_n} \,|\, \mathbf{X}_{n, obs}) \mathrm{d} \mathbf{z_n}. # \end{align} # # * That is, we generate these datasets from a probabilistic PCA model given the **inferred** latent variables $\hat{p}(\mathbf{z}_n)$ and $\hat{p}(\mathbf{W})$: # # \begin{equation*} # \mathbf{z}_{n} \sim \hat{p}(\mathbf{z}_n), # \end{equation*} # # \begin{equation*} # \mathbf{W} \sim \hat{p}(\mathbf{W}), # \end{equation*} # # \begin{equation*} # \mathbf{x}_n \mid \mathbf{z}_n # \sim N(\mathbf{z}_n\mathbf{W}, \sigma^2\mathbf{I}_D). # \end{equation*} # # # * These replicated datasets tell us what the assigned causes $X$ **should look like** if it is indeed generated by the fitted probabilistic PCA model. 
# # + colab_type="code" id="DyCfobStxZmE" colab={} n_rep = 100 # number of replicated datasets we generate holdout_gen = np.zeros((n_rep,*(x_train.shape))) for i in range(n_rep): b_sample = npr.normal(b_mean_inferred, b_stddv_inferred) w_sample = npr.normal(w_mean_inferred, w_stddv_inferred) w2_sample = npr.normal(w2_mean_inferred, w2_stddv_inferred) z_sample = npr.normal(z_mean_inferred, z_stddv_inferred) with ed.interception(replace_latents(b_sample, w_sample, w2_sample, z_sample)): generate = ppca_model( data_dim=data_dim, latent_dim=latent_dim, num_datapoints=num_datapoints, stddv_datapoints=stddv_datapoints, mask=np.ones(x_train.shape)) with tf.Session() as sess: x_generated, _ = sess.run(generate) # look only at the heldout entries holdout_gen[i] = np.multiply(x_generated, holdout_mask) # + [markdown] colab_type="text" id="9mm5Ff83R2ia" # #### Step 2.2: Then we compute the test statistic on both the original and the replicated dataset. # + [markdown] colab_type="text" id="ncxX06n7SaTU" # # # * We use the **test statistic** of **expected heldout log likelihood**: # \begin{align} # t(\mathbf{X_{n,heldout}}) = \mathbb{E}_{\mathbf{Z}, \mathbf{W}}[{\log p(\mathbf{X_{n,heldout}} \,|\, \mathbf{Z}, \mathbf{W}) \,|\, # \mathbf{X_{n,obs}}}]. # \end{align} # # * We calculate this test statistic **for each $n$** and for **both** the **original** dataset $\mathbf{X_{n,heldout}}$ and the **replicated** dataset $\mathbf{X^{rep}_{n,heldout}}$. 
# # # + colab_type="code" id="F8VuWkc2xbKL" colab={} n_eval = 100 # we draw samples from the inferred Z and W obs_ll = [] rep_ll = [] for j in range(n_eval): w_sample = npr.normal(w_mean_inferred, w_stddv_inferred) z_sample = npr.normal(z_mean_inferred, z_stddv_inferred) holdoutmean_sample = np.multiply(z_sample.dot(w_sample), holdout_mask) obs_ll.append(np.mean(stats.norm(holdoutmean_sample, \ stddv_datapoints).logpdf(x_vad), axis=1)) rep_ll.append(np.mean(stats.norm(holdoutmean_sample, \ stddv_datapoints).logpdf(holdout_gen),axis=2)) obs_ll_per_zi, rep_ll_per_zi = np.mean(np.array(obs_ll), axis=0), np.mean(np.array(rep_ll), axis=0) # + [markdown] colab_type="text" id="b-HxX1jmZSzU" # #### Step 2.3: Finally we compare the test statistic of the original and the replicated dataset. # + [markdown] colab_type="text" id="OkVFMJLnZdvE" # # * We compare the test statistics via the $p$-values. # \begin{equation*} # \text{$p$-value} = p\left(t(\mathbf{X_{n,heldout}^{rep}}) < t(\mathbf{X_{n, heldout}})\right). # \end{equation*} # # * The **smaller** the $p$-value is, the **more different** the original dataset is from the replicated dataset. # # * We **fail** the check if the $p$-value is **small**. # # * Note this goes in the opposite direction to the conventional usage of $p$-values. # # # + [markdown] colab_type="text" id="rpX1T3Z-bY5L" # We compute a $p$-value for each $n$ and output the average $p$-values. # + colab_type="code" id="IIX0LYw4GUiM" outputId="4b80d01a-21d1-4459-a2ce-867a835fe769" colab={"base_uri": "https://localhost:8080/", "height": 34} pvals = np.array([np.mean(rep_ll_per_zi[:,i] < obs_ll_per_zi[i]) for i in range(num_datapoints)]) holdout_subjects = np.unique(holdout_row) overall_pval = np.mean(pvals[holdout_subjects]) print("Predictive check p-values", overall_pval) # + [markdown] colab_type="text" id="Vba2-ap0cg7A" # **We passed the check!** # # The substitute confounder $\mathbf{z}_n$ constructed in Step 1 is valid. We are ready to move on! 
# + [markdown] colab_type="text" id="FcsLigMobs-W" # #### An optional step # # We can also peak at **the predictive check of individual subjects**. # # This step is just for fun. It is how we generate Figure 2 of the paper. # # # # * We randomly choose a subject. # * Plot the kernel density estimate of the test statistic on the replicated datasets. # * Plot the test statistic on the original dataset (the dashed vertical line). # # # # + colab_type="code" id="pOgaNqPpJxJj" outputId="f70a5687-d79c-4e05-9064-0a5f213aba9a" colab={"base_uri": "https://localhost:8080/", "height": 301} subject_no = npr.choice(holdout_subjects) sns.kdeplot(rep_ll_per_zi[:,subject_no]).set_title("Predictive check for subject "+str(subject_no)) plt.axvline(x=obs_ll_per_zi[subject_no], linestyle='--') # + [markdown] colab_type="text" id="4QSqF4hTDvov" # ## Step 3: Correct for the substitute confounder in a causal inference. # + [markdown] colab_type="text" id="rsvEIwaqeXbH" # **How** to estimate causal effects? # # * For simplicity, we fit a logistic regression as an outcome model here. # # * The target is the observed outcome $y_n$, $n=1,\ldots, N$. # # * The regressor is the multiple causes $\mathbf{X}_n$, $n=1,\ldots, N$. # # **How** to correct for the substitute confounder? # # * We include the substitute confounder $\mathbf{Z}_n$, $n=1,\ldots, N$, into the regressors. # + colab_type="code" id="_RYJR1BxR1QG" colab={} # approximate the (random variable) substitute confounders with their inferred mean. 
Z_hat = z_mean_inferred # augment the regressors to be both the assigned causes X and the substitute confounder Z X_aug = np.column_stack([X, Z_hat]) # + colab_type="code" id="g45Th7DyR6Km" colab={} # holdout some data from prediction later X_train, X_test, y_train, y_test = train_test_split(X_aug, dfy, test_size=0.2, random_state=0) # + colab_type="code" id="LtabnZMmVw0J" outputId="2206d20f-a993-4f07-8d57-aefc20acd9c5" colab={"base_uri": "https://localhost:8080/", "height": 578} dcfX_train = sm.add_constant(X_train) dcflogit_model = sm.Logit(y_train, dcfX_train) dcfresult = dcflogit_model.fit_regularized(maxiter=5000) print(dcfresult.summary()) # + colab_type="code" id="dOz9bVViWyr0" colab={} res = pd.DataFrame({"causal_mean": dcfresult.params[:data_dim+1], \ "causal_std": dcfresult.bse[:data_dim+1], \ "causal_025": dcfresult.conf_int()[:data_dim+1,0], \ "causal_975": dcfresult.conf_int()[:data_dim+1,1], \ "causal_pval": dcfresult.pvalues[:data_dim+1]}) res["causal_sig"] = (res["causal_pval"] < 0.05) res = res.T res.columns = np.concatenate([["intercept"], np.array(dfX.columns)]) res = res.T # + colab_type="code" id="h28MJM0bWbHL" outputId="c9e23f16-6d1b-45d8-99f2-ebe0b42177fd" colab={"base_uri": "https://localhost:8080/", "height": 328} res # + [markdown] colab_type="text" id="J-6ROpAtgcL2" # We check the predictions to see if the logistic outcome model is a good outcome model. # + colab_type="code" id="bSIWv2tYg5mE" outputId="cc1eeac7-4217-4bc2-8a78-15b2607d85de" colab={"base_uri": "https://localhost:8080/", "height": 170} # make predictions with the causal model dcfX_test = X_test dcfy_predprob = dcfresult.predict(sm.add_constant(dcfX_test)) dcfy_pred = (dcfy_predprob > 0.5) print(classification_report(y_test, dcfy_pred)) # + [markdown] colab_type="text" id="pSKOJQzJ0xr0" # # We are done! # + [markdown] colab_type="text" id="2kR2t_dD_QwC" # We have computed the average causal effect of raising the causes by one unit (see the "causal mean" column above). 
# + [markdown] colab_type="text" id="BSYKVbRa1TxU" # # Is the deconfounder worth the effort? # + [markdown] colab_type="text" id="T6-HaUWP1DjY" # We finally compare the **causal** estimation (with the deconfounder) with the **noncausal** estimation (with vanilla regression). # + [markdown] colab_type="text" id="SBqs58JpWkaR" # ## The classical logistic regression! Note it is noncausal :-( # + colab_type="code" id="ClmfJrvgWJ1d" outputId="75235bff-bd9e-4bdd-b8de-03753dacda93" colab={"base_uri": "https://localhost:8080/", "height": 544} # regress the outcome against the causes only (no substitute confounders) nodcfX_train = sm.add_constant(X_train[:,:X.shape[1]]) nodcflogit_model = sm.Logit(y_train, nodcfX_train) nodcfresult = nodcflogit_model.fit_regularized(maxiter=5000) print(nodcfresult.summary()) # + colab_type="code" id="pP96iyOkXAya" colab={} res["noncausal_mean"] = np.array(nodcfresult.params) res["noncausal_std"] = np.array(nodcfresult.bse) res["noncausal_025"] = np.array(nodcfresult.conf_int()[:,0]) res["noncausal_975"] = np.array(nodcfresult.conf_int()[:,1]) res["noncausal_pval"] = np.array(nodcfresult.pvalues) res["noncausal_sig"] = (res["noncausal_pval"] < 0.05) # + colab_type="code" id="ku49y-cKXJJW" colab={} res["diff"] = res["causal_mean"] - res["noncausal_mean"] res["pval_diff"] = res["causal_pval"] - res["noncausal_pval"] # + colab_type="code" id="fDudsVQQXQXY" colab={} nodcfX_test = sm.add_constant(X_test[:,:X.shape[1]]) nodcfy_predprob = nodcfresult.predict(nodcfX_test) nodcfy_pred = (nodcfy_predprob > 0.5) # + [markdown] colab_type="text" id="QPJgm2mlhP0y" # **Causal models do not hurt predictions here!** # + colab_type="code" id="cveM4AMckC8v" outputId="70a35c33-f966-4b83-b230-9ac4290e9ff6" colab={"base_uri": "https://localhost:8080/", "height": 301} dcflogit_roc_auc = roc_auc_score(y_test, dcfy_pred) dcffpr, dcftpr, dcfthresholds = roc_curve(y_test, dcfy_predprob) nodcflogit_roc_auc = roc_auc_score(y_test, nodcfy_pred) nodcffpr, nodcftpr, 
nodcfthresholds = roc_curve(y_test, nodcfy_predprob) plt.figure() plt.plot(nodcffpr, nodcftpr, label='Noncausal Logistic Regression (area = %0.9f)' % nodcflogit_roc_auc) plt.plot(dcffpr, dcftpr, label='Causal Logistic Regression (area = %0.9f)' % dcflogit_roc_auc) plt.plot([0, 1], [0, 1],'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right") plt.savefig('Log_ROC') plt.show() # + [markdown] colab_type="text" id="t8wC1WolhXS1" # **But causal models do change the regression coefficients and which features are significant.** # + [markdown] colab_type="text" id="W5oEmvWChw-m" # * The mean smoothness is a feature **significantly correlated** with the cancer diagnosis. # # * But it does **not significantly** **causally affect** the cancer diagnosis. # # * The effect of all features are **over-estimated** with the noncausal model except the "mean compactness". # + colab_type="code" id="jaWGj7CuYskI" outputId="e72515e1-1b24-4ea6-950f-c1bcbe43ac92" colab={"base_uri": "https://localhost:8080/", "height": 467} res.sort_values("pval_diff", ascending=True)[["pval_diff", "causal_pval", "noncausal_pval", "causal_sig", "noncausal_sig", "causal_mean", "noncausal_mean"]] # + [markdown] colab_type="text" id="mrhsKixc9nlz" # * We include causes into the regression **one-by-one**. # * The deconfounder coefficients **does not** flip signs. # * But classical logistic regression coefficients **does** flip signs. # * This suggests that **the deconfounder is causal**. # * It is because **causal** coefficients **do not change** as we include more variables into the system; causal estimation already controls for confounders so that it is causal. 
# * However, **correlation** coefficients **can change** as we include more variables into the system; if the added variable is a confounder, than the regression coefficients change to account for the confounding effects. # + colab_type="code" id="WqFB3btE_WQk" outputId="92ab9d91-887b-4157-d05e-25dec43882e4" colab={"base_uri": "https://localhost:8080/", "height": 1000} # The deconfounder with causes added one-by-one # The first i coefficient is the causal coefficient of the first i causes. # i = 1, ..., 8. for i in range(X.shape[1]): print(i, "causes included") # augment the regressors to be both the assigned causes X and the substitute confounder Z X_aug = np.column_stack([X[:,:i], Z_hat]) # holdout some data from prediction later X_train, X_test, y_train, y_test = train_test_split(X_aug, dfy, test_size=0.2, random_state=0) dcfX_train = sm.add_constant(X_train) dcflogit_model = sm.Logit(y_train, dcfX_train) dcfresult = dcflogit_model.fit_regularized(maxiter=5000) print(dcfresult.summary()) # + colab_type="code" id="tYjfPtOW_zmc" outputId="bde39892-0ead-4c1d-8e2b-cd97e7481312" colab={"base_uri": "https://localhost:8080/", "height": 1000} # Logistic regression with causes added one-by-one # The first i coefficient is the causal coefficient of the first i causes. # i = 1, ..., 8. 
for i in range(X.shape[1]): print(i, "causes included") # augment the regressors to be both the assigned causes X and the substitute confounder Z X_aug = np.column_stack([X[:,:i]]) # holdout some data from prediction later X_train, X_test, y_train, y_test = train_test_split(X_aug, dfy, test_size=0.2, random_state=0) dcfX_train = sm.add_constant(X_train) dcflogit_model = sm.Logit(y_train, dcfX_train) dcfresult = dcflogit_model.fit_regularized(maxiter=5000) print(dcfresult.summary()) # + [markdown] colab_type="text" id="LriJGU_rAYx9" # **We note that the causal coefficient of x4 is stable with the (causal) deconfounder but the correlation coefficent of x4 flips sign with the (noncausal) logistic regression.** # + [markdown] colab_type="text" id="mDTQQ07H-jqL" # # Takeaways # + [markdown] colab_type="text" id="E_fKgGXKinvp" # # # * The deconfounder is **not hard** to use. # * We simply **fit** a factor model, **check** it, and **infer** causal effects with the substitute confounder. # * Please **always check** the factor model. # * The deconfounder **makes a difference**. # * The deconfounder **deconfounds**. # # # + [markdown] colab_type="text" id="JgWF4gDkA1S4" # # Acknowledgements # + [markdown] colab_type="text" id="6NTee1r9A4Lm" # We thank <NAME> for suggesting the adding-causes-one-by-one idea.
deconfounder_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.3 64-bit (''base'': conda)' # name: python3 # --- # + # 载入标注文件 import json import os.path as osp import os from tqdm import tqdm from glob import glob import shutil import cv2 import matplotlib.pyplot as plt import pandas as pd from sklearn.model_selection import train_test_split # - # ## 数据基本处理 # - 重命名图片名,将后缀统一改为'jpg' # # # 标签数据读取与格式化 # - 把标签数据从csv文件中解析成txt文件 # - 在txt文件中保存每个目标的bbox信息 root = '/home/p100/rz/data/gaokong/' train_img_paths = glob(osp.join(root, 'org/images/train/*')) test_img_paths = glob(osp.join(root, 'org/images/test/*')) # rename image file name for imgp in test_img_paths: new_path = imgp.split('.')[0] + '.jpg' os.rename(imgp, new_path) # + # parser label cvs file to txt file csv_file_path = osp.join(root, 'org/train_label.csv') store = '/home/p100/rz/data/gaokong/org/labels/train' data = pd.read_csv(csv_file_path) image_names = data.iloc[:, 4].tolist() infors = data.iloc[:, 5].tolist() if not osp.exists(store): os.makedirs(store) for name, info in zip(image_names, infors): new_name = name.split('/')[1].split('.')[0] + '.jpg' txt_store_path = os.path.join(store, new_name.replace('jpg', 'txt')) info = json.loads(info) items = info['items'] objs = [] for it in items: bbox = it['meta'].get('geometry', None) if bbox is None: continue bbox = list(map(int, bbox)) label = it['labels']['标签'] obj = ','.join(map(str, bbox)) + '\t' + label objs.append(obj) with open(txt_store_path, 'w') as f: f.write('\n'.join(objs)) # - # # split train to train and valid dataset # train_img_names, valid_img_names = train_test_split([osp.basename(p) for p in train_img_paths], # test_size=0.1, # random_state=47) # # move images to valid dir # for name in valid_img_names: # org_img_path = osp.join('/home/p100/rz/data/gaokong/org/images/train', name) # org_label_path = 
osp.join('/home/p100/rz/data/gaokong/org/labels/train', name.replace('jpg', 'txt')) # shutil.move(org_img_path, '/home/p100/rz/data/gaokong/org/images/valid') # shutil.move(org_label_path, '/home/p100/rz/data/gaokong/org/labels/valid') # # EDA # + tags=[] txt_paths = glob(osp.join(root, 'org/labels/train/*.txt')) category = [] lines = 0 # 标签数量 for txtp in txt_paths: with open(txtp) as f: content = f.read().strip() if len(content) == 0: print(txtp, ' file is empty.') continue content = content.split('\n') for line in content: ct = line.strip().split('\t')[-1] if ct == '': continue lines += 1 category.append(ct) category = list(set(category)) category # - print('标签类别') print('类别数量:' ,len(category)) category print('训练集图片数量:', len(txt_paths)) print('训练集标签数量:', lines) total=[] for imgp in tqdm(train_img_paths): h, w, _ = cv2.imread(imgp).shape total.append((w, h)) unique=set(total) for k in unique: print('长宽为(%d.%d)的图片数量为:'%k,total.count(k)) # 可以看出 图像的分辨率不是固定的 (720,405)的图像相对较多,其次是(3840,2160) ids=[] images_id=[] for i in a['annotations']: ids.append(i['id']) images_id.append(i['image_id']) print('训练集图片数量: ', 5543) print('unique id 数量: ', len(set(ids))) print('unique image_id 数量: ', len(set(images_id))) image_list = set(images_id) import os tmp = os.listdir(r'F:\study\0_Project\sea_detection\2020dataset\VOC2007\VOCdevkit\VOC2007\JPEGImages') os_list = [int(i.replace('.jpg','')) for i in tmp] org_list = set(os_list) gaps = org_list - image_list print('缺少标注的图像数量:', len(gaps)) # gaps的元素在org_list中存在,但是在image_list中不存在,有88个文件没有标注信息,annotation为空 # 那么在训练的过程中,全图为空,没有标注的图,应该删掉,或者使用mixup,增强特征,这点后面再进行考虑 # mmdetection会自动删除全图标注为空的样本 # 查看统计量 import pandas as pd import seaborn as sns import matplotlib.pyplot as plt plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['font.family']='sans-serif' plt.rcParams['figure.figsize'] = (10.0, 10.0) ## 创建类别标签字典 category_dic = dict([(i['id'], i['name']) for i in a['categories']]) category_dic # + counts_label_all=dict([(i['name'], 0) for i in 
a['categories']]) counts_label_train=dict([(j['name'], 0) for j in train['categories']]) counts_label_val=dict([(h['name'], 0) for h in val['categories']]) for i in a['annotations']: counts_label_all[category_dic[i['category_id']]] += 1 for j in train['annotations']: · counts_label_train[category_dic[j['category_id']]] += 1 for n in val['annotations']: counts_label_val[category_dic[n['category_id']]] += 1 print("总数据集每个类别的标注数量: {}".format(counts_label_all)) print("train集每个类别的标注数量: {}".format(counts_label_train)) print("val集每个类别的标注数量: {}".format(counts_label_val)) # - # 打印 # %matplotlib inline plt.style.use({'figure.figsize':(10, 10)}) indexs=counts_label.keys() values=counts_label.values() Count_df=pd.DataFrame(list(values),index=indexs) Count_df.plot(kind='pie',y=Count_df.columns) # 存在相同标签数量的图的数量的统计直方图 # %matplotlib inline plt.style.use({'figure.figsize':(20, 8)}) annoto_count={} for i in a['annotations']: annoto_count[i['image_id']]=annoto_count.setdefault(i['image_id'],0)+1 indes_list=set(annoto_count.values()) indes_list values_count=[list(annoto_count.values()).count(i) for i in indes_list] pd.DataFrame(values_count,index=indes_list,columns=['标签数量']).plot(kind='bar') # 有1个标注的图的数量有440张 # 有3个标注的图的数量有580张 # 剔除冗余信息 anntotations =[] for i in a['annotations']: an=i an.pop('id') # anns的标注id an.pop('iscrowd') anntotations.append(an) # 将标签于对应图片合并起来,形成新的数据格式 data = [] per_sample = {} for img in a['images']: sample_img = img annota_list = [] # 保存该图片对应的标签 for per in anntotations: if per['image_id']==img['id']: # 将一张图的所有的ann装进annota_list中 annota_list.append(per) for k in annota_list: anntotations.remove(k) # 把拿出来的ann在总的标注文件中都删除掉 sample_img['annotations'] = annota_list data.append(sample_img) # 定义一个画图函数 from PIL import Image, ImageFont, ImageDraw import numpy as np import colorsys import matplotlib.pyplot as plt def plot_imgs(img_data,gap=10,path=''): files_name=img_data['file_name'] img_annotations=img_data['annotations'] n=len(img_annotations) boxs=np.zeros((n,4)) tag=[] 
img=Image.open('F:/study/0_Project/sea_detection/2020dataset/VOC2007/VOCdevkit/VOC2007/JPEGImages/'+files_name) # 图片路径 img_w=img.size[0] img_h=img.size[1] for i in range(n): bbox=img_annotations[i]['bbox'] tag.append(category_dic[img_annotations[i]['category_id']]) y1 = max(0, np.floor(bbox[1] + 0.5).astype('int32')) x1 = max(0, np.floor(bbox[0] + 0.5).astype('int32')) y2 = min(img_h, np.floor(bbox[1]+bbox[3] + 0.5).astype('int32')) x2= min(img_w, np.floor(bbox[0]+bbox[2] + 0.5).astype('int32')) boxs[i]=[x1,y1,x2,y2] font = ImageFont.truetype(font="simsun.ttc",size=np.floor(3.5e-2 * img_w).astype('int32'),encoding="unic") hsv_tuples = [(x / n, 1., 1.)for x in range(n)] colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples)) colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors)) for index in range(len(boxs)): draw = ImageDraw.Draw(img) label_size = draw.textsize(tag[index], font) text_origin = np.array([20,25+index*label_size[1]]) for i in range(gap): draw.rectangle( [boxs[index][0] + i, boxs[index][1] + i, boxs[index][2] - i, boxs[index][3] - i],outline=colors[index]) # draw.rectangle(list(),outline=colors[index]) draw.rectangle( [tuple(text_origin), tuple(text_origin + label_size)], fill=colors[index]) draw.text(text_origin,tag[index], fill=(0, 0, 0), font=font) # %matplotlib inline plt.style.use({'figure.figsize':(20, 10)}) plt.imshow(img) for d in data: if len(d['annotations'])==21: break plot_imgs(d,gap=2,path='') # + # 对所有标注长宽做统计 total_size=[] total_height=[] total_wh=[] for im in data: # 每张图的信息 for b in im['annotations']: # 每张图的每个标注 # total_width += [b['bbox'][2]] # total_height += [b['bbox'][3]] wh = round(b['bbox'][2]/b['bbox'][3], 0) if wh < 1 : wh = round(b['bbox'][3]/b['bbox'][2],0) total_wh += [wh] # 所有标签的长宽高比例 box_wh_unique = list(set(total_wh)) box_wh_count=[total_wh.count(i) for i in box_wh_unique] bbox_wh_dict = {} for i, key in enumerate(box_wh_unique): print('宽高比{}: 数量:{}'.format(key, box_wh_count[i])) 
# 绘图 wh_df = pd.DataFrame(box_wh_count,index=box_wh_unique,columns=['宽高比数量']) wh_df.plot(kind='bar',color="#55aacc") plt.show() # + # 对所有标注面积比做统计 total_size704_676=[] total_size1920_1080=[] total_size3840_2160=[] total_size720_405=[] total_size586_481=[] total_wh=[] for im in data: # 每张图的信息 for b in im['annotations']: # 每张图的每个标注 if (im['width'], im['height']) == list(unique)[0]: size = round(b['bbox'][2] * b['bbox'][3]) total_size704_676 += [size] elif (im['width'], im['height']) == list(unique)[1]: size = round(b['bbox'][2] * b['bbox'][3]) total_size1920_1080 += [size] elif (im['width'], im['height']) == list(unique)[2]: size = round(b['bbox'][2] * b['bbox'][3]) total_size3840_2160 += [size] elif (im['width'], im['height']) == list(unique)[3]: size = round(b['bbox'][2] * b['bbox'][3]) total_size720_405 += [size] elif (im['width'], im['height']) == list(unique)[4]: size = round(b['bbox'][2] * b['bbox'][3]) total_size586_481 += [size] max_size = max(total_size704_676) gap = 1000 group_names = list(range(0,20000,gap)) size_cuts = pd.cut(total_size704_676,bins=group_names,labels=group_names[:-1]) total_df=pd.DataFrame({'704_676':size_cuts.value_counts()}) total_df.plot(kind='bar') max_size = max(total_size1920_1080) gap = 1000 group_names = list(range(0,90000,gap)) size_cuts = pd.cut(total_size1920_1080,bins=group_names,labels=group_names[:-1]) total_df=pd.DataFrame({'1920_1080':size_cuts.value_counts()}) total_df.plot(kind='bar') max_size = max(total_size3840_2160) gap = 1000 group_names = list(range(0,60000,gap)) size_cuts = pd.cut(total_size3840_2160,bins=group_names,labels=group_names[:-1]) total_df=pd.DataFrame({'3840_2160':size_cuts.value_counts()}) total_df.plot(kind='bar') max_size = max(total_size720_405) gap = 500 group_names = list(range(0,25000,gap)) size_cuts = pd.cut(total_size720_405,bins=group_names,labels=group_names[:-1]) total_df=pd.DataFrame({'720_405':size_cuts.value_counts()}) total_df.plot(kind='bar') max_size = max(total_size586_481) gap = 100 
group_names = list(range(0,max_size,gap)) size_cuts = pd.cut(total_size586_481,bins=group_names,labels=group_names[:-1]) total_df=pd.DataFrame({'586_481':size_cuts.value_counts()}) total_df.plot(kind='bar')
detect/work_at_height/exploratory_data_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 2-D unsteady-state heat conduction
#
# Explicit (forward-Euler, Jacobi) finite-difference solution of
#   dT/dt = alpha * (d2T/dx2 + d2T/dy2 + q_dot / k)
# on a rectangular plate with fixed-temperature (Dirichlet) boundaries and a
# point heat source at the centre node of the grid.

# import libraries
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1 as axes_grid1
import numpy as np
import os

print("importing done")

# Simulation parameters
tt = 10000       # total number of time steps
dt = 1           # time step; the explicit scheme is stable for dt <= dx**2 / (4 * alpha)
lx = 4           # length of domain in x
ly = 4           # length of domain in y
nx = 50          # number of nodes in x
ny = 50          # number of nodes in y
dx = lx/nx       # grid spacing in x
dy = ly/ny       # grid spacing in y
alpha = 4.66E-6  # thermal diffusivity
k = 50.2         # thermal conductivity

print("initialization done")

# Volumetric heat source: a single hot node at the centre of the grid.
q_dot = np.zeros((nx, ny))
q_dot[25, 25] = 10000000

print("heat initialization done")

# +
# Temperature field and Dirichlet boundary conditions.
EBC = 700  # east boundary condition
WBC = 700  # west boundary condition
NBC = 303  # north boundary condition
SBC = 303  # south boundary condition

# NOTE(fix): the original interior-fill loop never reset its inner counter,
# so only one interior row was actually set to 303 K and the rest of the
# interior stayed at 0 K.  np.full initialises the whole field correctly.
T = np.full((nx, ny), 303.0)  # temperature profile, interior at 303 K
T[0, :] = EBC
T[-1, :] = WBC
T[:, 0] = NBC   # columns are set after the rows, so corners take the N/S values
T[:, -1] = SBC

print("temperature initialization done")
# -

# Time marching.
# Bug fixes versus the original version:
#  * ``T_new = T`` only aliased the array, so the "new" field was updated in
#    place (an accidental Gauss-Seidel-like sweep); ``T.copy()`` restores the
#    intended Jacobi update.
#  * the update is now ``T + dt * alpha * (...)`` so it remains correct for
#    time steps other than dt == 1 (the original divided T by dt instead of
#    multiplying the increment by dt).
# The interior update is vectorised, replacing the O(nx*ny) Python loops per
# step with a handful of array operations, and progress is reported
# periodically instead of printing (and clearing the screen) on every step.
for t in range(tt):
    T_new = T.copy()
    lap = ((T[2:, 1:-1] - 2.0 * T[1:-1, 1:-1] + T[:-2, 1:-1]) / dx**2
           + (T[1:-1, 2:] - 2.0 * T[1:-1, 1:-1] + T[1:-1, :-2]) / dy**2)
    T_new[1:-1, 1:-1] = T[1:-1, 1:-1] + dt * alpha * (lap + q_dot[1:-1, 1:-1] / k)
    T = T_new
    if t % 1000 == 0:
        print("time =", t)

print("simulation done")

# +
plt.imshow(T, cmap='hot', interpolation='nearest')
plt.colorbar()
# -
2 Dimensional unsteady state conduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # Adadelta # :label:`sec_adadelta` # # # Adadelta是AdaGrad的另一种变体( :numref:`sec_adagrad`), # 主要区别在于前者减少了学习率适应坐标的数量。 # 此外,广义上Adadelta被称为没有学习率,因为它使用变化量本身作为未来变化的校准。 # Adadelta算法是在 :cite:`Zeiler.2012`中提出的。 # # ## Adadelta算法 # # 简而言之,Adadelta使用两个状态变量,$\mathbf{s}_t$用于存储梯度二阶导数的漏平均值,$\Delta\mathbf{x}_t$用于存储模型本身中参数变化二阶导数的泄露平均值。请注意,为了与其他出版物和实现的兼容性,我们使用作者的原始符号和命名(没有其他真正理由为什么应该使用不同的希腊变量来表示在动量中用于相同用途的参数,即AdaGrad、RMSProp和Adadelta)。 # # 以下是Adadelta的技术细节。鉴于参数du jour是$\rho$,我们获得了与 :numref:`sec_rmsprop`类似的以下泄漏更新: # # $$\begin{aligned} # \mathbf{s}_t & = \rho \mathbf{s}_{t-1} + (1 - \rho) \mathbf{g}_t^2. # \end{aligned}$$ # # 与 :numref:`sec_rmsprop`的区别在于,我们使用重新缩放的梯度$\mathbf{g}_t'$执行更新,即 # # $$\begin{aligned} # \mathbf{x}_t & = \mathbf{x}_{t-1} - \mathbf{g}_t'. \\ # \end{aligned}$$ # # 那么,调整后的梯度$\mathbf{g}_t'$是什么?我们可以按如下方式计算它: # # $$\begin{aligned} # \mathbf{g}_t' & = \frac{\sqrt{\Delta\mathbf{x}_{t-1} + \epsilon}}{\sqrt{{\mathbf{s}_t + \epsilon}}} \odot \mathbf{g}_t, \\ # \end{aligned}$$ # # 其中$\Delta \mathbf{x}_{t-1}$是重新缩放梯度的平方$\mathbf{g}_t'$的泄漏平均值。我们将$\Delta \mathbf{x}_{0}$初始化为$0$,然后在每个步骤中使用$\mathbf{g}_t'$更新它,即 # # $$\begin{aligned} # \Delta \mathbf{x}_t & = \rho \Delta\mathbf{x}_{t-1} + (1 - \rho) {\mathbf{g}_t'}^2, # \end{aligned}$$ # # 和$\epsilon$(例如$10^{-5}$这样的小值)是为了保持数字稳定性而加入的。 # # ## 代码实现 # # Adadelta需要为每个变量维护两个状态变量,即$\mathbf{s}_t$和$\Delta\mathbf{x}_t$。这将产生以下实施。 # # + origin_pos=2 tab=["pytorch"] # %matplotlib inline import torch from d2l import torch as d2l def init_adadelta_states(feature_dim): s_w, s_b = torch.zeros((feature_dim, 1)), torch.zeros(1) delta_w, delta_b = torch.zeros((feature_dim, 1)), torch.zeros(1) return ((s_w, delta_w), (s_b, delta_b)) def adadelta(params, states, hyperparams): rho, eps = hyperparams['rho'], 1e-5 for p, (s, delta) in zip(params, states): with torch.no_grad(): # 
In-placeupdatesvia[:] s[:] = rho * s + (1 - rho) * torch.square(p.grad) g = (torch.sqrt(delta + eps) / torch.sqrt(s + eps)) * p.grad p[:] -= g delta[:] = rho * delta + (1 - rho) * g * g p.grad.data.zero_() # + [markdown] origin_pos=4 # 对于每次参数更新,选择$\rho = 0.9$相当于10个半衰期。由此我们得到: # # + origin_pos=5 tab=["pytorch"] data_iter, feature_dim = d2l.get_data_ch11(batch_size=10) d2l.train_ch11(adadelta, init_adadelta_states(feature_dim), {'rho': 0.9}, data_iter, feature_dim); # + [markdown] origin_pos=6 # 为了简洁实现,我们只需使用`Trainer`类中的`adadelta`算法。 # # + origin_pos=8 tab=["pytorch"] trainer = torch.optim.Adadelta d2l.train_concise_ch11(trainer, {'rho': 0.9}, data_iter) # + [markdown] origin_pos=10 # ## 小结 # # * Adadelta没有学习率参数。相反,它使用参数本身的变化率来调整学习率。 # * Adadelta需要两个状态变量来存储梯度的二阶导数和参数的变化。 # * Adadelta使用泄漏的平均值来保持对适当统计数据的运行估计。 # # ## 练习 # # 1. 调整$\rho$的值,会发生什么? # 1. 展示如何在不使用$\mathbf{g}_t'$的情况下实现算法。为什么这是个好主意? # 1. Adadelta真的是学习率为0吗?你能找到Adadelta无法解决的优化问题吗? # 1. 将Adadelta的收敛行为与AdaGrad和RMSProp进行比较。 # # + [markdown] origin_pos=12 tab=["pytorch"] # [Discussions](https://discuss.d2l.ai/t/5772) #
d2l/pytorch/chapter_optimization/adadelta.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="0oGKniD-fesc"
# ---
#
# # Object detection train 2.6.0 with TensorFlow >= 2.6.0
# This notebook will train a model for object detection purposes.
# It can be run as a Jupyter notebook in the Google Colab environment or exported as a Python file and run from a command line.
#
# This software automatically detects whether you are working in a Colab environment or on your local machine.
#
# For a local machine it just requires Python >= 3.7 to be installed.
#
# All the operations for installing the required libraries and for preparing the data needed by the training algorithm will be done effortlessly for you.
# ## Train preparation:
# * Collect a set of images containing the objects that you want to train on.
# * Split the set into two different folders: one for training and the other for evaluation. The number of evaluation images could be from 10% to 30% of the train images.
# * Label the images using a standard image annotation tool such as [labelImg](https://github.com/tzutalin/labelImg), [VoTT](https://github.com/microsoft/VoTT), etc., and save the xml for each picture in the Pascal VOC format.
# * Copy the folders with the prepared image sets to your GDrive (if you are working in a Colab environment).
# * Configure the train parameters listed in the next notebook's cell.
#
# ## Train:
# Run the process and enjoy your time while waiting for the train to complete.
#
# You can also stop the train and restart it again later; if you didn't clean the output directory for the model, the train will restart from the last checkpoint, continuing the fine tuning of the model.
# The progress of the train can be followed with Tensorboard (already included in this notebook).
#
# ### For Colab environment train:
# The notebook needs to mount your GDrive.
It will ask you the access authorization. Follow the instructions. # # --- # # # # # + id="p9wR0KipJaPZ" cellView="form" #@title #Notebook configuration #begin-module: default_cfg.py class Cfg(object): #@markdown ## Data on Google Drive: #@markdown (The data will be treated in a Google Drive space if enabled) data_on_drive = True #@param {type:"boolean"} #@markdown --- #@markdown ## Base model: #@markdown (The base model from which the train will start) model_type = 'SSD MobileNet v2 320x320' #@param ['CenterNet HourGlass104 512x512', 'CenterNet HourGlass104 1024x1024', 'CenterNet Resnet50 V1 FPN 512x512', 'CenterNet Resnet101 V1 FPN 512x512', 'CenterNet Resnet50 V2 512x512', 'CenterNet MobileNetV2 FPN 512x512', 'EfficientDet D0 512x512', 'EfficientDet D1 640x640', 'EfficientDet D2 768x768', 'EfficientDet D3 896x896', 'EfficientDet D4 1024x1024', 'EfficientDet D5 1280x1280', 'EfficientDet D6 1280x1280', 'EfficientDet D7 1536x1536', 'SSD MobileNet v2 320x320', 'SSD MobileNet V1 FPN 640x640', 'SSD MobileNet V2 FPNLite 320x320', 'SSD MobileNet V2 FPNLite 640x640', 'SSD ResNet50 V1 FPN 640x640 (RetinaNet50)', 'SSD ResNet50 V1 FPN 1024x1024 (RetinaNet50)', 'SSD ResNet101 V1 FPN 640x640 (RetinaNet101)', 'SSD ResNet101 V1 FPN 1024x1024 (RetinaNet101)', 'SSD ResNet152 V1 FPN 640x640 (RetinaNet152)', 'SSD ResNet152 V1 FPN 1024x1024 (RetinaNet152)', 'Faster R-CNN ResNet50 V1 640x640', 'Faster R-CNN ResNet50 V1 1024x1024', 'Faster R-CNN ResNet50 V1 800x1333', 'Faster R-CNN ResNet101 V1 640x640', 'Faster R-CNN ResNet101 V1 1024x1024', 'Faster R-CNN ResNet101 V1 800x1333', 'Faster R-CNN ResNet152 V1 640x640', 'Faster R-CNN ResNet152 V1 1024x1024', 'Faster R-CNN ResNet152 V1 800x1333', 'Faster R-CNN Inception ResNet V2 640x640', 'Faster R-CNN Inception ResNet V2 1024x1024', 'Mask R-CNN Inception ResNet V2 1024x1024'] #@markdown --- #@markdown ## Images directories: #@markdown The GDrive directory (Colab execution) or the local directory (machine execution) where are located the 
images set for the train and the one for the evaluation. train_images_dir = 'images/train' #@param {type:"string"} eval_images_dir = 'images/eval' #@param {type:"string"} #@markdown --- #@markdown ## Train directory: #@markdown The GDrive directory (Colab execution) or the local directory (machine execution) where the checkpoints will be saved. trained_model_dir = 'trained-model' #@param {type:"string"} #@markdown --- #@markdown ## Export directory: #@markdown The GDrive directory (Colab execution) or the local directory (machine execution) where the exported model will be saved. exported_model_dir = 'exported-model' #@param {type:"string"} #@markdown --- #@markdown ## Export ONNX: #@markdown The name of the exported ONNX model. It will be created in the exported_model_dir exported_onnx = 'saved_model.onnx' #@param {type:"string"} #@markdown --- #@markdown ## Export frozen: #@markdown The name of the exported frozen graph. It will be created in the exported_model_dir exported_frozen_graph = 'frozen_graph.pb' #@param {type:"string"} #@markdown --- #@markdown ## Maximum training steps: #@markdown The maximun number of train steps. If < 0 it will be limited by the base model configuration. num_train_steps = -1 #@param {type:"integer"} #@markdown --- #@markdown ## Batch size: #@markdown The size of the batch. If < 1 the value contained in the model pipeline configuration will be used batch_size = 16 #@param {type:"integer"} #@markdown --- #end-module # + id="r7g6Wr_PwP3O" cellView="form" #@title #Mount Google Drive #@markdown Mounting of the Google Drive (if enabled in the configuration). 
#begin-module: mount_google_drive.py import os import sys try: from default_cfg import Cfg except: pass def mount_google_drive(): if (not os.path.exists('/mnt/MyDrive')): print('Mounting the GDrive') from google.colab import drive drive.mount('/mnt') else: print('GDrive already mounted') if __name__ == '__main__': if (Cfg.data_on_drive and 'google.colab' in sys.modules): mount_google_drive() #end-module: mount_google_drive.py #@markdown --- # + id="WmNtRmJ8HQkD" cellView="form" #@title #Setup { form-width: "10%" } #@markdown Installation of the object detection API # Clone the object detection model builder API. import os # Kill any running processes and disconnect symbolic links if (os.path.exists('/mnt/MyDrive')): if (os.path.exists('/content/eval-images')): os.unlink('/content/eval-images') if (os.path.exists('/content/train-images')): os.unlink('/content/train-images') if (os.path.exists('/content/trained-model')): os.unlink('/content/trained-model') if (os.path.exists('/content/eval.log')): os.unlink('/content/eval.log') if (os.path.exists('/content/train.log')): os.unlink('/content/train.log') if (os.path.exists('/content/train.pid')): with open('/content/train.pid', 'r') as f: lines = f.readlines() os.system(f'kill -9 {lines[0]}') os.unlink('/content/train.pid') if (os.path.exists('/content/eval.pid')): with open('/content/eval.pid', 'r') as f: lines = f.readlines() os.system(f'kill -9 {lines[0]}') os.unlink('/content/eval.pid') # Clone the repository and install environment #program_dir = '/mnt/MyDrive/ODModelBuilderTF' if (os.path.exists('/mnt/MyDrive')) else '/usr/src/ODModelBuilderTF' program_dir = '/usr/src/ODModelBuilderTF' repo_url = 'https://github.com/darth-vader-lg/ODModelBuilderTF' repo_ref = 'v2.2.0' if (not os.path.isdir(program_dir)): # !git clone --depth 1 --branch {repo_ref} {repo_url} {program_dir} program_dir = os.path.join(program_dir, 'ODModelBuilderTF_Py') import os import shutil shutil.copy2(os.path.join(program_dir, 
'requirements-colab.txt'), os.path.join(program_dir, 'requirements.txt')) # !cd {program_dir}; python install_virtual_environment.py --no-cache --no-custom-tf #@markdown --- # + id="KCY_BIaRUVBr" cellView="form" #@title #Train { form-width: "10%" } #@markdown Train of the model with the configured parameters. # Train model_type = Cfg.model_type train_images_dir = Cfg.train_images_dir eval_images_dir = Cfg.eval_images_dir model_dir = Cfg.trained_model_dir num_train_steps = Cfg.num_train_steps batch_size = Cfg.batch_size annotations_dir = "/content/annotations" print("Start of train.") # !nohup bash -c "cd {program_dir}; python3 main.py --eval_on_train_data --model_type='{model_type}' --train_images_dir={train_images_dir} --eval_images_dir={eval_images_dir} --model_dir={model_dir} --annotations_dir={annotations_dir} --num_train_steps={num_train_steps} --batch_size={batch_size}" 2>&1> train.log & echo $! > train.pid # #!cd {program_dir} && python3 main.py --eval_on_train_data --model_type='{model_type}' --train_images_dir={train_images_dir} --eval_images_dir={eval_images_dir} --model_dir={model_dir} --num_train_steps={num_train_steps} --batch_size={batch_size} #@markdown --- # + id="gH6SC1IDGOuH" cellView="form" #@title #Evaluation { form-width: "10%" } #@markdown Evaluate the model with the configured parameters. # Evaluate import os def check_train_started(): if (not os.path.isdir(annotations_dir)): return False if (not os.path.exists(os.path.join(annotations_dir, "pipeline.config"))): return False if (not os.path.exists(os.path.join(annotations_dir, "label_map.pbtxt"))): return False return True if (not check_train_started()): print ("Waiting the start of train...") import time while (not check_train_started()): time.sleep(1) print ("Train started.") checkpoint_dir = Cfg.trained_model_dir # !nohup bash -c "cd '{program_dir}' && python3 main.py --num_train_steps=0 --checkpoint_dir={checkpoint_dir} --annotations_dir={annotations_dir}" 2>&1> eval.log & echo $! 
> eval.pid #@markdown --- # + id="bOy5KI2ncnWd" cellView="form" #@title #Tensorboard { form-width: "10%" } #@markdown Display the tensorboard. # Tensorboard (optional) trained_model_dir = os.path.join('/mnt/MyDrive', Cfg.trained_model_dir) if Cfg.data_on_drive else Cfg.trained_model_dir import os def check_train_started(): if (not os.path.exists(os.path.join(trained_model_dir, "eval"))): return False return True if (not check_train_started()): print ("Waiting the start of train...") import time while (not check_train_started()): time.sleep(1) print ("Train started.") # %load_ext tensorboard # %tensorboard --logdir {trained_model_dir} # + id="7GsEePJzQ8Lt" cellView="form" #@title #Progress { form-width: "10%" } #@markdown Show the progress of the training. #@markdown It could be stopped by the user to export the model and after restarted.<br/> #@markdown The train process it's never stopped in any case; it continues in background. # Progress display import os import time train_lines_count = 0 eval_lines_count = 0 train_printed = False printing_stats = False while (True): if (os.path.isfile('/content/train.log')): with open('/content/train.log', 'rt') as f: lines = f.read().splitlines() n_lines = len(lines) if (n_lines > train_lines_count): if (not train_printed): print('=' * 80) print('Train') train_printed = True for i in range(train_lines_count, n_lines): if (lines[i].startswith('INFO:tensorflow:') or printing_stats): if ('{\'' in lines[i]): printing_stats = True if ('}' in lines[i]): printing_stats = False print(lines[i].replace('INFO:tensorflow:', '')) train_lines_count = n_lines if (os.path.isfile('/content/eval.log')): with open('/content/eval.log', 'rt') as f: lines = f.read().splitlines() n_lines = len(lines) if (n_lines > eval_lines_count): if (train_printed): print('=' * 80) print('Eval') train_printed = False for i in range(eval_lines_count, n_lines): if (lines[i].startswith('INFO:tensorflow:')): print(lines[i].replace('INFO:tensorflow:', '')) 
eval_lines_count = n_lines time.sleep(5) #@markdown --- # + id="wlu3arbqfh0d" cellView="form" #@title #Export { form-width: "10%" } #@markdown Export the model with the configured parameters. # Train trained_checkpoint_dir = Cfg.trained_model_dir exported_model_dir = Cfg.exported_model_dir exported_onnx = Cfg.exported_onnx exported_frozen_graph = Cfg.exported_frozen_graph # !cd {program_dir} && python main.py --num_train_steps=0 --trained_checkpoint_dir={trained_checkpoint_dir} --output_directory={exported_model_dir} --onnx={exported_onnx} --frozen_graph={exported_frozen_graph} #@markdown ---
ODModelBuilderTF_Py/ODModelBuilderTF.ipynb
;; --- ;; jupyter: ;; jupytext: ;; text_representation: ;; extension: .scm ;; format_name: light ;; format_version: '1.5' ;; jupytext_version: 1.14.4 ;; kernelspec: ;; display_name: Calysto Scheme 3 ;; language: scheme ;; name: calysto_scheme ;; --- ;; + vscode={"languageId": "json"} 10 ;; + vscode={"languageId": "json"} (+ 5 3 4) ;; + vscode={"languageId": "json"} (- 9 1) ;; + vscode={"languageId": "json"} (/ 6 2) ;; + vscode={"languageId": "json"} (+ (* 2 4) (- 4 6)) ;; + vscode={"languageId": "json"} (define a 3) ;; + vscode={"languageId": "json"} (define b (+ a 1)) ;; + vscode={"languageId": "json"} b ;; - (+ a b (* a b)) (= a b) (if (and ( > b a) (< b (* a b)) ) b a ) (cond ((= a 4) 6) ((= b 4) (+ 6 7 a) ) (else 25) ) (* (cond ((> a b ) a) ((< a b) b) (else -1 )) (+ a 1))
ch1/1.1/ex.1.01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [pyFAI](https://github.com/silx-kit/pyFAI) cookbooks and tutorials # # See the [full documentation](http://www.silx.org/doc/pyFAI/latest/) for more information. # ## Cookbooks # # - [Integration with python](../doc/source/usage/cookbook/integration_with_python.ipynb) # - [Integration with scripts](../doc/source/usage/cookbook/integration_with_scripts.ipynb) # ## Tutorials # # - [Introduction](../doc/source/usage/tutorial/Introduction/introduction.ipynb) # - [CCD calibration](../doc/source/usage/tutorial/CCD_Calibration/CCD_calibration.ipynb) # - [Calibrant](../doc/source/usage/tutorial/Calibrant/Calibrant.ipynb) # - [New calibrant](../doc/source/usage/tutorial/Calibrant/new_calibrant.ipynb) # - [Distortion](../doc/source/usage/tutorial/Distortion/Distortion.ipynb) # - [Geometry](../doc/source/usage/tutorial/Geometry/geometry.ipynb) # - Goniometer: # # - [Rotation-Pilatus100k](../doc/source/usage/tutorial/Goniometer/Rotation-Pilatus100k/Multi120_Pilatus100k.ipynb) # - [Rotation-XPADS540](../doc/source/usage/tutorial/Goniometer/Rotation-XPADS540/D2AM-15.ipynb) # - [Translation-Pilatus6M](../doc/source/usage/tutorial/Goniometer/Translation-Pilatus6M/TTcalibration.ipynb) # # - [Inpainting](../doc/source/usage/tutorial/Inpainting/Inpainting.ipynb) # - [LogScale](../doc/source/usage/tutorial/LogScale/Guinier.ipynb) # - [MakeCalibrant](../doc/source/usage/tutorial/MakeCalibrant/make_calibrant.ipynb) # - [MultiGeometry](../doc/source/usage/tutorial/MultiGeometry/MultiGeometry.ipynb) # - [Ellipse](../doc/source/usage/tutorial/Ellipse/ellipse.ipynb) # - Soleil: # # - [Cristal_Mythen](../doc/source/usage/tutorial/Soleil/Cristal_Mythen.ipynb) # - [Diffabs_Calibration_K6C](../doc/source/usage/tutorial/Soleil/Soleil_Diffabs_Calibration_K6C.ipynb) # - 
[Diffabs_Diffraction_Tomography](../doc/source/usage/tutorial/Soleil/Soleil_Diffabs_Diffraction_Tomography.ipynb) # # - Thick Detector: # # - [Deconvolution](../doc/source/usage/tutorial/ThickDetector/deconvolution.ipynb) # - [Goniometer id28](../doc/source/usage/tutorial/ThickDetector/goniometer_id28.ipynb) # - [Raytracing](../doc/source/usage/tutorial/ThickDetector/raytracing.ipynb) # # - [Variance](../doc/source/usage/tutorial/Variance/Variance.ipynb)
binder/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from keras.preprocessing import sequence from keras.layers.core import Dense, Dropout, Flatten,Activation from keras.layers import MaxPooling1D, Flatten from keras.layers import Embedding, LSTM, Conv1D, GlobalMaxPooling1D x_train = sequence.pad_sequences(x_train, maxlen=200) x_val = sequence.pad_sequences(x_val, maxlen=200) x_test = sequence.pad_sequences(x_test, maxlen=200) # MLP model = Sequential() model.add(Embedding(20000, 128, input_length=200)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dense(1, activation='sigmoid')) # LSTM model2 = Sequential() model2.add(Embedding(20000, 128)) model2.add(LSTM(128)) model2.add(Dense(1, activation='sigmoid')) # CNN model3 = Sequential() model3.add(Embedding(20000, 128, input_length=200)) model3.add(Dropout(0.2)) model3.add(Conv1D(256, 3, padding='valid', activation='relu', strides=1)) model3.add(GlobalMaxPooling1D()) model3.add(Dense(128, activation='relu')) model3.add(Dropout(0.2)) model3.add(Dense(1, activation='sigmoid')) # 순환 컨볼루션 신경망 모델 model4 = Sequential() model4.add(Embedding(20000, 128, input_length=200)) model4.add(Dropout(0.2)) model4.add(Conv1D(256, 3, padding='valid', activation='relu', strides=1)) model4.add(MaxPooling1D(pool_size=4)) model4.add(LSTM(128)) model4.add(Dense(1, activation='sigmoid')) from keras.datasets import imdb max_features = 20000 text_max_words = 200 (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features) # 훈련셋과 검증셋 분리 x_val = x_train[20000:] y_val = y_train[20000:] x_train = x_train[:20000] y_train = y_train[:20000] # 데이터셋 전처리 : 문장 길이 맞추기 x_train = sequence.pad_sequences(x_train, maxlen=text_max_words) x_val = sequence.pad_sequences(x_val, maxlen=text_max_words) x_test = sequence.pad_sequences(x_test, 
maxlen=text_max_words) model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) hist = model2.fit(x_train, y_train, epochs=2, batch_size=64, validation_data=(x_val, y_val)) # %matplotlib inline import matplotlib.pyplot as plt # + fig, loss_ax = plt.subplots() acc_ax = loss_ax.twinx() loss_ax.plot(hist.history['loss'], 'y', label='train loss') loss_ax.plot(hist.history['val_loss'], 'r', label='val loss') loss_ax.set_ylim([-0.2, 1.2]) acc_ax.plot(hist.history['acc'], 'b', label='train acc') acc_ax.plot(hist.history['val_acc'], 'g', label='val acc') acc_ax.set_ylim([-0.2, 1.2]) loss_ax.set_xlabel('epoch') loss_ax.set_ylabel('loss') acc_ax.set_ylabel('accuray') loss_ax.legend(loc='upper left') acc_ax.legend(loc='lower left') plt.show() # - # 6. 모델 평가하기 loss_and_metrics = model.evaluate(x_test, y_test, batch_size=64) print('## evaluation loss and_metrics ##') print(loss_and_metrics)
nlp/keras/keras_2_mlp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import mxnet as mx from mxnet.gluon.model_zoo import vision import os import numpy as np import glob import pandas as pd #from scipy.spatial.distance import cosine from IPython.display import Image # set the context on CPU, switch to GPU if there is one available ctx = mx.cpu() import glob2 from tqdm import tqdm import traceback # - def prePrep(image_fn): ''' inputs: full image fn output: numpy vector ''' image = mx.image.imdecode(open(image_fn, 'rb').read()).astype(np.float32) resized = mx.image.resize_short(image, 224) #minimum 224x224 images cropped, crop_info = mx.image.center_crop(resized, (224, 224)) normalized = mx.image.color_normalize(cropped/255, mean=mx.nd.array([0.485, 0.456, 0.406]), std=mx.nd.array([0.229, 0.224, 0.225])) # the network expect batches of the form (N,3,224,224) flipped_axis = normalized.transpose((2,0,1)) # Flipping from (224, 224, 3) to (3, 224, 224) batchified = flipped_axis.expand_dims(axis=0) # change the shape from (3, 224, 224) to (1, 3, 224, 224) return densenet.features(batchified)[0].asnumpy() # %time densenet = vision.densenet201(pretrained=True) type(densenet) # + #all_header_files = glob2.glob('src/**/*_dogCrop.jpg') fn_df_save = os.path.join(os.path.dirname(os.getcwd()), 'data', 'processed','0.0.6-whs-dogVectors.h5') #fn_df_load = os.path.join(os.path.dirname(os.getcwd()), 'data', 'processed','0.0.3-whs-dogVectors.h5') if not os.path.isdir(os.path.dirname(fn_df_save)): os.mkdir(os.path.dirname(fn_df_save)) df = pd.DataFrame() df['fn'] = None df['vector'] = None fns = glob2.glob(os.path.join(os.path.dirname(os.getcwd()), 'data', 'raw','**/*_dogCrop.jpg')) df['fn'] = fns '''for index, fn in enumerate(tqdm(fns)): #print('Processing: ',fn) if os.path.isdir(fn): fn = glob.iglob(os.path.join(fns, '*')) 
df.loc[index,'fn'] = fn ''' print('Now Apply-ing') # %time df['vector'] = df['fn'].apply(lambda x: prePrep(x)) print('Done Apply-ing') df.to_hdf(fn_df_save, key='df') for i in range(3): print(df.loc[i]) df.tail() # - # + #df['vector'].loc[0] #fn_save = os.path.join(os.path.dirname(os.getcwd()), 'data', 'processed','0.0.2-whs-dogVectors.pickle') #df.to_pickle(fn_save) # - ''' df = pd.read_pickle(fn_save) labels = [df['fn'].loc[index].split('/')[-1] for index in range(df.count()[0])] df_cross = pd.DataFrame(columns=labels, index=labels) for index in range(df.count()[0]): for sub_index in range(df.count()[0]): u = df['vector'].loc[index] v = df['vector'].loc[sub_index] label1 = df['fn'].loc[index].split('/')[-1] label2 = df['fn'].loc[sub_index].split('/')[-1] df_cross[label1][label2] = np.dot(u,v) / (np.linalg.norm(u) * np.linalg.norm(v)) df_cross.tail() '''
notebooks/0.2.1-whs-transformWithMxNet_YOLO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np from sklearn import datasets from sklearn.mixture import GaussianMixture from sklearn.model_selection import StratifiedKFold from sklearn.preprocessing import MinMaxScaler from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.cluster import MiniBatchKMeans from sklearn.cluster import SpectralClustering # - # ## Clustering # # In this notebook we explore clustering otherwise known as unsupervised learning. In order to examine the effects of clustering it is useful to have a data-set that is labeled with class labels as then we can visualize observe how well the identified clusters correspond to the original clusters. Notice that the clustering algorithms we will explore do NOT take into account the class labels. They are simply used to color the points in the visualizations. Let's start by loading and visualizing a data set consisting of audio features representing music tracks. There are 3 genres (classical, jazz, heavy metal) with 100 instances each. The first data-set we will examine consists of 2 features per track to make visualization easy. Let's look at a scatter plot of the points corresponding to each music track and use color to indicate what is the original class. The scikit-learn documentation has a lot of examples of clustering with interesting plots and visualizations. 
# + (X, y) = datasets.load_svmlight_file("data/3genres.arff.libsvm") X = X.toarray() X = MinMaxScaler().fit_transform(X) y = y.astype(int) target_names = ['classical', 'jazz', 'metal'] print(X.shape) print(y.shape) colors = ['navy', 'turquoise', 'darkorange'] for n, color in enumerate(colors): data = X[y == n] # Plot the training data plt.scatter(data[:, 0], data[:, 1], s=5, color=color, label=target_names[n]) plt.title('Scatter plot of audio features and genres') plt.legend(scatterpoints=1, loc='upper right', prop=dict(size=10)) plt.show() # - # ## K-means clustering # # We can perform k-means clustering on this data with 3 clusters and using the resulting clusters as a way to predict a class label. The fit_predict function of clustering algorithms does just that. Notice that the clustering assign each point to a cluster in an unsupervised manner i.e it only takes into account X not y like classifiers do. Comparing the original scatter plot with the resulting predictions of the clustering algorithm shows where potential erros can happen. As you can see the dark blue cluster "takes over" some of the light blue cluster. # # We can also look at other clustering methods such as spectral clustering and Gaussian Mixture Models. 
# # + random_state = 170 cluster_names = ['cluster 1', 'cluster 2', 'cluster 3'] def plot_clustering(X,y,y_pred): fig = plt.figure(figsize=(12,4)) for n, color in enumerate(colors): data = X[y == n] # Plot the training data plt.subplot(131) plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.15, left=.01, right=.99) plt.scatter(data[:, 0], data[:, 1], s=20, color=color,label=target_names[n]) plt.legend(scatterpoints=1, loc='upper right', prop=dict(size=10)) # Plot the cluster predictions data = X[y_pred == n] h = plt.subplot(132) plt.scatter(data[:, 0], data[:, 1], s=20, marker='x',color=color,label=cluster_names[n]) plt.legend(scatterpoints=1, loc='upper right', prop=dict(size=10)) plt.subplot(133) data = X[y == n] plt.scatter(data[:, 0], data[:, 1], s=20, color=color, label=target_names[n]) data = X[y_pred == n] plt.scatter(data[:, 0], data[:, 1], s=40, marker='x',color=color, label=cluster_names[n]) plt.legend(scatterpoints=1, loc='upper right', prop=dict(size=10)) train_accuracy = np.mean(y_pred.ravel() == y.ravel()) * 100 plt.text(0.7, 0.5, 'Accuracy: %.1f' % train_accuracy, transform=h.transAxes) plt.show() y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X) plot_clustering(X,y,y_pred) y_pred = SpectralClustering(n_clusters=3, eigen_solver='arpack', random_state=random_state,assign_labels = 'kmeans', affinity='nearest_neighbors').fit_predict(X) plot_clustering(X,y,y_pred) gmm = GaussianMixture(n_components=3, random_state=50, max_iter=100) gmm.fit(X) y_pred = gmm.predict(X) plot_clustering(X,y,y_pred) # rerrange cluster numbers to make things work #cluster_mapping = {} #cluster_mapping[0] = 0 #cluster_mapping[1] = 2 #cluster_mapping[2] = 1 y_mapped_predict = np.array([cluster_mapping[i] for i in y_pred]) y_mapped_predict[y_pred == 0] = 0 y_mapped_predict[y_pred == 1] = 2 y_mapped_predict[y_pred == 2] = 1 plot_clustering(X,y,y_mapped_predict) # - # ### Clustering with k-means visualized in 3 dimensions # # If we increase the 
number of feature we use to represent each music track to 3 then we can do scatter plots and perform clustering in the resulting 3D space. In most actual applications clustering is performed in high-dimensional feature spaces so visualization is not easy. At the end of this notebook we explore Principal Component Analysis a methodology for dimensionality reduction. # # + # Modified for documentation by <NAME> # License: BSD 3 clause from sklearn.cluster import KMeans from sklearn import datasets np.random.seed(5) (X, y) = datasets.load_svmlight_file("data/3genres_4features.arff.libsvm") X = X.toarray() X = MinMaxScaler().fit_transform(X) target_names = ['classical', 'jazz', 'metal'] y = y.astype(int) estimators = [('k_means_8', KMeans(n_clusters=8)), ('k_means_3', KMeans(n_clusters=3)), ] fignum = 1 titles = ['8 clusters', '3 clusters'] for name, est in estimators: fig = plt.figure(fignum, figsize=(7, 6)) ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=28, azim=34) est.fit(X) labels = est.labels_ ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float), edgecolor='k') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_zlabel('Feature 3') ax.set_title(titles[fignum - 1]) ax.dist = 12 fignum = fignum + 1 # Plot the ground truth fig = plt.figure(fignum, figsize=(7, 6)) ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=28, azim=34) for name, label in [('classical', 0), ('jazz', 1), ('metal', 2)]: ax.text3D(X[y == label, 3].mean(), X[y == label, 0].mean(), X[y == label, 2].mean()-1.5, name, horizontalalignment='center', bbox=dict(alpha=.2, edgecolor='w', facecolor='w')) # Reorder the labels to have colors matching the cluster results y = np.choose(y, [1, 2, 0]).astype(np.float) ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') 
ax.set_zlabel('Feature 3') ax.set_title('Ground Truth') ax.dist = 12 plt.show() # - # ## Clustering usig GMMs # # The idea behind clustering using Gaussian Mixture Models is that each cluster will correspond to a different # Gaussian shaped component. When visualized in 2D these components can be represented as ellipses. When the covariance matrix is diagonal then these ellipses will be aligned with the axis. When they are spherical that means that the standard deviation of the features in each dimensions is considered equal. In the tied configuration the covariance matrices of each mixture component are tied to be equal. The most flexible case is when full covariance matrices are used. # + colors = ['navy', 'turquoise', 'darkorange'] def make_ellipses(gmm, ax): for n, color in enumerate(colors): if gmm.covariance_type == 'full': covariances = gmm.covariances_[n][:2, :2] elif gmm.covariance_type == 'tied': covariances = gmm.covariances_[:2, :2] elif gmm.covariance_type == 'diag': covariances = np.diag(gmm.covariances_[n][:2]) elif gmm.covariance_type == 'spherical': covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n] v, w = np.linalg.eigh(covariances) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan2(u[1], u[0]) angle = 180 * angle / np.pi # convert to degrees v = 2. * np.sqrt(2.) * np.sqrt(v) ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1], 180 + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(0.5) ax.add_artist(ell) (X, y) = datasets.load_svmlight_file("data/3genres.arff.libsvm") X = X.toarray() X = MinMaxScaler().fit_transform(X) target_names = ['classical', 'jazz', 'metal'] # Break up the dataset into non-overlapping training (75%) and testing # (25%) sets. skf = StratifiedKFold(n_splits=4) # Only take the first fold. 
train_index, test_index = next(iter(skf.split(X, y))) X_train = X[train_index] y_train = y[train_index] X_test = X[test_index] y_test = y[test_index] n_classes = len(np.unique(y_train)) # Try GMMs using different types of covariances. estimators = dict((cov_type, GaussianMixture(n_components=n_classes, covariance_type=cov_type, max_iter=100, random_state=0)) for cov_type in ['spherical', 'diag', 'tied', 'full']) n_estimators = len(estimators) fig = plt.figure(figsize=(6 * n_estimators // 2, 12)) plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05, left=.01, right=.99) for index, (name, estimator) in enumerate(estimators.items()): # Since we have class labels for the training data, we can # initialize the GMM parameters in a supervised manner. estimator.means_init = np.array([X_train[y_train == i].mean(axis=0) for i in range(n_classes)]) # Train the other parameters using the EM algorithm. estimator.fit(X_train) h = plt.subplot(2, n_estimators // 2, index + 1) text= h.text(0,0, "", va="bottom", ha="left") make_ellipses(estimator, h) for n, color in enumerate(colors): data = X[y == n] # Plot the training data plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color, label=target_names[n]) # Plot the test data with crosses for n, color in enumerate(colors): data = X_test[y_test == n] plt.scatter(data[:, 0], data[:, 1], marker='x', color=color) y_train_pred = estimator.predict(X_train) train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100 plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy, transform=h.transAxes) y_test_pred = estimator.predict(X_test) test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100 plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy, transform=h.transAxes) plt.xticks(()) plt.yticks(()) plt.title(name) plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12)) plt.show() # - # ## Principal component analysis # # PCA is a tecnique for dimensionality reduction. 
In this example we take as input a similar audio feature data-set for the 3 genres we have been exploring but with 124 features per track instead. Using PCA we can reduce the dimensionality to 3 dimensions. # + (X, y) = datasets.load_svmlight_file("data/3genres_full.arff.libsvm") print(X.shape) X = X.toarray() X = MinMaxScaler().fit_transform(X) target_names = ['classical', 'jazz', 'metal'] # To getter a better understanding of interaction of the dimensions # plot the first three PCA dimensions fig = plt.figure(1, figsize=(8, 6)) ax = Axes3D(fig, elev=-150, azim=110) X_reduced = PCA(n_components=3).fit_transform(X) ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y, cmap=plt.cm.Set1, edgecolor='k', s=40) ax.set_title("First three PCA directions") ax.set_xlabel("1st eigenvector") ax.w_xaxis.set_ticklabels([]) ax.set_ylabel("2nd eigenvector") ax.w_yaxis.set_ticklabels([]) ax.set_zlabel("3rd eigenvector") ax.w_zaxis.set_ticklabels([]) plt.show() # - # We can use the reduced PCA vectors as a way to visualize the results of clustering in the high-dimensional (124) original feature space. y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X) # To getter a better understanding of interaction of the dimensions # plot the first three PCA dimensions fig = plt.figure(1, figsize=(8, 6)) ax = Axes3D(fig, elev=-150, azim=110) X_reduced = PCA(n_components=3).fit_transform(X) ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y_pred, cmap=plt.cm.Set1, edgecolor='k', s=40) ax.set_title("First three PCA directions") ax.set_xlabel("1st eigenvector") ax.w_xaxis.set_ticklabels([]) ax.set_ylabel("2nd eigenvector") ax.w_yaxis.set_ticklabels([]) ax.set_zlabel("3rd eigenvector") ax.w_zaxis.set_ticklabels([]) plt.show() # We can compare the accuracy of clustering using the original ground truth labels based on the 124 features vs the accuracy using the 3-dimensional features reduced by PCA. 
As can be seen the performance is similar - the increase might be due to reduction of noise in the features. # + y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X) train_accuracy = np.mean(y_pred.ravel() == y.ravel()) * 100 print(train_accuracy) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_reduced) train_accuracy = np.mean(y_pred.ravel() == y.ravel()) * 100 print(train_accuracy) # -
Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Discrete Bayes Animations import matplotlib.pyplot as plt import sys sys.path.insert(0,'..') # allow us to format the book import book_format book_format.set_style() # This notebook creates the animations for the Discrete Bayesian filters chapter. It is not really intended to be a readable part of the book, but of course you are free to look at the source code, and even modify it. However, if you are interested in running your own animations, I'll point you to the examples subdirectory of the book, which contains a number of python scripts that you can run and modify from an IDE or the command line. This module saves the animations to GIF files, which is quite slow and not very interactive. # # On Windows you need to install ffmpeg with # # $ conda install -c conda-forge ffmpeg # # I don't know what to do for linux or MaxOS. 
# + from matplotlib import animation import matplotlib.pyplot as plt import numpy as np from kf_book.book_plots import bar_plot # %matplotlib inline # the predict algorithm of the discrete bayesian filter def predict(pos, move, p_correct, p_under, p_over): n = len(pos) result = np.array(pos, dtype=float) for i in range(n): result[i] = \ pos[(i-move) % n] * p_correct + \ pos[(i-move-1) % n] * p_over + \ pos[(i-move+1) % n] * p_under return result def normalize(p): s = sum(p) for i in range (len(p)): p[i] = p[i] / s # the update algorithm of the discrete bayesian filter def update(pos, measure, p_hit, p_miss): q = np.array(pos, dtype=float) for i in range(len(hallway)): if hallway[i] == measure: q[i] = pos[i] * p_hit else: q[i] = pos[i] * p_miss normalize(q) return q # - import matplotlib # make sure our matplotlibrc has been edited to use imagemagick print(matplotlib.matplotlib_fname()) matplotlib.rcParams['animation.writer'] # + from kf_book.gif_animate import animate pos = [1.0,0,0,0,0,0,0,0,0,0] def bar_animate(nframe): global pos plt.cla() bar_plot(pos) plt.title('Step {}'.format(nframe + 1)) pos = predict(pos, 1, .8, .1, .1) for i in range(10): bar_animate(i) fig = plt.figure(figsize=(6.5, 2.5)) animate('02_no_info.gif', bar_animate, fig=fig, frames=75, interval=75); # - # <img src="02_no_info.gif"> # + pos = np.array([.1]*10) hallway = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 0]) def bar_animate(nframe): global pos #if nframe == 0: # return bar_plot(pos, ylim=(0,1.0)) plt.title('Step {}'.format(nframe + 1)) if nframe % 2 == 0: pos = predict(pos, 1, .9, .05, .05) else: x = int((nframe/2) % len(hallway)) z = hallway[x] pos = update(pos, z, .9, .2) fig = plt.figure(figsize=(6.5, 2.5)) animate('02_simulate.gif', bar_animate, fig=fig, frames=40, interval=85); # - # <img src="02_simulate.gif">
animations/discrete_bayes_animations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set(style="white", context="talk") rs = np.random.RandomState(7) # %matplotlib inline # + # Set up the matplotlib figure f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(8, 6), sharex=True) # Generate some sequential data x = np.array(list("ABCDEFGHI")) y1 = np.arange(1, 10) sns.barplot(x, y1, palette="BuGn_d", ax=ax1) ax1.set_ylabel("Sequential") # Center the data to make it diverging y2 = y1 - 5 sns.barplot(x, y2, palette="RdBu_r", ax=ax2) ax2.set_ylabel("Diverging") # Randomly reorder the data to make it qualitative y3 = rs.choice(y1, 9, replace=False) sns.barplot(x, y3, palette="Set3", ax=ax3) ax3.set_ylabel("Qualitative") # -
notebooks/testColorPallet-Seaborn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:kaggle] # language: python # name: conda-env-kaggle-py # --- # # Table of Contents # * [Intro](#Intro) # * [Logistic Regression](#Logistic-Regression) # * [From Linear to Logistic Regression](#From-Linear-to-Logistic-Regression) # * [Logistic Function](#Logistic-Function) # * [Decision Boundary [TOFIX]](#Decision-Boundary-[TOFIX]) # * [Simulate Data](#Simulate-Data) # * [Logistic Regression (Sklearn)](#Logistic-Regression-%28Sklearn%29) # * [Gradient Descent](#Gradient-Descent) # * [Training Animation](#Training-Animation) # # # Intro # Exploratory notebook related to basic concepts and theory behind logistic regression. Includes toy examples implementation and relative visualization. # ## Logistic Regression # Contrary from what the name suggests, logistic regression solves Classification type of problems. It moves away from regression to overcome linearity limitations in the context of classification, and adopts the logistic function for hypothesis building. # + # %matplotlib notebook import numpy as np import pandas as pd import seaborn as sns from sklearn import linear_model, datasets from matplotlib import pyplot as plt, animation sns.set_context("paper") # - # ## From Linear to Logistic Regression # Demonstrate the rationale behind the move from linear to logistic regression using reproduced examples from [Coursera course](https://www.coursera.org/learn/machine-learning). Consider again the statements "classification is not a linear function". # # We can clearly see how outliers can easily demonstrate the non feasibility of regression of classification problems. 
# Tumor data x = np.arange(10) y = np.array([0]*5 + [1]*5) # + # Plot separation when "clean" data from scipy import stats slope, intercept, r, p, _ = stats.linregress(x, y) print('Slope = {:.3f} (r = {:.3f}, p = {:.5f})'.format(slope, r, p)) ax = sns.regplot(x, y) x_intersect = (0.5 - intercept)/slope ax.plot([x_intersect, x_intersect], [-1,2], 'k-') sns.plt.show() # + # Plot sepration when outlier x = np.append(x, [25]) y = np.append(y, [1]) slope, intercept, r, p, _ = stats.linregress(x, y) print('Slope = {:.3f} (r = {:.3f}, p = {:.5f})'.format(slope, r, p)) ax = sns.regplot(x, y) x_intersect = (0.5 - intercept)/slope ax.plot([x_intersect, x_intersect], [-1,2], 'k-') sns.plt.show() # - # ## Logistic Function # The hypothesis function associated with the Logistic Regression model. # # $$\frac{1}{1+e^{-x}}$$ # # A sigmoid function is a function characterized by an S shaped curve. Logistic function is a special case of sigmoid function, but often the two terms are used interchangeably. # # Statistical approaches tend to mention the logit function (inverse of the sigmoid one) and the concept of odds. 
[Great article about the connection of the two interpretations](https://sebastianraschka.com/faq/docs/logistic-why-sigmoid.html) # + # Exponential x = np.linspace(-2, 5, 100) y = np.exp(-x) ax = plt.plot(x, y) plt.show() # + # Sigmoid x = np.linspace(-10, 10, 100) y = 1/(1 + np.exp(-x)) ax = plt.plot(x, y) plt.show() # - # # Decision Boundary [TOFIX] h_0 = lambda x : t_0 + (t_1 * x[0]) + (t_2 * x[1]) # + t_0 = -3 t_1 = 1 t_2 = 1 x_1 = np.arange(5) x_2 = np.arange(5) res = np.dstack(np.meshgrid(x_1, x_2)).reshape(-1, 2) s_1 = filter(lambda x : h_0((x[0],x[1]))>=0, res) s_2 = filter(lambda x : h_0((x[0],x[1]))<0, res) m = ['+','o'] for i, s in enumerate([s_1, s_2]): x_1, x_2 = list(map(np.array, zip(*s))) sns.regplot(x_1, x_2, fit_reg=False, marker=m[i]) sns.plt.show() # - # # Simulate Data iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. Y = iris.target # Replace label 2 with value 1, so we have only two classes to predict np.place(Y, Y==2, 1) df = pd.DataFrame(X, columns=['feat_1', 'feat_2']) df['class'] = Y df.head() sns.regplot(x='feat_1', y='feat_2', data=df[df['class']==0], color='g', fit_reg=False) sns.regplot(x='feat_1', y='feat_2', data=df[df['class']==1], color='b', fit_reg=False) sns.plt.legend(['0 Class', '1 Class']) sns.plt.show() # # Logistic Regression (Sklearn) from sklearn import metrics logreg = linear_model.LogisticRegression(C=1e5) logreg.fit(X, Y) predictions = logreg.predict(X) metrics.accuracy_score(Y, predictions) # # Gradient Descent # Fit model using gradient descent. 
# # For the cost function we can rely on cross-entropy loss, which for binary cases is: # # $$ # L(y,\hat{y})\ =\ -y\log {\hat {y}}-(1-y)\log(1-{\hat {y}}) # $$ # # [Ref 1](http://aimotion.blogspot.ie/2011/11/machine-learning-with-python-logistic.html) # Sigmoid function def sigmoid(X): res = 1.0 / (1.0 + np.exp(-1.0 * X)) return res # Cost for single prediction def compute_cost(X, y_true, theta): m = len(y_true) y_pred = sigmoid(X.dot(theta).flatten()) # Simplified #if y_true == 1: # return -log(y_pred) #else: # return -log(1 - y_pred) # One liner cost = ((-y_true.T.dot(np.log(y_pred)) - (1-y_true).T.dot(np.log(1-y_pred))) /(1.0*m)) return cost # single gradient descent step def gradient_descent_step(X, y, theta, alpha): m = len(y) # compute predictions pred = sigmoid(X.dot(theta).flatten()) # get error errors = -np.sum((y-pred)*X.T, axis=1).reshape(3,1) theta -= alpha * (errors/m) return theta # run an entire training cycle def train(X, y, alpha, iters): cost_history = np.zeros(shape=(iters, 1)) theta_history = [] # our parameters are slope and intercepts (bias) theta = np.random.randn(3, 1) for i in range(iters): theta = gradient_descent_step(X, y, theta, alpha) cost_history[i, 0] = compute_cost(X, y, theta) theta_history.append(theta.copy()) return theta_history, cost_history # + # Parameter learning # input data including bias iris = datasets.load_iris() X = iris.data[:, :3] X[:, 2] = 1 y = iris.target # Replace label 2 with value 1, so we have only two classes to predict np.place(y, y==2, 1) print(X.shape) print(y.shape) # - alpha = 0.01 epochs = 1000 theta_history, cost_history = train(X, y, alpha, epochs) # Plot history fig, axes = plt.subplots(2, 1) # plot cost axes[0].set_title('Cost History') axes[0].plot(cost_history.reshape(-1)) axes[0].set_ylabel("cost") # plot theta axes[1].set_title('Theta History') for t_idx in range(len(theta_history[0])): axes[1].plot([t[t_idx] for t in theta_history], label='theta_{}'.format(t_idx)) axes[1].set_xlabel("epoch") 
plt.legend() plt.show() # ## Training Animation # + alpha = 0.01 epochs = 100 # Plot SGD animation fig, ax = sns.plt.subplots(figsize=(8, 6)) xx, yy = np.mgrid[0:10:.5, 0:10:.5] grid = np.c_[xx.ravel(), yy.ravel()] X_grid = np.ones(shape=(len(xx)*len(yy), 3)) X_grid[:, :2] = grid theta = np.random.randn(3, 1) pred = sigmoid(X_grid.dot(theta).flatten()).reshape(xx.shape) contour = ax.contourf(xx, yy, pred, 25, cmap="RdBu", vmin=0, vmax=1) sns.regplot(x='feat_1', y='feat_2', data=df[df['class']==0], color='g', fit_reg=False) sns.regplot(x='feat_1', y='feat_2', data=df[df['class']==1], color='b', fit_reg=False) ax_c = fig.colorbar(contour) ax_c.set_ticks([0, .25, .5, .75, 1]) epoch_text = ax.text(0, 0, "Epoch 0") def animate(i): global X, y, theta, alpha, df theta = gradient_descent_step(X, y, theta, alpha) pred = sigmoid(X_grid.dot(theta).flatten()).reshape(xx.shape) contour = ax.contourf(xx, yy, pred, 25, cmap="RdBu", vmin=0, vmax=1) cost = compute_cost(X, y, theta) epoch_text.set_text("Epoch {}, cost {:.3f}".format(i, cost)) sns.regplot(x='feat_1', y='feat_2', data=df[df['class']==0], color='g', fit_reg=False) sns.regplot(x='feat_1', y='feat_2', data=df[df['class']==1], color='b', fit_reg=False) return epoch_text, ani = animation.FuncAnimation(fig, animate, epochs, interval=1, repeat=False)
machine learning/Logistic Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # plot.hist # ## Help # + import scripts.plot.dist as dist # Show help try: args = dist.parse(["-h"]) except: pass # - # ## Plot Histograms (Ungrouped) # ### Plot Single Histogram # To plot a single histogram you just have to specify the file containing the data (`-i`) and the column in the file that you want to plot (`-d`). # + argv = [ "-i", "data/hist_rmsd_n1.dat", "-d", "1", ] args = dist.parse(argv) args_dict = dist.args_to_dict(args) dist.plot(**args_dict) # - # The optional arguments can be used to make the whole plot more appealing. # + argv = [ "-i", "data/hist_rmsd_n1.dat", "-d", "1", "-b", "15", "-t", "RMDS Distribution", "-lx", "RMSD (Å)", "-l", "Histogram", "-ll", "2.5", "-rl", "5", ] args = dist.parse(argv) args_dict = dist.args_to_dict(args) dist.plot(**args_dict) # - # Kernel density estimation allow to estimate the probabilty density function of the distribution. Note that this will normalize the histogram. # + argv = [ "-i", "data/hist_rmsd_n1.dat", "-d", "1", "-b", "25", "-t", "RMDS Distribution", "-lx", "RMSD (Å)", "-l", "KDE", "-ll", "2.5", "-rl", "5", "--kde", ] args = dist.parse(argv) args_dict = dist.args_to_dict(args) dist.plot(**args_dict) # - # ### Plot Multiple Histograms # + argv = [ "-i", "data/hist_rmsd_n1.dat", "data/hist_rmsd_n2.dat", "data/hist_rmsd_rd1.dat", "data/hist_rmsd_rd2.dat", "-d", "1", "1", "1", "1", "-b", "25", "-t", "RMDS Distribution", "-lx", "RMSD (Å)", "-l", "N1", "N2", "RD1", "RD2" ] args = dist.parse(argv) args_dict = dist.args_to_dict(args) dist.plot(**args_dict) # - # ## Plot Histograms (Grouped) # The distributions can be grouped together in different subplots. 
# + argv = [ "-i", "data/hist_rmsd_n1.dat", "data/hist_rmsd_n2.dat", "data/hist_rmsd_rd1.dat", "data/hist_rmsd_rd2.dat", "-d", "1", "1", "1", "1", "-b", "25", "-t", "RMDS Distribution", "-lx", "RMSD (Å)", "-l", "N1", "N2", "RD1", "RD2", "--kde", "-g", "0", "0", "1", "1", ] args = dist.parse(argv) args_dict = dist.args_to_dict(args) dist.plot(**args_dict) # + argv = [ "-i", "data/hist_rmsd_n1.dat", "data/hist_rmsd_n2.dat", "data/hist_rmsd_rd1.dat", "data/hist_rmsd_rd2.dat", "-d", "1", "1", "1", "1", "-b", "25", "-t", "RMDS Distribution", "-lx", "RMSD (Å)", "-l", "N1", "N2", "RD1", "RD2", "--kde", "-g", "0", "1", "2", "3", ] args = dist.parse(argv) args_dict = dist.args_to_dict(args) dist.plot(**args_dict)
examples/plot/dist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import json from copy import deepcopy import numpy as np # #### Funtion to return a list of all the files and directories inside a directory which are not hidden def listdir_nohidden(path): contents = os.listdir(path) new_contents = [i for i in contents if i[0] != '.'] return new_contents # #### A function which loads the reddit data and return conversations is a dictionary containing each reddit conversation in the set they belong to # . train = 'training data' # . dev = 'validation data' # . test = 'testing data' def load_data(): # Path of the validation data's labels path_dev = "rumoureval2019_data/rumoureval-2019-training-data/dev-key.json" with open(path_dev, 'r') as f: dev_key = json.load(f) # Path of the training data's labels path_train = "rumoureval2019_data/rumoureval-2019-training-data/train-key.json" with open(path_train, 'r') as f: train_key = json.load(f) path = "rumoureval2019_data/rumoureval-2019-training-data/reddit-training-data" # A list of all the conversations in reddit training data conversation_ids = listdir_nohidden(path) conversations = {} conversations['dev'] = [] conversations['train'] = [] conversations['test'] = [] for id in conversation_ids: conversation = {} conversation['id'] = id path_src = path+'/'+id+'/source-tweet' files_t = sorted(listdir_nohidden(path_src)) with open(os.path.join(path_src, files_t[0])) as f: for line in f: src = json.loads(line) src['text'] = src['data']['children'][0]['data']['title'] src['user'] = src['data']['children'][0]['data']['author'] if files_t[0].endswith('.json'): filename = files_t[0][:-5] src['id_str'] = filename else: print ("No, no I don't like that") src['used'] = 0 #Checking whether a conversation is training information or validation if src['id_str'] in 
list(dev_key['subtaskaenglish'].keys()): src['setA'] = 'dev' src['label'] = dev_key['subtaskaenglish'][src['id_str']] elif src['id_str'] in list(train_key['subtaskaenglish'].keys()): src['setA'] = 'train' src['label'] = train_key['subtaskaenglish'][src['id_str']] else: print ("Post was not found! Task A, Post ID: ", src['id_str']) if src['id_str'] in list(dev_key['subtaskbenglish'].keys()): src['setB'] = 'dev' conversation['veracity'] = dev_key['subtaskbenglish'][src['id_str']] elif src['id_str'] in list(train_key['subtaskbenglish'].keys()): src['setB'] = 'train' conversation['veracity'] = train_key['subtaskbenglish'][src['id_str']] else: print ("Post was not found! Task B, Post ID: ", src['id_str']) conversation['source'] = src tweets = [] path_repl = path+'/'+id+'/replies' files_t = sorted(listdir_nohidden(path_repl)) for repl_file in files_t: with open(os.path.join(path_repl, repl_file)) as f: for line in f: tw = json.loads(line) if 'body' in list(tw['data'].keys()): tw['text'] = tw['data']['body'] tw['user'] = tw['data']['author'] if repl_file.endswith('.json'): filename = repl_file[:-5] tw['id_str'] = filename else: print ("No, no I don't like that reply") tw['used'] = 0 if tw['id_str'] in list(dev_key['subtaskaenglish'].keys()): tw['setA'] = 'dev' tw['label'] = dev_key['subtaskaenglish'][tw['id_str']] elif tw['id_str'] in list(train_key['subtaskaenglish'].keys()): tw['setA'] = 'train' tw['label'] = train_key['subtaskaenglish'][tw['id_str']] else: print ("Post was not found! 
Task A, Reply ID: ", tw['id_str']) tweets.append(tw) else: tw['text'] = '' tw['user'] = '' tw['used'] = 0 if repl_file.endswith('.json'): filename = repl_file[:-5] tw['id_str'] = filename else: print ("No, no I don't like that reply") if tw['id_str'] in list(dev_key['subtaskaenglish'].keys()): tw['setA'] = 'dev' tw['label'] = dev_key['subtaskaenglish'][tw['id_str']] elif tw['id_str'] in list(train_key['subtaskaenglish'].keys()): tw['setA'] = 'train' tw['label'] = train_key['subtaskaenglish'][tw['id_str']] else: print ("Post was not found! Task A, Reply ID: ", tw['id_str']) tweets.append(tw) conversation['replies'] = tweets path_struct = path+'/'+id+'/structure.json' with open(path_struct, 'r') as f: struct = json.load(f) conversation['structure'] = struct # branches = tree2branches(conversation['structure']) # conversation['branches'] = branches conversations['train'].append(conversation) #%% path = "rumoureval2019_data/rumoureval-2019-training-data/reddit-dev-data" conversation_ids = listdir_nohidden(path) for id in conversation_ids: conversation = {} conversation['id'] = id path_src = path+'/'+id+'/source-tweet' files_t = sorted(listdir_nohidden(path_src)) with open(os.path.join(path_src, files_t[0])) as f: for line in f: src = json.loads(line) src['text'] = src['data']['children'][0]['data']['title'] src['user'] = src['data']['children'][0]['data']['author'] if files_t[0].endswith('.json'): filename = files_t[0][:-5] src['id_str'] = filename else: print ("No, no I don't like that") src['used'] = 0 # if src['id_str'] in list(dev_key['subtaskaenglish'].keys()): src['setA'] = 'dev' src['label'] = dev_key['subtaskaenglish'][src['id_str']] elif src['id_str'] in list(train_key['subtaskaenglish'].keys()): src['setA'] = 'train' src['label'] = train_key['subtaskaenglish'][src['id_str']] else: print ("Post was not found! 
Task A, Post ID: ", src['id_str']) if src['id_str'] in list(dev_key['subtaskbenglish'].keys()): src['setB'] = 'dev' conversation['veracity'] = dev_key['subtaskbenglish'][src['id_str']] elif src['id_str'] in list(train_key['subtaskbenglish'].keys()): src['setB'] = 'train' conversation['veracity'] = train_key['subtaskbenglish'][src['id_str']] else: print ("Post was not found! Task B, Post ID: ", src['id_str']) conversation['source'] = src tweets = [] path_repl = path+'/'+id+'/replies' files_t = sorted(listdir_nohidden(path_repl)) for repl_file in files_t: with open(os.path.join(path_repl, repl_file)) as f: for line in f: tw = json.loads(line) if 'body' in list(tw['data'].keys()): tw['text'] = tw['data']['body'] tw['user'] = tw['data']['author'] if repl_file.endswith('.json'): filename = repl_file[:-5] tw['id_str'] = filename else: print ("No, no I don't like that reply") tw['used'] = 0 if tw['id_str'] in list(dev_key['subtaskaenglish'].keys()): tw['setA'] = 'dev' tw['label'] = dev_key['subtaskaenglish'][tw['id_str']] elif tw['id_str'] in list(train_key['subtaskaenglish'].keys()): tw['setA'] = 'train' tw['label'] = train_key['subtaskaenglish'][tw['id_str']] else: print ("Post was not found! Task A, Reply ID: ", tw['id_str']) tweets.append(tw) else: tw['text'] = '' tw['user'] = '' tw['used'] = 0 if repl_file.endswith('.json'): filename = repl_file[:-5] tw['id_str'] = filename else: print ("No, no I don't like that reply") if tw['id_str'] in list(dev_key['subtaskaenglish'].keys()): tw['setA'] = 'dev' tw['label'] = dev_key['subtaskaenglish'][tw['id_str']] elif tw['id_str'] in list(train_key['subtaskaenglish'].keys()): tw['setA'] = 'train' tw['label'] = train_key['subtaskaenglish'][tw['id_str']] else: print ("Post was not found! 
Task A, Reply ID: ", tw['id_str']) tweets.append(tw) conversation['replies'] = tweets path_struct = path+'/'+id+'/structure.json' with open(path_struct, 'r') as f: struct = json.load(f) conversation['structure'] = struct # branches = tree2branches(conversation['structure']) # conversation['branches'] = branches conversations['dev'].append(conversation) #%% path = "rumoureval2019_data/rumoureval-2019-test-data/reddit-test-data" conversation_ids = listdir_nohidden(path) for id in conversation_ids: conversation = {} conversation['id'] = id path_src = path+'/'+id+'/source-tweet' files_t = sorted(listdir_nohidden(path_src)) with open(os.path.join(path_src, files_t[0])) as f: for line in f: src = json.loads(line) src['text'] = src['data']['children'][0]['data']['title'] src['user'] = src['data']['children'][0]['data']['author'] if files_t[0].endswith('.json'): filename = files_t[0][:-5] src['id_str'] = filename else: print ("No, no I don't like that") src['used'] = 0 conversation['source'] = src tweets = [] path_repl = path+'/'+id+'/replies' files_t = sorted(listdir_nohidden(path_repl)) for repl_file in files_t: with open(os.path.join(path_repl, repl_file)) as f: for line in f: tw = json.loads(line) if 'body' in list(tw['data'].keys()): tw['text'] = tw['data']['body'] tw['user'] = tw['data']['author'] if repl_file.endswith('.json'): filename = repl_file[:-5] tw['id_str'] = filename else: print ("No, no I don't like that reply") tw['used'] = 0 tweets.append(tw) else: tw['text'] = '' tw['user'] = '' tw['used'] = 0 if repl_file.endswith('.json'): filename = repl_file[:-5] tw['id_str'] = filename else: print ("No, no I don't like that reply") tweets.append(tw) conversation['replies'] = tweets path_struct = path+'/'+id+'/structure.json' with open(path_struct, 'r') as f: struct = json.load(f) conversation['structure'] = struct conversations['test'].append(conversation) return conversations
preprocessing_reddit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install matplotlib # !pip install numpy # !pip install scipy # %matplotlib inline import matplotlib.pyplot as plt from IPython.display import Image # + [markdown] slideshow={"slide_type": "slide"} # # Numpy - multidimensional data arrays # + [markdown] slideshow={"slide_type": "subslide"} # ## Introduction # # Numpy is not part of the "standard library", but it might as well be for engineers. Numpy is Python's answer to Matlab - the "back end" is implemented in C so its performance is very fast (comparable to Matlab). # + slideshow={"slide_type": "subslide"} import numpy as np # + slideshow={"slide_type": "subslide"} ## Creating `numpy` arrays There are a number of ways to initialize new numpy arrays, for example from * a Python list or tuples * using functions that are dedicated to generating numpy arrays, such as `arange`, `linspace`, etc. * reading data from files # + slideshow={"slide_type": "slide"} # a vector: the argument to the array function is a Python list v = np.array([1,2,3,4]) print(v) # a matrix: the argument to the array function is a nested Python list M = np.array([[1, 2], [3, 4]]) print(M) type(v), type(M) # + [markdown] slideshow={"slide_type": "subslide"} # ### Creating arrays with functions # + [markdown] slideshow={"slide_type": "subslide"} # It is often more efficient to generate large arrays instead of creating them from lists. 
There are a few useful functions for this in numpy: # # * `np.arange` - create a range with a specified step size (endpoints not included) # * `np.linspace` - create a range with a specified number of points (endpoints *are* included) # * `np.logspace` - create a range with a specified number of points in log space (endpoints *are* included) # * `np.mgrid` - create points on a multi-dimensional grid (similar to meshgrid in matlab) # * `np.random.rand` - create random number matrix from a uniform distribution # * `np.random.randn` - create random number matrix from a standard normal distribution # * `np.zeros` - create a matrix of zeros # * `np.ones` - create a matrix of ones # * `np.eye` - create identity matrix # + [markdown] slideshow={"slide_type": "subslide"} # ## Manipulating arrays # # Once we generate `numpy` arrays, we need to interact with them. This involves a few operations: # # * indexing - accessing certain elements # * index "slicing" - accessing certain subsets of elements # * fancy indexing - combinations of indexing and slicing # # This is not very different from Matlab. # + [markdown] slideshow={"slide_type": "subslide"} # ### Index slicing # # Index slicing is the name for the syntax `M[lower:upper:step]` to extract a subset of an array. # - # ## Activity: # + [markdown] slideshow={"slide_type": "slide"} # # matplotlib - Plotting in Python # + [markdown] slideshow={"slide_type": "subslide"} # Matplotlib has advantages: # # * Easy to get started (MATLAB-like interface) # * Support for LaTeX formatted labels and texts # * Output in many formats, including PNG, PDF, SVG, EPS, and PGF. # * Extensive gallery of examples with source code (https://matplotlib.org/gallery.html) # * Programmatic control over all aspects of figures # # Programmatic control is a blessing and a curse... # # Other plotting tools are available (Plotly, Bokeh, D3, ...) but `matplotlib` is the workhorse. 
# + [markdown] slideshow={"slide_type": "subslide"} # Matplotlib can be used in two ways: # # * `pylab` modules (works like MATLAB) # * object-oreinted interface (harder but more powerful) # - # %matplotlib inline # + [markdown] slideshow={"slide_type": "subslide"} # ## MATLAB-like API # # The easiest way to get started with plotting using matplotlib is often to use the MATLAB-like API provided by matplotlib. # # It is designed to be compatible with MATLAB's plotting functions, so it is easy to get started with if you are familiar with MATLAB. # # To use this API from matplotlib, we need to include the symbols in the `pylab` module: # + slideshow={"slide_type": "subslide"} from pylab import * x = np.linspace(0, 5, 10) y = x ** 2 figure() plot(x, y) xlabel('x') ylabel('y') title('title') show() # + [markdown] slideshow={"slide_type": "subslide"} # Most of the plotting related functions in MATLAB are covered by the `pylab` module. For example, subplot and color/symbol selection: # + slideshow={"slide_type": "fragment"} subplot(1,2,1) plot(x, y, 'r--') subplot(1,2,2) plot(y, x, 'g*-'); # + [markdown] slideshow={"slide_type": "subslide"} # ## The matplotlib object-oriented interface # # The `pylab` interface is easy, but limited. # # * Use simple global functions that match with MATLAB # * Objects are implicitly defined and hidden from users. # # The `pyplot` object-oriented interface is harder to learn, but much more powerful. # # * Use objects instead of global functions. # * Explicitly define objects - much better for multiple figures. 
# + slideshow={"slide_type": "fragment"} import matplotlib import matplotlib.pyplot as plt import numpy as np # + slideshow={"slide_type": "subslide"} # fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1) axes.plot(x, y) axes.set_xlabel('x') axes.set_ylabel('y') axes.set_title('title'); # + slideshow={"slide_type": "subslide"} # fig = plt.figure() axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes # main figure axes1.plot(x, y, 'r') axes1.set_xlabel('x') axes1.set_ylabel('y') axes1.set_title('title') # insert axes2.plot(y, x, 'g') axes2.set_xlabel('y') axes2g.set_ylabel('x') axes2.set_title('insert title'); # + [markdown] slideshow={"slide_type": "subslide"} # ### Saving figures # # To save a figure to a file we can use the `savefig` method in the `Figure` class. You can output in many formats, but the most common are: # # * PNG (raster) # * JPG (raster) # * SVG (vector) # * PDF (vector) # # The SVG and PDF formats are great because they can be edited afterward with vector graphics programs like Inkscape or Adobe Illustrator. # - fig.savefig('cool_plot.pdf') # ## The Gallery # # Matplotlib is a complicated library. 
The matplotlib gallery is the most useful resource for learning matplotlib # # https://matplotlib.org/gallery.html # + [markdown] slideshow={"slide_type": "subslide"} # ## Activity: get a few plots from the matplotlib gallery (plots that does not import data) and get them running below # + # fake data np.random.seed(937) data = np.random.lognormal(size=(37, 4), mean=1.5, sigma=1.75) labels = list('ABCD') fs = 10 # fontsize # demonstrate how to toggle the display of different elements: fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 6), sharey=True) axes[0, 0].boxplot(data, labels=labels) axes[0, 0].set_title('Default', fontsize=fs) axes[0, 1].boxplot(data, labels=labels, showmeans=True) axes[0, 1].set_title('showmeans=True', fontsize=fs) axes[0, 2].boxplot(data, labels=labels, showmeans=True, meanline=True) axes[0, 2].set_title('showmeans=True,\nmeanline=True', fontsize=fs) axes[1, 0].boxplot(data, labels=labels, showbox=False, showcaps=False) tufte_title = 'Tufte Style \n(showbox=False,\nshowcaps=False)' axes[1, 0].set_title(tufte_title, fontsize=fs) axes[1, 1].boxplot(data, labels=labels, notch=True, bootstrap=10000) axes[1, 1].set_title('notch=True,\nbootstrap=10000', fontsize=fs) axes[1, 2].boxplot(data, labels=labels, showfliers=False) axes[1, 2].set_title('showfliers=False', fontsize=fs) for ax in axes.flatten(): ax.set_yscale('log') ax.set_yticklabels([]) fig.subplots_adjust(hspace=0.4) plt.show() # demonstrate how to customize the display different elements: boxprops = dict(linestyle='--', linewidth=3, color='darkgoldenrod') flierprops = dict(marker='o', markerfacecolor='green', markersize=12, linestyle='none') medianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick') meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick') meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple') fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 6), sharey=True) axes[0, 0].boxplot(data, boxprops=boxprops) 
axes[0, 0].set_title('Custom boxprops', fontsize=fs) axes[0, 1].boxplot(data, flierprops=flierprops, medianprops=medianprops) axes[0, 1].set_title('Custom medianprops\nand flierprops', fontsize=fs) axes[0, 2].boxplot(data, whis='range') axes[0, 2].set_title('whis="range"', fontsize=fs) axes[1, 0].boxplot(data, meanprops=meanpointprops, meanline=False, showmeans=True) axes[1, 0].set_title('Custom mean\nas point', fontsize=fs) axes[1, 1].boxplot(data, meanprops=meanlineprops, meanline=True, showmeans=True) axes[1, 1].set_title('Custom mean\nas line', fontsize=fs) axes[1, 2].boxplot(data, whis=[15, 85]) axes[1, 2].set_title('whis=[15, 85]\n#percentiles', fontsize=fs) for ax in axes.flatten(): ax.set_yscale('log') ax.set_yticklabels([]) fig.suptitle("I never said they'd be pretty") fig.subplots_adjust(hspace=0.4) plt.show() # + [markdown] slideshow={"slide_type": "slide"} # # SciPy - Library of scientific algorithms for Python # + [markdown] slideshow={"slide_type": "subslide"} # The SciPy framework builds on top of the low-level NumPy framework for multidimensional arrays, and provides a large number of higher-level scientific algorithms. Today we will discuss a few that are most useful for the average scientist: # # * Integration ([scipy.integrate](http://docs.scipy.org/doc/scipy/reference/integrate.html)) # * Optimization ([scipy.optimize](http://docs.scipy.org/doc/scipy/reference/optimize.html)) # * Interpolation ([scipy.interpolate](http://docs.scipy.org/doc/scipy/reference/interpolate.html)) # * Linear Algebra ([scipy.linalg](http://docs.scipy.org/doc/scipy/reference/linalg.html)) # * Statistics ([scipy.stats](http://docs.scipy.org/doc/scipy/reference/stats.html)) # * File IO ([scipy.io](http://docs.scipy.org/doc/scipy/reference/io.html)) # # # + [markdown] slideshow={"slide_type": "slide"} # ## Linear Regression # # Linear regression is very easy in scipy. 
# + slideshow={"slide_type": "subslide"} x = np.arange(0,11,1) #make some data y = 3*x+1 y = y.astype('float64') y += np.random.randn(len(x)) # + slideshow={"slide_type": "subslide"} from scipy.stats import linregress slope, intercept, r_value, p_value, std_err = linregress(x,y) print(slope,intercept) plt.scatter(x,y) plt.plot(x,x*slope+intercept) # + slideshow={"slide_type": "slide"} ## Polynomial fitting is not recommended Fitting data to polynomials is quite easy, but often a very bad solution for complex models. * higher orders very sensative * very easy to overfit * missing the true complexity of your system that said, here's how to do a polynomial fit polynomial fitting is easiest in numpy with `np.polyfit` # + slideshow={"slide_type": "subslide"} x = np.linspace(0, 1, 20) y = np.cos(x) + 0.3*np.random.rand(20) #clearly not a polynomial, but we can fit it coefficients = np.polyfit(x, y, 3) #find the coefficients p = np.poly1d(coefficients)#pass the coefficients into np.poly1d to make a function t = np.linspace(0, 1, 200) plt.scatter(x, y) plt.plot(t,p(t),'-') plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # But what about if we continue to add orders to the polynomial? Let's plot the error as we increase the order. Write a for loop that evaluates the sum of squaared error for polynomial models of order 1-14 for the above x,y data # + slideshow={"slide_type": "subslide"} err = [] poly_range = range(1,14) for i in poly_range: coefficients = np.polyfit(x, y, i) p = np.poly1d(coefficients) err.append(sum((y-p(t))**2)) plt.plot(range(1,14),err) plt.ylabel('sum of squared error') plt.xlabel('polynomial order') plt.figure() plt.scatter(x, y) plt.plot(t,p(t),'-') # + [markdown] slideshow={"slide_type": "subslide"} # Is this good? 
# - # ## Introducing the Bayesian information criterion # # Bayesian information criterion (BIC) attempts to balance the complexity of a model with its goodness of fit, it is defined to be: # # $ BIC = n*\ln{(\sigma^2_e)} + k*\ln(n)$ # # where $n$ is the number of data points, $\sigma_e$ is the standard deviation of the error, and $k$ is the number of parameters. # # (Lower BIC is better) # + #here's a function that implements it def BIC(y, yhat, k, weight = 1): err = y - yhat sigma = np.std(np.real(err)) n = len(y) B = n*np.log(sigma**2) + weight*k*np.log(n) return B h = BIC(y,p(x),3) print(h) # - # Let's repeat our analysis plotting the BIC, take a few minutes to do that # + BIC = [] poly_range = range(1,14) plt.plot(range(1,17),BIC) plt.ylabel('sum of squared error') plt.xlabel('polynomial order') # - # Tools like BIC help guard against overfitting # ## Use Scipy Curve Fitting # # Let's extend our curve fitting to any function using scipy's `curve_fit` function. This enables us to perform non-linear curve fitting. # # + from scipy.optimize import curve_fit def line(x,m,b): return m*x+b params, params_covariance = curve_fit(line,x,y) print(params) plt.figure() plt.scatter(x,y) plt.plot(np.linspace(x[0],x[-1],100),line(np.linspace(x[0],x[-1],100),*params)) #note passing in aruguments with * # - # re-write the above to fit an exponenial function: # # $y = Ae^{kx}+C$ # # (rerun the cell if the regression does not converge) # + x = np.linspace(0, 1, 20) y = np.cos(x) + 0.3*np.random.rand(20) from scipy.optimize import curve_fit params, params_covariance = curve_fit(line,x,y) plt.figure() plt.scatter(x,y) plt.plot(np.linspace(x[0],x[-1],100),line(np.linspace(x[0],x[-1],100),*params)) # + [markdown] slideshow={"slide_type": "slide"} # ## Optimization # # Optimization (finding minima or maxima of a function) is a large field in mathematics, and optimization of complicated functions or in many variables can be rather involved. 
Here we will only look at a few very simple cases. For a more detailed introduction to optimization with SciPy see: http://scipy-lectures.github.com/advanced/mathematical_optimization/index.html

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Finding a minima
#
# Let's first look at how to find the minima of a simple function of a single variable:

# + slideshow={"slide_type": "fragment"}
from scipy import optimize

def f(x):
    """Quartic test function with two minima (one local, one global)."""
    return 4*x**3 + (x-2)**2 + x**4

fig, ax = plt.subplots()
x = np.linspace(-5, 3, 100)
y = f(x)
ax.plot(x, y);

# + [markdown] slideshow={"slide_type": "subslide"}
# There are many types of optimizers available. We will use the common `BFGS` and `CG` optimizers here, but you can read more in the [documentation](https://docs.scipy.org/doc/scipy/reference/optimize.html).

# + slideshow={"slide_type": "fragment"}
from scipy.optimize import minimize
# start the search from x = -1; the result object stores the minimizer in .x
x_min = minimize(f, -1, method='CG')
#minimize can take any kind of function, with any number of variables
# # method?
# # output?
print(x_min.x)
# -

# Lectures 3+4 of Johanssen: [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures)
#
# Scipy:
# * http://www.scipy.org - The official web page for the SciPy project.
# * http://docs.scipy.org/doc/scipy/reference/tutorial/index.html - A tutorial on how to get started using SciPy.
# * https://github.com/scipy/scipy/ - The SciPy source code.
#
# Matplotlib:
# * http://www.matplotlib.org - The project web page for matplotlib.
# * https://github.com/matplotlib/matplotlib - The source code for matplotlib.
# * http://matplotlib.org/gallery.html - A large gallery showcasing various types of plots matplotlib can create. Highly recommended!
# * http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
# * http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
#
Week02/recitation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature Map Correlation
#
# Measures the correlation between ground truth segmentations of organelles and the filters in the last convolutional layer of a ResNet50. Uses this information to "naively" derive a segmentation.

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

import os, pdb, math
import numpy as np
import torch
import torch.nn as nn
from scipy.stats.mstats import pointbiserialr
from matplotlib import pyplot as plt

from albumentations import Compose, Normalize, Resize
from albumentations.pytorch import ToTensorV2

from _helpers import *

# +
#pick an example (index can be 0-7)
index = 2

#the best downsampling factor for each example
downsample_factors = [4, 4, 16, 2, 8, 4, 8, 4]

#define the device that we're using
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#load the cellemnet and imagenet moco pretrained models
cellemnet, cellemnet_norms = load_moco_pretrained(dataset='cellemnet')
imagenet, _ = load_moco_pretrained(dataset='imagenet')

#change the strides in the resnet layers such that
#the output feature maps will be larger or smaller;
#adjust this based on the size of the target structure.
#for mitochondria-sized objects a downsampling factor of
#8 is a good place to start
cellemnet = restride_resnet(cellemnet, downsample_factor=downsample_factors[index])
imagenet = restride_resnet(imagenet, downsample_factor=downsample_factors[index])

#next we want to remove the average pooling and
#fully connected layers (i.e. the last 2 layers), such that
#the output layer will be layer4 of the resnet
cellemnet = nn.Sequential(*list(cellemnet.children())[:-2])
imagenet = nn.Sequential(*list(imagenet.children())[:-2])

#move the models to devices and set to eval mode
cellemnet = cellemnet.to(device).eval()
imagenet = imagenet.to(device).eval()

# +
#create cellemnet transforms
cellemnet_aug = Compose([
    Resize(224, 224),
    Normalize(mean=cellemnet_norms[0], std=cellemnet_norms[1]),
    ToTensorV2()
])

#create imagenet transforms
imagenet_aug = Compose([
    Resize(224, 224),
    Normalize(), #default normalization values are for ImageNet
    ToTensorV2()
])

#make the dataset classes
cellemnet_data = CorrelationData('example_data/correlation/', tfs=cellemnet_aug, gray_channels=1)
imagenet_data = CorrelationData('example_data/correlation/', tfs=imagenet_aug, gray_channels=3)

# +
#get the num_filters filters most correlated with the
#labelmap and create a new filter that is their average response
num_filters = 32

#threshold on the mean feature response used to
#binarize the "correlated" segmentation
thr = 0.3


def correlated_segmentation(model, image, mask):
    """Derive a naive segmentation from the filters most correlated with `mask`.

    Runs the identical pipeline used for both pretrained models: rank filters by
    point-biserial correlation with the labelmap, average the top `num_filters`
    responses, then threshold that mean map at `thr`.

    Parameters
    ----------
    model : nn.Module
        Truncated ResNet whose output is the layer4 feature maps.
    image : torch.Tensor
        Transformed input image (C, H, W) — moved to `device` internally.
    mask : torch.Tensor
        Binary ground-truth labelmap for the same image.

    Returns
    -------
    mean_fmap : ndarray
        Rescaled mean response of the top-k correlated filters.
    rpb : float
        Point-biserial correlation between `mean_fmap` and the mask.
    iou : float
        IoU between the thresholded `mean_fmap` and the mask.
    """
    corrs = correlated_filters(model, image.to(device), mask)

    #it's possible to get NaN correlations if a filter has no response anywhere
    #just reset those to 0s
    corrs = np.nan_to_num(corrs)
    topk_indices = torch.topk(torch.from_numpy(corrs), num_filters).indices.to(device)
    mean_fmap = mean_topk_map(model, image.to(device), topk_indices, rescale=True)

    #measure the pb correlation between this "mean response" and the ground truth
    rpb = pointbiserialr(mask.numpy().ravel(), mean_fmap.ravel())[0]

    #measure iou between "correlated segmentation" and ground truth
    iou = binary_iou((mean_fmap > thr).astype(np.uint8), mask.numpy())
    return mean_fmap, rpb, iou


#first cellemnet
data = cellemnet_data[index]
cellemnet_image, cellemnet_mask = data['image'], data['mask']
cellemnet_topk_mean_fmap, cellemnet_rpb, cellemnet_iou = \
    correlated_segmentation(cellemnet, cellemnet_image, cellemnet_mask)

#then the same pipeline for imagenet
data = imagenet_data[index]
imagenet_image, imagenet_mask = data['image'], data['mask']
imagenet_topk_mean_fmap, imagenet_rpb, imagenet_iou = \
    correlated_segmentation(imagenet, imagenet_image, imagenet_mask)

# +
f, ax = plt.subplots(3, 2, figsize=(8, 14))
cmap = 'plasma'

#plot the image, labelmap, mean_topk_map, and binarized mean_topk_map
#for both cellemnet and imagnet
ax[0, 0].set_title('Image', fontsize=32, fontname='Arial', pad=24)
ax[0, 0].imshow(cellemnet_image[0], cmap='gray')

ax[0, 1].set_title('Labelmap', fontsize=32, fontname='Arial', pad=24)
#ax[0, 1].imshow(cellemnet_image[0], cmap='gray')
ax[0, 1].imshow(cellemnet_mask, alpha=1, cmap='gray')

ax[1, 0].set_title('$r_{pb}$: ' + f'{cellemnet_rpb:.3f}', fontsize=32, fontname='Arial')
ax[1, 0].imshow(cellemnet_image[0], cmap='gray')
ax[1, 0].imshow(cellemnet_topk_mean_fmap, alpha=0.5, cmap=cmap)

ax[1, 1].set_title(f'IoU: {cellemnet_iou:.3f}', fontsize=32, fontname='Arial')
#ax[1, 1].imshow(cellemnet_image[0], cmap='gray')
ax[1, 1].imshow(cellemnet_topk_mean_fmap > thr, alpha=1, cmap='gray')

#now the same for imagenet
ax[2, 0].set_title('$r_{pb}$: ' + f'{imagenet_rpb:.3f}', fontsize=32, fontname='Arial')
ax[2, 0].imshow(imagenet_image[0], cmap='gray')
ax[2, 0].imshow(imagenet_topk_mean_fmap, alpha=0.5, cmap=cmap)

ax[2, 1].set_title(f'IoU: {imagenet_iou:.3f}', fontsize=32, fontname='Arial')
#ax[2, 1].imshow(imagenet_image[0], cmap='gray')
ax[2, 1].imshow(imagenet_topk_mean_fmap > thr, alpha=1, cmap='gray')

for a in f.axes:
    a.set_xticks([])
    a.set_yticks([])

plt.subplots_adjust(wspace=0, hspace=0)
plt.tight_layout()
# -
notebooks/featuremap_correlation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 이전에 쓰던거 복붙해서 한거라서 기재된 라이브러리를 다 쓰는게 아니라 신경안쓰셔도됨 from selenium import webdriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from bs4 import BeautifulSoup import pandas as pd import time # + # headless는 안씀, 채원님 링크 참고하거나 물어봐서 활용하면 될듯 # - browser = webdriver.Chrome('./chromedriver.exe') browser.get('https://ets.krx.co.kr/main/main.jsp') # 탭클릭 browser.find_element_by_xpath("//button[@class='isu-button btn-dropdown']").click() # 탭선택 xpath경로의 마지막 li[숫자] : 1~11 browser.find_element_by_xpath("/html/body/div[1]/div[2]/div[3]/div[1]/div[1]/div[2]/ul/li[1]/a").click() # + # for 문으로 탭클릭과 경로 변경해서 아래 스크래핑 코드 넣으면 페이지에서 다 스크래핑 가능 # - html = browser.page_source soup = BeautifulSoup(html, 'html.parser') contents = soup.select('div.info-content') contents targets = contents[0] targets = str(targets) targets target = targets.split('<td>') target = target[1:] # 불순물 뭐가 있는지 확인해서 나오는 것들 공백으로 대체함 shift = {'</td>':'', '<td>':'', '</tr>':'', '<tr>':'', '</span>':'', '<span>':'', '<span class="up">':'', '<span class="down">':'', '</tbody>':'', '\n':'', '</table>':'', '</div>':''} def replace_all(text, dic): for i, j in dic.items(): text = text.replace(i, j) return text raw = [] for i in target: raw.append(replace_all(i, shift)) raw
project/ML/ML_i/5.krx_scraping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Overview: Downloading Files # This notebook provides an overview of how to download some of the files needed to make `brutus` work. These can be downloaded directly from the [Harvard Dataverse](https://dataverse.harvard.edu/dataverse/astro-brutus) or through utility functions in the `utils` module of the code. import brutus from brutus import utils as butils # ## Stellar Models # Stellar models in `brutus` come in two flavors: **evolutionary tracks** which follow the evolution of a *particular* star *over* time or **isochrones** which model the behavior of a *population* of stars at *fixed* time. These can be downloaded using several convenience functions, which alias simple `wget` commands. Note that the outputs will likely show up in your terminal. fpath = '../data/DATAFILES/' # example path butils.fetch_tracks(target_dir=fpath, # target directory track='MIST_1.2_vvcrit0.0') # name # If the filename is not specified, then the most recent version will be downloaded. You can see all available options in the function documentation. butils.fetch_isos(target_dir=fpath) # ## Neural Networks # To generate photometry with these models, you also need to download the associated **neural networks** (NNs), which interpolate over a grid of atmospheric models. butils.fetch_nns(target_dir=fpath) # ## Model Grids # In addition to the model files, you can also download pre-generated **model grids** that have been used in previous publications. butils.fetch_grids(target_dir=fpath, grid='bayestar_v5') # These are associated with **photometric offsets** that have been calibrated with low-reddening stars with high signal-to-noise parallax measurements. 
butils.fetch_offsets(target_dir=fpath, grid='bayestar_v5') # ## Dust Maps # Finally, `brutus` is able to use 3-D dust maps as 3-D priors over extinction. butils.fetch_dustmaps(target_dir=fpath, dustmap='bayestar19') # # File Names # While downloading the files is easy enough, other portions of the code are set up to read in particular file names. We can check the exact file names easily enough. from os import listdir listdir(fpath)
demos/Overview 0 - Downloading Files.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "skip"} # ![UKDS Logo](./images/UKDS_Logos_Col_Grey_300dpi.png) # + [markdown] slideshow={"slide_type": "skip"} # # Introduction to Python for Social Scientists # + [markdown] slideshow={"slide_type": "skip"} # Welcome to the <a href="https://ukdataservice.ac.uk/" target=_blank>UK Data Service</a> training series on *New Forms of Data for Social Science Research*. This series guides you through some of the most common and valuable new sources of data available for social science research: data collected from websites, social media platorms, text data, conducting simulations (agent based modelling), to name a few. To help you get to grips with these new forms of data, we provide webinars, interactive notebooks containing live programming code, reading lists and more. # # * To access training materials for the entire series: <a href="https://github.com/UKDataServiceOpen/new-forms-of-data" target=_blank>[Training Materials]</a> # # * To keep up to date with upcoming and past training events: <a href="https://ukdataservice.ac.uk/news-and-events/events" target=_blank>[Events]</a> # # * To get in contact with feedback, ideas or to seek assistance: <a href="https://ukdataservice.ac.uk/help.aspx" target=_blank>[Help]</a> # # <a href="https://www.research.manchester.ac.uk/portal/julia.kasmire.html" target=_blank>Dr <NAME></a> and <a href="https://www.research.manchester.ac.uk/portal/diarmuid.mcdonnell.html" target=_blank>Dr <NAME></a> <br /> # UK Data Service <br /> # University of Manchester <br /> # May 2020 # + [markdown] slideshow={"slide_type": "skip"} # ## Introduction # # Computational methods for collecting, cleaning and analysing data are an increasingly important component of a social scientist’s toolkit. 
Central to engaging in these methods is the ability to write readable and effective code using a programming language. # # In this training series we demonstrate core programming concepts and methods through the use of social science examples. In particular we focus on four areas of programming/computational social science: # 1. Introduction to Python. [Focus of this notebook] # 2. Collecting data I: web-scraping. # 3. Collecting data II: APIs. # 4. Setting up your computational environment. # # ### Aims # # This lesson - **Introduction to Python** - has two aims: # 1. Demonstrate how to use Python for a typical social science research activity. # 2. Cultivate your computational thinking skills through a coding example. # + [markdown] slideshow={"slide_type": "skip"} # ## Guide to using this resource # # This learning resource was built using <a href="https://jupyter.org/" target=_blank>Jupyter Notebook</a>, an open-source software application that allows you to mix code, results and narrative in a single document. As <a href="https://jupyter4edu.github.io/jupyter-edu-book/" target=_blank>Barba et al. (2019)</a> espouse: # > In a world where every subject matter can have a data-supported treatment, where computational devices are omnipresent and pervasive, the union of natural language and computation creates compelling communication and learning opportunities. # # If you are familiar with Jupyter notebooks then skip ahead to the main content (*What is a programming language?*). Otherwise, the following is a quick guide to navigating and interacting with the notebook. # + [markdown] slideshow={"slide_type": "slide"} # ### Interaction # # **You only need to execute the code that is contained in sections which are marked by `In []`.** # # To execute a cell, click or double-click the cell and press the `Run` button on the top toolbar (you can also use the keyboard shortcut Shift + Enter). 
#
# Try it for yourself:

# + slideshow={"slide_type": "subslide"}
print("Enter your name and press enter:")
name = input()
print("\r")
print("Hello {}, enjoy learning more about Python!".format(name))

# + [markdown] slideshow={"slide_type": "skip"}
# ### Learn more
#
# Jupyter notebooks provide rich, flexible features for conducting and documenting your data analysis workflow. To learn more about additional notebook features, we recommend working through some of the <a href="https://github.com/darribas/gds19/blob/master/content/labs/lab_00.ipynb" target=_blank>materials</a> provided by <NAME>-Bel at the University of Liverpool.

# + [markdown] slideshow={"slide_type": "skip"}
# ## What is a programming language?
#
# In essence, a *programming language* is a set of instructions through which humans can interact with a computer. Similar to a spoken language, there are grammatical (e.g., specifying commands correctly) and syntactical (e.g., arranging commands in the correct order) rules that need to be followed.

# + [markdown] slideshow={"slide_type": "skip"}
# ### Vocabulary
#
# Like learning any new language (spoken or programming), much of the difficulty arises from getting to grips with an unfamiliar vocabulary. The following are some general programming terms, adapted from Brooker (2020), that are worth keeping in mind as you progress on your computational odyssey:
#
# * **Programming language** - a means of interacting with and issuing instructions to a computer.
# * **Programming** - the practice of using a programming language (also known as *coding*).
# * **Code** - the written down instructions that result from programming.
# * **Script** - a collection of code.
# * **Shell** - a tool that allows you to write and execute code e.g., using R without RStudio, using the Command Line Interface (CLI) on your computer.
# * **Debugging** - fixing errors or issues with your code.
# * **Testing** - the process of discovering errors or issues with your code.
# + [markdown] slideshow={"slide_type": "skip"} # ### What is Python? # # Python is an open-source, general purpose, high level and extensible programming language. These terms may be unfamiliar, especially in this context, so let's take them one-by-one: # * **Open source** - the source code that underpins Python is freely available for others to use, modify and share, as long as these activities comply with the <a href="https://docs.python.org/3/license.html" target=_blank>license</a>. Watch this neat <a href="https://www.youtube.com/watch?v=Tyd0FO0tko8" target=_blank>overview</a> of open-source software to learn more. # * **General purpose** - Python can be used for a multitude of programming activities, such as web development, scientific computing, software development, system administration, and more. # * **High level** - the way Python is written is highly abstracted from the language your machine uses to send, receive and store information. Computers receive instructions in a language called *binary*, which is a series of 1s and 0s that represent a range of characters. For example, here is my name ("Diarmuid") represented as binary: <br><br>01000100 01101001 01100001 01110010 01101101 01110101 01101001 01100100<br><br>The first sequence of 8 bits (known as a "byte") represents the letter "D", the second sequence the letter "i" and so on. It would be difficult and tedious in the extreme to write programming code in sequences of binary, hence the creation of high-level languages that are easier to read, write and understand by humans. # * **Extensible** - Python's functionality, capabilities and range of uses can be expanded. This is often achieved through the creation and sharing of additional, open-source add-ons (known as *packages*). For example, the `BeautifulSoup` module was created to enable Python users to extract information from web pages. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Python for social science research # + [markdown] slideshow={"slide_type": "subslide"} # Python provides some really nice methods and techniques for social scientists. It can be used to download and view files: # + slideshow={"slide_type": "subslide"} import requests accounts = "http://apps.charitycommission.gov.uk/Accounts/Ends35/0000211535_AC_20190331_E_C.PDF" outfile = "annual-accounts-211535-2019.pdf" response = requests.get(accounts, allow_redirects=True) with open(outfile, "wb") as f: # with the file open in "write binary" mode, and giving it a shorter name (f) f.write(response.content) # + slideshow={"slide_type": "subslide"} from IPython.display import IFrame IFrame(outfile, width=800, height=500) # + [markdown] slideshow={"slide_type": "subslide"} # It can produce some interesting, interactive visualisations: # + slideshow={"slide_type": "subslide"} IFrame("images/cast-sankey-diagram-2020-03-27.html", width=800, height=600) # + [markdown] slideshow={"slide_type": "subslide"} # It can do lots more, including natural language processing, machine learning, text mining etc. However, in this lesson we're going to focus on learning to walk before we start to run. Therefore, we'll focus on some core data manipulation skills that many social scientists could benefit from possessing. # + [markdown] slideshow={"slide_type": "slide"} # ### Manipulating data # # Let's introduce some of the fundamentals of Python through a typical social science research activity: **creating a sampling frame**. # > A sampling frame is a list or other device used to define a researcher's population of interest. (Lewis-Beck et al., 2004) # + [markdown] slideshow={"slide_type": "skip"} # Though not as exciting as other social science activities (e.g., data visualisation), constructing a sampling frame requires a reasonable level of data and computational literacy, as we will soon demonstrate. 
It is also perfect for demonstrating how Python can be used to interact with your computer and data. # + [markdown] slideshow={"slide_type": "slide"} # ### Defining the problem # # Programming is fundamentally about solving problems, so let's reframe our social science research activity in those terms. # # > Jane is a sociologist who is completing a mixed methods PhD. The research design involves three waves of surveys sent to individuals, followed by semi-structured interviews with a subset of the survey respondents. The surveys have been completed and now her main task is to construct a sampling frame of individuals who could participate in the interview phase. # + [markdown] slideshow={"slide_type": "subslide"} # While this is an important first step, the problem can seem quite daunting when defined in such broad terms. Therefore it helps to *decompose* the problem into smaller steps: # 1. Locate the files containing the data we need. # 2. Open each file and extract its contents. # 3. Append the contents of each file together to create a master file of all responses (i.e., our sampling frame). # 4. Save the sampling frame as a separate file. # 5. Produce a random sample of responses using the sampling frame and save this as a separate file; the random sample represents individuals who would then be contacted about participating in the semi-structured interviews. # + [markdown] slideshow={"slide_type": "skip"} # What we've just written is called *pseudo-code*, as it captures the main tasks and the order in which they need to be run, but isn't code that a programming language can understand (e.g., you can't just tell Python "*Hey, locate the files I need for my project*"). # # We're going to use some real, open data in this lesson: individual responses to the 1961, 1971 and 1981 UK censuses [<a href="https://www.ukdataservice.ac.uk/get-data/open-data.aspx" target=_blank>Available here</a>]. 
These data sets contain a 1% sample of all individuals who responded to each census, and contain a subset of variables relating to each respondent (e.g., sex, age). We won't concern ourselves too much with the contents of these data sets, just how we can manipulate them to solve our research problem (creating a sampling frame). # + [markdown] slideshow={"slide_type": "slide"} # ### Setting up Python # # Python already has lots of functionality available when you first launch it. For example, we can perform calculations like so: # + slideshow={"slide_type": "subslide"} 43 * 105 # + [markdown] slideshow={"slide_type": "subslide"} # Or we can print statements to the screen: # + slideshow={"slide_type": "fragment"} print("Python cannot be that difficult to learn, right?") # + [markdown] slideshow={"slide_type": "subslide"} # Often though, we need to *import* additional functionality specific to the activity at hand. # + slideshow={"slide_type": "subslide"} # Import modules import os # module for navigating your machine (e.g., file directories) import csv # module for working with delimited text files import pandas as pd # module for handling data from datetime import datetime # module for working with dates and time print("Succesfully imported necessary modules") # - # !pip freeze # + [markdown] slideshow={"slide_type": "skip"} # Modules are additional techniques or functions that are not present when you launch Python. Some do not even come with Python when you download it and must be installed on your machine separately - think of using `ssc install <package>` in Stata, or `install.packages(<package>)` in R. For now just understand that many useful modules need to be imported every time you start a new Python session. # + [markdown] slideshow={"slide_type": "slide"} # ### Working with directories # # (A directory is another name for a folder.) 
# + [markdown] slideshow={"slide_type": "skip"} # A key task in any research project is setting up the directory structure in a logical and organised manner (Ferretti et al., 2019). While we're sure you won't find this task very interesting, not giving this enough thought leads to some very frustrating scenarios, such as: # * Raw data and clean data being stored in the same folder. # * File names which only serve to confuse - which is the latest version: *Thesis Chapter 5 final.docx* or *Thesis Chapter 5 final UPDATED.docx*? # * Accidently deleting files because they are stored in the wrong folder. # # In addition, a messy, non-sensical directory structure severely hinders your ability to collaborate with others (or yourself in the future). Think of being near the end of a project and having to update a chapter or article: can you find the clean data and recreate the steps that produced the table? Can you find the quotes critical to evidencing a particular theme? # # Thankfully Python provides techniques for creating and navigating your directory structure. # + [markdown] slideshow={"slide_type": "subslide"} # #### Locating current folder # # The first thing we can ask Python to do is tell us where we currently are on our computer. That is, where is the file we are currently using located? # + slideshow={"slide_type": "fragment"} os.getcwd() # + [markdown] slideshow={"slide_type": "skip"} # Let's unpack the `os.getcwd()` command: There is a module called `os` which provides various methods for working with files and folders on your computer. One of these methods is called `getcwd()`, which returns a value indicating which folder is acting as the current working directory. # + [markdown] slideshow={"slide_type": "subslide"} # And what files are located in this folder? 
# + slideshow={"slide_type": "fragment"} os.listdir() # + [markdown] slideshow={"slide_type": "subslide"} # #### Examining folders # # Let's take a look at the folder containing the data files we need to process: # + slideshow={"slide_type": "fragment"} os.listdir("responses") # + [markdown] slideshow={"slide_type": "skip"} # The `os.listdir()` command lists the contents of a given directory. # # Remember how we said Python understands where we are currently on our machine? This is a very useful feature, as it saves us having to worry about the *absolute* path to a file. For example, here is the full path to the data files: # + slideshow={"slide_type": "subslide"} os.path.abspath("responses") # + [markdown] slideshow={"slide_type": "subslide"} # Using *relative* rather than *absolute* paths to locate files and folders really comes in handy when you need to move your files to another computer, or you're working as part of team and each member has their own machine. # + [markdown] slideshow={"slide_type": "subslide"} # #### Creating folders # # Finally, we will create a folder to store the sampling frame file: # + slideshow={"slide_type": "fragment"} #os.mkdir("sampling-frame") os.listdir() # list contents of current working directory # + [markdown] slideshow={"slide_type": "slide"} # ### Working with files # # We saw in the previous section that the files we need are located in a folder called "responses". Our first task is to open one of these files, and we begin by telling Python where to find it: # + slideshow={"slide_type": "fragment"} census_1961_file = "responses/census_1961.csv" chickenchicken = "responses/census_1961.csv" print(census_1961_file, chickenchicken) # + [markdown] slideshow={"slide_type": "skip"} # Before opening the file, let's address the new element in above previous command. We defined a variable called `census_1961_file` to store the location and name of the 1961 census file. 
Note how the value of this variable is enclosed in double quotes (""): this denotes that it is a *string* variable i.e., it stores values that should be treated as text. # # Defining a variable means we no longer have to type `"responses/census_1961.csv"` when we want to refer to or interact with this file. Instead, Python knows the `census_1961_file` variable stores this information for us. # # Let's open and read in the content of the file using the `pandas` module we imported earlier: # + slideshow={"slide_type": "subslide"} census_1961_data = pd.read_csv(census_1961_file, encoding = "ISO-8859-1", index_col=False) # + [markdown] slideshow={"slide_type": "skip"} # Let's unpack what the command is doing, starting on the right-hand side of the "=" sign. We use the `pandas` module, referring to it by its abbreviation `pd`. From `pandas` we employ the `read_csv()` method and supply it with three arguments: a CSV file to read (`census_1961_file`), a means of interpreting the contents of the file (`encoding = "ISO-8859-1"`), and an instruction not to create what's called an index column (`index_col=False`). # # Don't stress about knowing which arguments are necessary and which are optional: just refer to the help documentation for a given module (e.g., <a href="https://pandas.pydata.org/docs/" target=_blank>pandas</a>). 
# + [markdown] slideshow={"slide_type": "subslide"}
# `pandas` provides lots of useful functionality for manipulating and exploring data sets, such as viewing a sample of observations:

# + slideshow={"slide_type": "subslide"}
census_1961_data.sample(10)

# + [markdown] slideshow={"slide_type": "subslide"}
# Now that we've solved the issue of finding and reading files, let's apply this solution to the other census data sets:

# + slideshow={"slide_type": "subslide"}
census_1971_file = "responses/census_1971.csv"
census_1981_file = "responses/census_1981.csv"

census_1971_data = pd.read_csv(census_1971_file, encoding = "ISO-8859-1", index_col=False)
census_1981_data = pd.read_csv(census_1981_file, encoding = "ISO-8859-1", index_col=False)

# + [markdown] slideshow={"slide_type": "slide"}
# ### Creating a sampling frame

# + [markdown] slideshow={"slide_type": "skip"}
# If you're somebody with experience of data analysis using SPSS or Stata for example, you may have noticed a significant advantage of Python: namely the ability to hold multiple data sets in memory at the same time (R can do this also).

# + [markdown] slideshow={"slide_type": "subslide"}
# Back to our next problem: combining the three census data sets to produce a master data set of all respondents (our sampling frame). The first thing we'll do is create a new variable by copying one of the existing data sets:

# + slideshow={"slide_type": "subslide"}
# .copy() makes a genuine copy; plain assignment would only create an alias,
# so later modifications could unexpectedly affect census_1961_data too
census_all_data = census_1961_data.copy()
census_all_data.sample(5)

# + [markdown] slideshow={"slide_type": "subslide"}
# Now we append observations from the other data sets to the bottom of `census_all_data`:

# + slideshow={"slide_type": "subslide"}
census_all_data = census_all_data.append([census_1971_data, census_1981_data])

# + [markdown] slideshow={"slide_type": "skip"}
# Let's quickly highlight an efficient bit of coding. Note that we supplied the `append()` method with two data sets at once (`[census_1971_data, census_1981_data]`).
The use of square brackets indicates to Python it is working with a *list* variable (i.e., it contains more than one value). The alternative would be two separate append commands: # # (Note how you can tell Python not to run certain commands by prefixing them with the `#` symbol) # + slideshow={"slide_type": "subslide"} # census_all_data = census_all_data.append(census_1971_data) # census_all_data = census_all_data.append(census_1981_data) # + [markdown] slideshow={"slide_type": "subslide"} # We need to check whether we have the correct number of observations in the new data set. Using simple arithmetic, we know the sample frame should equal the sum of the number of observations across the three census data sets. First, we need to know how to capture the number of observations in each data set: # + slideshow={"slide_type": "subslide"} len(census_all_data) # + slideshow={"slide_type": "subslide"} len(census_1961_data) # + [markdown] slideshow={"slide_type": "skip"} # The `len()` function - which is available for use when you launch Python, no importing necessary - returns the number of observations in a data set. # # Now we can ask Python to evaluate whether the length of the master data set equals the sum of the lengths of each census data set: # + slideshow={"slide_type": "subslide"} len(census_all_data) == len(census_1961_data) + len(census_1971_data) + len(census_1981_data) # + [markdown] slideshow={"slide_type": "skip"} # The above command is a Boolean expression, as evidenced by the fact it returns only one of the following two values: `True` or `False`. Boolean expressions are tremendously useful for evaluating whether a condition is met and thus controlling the *flow* of your code: if a condition is met, do one thing; if not, do something else. You can learn more about Boolean logic in chapter 21 of <a href="https://assets.digitalocean.com/books/python/how-to-code-in-python.pdf" target=_blank>How to Code in Python</a>. 
# + [markdown] slideshow={"slide_type": "slide"} # ### Producing a random sample # # By combining three census data sets, we now have a sampling frame containing `1,562,660` respondents; obviously this is far too many individuals to contact about participating in follow-up interviews, so let's take a simple random sample: # + slideshow={"slide_type": "subslide"} census_random_sample = census_all_data.sample(frac=.01) len(census_random_sample) # + [markdown] slideshow={"slide_type": "skip"} # The above command takes a 1% (`frac=.01`) random sample of observations from the sampling frame and saves the result as a new data set (`census_random_sample`). # # Because we used `pandas` to create our data sets, we are able to use this module's methods directly on the data set variable. This probably sounds confusing, so let's see how else we could have generated the random sample: # + slideshow={"slide_type": "subslide"} census_random_sample_alt = pd.DataFrame.sample(census_all_data, frac=.01) len(census_random_sample_alt) # + [markdown] slideshow={"slide_type": "slide"} # ### Saving our work # # The final problem to be solved: saving our work. Once again we can lean on the `pandas` module to simplify this task for us: # + slideshow={"slide_type": "subslide"} census_all_data.to_csv("sampling-frame/census-sampling-frame.csv", index=False) census_random_sample.to_csv("sampling-frame/census-random-sample.csv", index=False) # + [markdown] slideshow={"slide_type": "subslide"} # How can we tell it worked? 
We could ask Python to list the contents of the "sampling-frame" folder: # + slideshow={"slide_type": "fragment"} os.listdir("sampling-frame") # + [markdown] slideshow={"slide_type": "subslide"} # Python has certainly created the files, though it's worth checking if the contents are correct: # + slideshow={"slide_type": "fragment"} random_sample_data = pd.read_csv("sampling-frame/census-random-sample.csv", encoding = "ISO-8859-1", index_col=False) random_sample_data # Display the sample to confirm its contents # + [markdown] slideshow={"slide_type": "subslide"} # And voilà, we have successfully solved our sampling frame research problem! # + [markdown] slideshow={"slide_type": "slide"} # ## What have we learned? # # Let's recap what key skills and techniques we've learned: # * **How to import modules**. You will usually need to import modules into Python to support your work. Python does come with some methods and functions that are ready to use straight away, but for computational social science tasks you'll almost certainly need to import some additional modules. # * **How to navigate, create and delete folders**. You can use Python to navigate your directory structure using *relative* or *absolute* paths (with the former much preferred for reasons of collaboration and project portability). # * **How to read and manipulate data in files**. Plenty of software packages (e.g., Stata, SPSS) and programming languages (e.g., R) provide functionality for working with data. Python is no different, and we consider it to have considerable advantages over other tools. # * **How to do all of the above in an efficient, clear and effective manner**. # + [markdown] slideshow={"slide_type": "skip"} # ## Conclusion # # Python is a very powerful programming language, brimful of methods - data manipulation, web-scraping, natural language processing, interactive data visualisations - that are of great use to social scientists. 
# # Jane's research task, creating a sampling frame, could have been solved using a manual approach: creating folders using `right-click > New folder`, opening each file individually and copy-and-pasting rows into a new file etc. While we do not advocate using Python for every task or project, we encourage you to think clearly about the advantages of adopting a computational approach: # * **Scalability** - What happens if there are 10, 100, or 1000 files instead of 3? # * **Accuracy** - What if Jane makes a mistake when copy-and-pasting records from the individual files? How will she know she's made a mistake? # * **Reproducibility** - What if Jane loses the sampling frame file? What if Jane wants to collaborate with another researcher? Does that person need to manually create the same folders, recreate or update the sampling frame? # * **Automatability** - What if new data becomes available on a monthly basis? Does Jane need to set a reminder in her calendar to download the latest data? # # Like learning a spoken language, your initial attempts at writing code are rudimentary and frustrating. You'll find yourself only able to write the same simple commands, wondering when Python's conventions will become second nature. However, with practice and immersion (through a relevant project) you will find your ability increases rapidly. In our assessment, the learning curve for social scientists developing their programming skills is steep at the beginning ("What the hell is a loop!?"), gentle in the middle ("Huh, so that's how you scrape a website"), and steep once again at the end ("Why did God invent neural networks..."). # # We promise though, with a modest investment of time and energy you will be surprised what opportunities emerge from knowing a little bit of Python. # # Good luck on your programming adventures! # + [markdown] slideshow={"slide_type": "skip"} # ## Bibliography # # <NAME>. et al. (2019). *Teaching and Learning with Jupyter*. 
<a href="https://jupyter4edu.github.io/jupyter-edu-book/" target=_blank>https://jupyter4edu.github.io/jupyter-edu-book/</a>. # # <NAME>. (2020). *Programming with Python for Social Scientists*. London: SAGE Publications Ltd. # # <NAME>., <NAME>., & <NAME>. (2019). *Code Camp: 2019 (v1.0)*. <a href="http://doi.org/10.5281/zenodo.3474043" target=_blank>http://doi.org/10.5281/zenodo.3474043</a> # # <NAME>. (n.d.). *How to Code in Python 3*. https://assets.digitalocean.com/books/python/how-to-code-in-python.pdf # + [markdown] slideshow={"slide_type": "skip"} # ## Further reading and resources # # We hope this brief lesson has whetted your appetite for learning more about Python and programming in general. There are some fantastic learning materials available to you, many of them free. We highly recommend the materials referenced in the Bibliography. In addition, you may find the list of useful books, papers, websites and other resources on our web-scraping Github repository worth referencing: <a href="https://github.com/UKDataServiceOpen/web-scraping/tree/master/reading-list/" target=_blank>[Reading list]</a> # + [markdown] slideshow={"slide_type": "skip"} # --END OF FILE--
code/ukds-intro-to-python-2020-05-06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Market Sharing # # ## Objective and Prerequisites # # In this example, we’ll show you how to solve a goal programming problem that involves allocating the retailers to two divisions of a company in order to optimize the trade-offs of several market sharing goals. You’ll learn how to create a mixed integer linear programming model of the problem using the Gurobi Python API and how to find an optimal solution to the problem using the Gurobi Optimizer. # # This model is example 13 from the fifth edition of Model Building in Mathematical Programming by <NAME> on pages 267-268 and 322-324. # # This modeling example is at the beginner level, where we assume that you know Python and that you have some knowledge about building mathematical optimization models. The reader should also consult the [documentation](https://www.gurobi.com/resources/?category-filter=documentation) # of the Gurobi Python API. # # **Download the Repository** <br /> # You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). # # **Gurobi License** <br /> # In order to run this Jupyter Notebook properly, you must have a Gurobi license. 
If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MUI-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_MARKET_SHARING_COM_EVAL_GitHub&utm_term=Market_Sharing&utm_content=C_JPM) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-EDU-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_MARKET_SHARING_ACADEMIC_EVAL_GitHub&utm_term=Market_Sharing&utm_content=C_JPM) as an *academic user*. # ## Problem Description # # A large company has two divisions: D1 and D2. The company supplies retailers with oil and spirit. The goal is to allocate each retailer to either division D1 or division D2. The allocated division will be the retailer’s supplier. As far as possible, the allocation must be made so that D1 controls 40% of the market and D2 the remaining 60%. The retailers in the table below are listed as M1 to M23. Each retailer has an estimated market for oil and spirit. Retailers M1 to M8 are in region 1, retailers M9 to M18 are in region 2, and retailers M19 to M23 are in region 3. Certain retailers are considered to have good growth prospects and categorized as group A and the others are in group B. Each retailer has a certain number of delivery points. # ![retailers](retailers.PNG) # # We want to make the 40%/60% split between D1 and D2 in each of the following categories: # 1. Total number of delivery points # 2. Control of spirit market # 3. Control of oil market in region 1 # 4. Control of oil market in region 2 # 5. Control of oil market in region 3 # 6. Number of retailers in group A # 7. Number of retailers in group B. # # There is flexibility in that any market share may vary by $\pm$ 5%. That is, the share can vary between the limits 35%/65% and 45%/55%. The objective is to minimize the sum of the percentage deviations from the 40%/60% split. 
# ## Model Formulation # # ### Sets and Indices # # $r \in \text{Retailers}=\{\ 1,2,...,23\}$ # # ### Parameters # # $\text{deliveryPoints}_{r} \in \mathbb{N}^+$: Delivery points of retailer $r$. # # $\text{spiritMarket}_{r} \in \mathbb{R}^+$: Spirit market -in millions of gallons, of retailer $r$. # # $\text{oilMarket1}_{r} \in \mathbb{R}^+$: Oil market -in millions of gallons of retailer $r$ in Region 1. # # $\text{oilMarket2}_{r} \in \mathbb{R}^+$: Oil market -in millions of gallons of retailer $r$ in Region 2. # # $\text{oilMarket3}_{r} \in \mathbb{R}^+$: Oil market -in millions of gallons of retailer $r$ in Region 3. # # $\text{retailerA}_{r} \in \{0,1\}$: Parameter has a value of 1 if retailer $r$ belongs to group A. # # $\text{retailerB}_{r} \in \{0,1\}$: Parameter has a value of 1 if retailer $r$ belongs to group B. # # $\text{deliveryPoints40} \in \mathbb{R}^+$: Forty percent of the delivery points. # # $\text{deliveryPoints5} \in \mathbb{R}^+$: Five percent of the delivery points. # # $\text{spiritMarket40} \in \mathbb{R}^+$: Forty percent of the spirit market. # # $\text{spiritMarket5} \in \mathbb{R}^+$: Five percent of the spirit market. # # $\text{oilMarket1_40} \in \mathbb{R}^+$: Forty percent of the oil market in region 1. # # $\text{oilMarket1_5} \in \mathbb{R}^+$: Five percent of the oil market in region 1. # # $\text{oilMarket2_40} \in \mathbb{R}^+$: Forty percent of the oil market in region 2. # # $\text{oilMarket2_5} \in \mathbb{R}^+$: Five percent of the oil market in region 2. # # $\text{oilMarket3_40} \in \mathbb{R}^+$: Forty percent of the oil market in region 3. # # $\text{oilMarket3_5} \in \mathbb{R}^+$: Five percent of the oil market in region 3. # # $\text{retailerA40} \in \mathbb{R}^+$: Forty percent of the number of retailers in group A. # # $\text{retailerA5} \in \mathbb{R}^+$: Five percent of the number of retailers in group A. # # $\text{retailerB40} \in \mathbb{R}^+$: Forty percent of the number of retailers in group B. 
# # $\text{retailerB5} \in \mathbb{R}^+$: Five percent of the number of retailers in group B. # # ### Decision Variables # # $\text{allocate}_{r} \in \{0,1\}$: This binary variable is equal 1, if retailer r is allocated to Division 1, and 0 if allocated to Division 2. # # $\text{deliveryPointsPos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the delivery points. # # $\text{deliveryPointsNeg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the delivery points. # # $\text{spiritMarketPos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the spirit market. # # $\text{spiritMarketNeg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the spirit market. # # $\text{oilMarket1Pos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the oil market in region 1. # # $\text{oilMarket1Neg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the oil market in region 1. # # $\text{oilMarket2Pos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the oil market in region 2. # # $\text{oilMarket2Neg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the oil market in region 2. 
# # $\text{oilMarket3Pos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the oil market in region 3. # # $\text{oilMarket3Neg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the oil market in region 3. # # $\text{retailerAPos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the number of retailers in group A. # # $\text{retailerANeg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the number of retailers in group A. # # $\text{retailerBPos} \in \mathbb{R}^+$: This decision variable measures the positive deviation of the retailers’ allocation for the goal of satisfying forty percent of the number of retailers in group B. # # $\text{retailerBNeg} \in \mathbb{R}^+$: This decision variable measures the negative deviation of the retailers’ allocation for the goal of satisfying forty percent of the number of retailers in group B. # # ### Constraints # # **Delivery points**: The allocation of retailers at Division 1 satisfies as much as possible forty percent of the delivery points. # # \begin{equation} # \sum_{r \in \text{Retailers}} \text{deliveryPoints}_{r}*{\text{allocate}_{r}} + \text{deliveryPointsPos} - \text{deliveryPointsNeg} = \text{deliveryPoints40} # \end{equation} # # **Spirit Market**: The allocation of retailers at Division 1 satisfies as much as possible forty percent of the spirit market. 
# # \begin{equation} # \sum_{r \in \text{Retailers}} \text{spiritMarket}_{r}*{\text{allocate}_{r}} + \text{spiritMarketPos} - # \text{spiritMarketNeg} = \text{spiritMarket40} # \end{equation} # # **Oil market region 1**: The allocation of retailers in region 1 at Division 1 satisfies as much as possible forty percent of the oil market in that region. # # \begin{equation} # \sum_{r \in \text{Retailers}} \text{oilMarket1}_{r}*{\text{allocate}_{r}} + \text{oilMarket1Pos} - # \text{oilMarket1Neg} = \text{oilMarket1_40} # \end{equation} # # **Oil market region 2**: The allocation of retailers in region 2 at Division 1 satisfies as much as possible forty percent of the oil market in that region. # # \begin{equation} # \sum_{r \in \text{Retailers}} \text{oilMarket2}_{r}*{\text{allocate}_{r}} + \text{oilMarket2Pos} - # \text{oilMarket2Neg} = \text{oilMarket2_40} # \end{equation} # # **Oil market region 3**: The allocation of retailers in region 3 at Division 1 satisfies as much as possible forty percent of the oil market in that region. # # \begin{equation} # \sum_{r \in \text{Retailers}} \text{oilMarket3}_{r}*{\text{allocate}_{r}} + \text{oilMarket3Pos} - # \text{oilMarket3Neg} = \text{oilMarket3_40} # \end{equation} # # **Group A**: The allocation of retailers at Division 1 satisfies as much as possible forty percent of the retailers in group A. # # \begin{equation} # \sum_{r \in \text{Retailers}} \text{retailerA}_{r}*{\text{allocate}_{r}} + \text{retailerAPos} - # \text{retailerANeg} = \text{retailerA40} # \end{equation} # # **Group B**: The allocation of retailers at Division 1 satisfies as much as possible forty percent of the retailers in group B. # # \begin{equation} # \sum_{r \in \text{Retailers}} \text{retailerB}_{r}*{\text{allocate}_{r}} + \text{retailerBPos} - # \text{retailerBNeg} = \text{retailerB40} # \end{equation} # # **Flexibility**: There is flexibility in that any market share may vary by $\pm$ 5%. 
# # $$ # \text{deliveryPointsPos} \leq \text{deliveryPoints5} # $$ # # $$ # \text{deliveryPointsNeg} \leq \text{deliveryPoints5} # $$ # # $$ # \text{spiritMarketPos} \leq \text{spiritMarket5} # $$ # # $$ # \text{spiritMarketNeg} \leq \text{spiritMarket5} # $$ # # $$ # \text{oilMarket1Pos} \leq \text{oilMarket1_5} # $$ # # $$ # \text{oilMarket1Neg} \leq \text{oilMarket1_5} # $$ # # $$ # \text{oilMarket2Pos} \leq \text{oilMarket2_5} # $$ # # $$ # \text{oilMarket2Neg} \leq \text{oilMarket2_5} # $$ # # $$ # \text{oilMarket3Pos} \leq \text{oilMarket3_5} # $$ # # $$ # \text{oilMarket3Neg} \leq \text{oilMarket3_5} # $$ # # $$ # \text{retailerAPos} \leq \text{retailerA5} # $$ # # $$ # \text{retailerANeg} \leq \text{retailerA5} # $$ # # $$ # \text{retailerBPos} \leq \text{retailerB5} # $$ # # $$ # \text{retailerBNeg} \leq \text{retailerB5} # $$ # # ### Objective Function # # **Minimize deviations**: Minimize the sum of positive and negative deviations. # # \begin{equation} # \text{Minimize} \quad \text{deliveryPointsPos} + \text{deliveryPointsNeg} + \text{spiritMarketPos} + \text{spiritMarketNeg} + # \text{oilMarket1Pos} + \text{oilMarket1Neg} # \end{equation} # # $$ # + \text{oilMarket2Pos} + \text{oilMarket2Neg} + \text{oilMarket3Pos} + \text{oilMarket3Neg} # $$ # # $$ # + \text{retailerAPos} + \text{retailerANeg} + \text{retailerBPos} + \text{retailerBNeg} # $$ # ## Python Implementation # # We import the Gurobi Python Module and other Python libraries. # + import numpy as np import pandas as pd from itertools import product import gurobipy as gp from gurobipy import GRB # tested with Python 3.7.0 & Gurobi 9.0 # - # ## Input data # # We define all the input data for the model. # + # Create a dictionary to capture the delivery points and spirit market -in millions of gallons. 
retailers, deliveryPoints, spiritMarket = gp.multidict({ (1): [11,34], (2): [47,411], (3): [44,82], (4): [25,157], (5): [10,5], (6): [26,183], (7): [26,14], (8): [54,215], (9): [18,102], (10): [51,21], (11): [20,54], (12): [105,0], (13): [7,6], (14): [16,96], (15): [34,118], (16): [100,112], (17): [50,535], (18): [21,8], (19): [11,53], (20): [19,28], (21): [14,69], (22): [10,65], (23): [11,27] }) # Create a dictionary to capture the oil market -in millions of gallons for region 1. retailers1, oilMarket1 = gp.multidict({ (1): 9, (2): 13, (3): 14, (4): 17, (5): 18, (6): 19, (7): 23, (8): 21 }) # Create a dictionary to capture the oil market -in millions of gallons for region 2. retailers2, oilMarket2 = gp.multidict({ (9): 9, (10): 11, (11): 17, (12): 18, (13): 18, (14): 17, (15): 22, (16): 24, (17): 36, (18): 43 }) # Create a dictionary to capture the oil market -in millions of gallons for region 3. retailers3, oilMarket3 = gp.multidict({ (19): 6, (20): 15, (21): 15, (22): 25, (23): 39 }) # Create a dictionary to capture retailers in group A. groupA, retailerA = gp.multidict({ (1): 1, (2): 1, (3): 1, (5): 1, (6): 1, (10): 1, (15): 1, (20): 1 }) # Create a dictionary to capture retailers in group B. groupB, retailerB = gp.multidict({ (4): 1, (7): 1, (8): 1, (9): 1, (11): 1, (12): 1, (13): 1, (14): 1, (16): 1, (17): 1, (18): 1, (19): 1, (21): 1, (22): 1, (23): 1 }) # Forty and five percentages of each goal deliveryPoints40 = 292 deliveryPoints5 = 36.5 spiritMarket40 = 958 spiritMarket5 = 119.75 oilMarket1_40 = 53.6 oilMarket1_5 = 6.7 oilMarket2_40 = 86 oilMarket2_5 = 10.75 oilMarket3_40 = 40 oilMarket3_5 = 5 retailerA40 = 3.2 retailerA5 = 0.4 retailerB40 = 6 retailerB5 = 0.75 # - # ## Model Deployment # # We create a model and the variables. The main decision variable is a binary variable that is equal to 1 when a retailer is allocated to Division 1, and 0 when allocated it to Division 2. 
The rest of the decision variables measure positive and negative deviations from the seven goals of the 40%/60% split. # + model = gp.Model('MarketSharing') # Allocation of retailers to Division 1. allocate = model.addVars(retailers, vtype=GRB.BINARY, name="allocate") # Positive and negative deviation of delivery points goal. deliveryPointsPos = model.addVar(ub= deliveryPoints5, name='deliveryPointsPos') deliveryPointsNeg = model.addVar(ub= deliveryPoints5, name='deliveryPointsNeg') # Positive and negative deviation of spirit market goal. spiritMarketPos = model.addVar(ub=spiritMarket5, name='spiritMarketPos') spiritMarketNeg = model.addVar(ub=spiritMarket5, name='spiritMarketNeg') # Positive and negative deviation of oil market in region 1 goal. oilMarket1Pos = model.addVar(ub=oilMarket1_5, name='oilMarket1Pos') oilMarket1Neg = model.addVar(ub=oilMarket1_5, name='oilMarket1Neg') # Positive and negative deviation of oil market in region 2 goal. oilMarket2Pos = model.addVar(ub=oilMarket2_5, name='oilMarket2Pos') oilMarket2Neg = model.addVar(ub=oilMarket2_5, name='oilMarket2Neg') # Positive and negative deviation of oil market in region 3 goal. oilMarket3Pos = model.addVar(ub=oilMarket3_5, name='oilMarket3Pos') oilMarket3Neg = model.addVar(ub=oilMarket3_5, name='oilMarket3Neg') # Positive and negative deviation of retailers in group A goal. retailerAPos = model.addVar(ub=retailerA5, name='retailerAPos') retailerANeg = model.addVar(ub=retailerA5, name='retailerANeg') # Positive and negative deviation of retailers in group B goal. retailerBPos = model.addVar(ub=retailerB5, name='retailerBPos') retailerBNeg = model.addVar(ub=retailerB5, name='retailerBNeg') # - # The allocation of retailers at Division 1 satisfies as much as possible forty percent of the delivery points. # + # Delivery points constraint. 
DPConstr = model.addConstr((gp.quicksum(deliveryPoints[r]*allocate[r] for r in retailers) + deliveryPointsPos - deliveryPointsNeg == deliveryPoints40), name='DPConstrs') # - # The allocation of retailers at Division 1 satisfies as much as possible forty percent of the spirit market. # + # Spirit market constraint. SMConstr = model.addConstr((gp.quicksum(spiritMarket[r]*allocate[r] for r in retailers) + spiritMarketPos - spiritMarketNeg == spiritMarket40), name='SMConstr') # - # The allocation of retailers in region 1 at Division 1 satisfies as much as possible forty percent of the oil market in that region. # + # Oil market in region 1 constraint. OM1Constr = model.addConstr((gp.quicksum(oilMarket1[r]*allocate[r] for r in retailers1) + oilMarket1Pos - oilMarket1Neg == oilMarket1_40), name='OM1Constr') # - # The allocation of retailers in region 2 at Division 1 satisfies as much as possible forty percent of the oil market in that region. # + # Oil market in region 2 constraint. OM2Constr = model.addConstr((gp.quicksum(oilMarket2[r]*allocate[r] for r in retailers2) + oilMarket2Pos - oilMarket2Neg == oilMarket2_40), name='OM2Constr') # - # The allocation of retailers in region 3 at Division 1 satisfies as much as possible forty percent of the oil market in that region. # + # Oil market in region 3 constraint. OM3Constr = model.addConstr((gp.quicksum(oilMarket3[r]*allocate[r] for r in retailers3) + oilMarket3Pos - oilMarket3Neg == oilMarket3_40), name='OM3Constr') # - # The allocation of retailers at Division 1 satisfies as much as possible forty percent of the retailers in group A. # + # Group A constraint. AConstr = model.addConstr((gp.quicksum(retailerA[r]*allocate[r] for r in groupA) + retailerAPos - retailerANeg == retailerA40), name='AConstr') # - # The allocation of retailers at Division 1 satisfies as much as possible forty percent of the retailers in group B. # + # Group B constraint. 
BConstr = model.addConstr((gp.quicksum(retailerB[r]*allocate[r] for r in groupB) + retailerBPos - retailerBNeg == retailerB40), name='BConstr') # - # Minimize the sum of positive and negative deviations. # + # Objective function obj = deliveryPointsPos + deliveryPointsNeg+ spiritMarketPos + spiritMarketNeg + oilMarket1Pos + oilMarket1Neg + oilMarket2Pos + oilMarket2Neg + oilMarket3Pos + oilMarket3Neg + retailerAPos + retailerANeg + retailerBPos + retailerBNeg model.setObjective(obj) # + # Verify model formulation model.write('marketSharing.lp') # Run optimization engine model.optimize() # - # ## Analysis # # The allocation of retailers to Division 1 that minimizes the sum of positive and negative deviations from the goal follows. In addition, we show how each goal is within the 35%/45% range values. # + # Output reports print("\n\n_________________________________________________________________________________") print(f"The optimal allocation of retailers to Division 1 is:") print("_________________________________________________________________________________") for r in retailers: if(allocate[r].x > 0.5): print(f"Retailer{r}") #print(f"\nThe optimal objective function value is {model.objVal}") # - # The following report validates that the goals have been satisfied within acceptable 35% and 45% ranges. # + # Test that the solution is within acceptable ranges. 
# Build the goal-compliance report.
#
# For each of the seven goals we report the 35% lower bound, the share actually
# achieved by the retailers allocated to Division 1, and the 45% upper bound.
#
# Fixes over the original cell:
# * `DataFrame.append` was deprecated and removed in pandas 2.0 (and is quadratic
#   when called in a loop); we collect plain dict records and build the frame once.
# * The original only appended 3 of the 7 goal rows (four append statements were
#   commented out) while still incrementing `count` to 7, so the final
#   `goal_ranges.index = [''] * count` would raise a length-mismatch ValueError.
#   All seven goals are now reported, as the surrounding text describes.

def division1_total(members, weights):
    """Sum of `weights[r]` over the retailers r in `members` that the optimal
    solution allocates to Division 1 (binary variable `allocate[r].x > 0.5`)."""
    return sum(weights[r] * allocate[r].x for r in members if allocate[r].x > 0.5)

# (goal name, retailers covered by the goal, per-retailer weight, 40% target)
goal_specs = [
    ('Delivery points', retailers, deliveryPoints, deliveryPoints40),
    ('Spirit market', retailers, spiritMarket, spiritMarket40),
    ('Oil market1', retailers1, oilMarket1, oilMarket1_40),
    ('Oil market2', retailers2, oilMarket2, oilMarket2_40),
    ('Oil market3', retailers3, oilMarket3, oilMarket3_40),
    ('Group A', groupA, retailerA, retailerA40),
    ('Group B', groupB, retailerB, retailerB40),
]

records = [
    {"Goal": name,
     # the 40% targets are rescaled to the acceptable 35%/45% limits
     "Min_35": round(target40 * (0.35 / 0.40), 2),
     "Actual": round(division1_total(members, weights), 2),
     "Max_45": round(target40 * (0.45 / 0.40), 2)}
    for name, members, weights, target40 in goal_specs
]

goal_ranges = pd.DataFrame(records, columns=["Goal", "Min_35", "Actual", "Max_45"])
goal_ranges.index = [''] * len(goal_ranges)  # blank index, as in the original report
goal_ranges
# -

# ## References
#
# <NAME>, Model Building in Mathematical Programming, fifth edition.
#
# Copyright © 2020 Gurobi Optimization, LLC
market_sharing/market_sharing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="Px_dw13ak33X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="481d69d2-d2c3-4b94-b174-8f83af04bd2f"
# Download the Simpsons dataset from Kaggle.
# NOTE: replace XXXX with your own Kaggle credentials; never commit real keys.
# !pip install -q kaggle
# !mkdir -p ~/.kaggle
# !echo '{"username":"XXXX","key":"XXXX"}' > ~/.kaggle/kaggle.json
# !kaggle datasets download -d alexattia/the-simpsons-characters-dataset

# + id="djv2j8vq0l0N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b50a9f2e-8bac-48f4-bcbc-89f3d5b64cf1"
# If running in Google Colab: install a CUDA or CPU build of PyTorch 0.4
# matching the runtime's Python ABI.
from os import path
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag

platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'
# !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.0-{platform}-linux_x86_64.whl torchvision

import torch
print(torch.__version__)
print(torch.cuda.is_available())

# + id="HsIgQFUlbn8B" colab_type="code" colab={}
# Unpack the archives.
# NOTE(review): each "!" line runs in its own subshell, so the `!cd` below has
# no lasting effect -- the following commands rely on explicit paths anyway.
# !unzip -qo the-simpsons-characters-dataset.zip -d the-simpsons-characters-dataset
# !cd the-simpsons-characters-dataset
# !unzip -qo simpsons_dataset.zip -d the-simpsons-characters-dataset/
# !unzip -qo kaggle_simpson_testset.zip -d the-simpsons-characters-dataset/
# !rm ./the-simpsons-characters-dataset/kaggle_simpson_testset/.DS_Store
# !rm ./the-simpsons-characters-dataset/simpsons_dataset/nelson_muntz/.DS_Store

# + id="5SZS7l6bbfCm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b7ea23f6-156e-4ede-97a2-a09b40417793"
from os import listdir

# configure train dataset: one sub-directory per character
train_root_path = "./the-simpsons-characters-dataset/simpsons_dataset"
character_directories = listdir(train_root_path)
#character_directories.remove('.DS_Store')
print("Train: {}".format(character_directories[:5]))

# configure test dataset: flat directory of "<character>_<n>.jpg" files
test_root_path = "./the-simpsons-characters-dataset/kaggle_simpson_testset"
test_image_names = listdir(test_root_path)
#test_image_names.remove('.DS_Store')
print("Test: {}".format(test_image_names[:5]))

# + [markdown] id="G16VAONabfC5" colab_type="text"
# ### Calculate mean width and length from test images

# + id="dWoTzSqwbfC8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="4b488bd1-5617-4efc-dbff-2d5acb542916"
import os, random
# NOTE(review): scipy.misc.imread/imresize were deprecated and removed in
# SciPy >= 1.2; this only works with the old SciPy available at the time
# (use imageio / PIL for new code).
from scipy.misc import imread, imresize

# Average the test images' dimensions to pick a common resize target.
width = 0
length = 0  # renamed from the misspelled 'lenght'
num_test_images = len(test_image_names)
for i in range(num_test_images):
    path_file = os.path.join(test_root_path, test_image_names[i])
    image = imread(path_file)
    width += image.shape[0]
    length += image.shape[1]

width_mean = width // num_test_images
length_mean = length // num_test_images
dim_size = (width_mean + length_mean) // 2

print("Width mean: {}".format(width_mean))
print("Length mean: {}".format(length_mean))
print("Size mean dimension: {}".format(dim_size))

# + [markdown] id="YJJQ-LpXbfDG" colab_type="text"
# Size mean dimension will be used for the resizing process. __All the images will be scaled__ to __(149, 149)__ since it's the average of the test images.
# + [markdown] id="IfyAbSatbfDH" colab_type="text"
# ### Show some test examples

# + id="nhHY2PsFbfDJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="447e9568-f8c6-4d00-a572-fa92465834cd"
import matplotlib.pyplot as plt

# random.randint is inclusive on BOTH ends, so the original upper bound of
# num_test_images could raise an IndexError; use num_test_images - 1.
idx = random.randint(0, num_test_images - 1)
sample_file, sample_name = test_image_names[idx], test_image_names[idx].split('_')[:-1]
path_file = os.path.join(test_root_path, sample_file)
sample_image = imread(path_file)

print("Label:{}, Image:{}, Shape:{}".format('_'.join(sample_name), idx, sample_image.shape))
plt.figure(figsize=(3,3))
plt.imshow(sample_image)
plt.axis('off')
plt.show()

# + [markdown] id="ZHrhrWUqbfDR" colab_type="text"
# ### Making batches (resized)

# + id="LJMtTmEFbfDT" colab_type="code" colab={}
def get_num_of_samples():
    """Count every training image across all character directories."""
    count = 0
    for character in character_directories:
        path = os.path.join(train_root_path, character)
        count += len(listdir(path))
    return count

def get_batch(batch_init, batch_size):
    """Build one balanced batch: about batch_size/num_characters images per
    character, each resized to (dim_size, dim_size).

    Characters with few images are recycled via wrap-around indexing; empty
    directories are skipped.  Returns a dict with parallel lists 'image' and
    'label'.
    """
    data = {'image': [], 'label': []}
    character_batch_size = batch_size // len(character_directories)
    character_batch_init = batch_init // len(character_directories)
    character_batch_end = character_batch_init + character_batch_size
    for character in character_directories:
        path = os.path.join(train_root_path, character)
        images_list = listdir(path)
        # hoisted out of the inner loop: nothing to do for an empty directory
        if len(images_list) == 0:
            continue
        for i in range(character_batch_init, character_batch_end):
            # if this character has a small number of images we repeat them;
            # i % len == i whenever i < len, so this matches the old branching
            p = i % len(images_list)
            path_file = os.path.join(path, images_list[p])
            image = imread(path_file)
            # all images with the same shape
            image = imresize(image, (dim_size, dim_size))
            data['image'].append(image)
            data['label'].append(character)
    return data

def get_batches(num_batches, batch_size, verbose=False):
    """Yield `num_batches` batches of `batch_size` images each.

    Raises AssertionError when more batches are requested than the dataset can
    provide.
    """
    # max number of samples available on disk
    num_samples = get_num_of_samples()
    # check the requested number of batches against the maximum
    max_num_batches = num_samples // batch_size - 1
    if verbose:
        print("Number of samples:{}".format(num_samples))
        print("Batches:{} Size:{}".format(num_batches, batch_size))
    assert num_batches <= max_num_batches, "Surpassed the maximum number of batches"
    for i in range(0, num_batches):
        init = i * batch_size
        if verbose:
            print("Batch-{} yielding images from {} to {}...".format(i, init, init + batch_size))
        yield get_batch(init, batch_size)

# + id="53H0GuvAbfDX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 502} outputId="3504dbc6-0e9b-4e09-f68a-00ab7b787df1"
# testing the generator
batch_size = 500
for b in get_batches(10, batch_size, verbose=True):
    print("\t|- retrieved {} images".format(len(b['image'])))

# + [markdown] id="FD319qQebfDc" colab_type="text"
# ### Preprocessing data

# + id="cHGJqgDYbfDd" colab_type="code" colab={}
from sklearn import preprocessing

# number of character classes
num_characters = len(character_directories)

def normalize(x):
    """Feature-scale `x` to the (0, 1) range so every batch lives in the same
    space.  Guards against a flat (constant) image, which previously caused a
    division by zero."""
    x_min = np.amin(x)
    x_max = np.amax(x)
    if x_max == x_min:
        return np.zeros_like(x, dtype=float)
    return (x - x_min) / (x_max - x_min)

# one-hot encoder fitted on the character names
lb = preprocessing.LabelBinarizer()
lb = lb.fit(character_directories)

def one_hot(label):
    """Return the one-hot (binarized) row for a single character label."""
    return lb.transform([label])

# + [markdown] id="Jey_T8srbfDh" colab_type="text"
# ### Storing preprocessed batches on disk

# + id="rD4X5CpybfDj" colab_type="code" colab={}
num_batches = 40
batch_size = 500

# + id="yLKUDO21bfDn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 165} outputId="4d115705-3f04-46d7-992b-d3a5a6f48331"
import pickle
import numpy as np

cnt_images = 0
for cnt, b in enumerate(get_batches(num_batches, batch_size)):
    data = {'image': [], 'label': []}
    for i in range(min(len(b['image']), batch_size)):
        image = np.array(b['image'][i])
        label = np.array(b['label'][i])
        # keep only 3-channel (RGB) images; other shapes are dropped
        if len(image.shape) == 3:
            data['image'].append(normalize(image))
            data['label'].append(one_hot(label)[-1, :])
            cnt_images += 1
        else:
            print("Dim image < 3")
    with open("simpson_train_{}.pkl".format(cnt), 'wb') as file:
        pickle.dump(data, file, pickle.HIGHEST_PROTOCOL)
print("Loaded {} train images and stored on disk".format(cnt_images))

# + id="TQRJW7CAbfDs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="48861d43-038e-4ffe-88b5-136166a32781"
# testing a load from file
import pickle

with open('simpson_train_0.pkl', 'rb') as file:
    data = pickle.load(file)
print("Example of onehot encoded:\n{}".format(data['label'][0]))
print("Data shape: {}".format(data['image'][0].shape))

# + [markdown] id="FtDsSpX2p0MX" colab_type="text"
# ### NOTE
# Since here the data is already processed and saved as pickle files.

# + [markdown] id="uKHY-7v8bfDw" colab_type="text"
# ### Building the Network

# + id="IdGmHLj4bfDx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e8bff984-5a78-4271-e7a9-0d620393b366"
import torch
import torchvision

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print(device)

# + id="nUczTZx41MeP" colab_type="code" colab={}
import torch.nn as nn
import torch.nn.functional as F

# NOTE(review): hardcoded; presumably this should equal the
# len(character_directories) computed above -- confirm the dataset really has
# 47 classes.
num_characters = 47

class Net(nn.Module):
    """Two conv/pool stages followed by a single fully connected classifier.

    With 3x149x149 inputs (the (149, 149) resize chosen earlier), two
    (conv 5x5, pool 2x2) stages yield a 64x34x34 feature map, which fc1 maps
    to one logit per character.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.fc1 = nn.Linear(64 * 34 * 34, num_characters)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # flatten all feature maps into a single vector per sample
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return x

net = Net()

# + id="lRT1si9t1OmT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="34476c8e-8617-4398-ab42-08dcf28388fa"
# move the neural network to the GPU
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...]
# ...split across the available GPUs along dim 0 (data parallelism)
if torch.cuda.device_count() > 1:
    net = nn.DataParallel(net)
net.to(device)

# + id="TddrNaBC1Q_n" colab_type="code" colab={}
import torch.optim as optim

loss_fn = nn.CrossEntropyLoss()  # built-in softmax, we can use logits directly
optimizer = optim.Adam(net.parameters())

# + id="u-GAP8Gl3DlY" colab_type="code" colab={}
import os
import pickle
from sklearn.model_selection import train_test_split

def getDatasetsFromPickle(file):
    """Load one pickled batch, split it 80/20 into train/validation, and move
    the tensors to `device`.

    Returns (inputs_train, labels_train, inputs_val, labels_val) with images
    permuted from (samples, x, y, channels) to (samples, channels, x, y).
    """
    data = pickle.load(file)
    X_train, X_val, y_train, y_val = train_test_split(data['image'], data['label'], test_size=0.2)
    inputs_train, labels_train = torch.FloatTensor(X_train), torch.FloatTensor(y_train)
    # BUG FIX: the validation tensors were previously built from X_train/y_train,
    # so the model was being "validated" on its own training split; use the
    # held-out X_val/y_val instead.
    inputs_val, labels_val = torch.FloatTensor(X_val), torch.FloatTensor(y_val)
    # permute images from (samples, x, y, channels) to (samples, channels, x, y)
    inputs_train = inputs_train.permute(0, 3, 1, 2)
    inputs_val = inputs_val.permute(0, 3, 1, 2)
    # move the inputs and labels to the GPU (or CPU) device
    return inputs_train.to(device), labels_train.to(device), inputs_val.to(device), labels_val.to(device)

# + id="1vInNIXs1TlC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="af679deb-0425-4c74-c3b8-c44882cd3f77"
stats = {'train_loss': [], 'val_loss': [], 'acc': []}

for epoch in range(3):  # loop over the dataset multiple times
    for i in range(100):
        fname = "simpson_train_{}.pkl".format(i)
        if not os.path.exists(fname):
            # batches are numbered contiguously, so the first gap ends the epoch
            break
        with open(fname, 'rb') as file:
            # retrieve the data for this batch
            inputs_train, labels_train, inputs_val, labels_val = getDatasetsFromPickle(file)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs_train)
        # cross entropy loss doesn't accept onehot encoded targets
        #  |-> use the class indices instead
        lbls_no_onehot_encoded = torch.argmax(labels_train, dim=1)
        loss = loss_fn(outputs, lbls_no_onehot_encoded)
        loss.backward()
        optimizer.step()

        # statistics
        stats['train_loss'].append(loss.item())
        with torch.no_grad():
            outputs = net(inputs_val)
            label_val_classes = torch.argmax(labels_val, dim=1)
            output_classes = torch.argmax(outputs, dim=1)
            stats['val_loss'].append(loss_fn(outputs, label_val_classes).item())
            stats['acc'].append((output_classes == label_val_classes).sum().item() / label_val_classes.size(0))

        # periodic printouts
        if i % 20 == 19:
            printout = "Epoch: {} Batch: {} Training loss: {:.3f} Validation loss: {:.3f} Accuracy: {:.3f}"
            print(printout.format(epoch + 1, i + 1,
                                  stats['train_loss'][-1], stats['val_loss'][-1], stats['acc'][-1]))

print('Finished Training')

# + id="tB8nRYrG3qaB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="af82aaad-5a33-4a08-c12e-08af92e0e8a3"
import matplotlib.pyplot as plt

plt.plot(stats['train_loss'], label='Train Loss')
plt.plot(stats['val_loss'], label='Validation Loss')
plt.plot(stats['acc'], label='Accuracy')
plt.legend()

# + [markdown] id="KQQDBi19bfD9" colab_type="text"
# ### Testing model

# + id="AQmJvgrr6MDO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="19aa30d0-8189-4fa2-aa24-014b2e60bb6d"
import warnings
warnings.filterwarnings('ignore')

# select a random test image; randint is inclusive on both ends, so the
# original bound of num_test_images could raise an IndexError
idx = random.randint(0, num_test_images - 1)
sample_file, sample_name = test_image_names[idx], test_image_names[idx].split('_')[:-1]
path_file = os.path.join(test_root_path, sample_file)

# read and preprocess exactly like the training data
test_image = normalize(imresize(imread(path_file), (dim_size, dim_size)))
test_label_onehot = one_hot('_'.join(sample_name))[-1, :]

# move to tensors
test_image, test_label_onehot = torch.FloatTensor(test_image), torch.FloatTensor(test_label_onehot)
# permute image from (x, y, channels) to (channels, x, y) and add a batch dim
test_image = test_image.permute(2, 0, 1)
test_image.unsqueeze_(0)
# move to the device
test_image, test_label_onehot = test_image.to(device), test_label_onehot.to(device)

with torch.no_grad():
    output = net(test_image)

predicted_character = torch.argmax(output.data, 1)
actual_character = torch.argmax(test_label_onehot)
if predicted_character == actual_character:
    print("Right!!")
else:
    print("Wrong..")

# showing
actual_name = ' '.join([s.capitalize() for s in sample_name])
print("Label: {}".format(actual_name))
pred_name = lb.inverse_transform(output.cpu().numpy()).item()  # copy from cuda to cpu, then to numpy
prediction = ' '.join([s.capitalize() for s in pred_name.split('_')])
print("Prediction: {}".format(prediction))

plt.figure(figsize=(3,3))
plt.imshow(test_image.permute(0, 2, 3, 1).squeeze())
plt.axis('off')
plt.show()
simpsons/Simpsons-PyTorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import linearsolve as ls import matplotlib.pyplot as plt plt.style.use('classic') # %matplotlib inline # # Class 14: Prescott's Real Business Cycle Model I # # In this notebook, we'll consider a centralized version of the model from pages 11-17 in Edward Prescott's article "Theory Ahead of Business Cycle Measurement in the Fall 1986 of the Federal Reserve Bank of Minneapolis' *Quarterly Review* (link to article: https://www.minneapolisfed.org/research/qr/qr1042.pdf). The model is just like the RBC model that we studying in the previous lecture, except that now we include an endogenous labor supply. # ## Prescott's RBC Model with Labor # # The equilibrium conditions for Prescott's RBC model with labor are: # # \begin{align} # \frac{1}{C_t} & = \beta E_t \left[\frac{\alpha A_{t+1}K_{t+1}^{\alpha-1}L_{t+1}^{1-\alpha} +1-\delta }{C_{t+1}}\right]\\ # \frac{\varphi}{1-L_t} & = \frac{(1-\alpha)A_tK_t^{\alpha}L_t^{-\alpha}}{C_t} \\ # Y_t & = A_t K_t^{\alpha}L_t^{1-\alpha}\\ # K_{t+1} & = I_t + (1-\delta) K_t\\ # Y_t & = C_t + I_t\\ # \log A_{t+1} & = \rho \log A_t + \epsilon_{t+1} # \end{align} # # where $\epsilon_{t+1} \sim \mathcal{N}(0,\sigma^2)$. 
# # The objective is to use `linearsolve` to simulate impulse responses to a TFP shock using the following parameter values for the simulation:
#
# | $\rho$ | $\sigma$ | $\beta$ | $\varphi$ | $\alpha$ | $\delta $ |
# |--------|----------|---------|-----------|----------|-----------|
# | 0.75   | 0.006    | 0.99    | 1.7317    | 0.35     | 0.025     |
#
#
# The value for $\beta$ implies a steady state (annualized) real interest rate of about 4 percent:
#
# \begin{align}
# 4 \cdot \left(\beta^{-1} - 1\right) & \approx 0.04040
# \end{align}
#
# $\rho = 0.75$ and $\sigma = 0.006$ are consistent with the statistical properties of the cyclical component of TFP in the US. $\alpha$ is set so that, consistent with the long-run average of the US, the labor share of income is about 65 percent of GDP. The depreciation rate of capital is calibrated to be about 10 percent annually. Finally, $\varphi$ was chosen last to ensure that in the steady state households allocate about 33 percent of their available time to labor.

# ## Model Preparation
#
# Before proceeding, let's recast the model in the form required for `linearsolve`. Write the model with all variables moved to the left-hand side of the equations and dropping the expectations operator $E_t$ and the exogenous shock $\epsilon_{t+1}$:
#
# \begin{align}
# 0 & = \beta\left[\frac{\alpha A_{t+1}K_{t+1}^{\alpha-1}L_{t+1}^{1-\alpha} +1-\delta }{C_{t+1}}\right] - \frac{1}{C_t}\\
# 0 & = \frac{(1-\alpha)A_tK_t^{\alpha}L_t^{-\alpha}}{C_t} - \frac{\varphi}{1-L_t}\\
# 0 & = A_t K_t^{\alpha}L_t^{1-\alpha} - Y_t\\
# 0 & = I_t + (1-\delta) K_t - K_{t+1}\\
# 0 & = C_t + I_t - Y_t\\
# 0 & = \rho \log A_t - \log A_{t+1}
# \end{align}
#
# Remember, capital and TFP are called *state variables* because their $t+1$ values are predetermined. Output, consumption, and investment are called *costate* or *control* variables. Note that the model has 5 equations in 5 endogenous variables.
# # # ## Initialization, Approximation, and Solution # # The next several cells initialize the model in `linearsolve` and then approximate and solve it. # + # Create a variable called 'parameters' that stores the model parameter values in a Pandas Series # Print the model's parameters # + # Create variable called 'varNames' that stores the variable names in a list with state variables ordered first # Create variable called 'shockNames' that stores an exogenous shock name for each state variable. # + # Define a function that evaluates the equilibrium conditions of the model solved for zero. PROVIDED def equilibrium_equations(variables_forward,variables_current,parameters): # Parameters. PROVIDED p = parameters # Current variables. PROVIDED cur = variables_current # Forward variables. PROVIDED fwd = variables_forward # Euler equation # Labor-labor choice # Production function # Capital evolution # Market clearing # Exogenous tfp # Stack equilibrium conditions into a numpy array # - # Next, initialize the model using `ls.model` which takes the following required arguments: # # * `equations` # * `nstates` # * `varNames` # * `shockNames` # * `parameters` # + # Initialize the model into a variable named 'rbc_model' # + # Compute the steady state numerically using .compute_ss() method of rbc_model # Print the computed steady state # - # Find the log-linear approximation around the non-stochastic steady state and solve using .approximate_and_solve() method of rbc_model # ## Impulse Responses # # Compute a 26 period impulse responses of the model's variables to a 0.01 unit shock to TFP in period 5. # + # Compute impulse responses # Print the first 10 rows of the computed impulse responses to the TFP shock # - # Construct a $2\times3$ grid of plots of simulated TFP, output, labor, consumption, investment, and capital. Be sure to multiply simulated values by 100 so that vertical axis units are in "percent deviation from steady state." # + # Create figure. 
PROVIDED fig = plt.figure(figsize=(18,8)) # Create upper-left axis. PROVIDED ax = fig.add_subplot(2,3,1) # Create upper-center axis. PROVIDED ax = fig.add_subplot(2,3,2) # Create upper-right axis. PROVIDED ax = fig.add_subplot(2,3,3) # Create lower-left axis. PROVIDED ax = fig.add_subplot(2,3,4) # Create lower-center axis. PROVIDED ax = fig.add_subplot(2,3,5) # Create lower-right axis. PROVIDED ax = fig.add_subplot(2,3,6)
Lecture Notebooks/Econ126_Class_14_blank.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Dimensionality Reduction using t-SNE # t-Distributed Stochastic Neighbor Embedding (t-SNE) is a non-linear technique for dimensionality reduction that is particularly well suited for the visualization of high-dimensional datasets. t-SNE minimizes the divergence between two distributions: a distribution that measures pairwise similarities of the input objects and a distribution that measures pairwise similarities of the corresponding low-dimensional points in the embedding. # # In this way, t-SNE maps the multi-dimensional data to a lower dimensional space and attempts to find patterns in the data by identifying observed clusters based on similarity of data points with multiple features. However, after this process, the input features are no longer identifiable, and you cannot make any inference based only on the output of t-SNE. Hence it is mainly **a data exploration and visualization technique.** # # We will start by taking the 50 principal components that we created in the earlier post [New York City Airbnb PCA](https://shravan-kuchkula.github.io/nyc-airbnb-pca/), and apply the t-SNE with 3 components which we can use to create a 3D scatter plot of the data points. # ## Get the data # The principal components created earlier are stored in `airbnb_final.csv` file, which we will load in to begin our analysis. 
import pandas as pd

data = pd.read_csv('airbnb_final.csv')
data.head()

# rename the PC columns so they are clearly distinguishable from the metadata columns
pc_col_names = ["pc_" + item for item in list(data.columns[10:])]
other_col_names = list(data.columns[:10])
data.columns = other_col_names + pc_col_names

# ## Apply t-SNE

# +
from sklearn.manifold import TSNE

# extract the 50 principal components
A = data.iloc[:, 10:].values
type(A)

# +
# Dimension reduction with t-SNE; random_state pinned for reproducibility
model = TSNE(n_components=3, learning_rate=100, random_state=42)
tsne_features = model.fit_transform(A)

# Construct a t-SNE dataframe
tsne_df = pd.DataFrame({'TSNE1': tsne_features[:, 0],
                        'TSNE2': tsne_features[:, 1],
                        'TSNE3': tsne_features[:, 2]})
# -

tsne_df.shape

# The `tsne_df` dataframe contains the 3 tsne features for all 45,605 airbnb listings. We can now use this data along with other columns of the airbnb dataset to build a 3D scatterplot.

# +
data_tsne = data[other_col_names]
tsne_final = pd.concat([tsne_df, data_tsne], axis=1)

# save this, as t-SNE takes extremely long to run
tsne_final.to_csv('tsne_final.csv', index=False)
tsne_final.head()
# -

# ## Plotly express to visualize the data

# (duplicate mid-notebook `import pandas as pd` removed; pandas is imported above)
import plotly.express as px

# NOTE(review): this reads '../data/raw/tsne_final.csv' although the cache above
# was written to './tsne_final.csv' -- confirm which location is intended.
tsne_final = pd.read_csv('../data/raw/tsne_final.csv')

plotly_data = tsne_final[(tsne_final.neighbourhood_cleansed == 'Chelsea') &
                         (tsne_final.minimum_nights <= 3) &
                         (tsne_final.bedrooms == 0)]
plotly_data.shape

plotly_data.head()

# +
fig = px.scatter_3d(plotly_data, x='TSNE1', y='TSNE2', z='TSNE3',
                    color='price_category',
                    hover_name='name',
                    hover_data=['price', 'minimum_nights', 'id'],
                    template='plotly_dark',
                    opacity=0.9,
                    title='Visualizing airbnb locations in feature space',
                    labels={'TSNE1': 'X', 'TSNE2': 'Y', 'TSNE3': 'Z'},
                    )
fig.write_html('scatter-3d.html')
# -
notebooks/17_sk_plotly_3d_scatter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Four-strokes in Europe: Analyzing Eurostat vehicle dataset

# ### importing necessary libraries

# +
import pandas as pd
import numpy as np
import os
import datetime
import string
import seaborn as sns
import matplotlib.pyplot as plt
import scipy

# %matplotlib notebook
# %matplotlib inline
# -

# # Pick datasets
# ## source of datasets : https://ec.europa.eu/eurostat/data/database

# ### New car registration by engine types and sizes
vehicle_reg = pd.read_excel(r'road_eqr_carmot.tsv.xlsx', index_col=0)

# ### GDP dataset
countries_nama = pd.read_excel(r'nama_10_gdp.tsv.xlsx', index_col=0)

vehicle_reg.head()

countries_nama.head()

# # Questions
# ## 1) How new car registration evolves over years and are there any observable trends at different countries?
# ## 2) Does Europe say goodbye to diesel engines?
# ## 3) Which engine sizes are the most popular for both diesel and petrol cars?
# ## 4) How the number of new cars correlates to GDP?

# # Prepare data

def preproc_cleaning(character, df, letters, dataColStart):
    """
    Clean one Eurostat frame: stringify the column labels, replace `character`
    (the ':' missing-value marker) with NaN, strip the given letters (Eurostat
    footnote flags attached to numeric cells) and coerce the data columns to
    numeric.

    Parameters
    ----------
    character : str
        Pattern to be replaced with NaN.
    df : DataFrame
        Frame to be processed; the cleaned frame is returned.
    letters : list of str
        Single-letter regex patterns replaced with a space in every cell.
    dataColStart : int
        Index of the first column whose cell values should be numeric.

    Returns
    -------
    DataFrame
    """
    df.columns = df.columns.astype(str)
    df = df.replace(character, np.NaN, regex=True)
    df = df.replace(letters, ' ', regex=True)
    data_cols = df.iloc[:, dataColStart:].columns
    # anything still non-numeric (leftover flags, blanks) becomes NaN
    df[data_cols] = df[data_cols].apply(pd.to_numeric, errors='coerce')
    return df

# stdlib alphabet instead of a hand-typed a..z list
letters = list(string.ascii_lowercase)

vehicle_reg = preproc_cleaning(':', vehicle_reg, letters, 3)
countries_nama = preproc_cleaning(':', countries_nama, letters, 2)

# ### get unique country names, motor types and engine sizes and save them in a list
countries = vehicle_reg['geo\\time'].unique().tolist()
motor_nrgs = vehicle_reg['mot_nrg'].unique().tolist()
engines = vehicle_reg['engine'].unique().tolist()

# ### select all engine types and sizes for each country. Columns showing years show the number of newly registered cars
veh_allEngine_allNrg = vehicle_reg[(vehicle_reg['mot_nrg'] == 'TOTAL') & (vehicle_reg['engine'] == 'TOTAL')]

# ### drop columns and rows having only NaN
veh_allEngine_allNrg_droped = veh_allEngine_allNrg.dropna(axis=1, how='all')
veh_allEngine_allNrg_droped = veh_allEngine_allNrg_droped.dropna(axis=0, how='all')

# ### set country names to index
veh_allEngine_allNrg_droped = veh_allEngine_allNrg_droped.set_index('geo\\time')

# # Analysis and visualisation
# ## show heatmaps for 1st question: How new car registration evolves over years and are there any observable trends at different countries?
def plot_country_year_heatmap(df, title, normalize_rows=False, **heatmap_kwargs):
    """
    Draw a countries (rows) x years (columns) heatmap of new-car registrations.

    The six copy-pasted heatmap cells below were folded into this one helper.

    Parameters
    ----------
    df : DataFrame
        Countries x years numeric frame.
    title : str
        Figure title.
    normalize_rows : bool
        If True, divide every row by its own maximum so each country is scaled
        to [0, 1] (makes per-country trends comparable).
    **heatmap_kwargs
        Extra keyword arguments forwarded to seaborn.heatmap (cmap, vmin, vmax, ...).
    """
    plt.figure(figsize=(14, 8))
    if normalize_rows:
        df = df.div(df.max(axis=1), axis=0)
    sns.heatmap(data=df, annot=False, center=0, linewidths=.0, **heatmap_kwargs)
    plt.xlabel('Years')
    plt.ylabel('Countries')
    plt.title(title, fontsize=16)

plot_country_year_heatmap(veh_allEngine_allNrg_droped.iloc[:, 2:],
                          'New registration of passenger cars -- all engine sizes & all fuel types')

plot_country_year_heatmap(veh_allEngine_allNrg_droped.iloc[:, 2:],
                          'New registration of passenger cars -- all engine sizes & all fuel types \n Normalized by max in rows',
                          normalize_rows=True)

# diesel-only registrations, cleaned the same way as the totals
veh_allEngine_diesel = vehicle_reg[(vehicle_reg['mot_nrg'] == 'DIE') & (vehicle_reg['engine'] == 'TOTAL')]
veh_allEngine_diesel_dropped = veh_allEngine_diesel.dropna(axis=1, how='all')
veh_allEngine_diesel_dropped = veh_allEngine_diesel_dropped.dropna(axis=0, how='all')
veh_allEngine_diesel_dropped = veh_allEngine_diesel_dropped.set_index('geo\\time')

plot_country_year_heatmap(veh_allEngine_diesel_dropped.iloc[:, 2:],
                          'New registration of passenger cars -- all engine sizes & diesel')

plot_country_year_heatmap(veh_allEngine_diesel_dropped.iloc[:, 2:],
                          'New registration of passenger cars -- all engine sizes & diesel \n Normalized by max in rows',
                          normalize_rows=True)

# petrol-only registrations
veh_allEngine_petrol = vehicle_reg[(vehicle_reg['mot_nrg'] == 'PET') & (vehicle_reg['engine'] == 'TOTAL')]
veh_allEngine_petrol_dropped = veh_allEngine_petrol.dropna(axis=1, how='all')
veh_allEngine_petrol_dropped = veh_allEngine_petrol_dropped.dropna(axis=0, how='all')
veh_allEngine_petrol_dropped = veh_allEngine_petrol_dropped.set_index('geo\\time')

plot_country_year_heatmap(veh_allEngine_petrol_dropped.iloc[:, 2:],
                          'New registration of passenger cars -- all engine sizes & petrol')

plot_country_year_heatmap(veh_allEngine_petrol_dropped.iloc[:, 2:],
                          'New registration of passenger cars -- all engine sizes & petrol \n Normalized by max in rows',
                          normalize_rows=True)

# ## show heatmap for 2nd question: Does Europe say goodbye to diesel engines?
# ### petrol/diesel ratio was calculated by dividing the petrol car dataframe with the diesel car dataframe

# +
# silence divide-by-zero / NaN warnings from the elementwise ratio
np.seterr(divide='ignore', invalid='ignore')

veh_allEngine_petrol_diesel_ratio_dropped = veh_allEngine_petrol_dropped.iloc[:, 2:] / veh_allEngine_diesel_dropped.iloc[:, 2:]
# -

plot_country_year_heatmap(veh_allEngine_petrol_diesel_ratio_dropped,
                          'New registration of passenger cars -- petrol-diesel ratio',
                          cmap='Reds', vmin=0, vmax=1)

plot_country_year_heatmap(veh_allEngine_petrol_diesel_ratio_dropped,
                          'New registration of passenger cars -- petrol-diesel ratio',
                          cmap='Reds', vmin=0, vmax=6)

# ## show lineplots for 3rd question: Which engine sizes are the most popular for both diesel and petrol cars?
# per-country slices of the registration data
vehicle_reg_es = vehicle_reg[vehicle_reg['geo\\time'] == 'ES']
vehicle_reg_it = vehicle_reg[vehicle_reg['geo\\time'] == 'IT']
vehicle_reg_ger = vehicle_reg[vehicle_reg['geo\\time'] == 'DE']
vehicle_reg_fin = vehicle_reg[vehicle_reg['geo\\time'] == 'FI']

vehicle_reg_es_die = vehicle_reg_es[vehicle_reg_es["mot_nrg"] == 'DIE']
vehicle_reg_it_die = vehicle_reg_it[vehicle_reg_it["mot_nrg"] == 'DIE']
vehicle_reg_ger_die = vehicle_reg_ger[vehicle_reg_ger["mot_nrg"] == 'DIE']
vehicle_reg_fin_die = vehicle_reg_fin[vehicle_reg_fin["mot_nrg"] == 'DIE']

vehicle_reg_es_pet = vehicle_reg_es[vehicle_reg_es["mot_nrg"] == 'PET']
vehicle_reg_it_pet = vehicle_reg_it[vehicle_reg_it["mot_nrg"] == 'PET']
vehicle_reg_ger_pet = vehicle_reg_ger[vehicle_reg_ger["mot_nrg"] == 'PET']
vehicle_reg_fin_pet = vehicle_reg_fin[vehicle_reg_fin["mot_nrg"] == 'PET']

# ### save dataframes in interest in two lists: diesels and petrols for plots

# +
vehicle_reg_es_die_droped = vehicle_reg_es_die.dropna(axis=1, how='all')
vehicle_reg_it_die_droped = vehicle_reg_it_die.dropna(axis=1, how='all')
vehicle_reg_ger_die_droped = vehicle_reg_ger_die.dropna(axis=1, how='all')
vehicle_reg_fin_die_droped = vehicle_reg_fin_die.dropna(axis=1, how='all')

# NOTE(review): the Spanish (ES) frames are prepared but never appended, so
# Spain is excluded from the plots below -- confirm whether that is intentional.
diesels = []
diesels.append(vehicle_reg_it_die_droped)
diesels.append(vehicle_reg_ger_die_droped)
diesels.append(vehicle_reg_fin_die_droped)

# +
vehicle_reg_es_pet_droped = vehicle_reg_es_pet.dropna(axis=1, how='all')
vehicle_reg_it_pet_droped = vehicle_reg_it_pet.dropna(axis=1, how='all')
vehicle_reg_ger_pet_droped = vehicle_reg_ger_pet.dropna(axis=1, how='all')
vehicle_reg_fin_pet_droped = vehicle_reg_fin_pet.dropna(axis=1, how='all')

petrols = []
petrols.append(vehicle_reg_it_pet_droped)
petrols.append(vehicle_reg_ger_pet_droped)
petrols.append(vehicle_reg_fin_pet_droped)
# -

def plot_engine_size_lines(frames, fuel_label):
    """
    For each per-country frame, draw one line per engine-size row (yearly
    registrations) and label the lines "<engine> <fuel> <country>".

    This replaces two near-identical loops; the diesel version additionally
    had two bugs which are fixed here: it never assigned `ax` (so it labeled
    a stale axis from the petrol loop) and its title used the petrol frame's
    country instead of its own.

    NOTE(review): range(0, len(df) - 1) skips the frame's last row, mirroring
    the original loops -- presumably to drop an aggregate row; confirm.
    """
    for df in frames:
        ax = None
        for i in range(0, len(df) - 1):
            ax = df.iloc[i, 3:].plot()
        plt.legend(["{} {} {}".format(df.iloc[r, 0], df.iloc[r, 1], df.iloc[r, 2]) for r in range(3)],
                   loc='upper left')
        ax.set_xlabel('Years')
        ax.set_ylabel('Cars sold')
        ax.set_title('Registration of {} cars by engine size in {}'.format(fuel_label, df.iloc[0, 2]))
        plt.show()

plot_engine_size_lines(petrols, 'petrol')

plot_engine_size_lines(diesels, 'diesel')

# ## show correlation matrices for 4th question: How the number of new cars correlates to GDP?
#
# ### na_item=B1GQ --> Gross domestic product at market prices

countries_gdp = countries_nama[countries_nama['na_item'] == 'B1GQ']
countries_gdp

countries_gdp = countries_gdp.rename_axis('unit').reset_index()
countries_gdp

# ### get GDP at current price
countries_gdp_meuro = countries_gdp[countries_gdp['unit'] == 'CP_MEUR']
countries_gdp_meuro

countries_gdp_meuro_cropped = countries_gdp_meuro.iloc[:, 2:]

countries_gdp_meuro_cropped_reindexed = countries_gdp_meuro_cropped.set_index('geo\\time')

countries_gdp_meuro_cropped_reindexed_transposed = countries_gdp_meuro_cropped_reindexed.transpose()

countries_gdp_meuro_cropped_reindexed_transposed

# ### Exclude columns that are summarizing GDPs
countries_gdp_meuro_cropped_reindexed_transposed = countries_gdp_meuro_cropped_reindexed_transposed.drop(
    columns=['EU27_2020', 'EU28', 'EU15', 'EA', 'EA19', 'EA12'])

plt.figure(figsize=(20, 8))
ax = countries_gdp_meuro_cropped_reindexed_transposed.iloc[:10, :].boxplot(
    column=countries_gdp_meuro_cropped_reindexed_transposed.columns.tolist())
plt.title('GDP of countries (last 10 years)')
ax.set_ylabel('GDP (M EUR)')
ax.set_xlabel('Countries')
plt.show()

countries_gdp_meuro_cropped_reindexed_transposed_DE = countries_gdp_meuro_cropped_reindexed_transposed[['DE']]
# Single-country GDP series for Italy and Finland (Germany was built above).
countries_gdp_meuro_cropped_reindexed_transposed_IT = countries_gdp_meuro_cropped_reindexed_transposed[['IT']]
countries_gdp_meuro_cropped_reindexed_transposed_FI = countries_gdp_meuro_cropped_reindexed_transposed[['FI']]

# --- Italy: petrol registrations vs GDP -----------------------------------
# Reshape the Italian petrol registrations so rows are years (the 'index'
# column after reset_index) and columns are engine sizes, then join with the
# GDP series on the year and plot the correlation matrix.
vehicle_reg_it_pet_droped_transposed = vehicle_reg_it_pet_droped.drop(columns=['mot_nrg','geo\\time']).set_index('engine').transpose()
vehicle_reg_it_pet_droped_transposed.reset_index(inplace=True)
countries_gdp_meuro_cropped_reindexed_transposed_IT.reset_index(inplace=True)
merged_vehicle_gdp_IT = vehicle_reg_it_pet_droped_transposed.merge(countries_gdp_meuro_cropped_reindexed_transposed_IT,on='index')
merged_vehicle_gdp_IT_corr=merged_vehicle_gdp_IT.corr()
# Mask the upper triangle so each pairwise correlation is shown only once.
matrix = np.triu(merged_vehicle_gdp_IT_corr)
ax=sns.heatmap(merged_vehicle_gdp_IT_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different petrol engine size in Italy')
plt.show()

# +
# Same correlation restricted to the first 10 rows (last 10 years).
vehicle_reg_it_pet_droped_transposed_first10=vehicle_reg_it_pet_droped_transposed.iloc[:10,:]
merged_vehicle_gdp_IT = vehicle_reg_it_pet_droped_transposed_first10.merge(countries_gdp_meuro_cropped_reindexed_transposed_IT,on='index')
merged_vehicle_gdp_IT_corr=merged_vehicle_gdp_IT.corr()
matrix = np.triu(merged_vehicle_gdp_IT_corr)
ax=sns.heatmap(merged_vehicle_gdp_IT_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different petrol engine size in Italy -- last 10 years')
plt.show()
# -

# --- Germany: petrol registrations vs GDP ---------------------------------
vehicle_reg_ger_pet_droped_transposed = vehicle_reg_ger_pet_droped.drop(columns=['mot_nrg','geo\\time']).set_index('engine').transpose()
vehicle_reg_ger_pet_droped_transposed.reset_index(inplace=True)
countries_gdp_meuro_cropped_reindexed_transposed_DE.reset_index(inplace=True)
merged_vehicle_gdp_DE = vehicle_reg_ger_pet_droped_transposed.merge(countries_gdp_meuro_cropped_reindexed_transposed_DE,on='index')
merged_vehicle_gdp_DE_corr=merged_vehicle_gdp_DE.corr()
matrix = np.triu(merged_vehicle_gdp_DE_corr)
sns.heatmap(merged_vehicle_gdp_DE_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different petrol engine size in Germany')
plt.show()

# Germany, last 10 years only.
vehicle_reg_ger_pet_droped_transposed_first10 = vehicle_reg_ger_pet_droped_transposed.iloc[:10,:]
merged_vehicle_gdp_DE = vehicle_reg_ger_pet_droped_transposed_first10.merge(countries_gdp_meuro_cropped_reindexed_transposed_DE,on='index')
merged_vehicle_gdp_DE_corr=merged_vehicle_gdp_DE.corr()
matrix = np.triu(merged_vehicle_gdp_DE_corr)
sns.heatmap(merged_vehicle_gdp_DE_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different petrol engine size in Germany -- last 10 years')
plt.show()

# --- Finland: petrol registrations vs GDP ---------------------------------
vehicle_reg_fin_pet_droped_transposed = vehicle_reg_fin_pet_droped.drop(columns=['mot_nrg','geo\\time']).set_index('engine').transpose()
vehicle_reg_fin_pet_droped_transposed.reset_index(inplace=True)
countries_gdp_meuro_cropped_reindexed_transposed_FI.reset_index(inplace=True)
merged_vehicle_gdp_FI = vehicle_reg_fin_pet_droped_transposed.merge(countries_gdp_meuro_cropped_reindexed_transposed_FI,on='index')
merged_vehicle_gdp_FI_corr=merged_vehicle_gdp_FI.corr()
matrix = np.triu(merged_vehicle_gdp_FI_corr)
sns.heatmap(merged_vehicle_gdp_FI_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different petrol engine size in Finland')
plt.show()

# Finland, last 10 years only.
vehicle_reg_fin_pet_droped_transposed_first10 = vehicle_reg_fin_pet_droped_transposed.iloc[:10,:]
merged_vehicle_gdp_FI = vehicle_reg_fin_pet_droped_transposed_first10.merge(countries_gdp_meuro_cropped_reindexed_transposed_FI,on='index')
merged_vehicle_gdp_FI_corr=merged_vehicle_gdp_FI.corr()
matrix = np.triu(merged_vehicle_gdp_FI_corr)
sns.heatmap(merged_vehicle_gdp_FI_corr, annot=True, mask=matrix)
# FIX: this is the 10-year variant but its title was identical to the
# full-period Finland plot above; append the "-- last 10 years" suffix for
# consistency with the Italy and Germany plots.
plt.title('Correlation between GDP and vehicles with different petrol engine size in Finland -- last 10 years')
plt.show()

# --- Germany: diesel registrations (reshaped here, analysed in the next cell)
vehicle_reg_ger_die_droped_transposed = vehicle_reg_ger_die_droped.drop(columns=['mot_nrg','geo\\time']).set_index('engine').transpose()
vehicle_reg_ger_die_droped_transposed.reset_index(inplace=True)

# --- Germany: diesel registrations vs GDP ---------------------------------
# Join the reshaped diesel registrations with the GDP series on the year and
# plot the correlation matrix (upper triangle masked to avoid duplicates).
merged_vehicle_gdp_DE = vehicle_reg_ger_die_droped_transposed.merge(countries_gdp_meuro_cropped_reindexed_transposed_DE,on='index')
merged_vehicle_gdp_DE_corr=merged_vehicle_gdp_DE.corr()
matrix = np.triu(merged_vehicle_gdp_DE_corr)
sns.heatmap(merged_vehicle_gdp_DE_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different diesel engine size in Germany')
plt.show()

# --- Finland: diesel registrations vs GDP ---------------------------------
vehicle_reg_fin_die_droped_transposed = vehicle_reg_fin_die_droped.drop(columns=['mot_nrg','geo\\time']).set_index('engine').transpose()
vehicle_reg_fin_die_droped_transposed.reset_index(inplace=True)
merged_vehicle_gdp_FI = vehicle_reg_fin_die_droped_transposed.merge(countries_gdp_meuro_cropped_reindexed_transposed_FI,on='index')
merged_vehicle_gdp_FI_corr=merged_vehicle_gdp_FI.corr()
matrix = np.triu(merged_vehicle_gdp_FI_corr)
sns.heatmap(merged_vehicle_gdp_FI_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different diesel engine size in Finland')
plt.show()

# --- Italy: diesel registrations vs GDP -----------------------------------
vehicle_reg_it_die_droped_transposed = vehicle_reg_it_die_droped.drop(columns=['mot_nrg','geo\\time']).set_index('engine').transpose()
vehicle_reg_it_die_droped_transposed.reset_index(inplace=True)
merged_vehicle_gdp_IT = vehicle_reg_it_die_droped_transposed.merge(countries_gdp_meuro_cropped_reindexed_transposed_IT,on='index')
merged_vehicle_gdp_IT_corr=merged_vehicle_gdp_IT.corr()
matrix = np.triu(merged_vehicle_gdp_IT_corr)
ax=sns.heatmap(merged_vehicle_gdp_IT_corr, annot=True, mask=matrix)
plt.title('Correlation between GDP and vehicles with different diesel engine size in Italy')
plt.show()

# GDP time series, one figure per country.
ax=countries_gdp_meuro_cropped_reindexed_transposed_FI.plot(x='index',y="FI")
ax.set_xlabel('Years')
# FIX: the y-axis label read 'GDGP (M EUR)'; corrected the typo to 'GDP' to
# match the German and Italian plots below.
ax.set_ylabel('GDP (M EUR)')
plt.show()

ax=countries_gdp_meuro_cropped_reindexed_transposed_DE.plot(x='index',y="DE")
ax.set_xlabel('Years')
ax.set_ylabel('GDP (M EUR)')
plt.show()
# Italy's GDP series on its own axes.
ax=countries_gdp_meuro_cropped_reindexed_transposed_IT.plot(x='index',y="IT")
ax.set_xlabel('Years')
ax.set_ylabel('GDP (M EUR)')
plt.show()

# NOTE(review): each DataFrame.plot() call below creates its own figure, so
# this does not overlay the three countries -- plt.show() just flushes three
# separate unlabeled plots. The subplot cell below is the intended comparison.
countries_gdp_meuro_cropped_reindexed_transposed_FI.plot(x='index',y="FI")
countries_gdp_meuro_cropped_reindexed_transposed_DE.plot(x='index',y="DE")
countries_gdp_meuro_cropped_reindexed_transposed_IT.plot(x='index',y="IT")
plt.show()

# +
# Stacked comparison of the three GDP series sharing the year axis.
fig, axs = plt.subplots(3,sharex=True,figsize=(14, 8));
fig.suptitle('GDP of Finland, Germany and Italy over years (from top to bottom)');
axs[0].plot(countries_gdp_meuro_cropped_reindexed_transposed_FI['index'],countries_gdp_meuro_cropped_reindexed_transposed_FI["FI"])
axs[1].plot(countries_gdp_meuro_cropped_reindexed_transposed_DE['index'],countries_gdp_meuro_cropped_reindexed_transposed_DE["DE"])
axs[2].plot(countries_gdp_meuro_cropped_reindexed_transposed_IT['index'],countries_gdp_meuro_cropped_reindexed_transposed_IT["IT"])
# Rotate the year labels on the shared (bottom) axis for readability.
plt.xticks(rotation=45);
# -

# # Communicate your business insights

# ### We saw that selling new cars in European countries change dynamically depending on the economic situation.
# ### Diesel passenger cars were very popular in many countries from the early 2000's until circa 2014/2015.
# ### After that in almost all countries petrol cars gained back its priority
# ### (also hybrid and electric cars started to be sold significantly everywhere).
# ### In Italy, Germany and Finland the number of new petrol cars with engine size between 1400–1999 cc are rising rapidly.
# ### In Finland petrol cars with large engines (above 2000 cc) are also getting more and more popular.
# ### New car registration has strong positive correlation with GDP.
Udacity_Data_Scientist_Nanodegree_project1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Computing Word Frequency for Brand Perception Interviews # amongst key stakeholders NYCAASC # #### Importing Libraries import re from nltk import word_tokenize, pos_tag import math import string from collections import Counter import pandas as pd # only need to run once nltk.download() # #### Reading in Data df = pd.read_csv("/Users/chuamelia/Downloads/Brand_IDI_Qual.tsv",sep="\t") # Looking at Structure of Data df.head(1) # #### Prepare Functions for Analysis # [Table of Word Types][Table of Word Type] # [Table of Word Type]: http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html #Write function to append all tokens to one list. def stuff_tokenizer(column, list): discard = ['IN', ',','.','TO', 'DT', 'PRP', 'CC', 'are', 'is', 'um', u'it\u2019s', 'PRP$'] end_num = len(column) temp = [] for i in range(end_num): #append to one list temp.extend(word_tokenize(column[i].decode('utf-8'))) temp2 = pos_tag(temp) #tag words for i in temp2: #discard prepositions, articles, etc. if i[1] not in discard and i[0] not in discard: list.append(i) # + #add decode('utf-8') bc "\xe2\x80\x99" interprtation #ascii' codec can't decode byte #example: df['first_Time'][0] # - # Create empty lists for stuffing. q1a = [] q1b = [] q2 = [] q3 = [] q4 = [] q5 = [] # Stuff Away! stuff_tokenizer(df['a_position_toNYCAASC'],q1a) stuff_tokenizer(df['b_position_toNYCAASC'],q1b) stuff_tokenizer(df['embody_Mission'],q2) stuff_tokenizer(df['future_Direction'],q3) stuff_tokenizer(df['first_Time'],q4) stuff_tokenizer(df['logo'],q5) # Checking if stuffing worked... print q1a[:3] dataset = [q1a,q1b,q2,q3,q4,q5] for i in dataset: common = Counter(i) print common.most_common(5) # Need to remove prepositions # How can we control for one person repeating the same word? 
# select distinct words: my_list = list(set(my_list)) # compare word counts Counter(q4).most_common() cmn = Counter(q4) cmn = {k: v for k, v in cmn.iteritems() if v > 10} cmn # #### References # # - <NAME> [Everything I Need to Know About NLP I learned from Sesame Street][a1] # # # [a1]: http://nbviewer.jupyter.org/url/norvig.com/ipython/How%20to%20Do%20Things%20with%20Words.ipynb
research/brand-IDI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:root] * # language: python # name: conda-root-py # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import json from annoy import AnnoyIndex from mpl_toolkits.mplot3d import Axes3D from sklearn import decomposition from pathlib import Path from transformers import pipeline from tqdm import tqdm # # this part is to use a existing model to test on our data set def jsonl_list_to_dataframe(file_list, columns=['language', 'docstring', 'code']): """Load a list of jsonl.gz files into a pandas DataFrame.""" return pd.concat([pd.read_json(f, orient='records', compression='gzip', lines=True)[columns] for f in file_list], sort=False) def get_dfs(path, splits = ["train", "valid", "test"]): """Grabs the different data splits and converts them into dataframes""" dfs = [] for split in ["train", "valid", "test"]: files = sorted((path/split).glob("**/*.gz")) df = jsonl_list_to_dataframe(files) dfs.append(df) return dfs path = Path('/mnt/sdc/zheng/sde/data_bert') java_df = get_dfs(path/"java/final/jsonl", ["valid"])[0] python_df = get_dfs(path/"python/final/jsonl", ["valid"])[0] python_df.head() langs_df = pd.concat([java_df, python_df]).sample(frac = 0.001) feature_extractor = pipeline( "feature-extraction", model="huggingface/CodeBERTa-small-v1", tokenizer="huggingface/CodeBERTa-small-v1" ) len(langs_df) fill_mask = pipeline( "fill-mask", model="huggingface/CodeBERTa-small-v1", tokenizer="huggingface/CodeBERTa-small-v1" ) # + # The sun <mask>. # => fill_mask("private int drain(byte[] b, int off, int len) {\n int remaining = Math.min(len, byteBuffer.remaining());\n byteBuffer.put(b, off, remaining);\n <mask> remaining;\n }") # + fill_mask("This is a <mask>.") # This is the beginning of a beautiful <mask>. 
# =>
# -

def get_features(methods, extractor):
    """Return one feature vector per code snippet in `methods`.

    Each snippet's token embeddings (extractor output, first sequence) are
    mean-pooled over the token axis into a single vector.
    """
    method_features = []
    for method in tqdm(methods):
        features = np.mean(extractor(method)[0], axis = 0)
        method_features.append(features)
    return method_features

X = get_features(langs_df.code.values, feature_extractor)
y = langs_df.language.values
np.shape(X)

# Encode the language labels numerically: java -> 0, python -> 1.
for i, lang in enumerate(y):
    if lang == 'java':
        y[i] = 0
    elif lang == 'python':
        y[i] = 1
y = y.astype(np.int32)

# # %matplotlib inline

# +
# 3-D PCA projection of the pooled embeddings, coloured by language.
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=64, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
# NOTE(review): X is overwritten by its projection here, so the original
# feature matrix is lost for later cells.
X = pca.transform(X)
for name, label in [('java', 0), ('python', 1)]:
    ax.text3D(X[y == label, 0].mean(), X[y == label, 1].mean() + 1.5, X[y == label, 2].mean(), name, horizontalalignment='center', bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
# y = np.choose(y, [0, 1]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.rainbow, edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()

# +
import pylab as pl
from sklearn.manifold import TSNE
import copy

# NOTE(review): `X_O` is never defined in this notebook -- this cell raises a
# NameError; presumably the original features should have been saved as X_O
# before the PCA cell overwrote X.
X = copy.deepcopy(X_O)
X = TSNE(n_components=2, random_state=None,).fit_transform(X)
# Reorder the labels to have colors matching the cluster results
# y = np.choose(y, [0, 1]).astype(np.float)
pl.figure()
for name, label in [('java', 0), ('python', 1)]:
    pl.scatter(X[y==label,0], X[y==label,1], label=name)
pl.legend(['java','python'])
pl.title('2d t-SNE of python and java')
pl.show()
# -

# # In this part, we train a BPE on our own first, then train the model

# In this part, we train a BPE on our own first, then train the model

path = Path('/mnt/sdc/zheng/sde/data_bert/java')
java_df = get_dfs(path/"final/jsonl", ["train"])
java_df_val = get_dfs(path/"final/jsonl", ["valid"])
java_df_test = get_dfs(path/"final/jsonl", ["test"])
print(len(java_df), len(java_df_val), len(java_df_test))

paths = [str(x) for x in Path("/mnt/sdc/zheng/sde/data/transformer/data").glob("**/train.txt")]
paths

# +
from tokenizers import ByteLevelBPETokenizer

# Initialize a tokenizer
import time

# Train byte-level BPE vocabularies of several sizes and time each run.
for size in [2000, 5000, 10000, 20000]:
    os.makedirs('./'+str(size), exist_ok = True)
    start = time.time()
    tokenizer = ByteLevelBPETokenizer()
    # Customize training
    tokenizer.train(files=paths, vocab_size=size, min_frequency=2, special_tokens=[
        "<s>",
        "<pad>",
        "</s>",
        "<unk>",
        "<mask>",
    ])
    end = time.time()
    print("Test --- %s seconds ---" % (end - start))
    # Save files to disk
    tokenizer.save('./'+str(size), "esperberto_"+str(size)+"size")
# -

size = 10000
vocab_json =json.load(open('/home/zheng/sde_course/hugg/'+str(size)+'/esperberto_'+str(size)+'size-vocab.json','r'))
vocab_json

# choose 1% data to train a small model
paths = [str(x) for x in Path("/mnt/sdc/zheng/sde/data/transformer/data").glob("**/*.txt")]

# Write a 1% sample of each split's code to a text slice file.
# NOTE(review): the files are opened in append mode and never closed, so
# re-running this cell grows the slices and buffered data may be lost.
name_dict = {0:'train_slice_001.txt',1:'valid_slice_001.txt',2:'test_slice_001.txt'}
for i in range(3):
    print_df = java_df[i].sample(frac = 0.01)
    print(print_df.shape)
    f = open('/home/zheng/'+name_dict[i],'a')
    for code_item in print_df['code']:
        print(code_item,file=f)

print_df.columns

# NOTE(review): `langs_df_val` and `langs_df_test` are never defined in this
# notebook -- this line raises a NameError.
print(langs_df.shape, langs_df_val.shape, langs_df_test.shape)

# NOTE(review): iterating a DataFrame yields column names (strings), so
# item['code'] here would fail -- presumably .iterrows() was intended.
for item in langs_df:
    print(item['code'], file=f)

langs_df['code'].iloc[0]

# +
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing

# Reload the trained BPE and add BERT-style <s> ... </s> post-processing.
tokenizer = ByteLevelBPETokenizer(
    '/home/zheng/sde_course/hugg/'+str(size)+'/esperberto_'+str(size)+'size-vocab.json',
    '/home/zheng/sde_course/hugg/'+str(size)+'/esperberto_'+str(size)+'size-merges.txt',
)
tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)

print(
    tokenizer.encode("Mi estas Julien.")
)
# Encoding(num_tokens=7, ...)
# tokens: ['<s>', 'Mi', 'Ġestas', 'ĠJuli', 'en', '.', '</s>'] # - tokenizer tokenizer
notebooks/zheng/train_BPE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: fastaidev # language: python # name: fastaidev # --- import sys sys.path.append('..') from fastai_resnet_audio.model import * from fastai_resnet_audio.data import * from fastai.vision.all import * # # fastai_resnet_audio tutorial # # > Tutorial for fastai-resnet-audio - Dataset used: https://github.com/earthspecies/open_collaboration_on_audio_classification/blob/master/introduction.ipynb path = untar_data('https://storage.googleapis.com/ml-animal-sounds-datasets/macaques_24414Hz.zip') path # ## DataLoaders # # Create DataBlock and DataLoaders with AudioBlock and AudioTransforms # + length = 0.5 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") dblocks = DataBlock(blocks = (AudioBlock,CategoryBlock), get_items=get_files, splitter=RandomSplitter(seed=42), get_y=parent_label, item_tfms=[AudioRandomCrop(length=length), AudioFixLength(length=length), ], batch_tfms=[AudioAddNoise(device=device)] ) dls=dblocks.dataloaders(path, bs=128) # - dls.one_batch() # ## Model # # Create **model configuration** - available configurations are resnet1d18 and resnet1d34. # # You have to adopt the **num_classes** parameter according to the number of classes of your dataset (8 classes for this dataset). config = resnet1d18 config['num_classes'] = 8 config # Create **model** using config model = ResNetAudio(**config) model # ## Learner + Training # # Creating the **learner and trainig** the model is straight forward. 
# Train: find a learning rate, then run a 5-epoch one-cycle schedule.
learn = Learner(dls, model, metrics=accuracy, cbs=ShowGraphCallback())
learn.lr_find()
learn.unfreeze()
learn.fit_one_cycle(5, 1e-3)

# ## Fine-Tune pretrained model on different dataset
#
# **Steps**
# - create DataLoaders
# - create model with same config (num_classes) as the pretrained model
# - create learner
# - load pretrained model weights with learn.load("pretrained.pth")
# - call **replace_head** with num_classes=number classes new dataset
#
# Lets pretend the macaques dataset had 20 instead of 8 classes

path = untar_data('https://storage.googleapis.com/ml-animal-sounds-datasets/macaques_24414Hz.zip')
path

# +
# Same pipeline as above: 0.5 s clips, random crop + fixed length, noise aug.
length = 0.5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

dblocks = DataBlock(blocks = (AudioBlock,CategoryBlock),
                    get_items=get_files,
                    splitter=RandomSplitter(seed=42),
                    get_y=parent_label,
                    item_tfms=[AudioRandomCrop(length=length),
                               AudioFixLength(length=length),
                               ],
                    batch_tfms=[AudioAddNoise(device=device)]
                    )

dls=dblocks.dataloaders(path, bs=128)
# -

# Model was pretrained on dataset with 8 classes, so create config with 8 classes to load the pretrianed model.

# NOTE(review): this reuses the already-trained `model` object from above and
# mutates the shared resnet1d18 dict again; also the checkpoint name here is
# "saved_model" while the markdown above says "pretrained.pth" -- confirm
# which checkpoint is intended.
config = resnet1d18
config['num_classes'] = 8

learn = Learner(dls, model, metrics=accuracy, cbs=ShowGraphCallback())
learn.load("saved_model")

# After loading the pretrained weights we can **replace the last linear layer**. In this example for a dataset with **20 classes**.

replace_head(learn.model, num_classes=20)
model[-1][-1]
tutorial/tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ikiskin/OpenAPIReviews/blob/master/NeurIPSOpenReviewDatasetAndBenchmarksRound2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="lmRSVnqlek_m" colab={"base_uri": "https://localhost:8080/", "height": 975} outputId="02a952f2-aede-4383-98fb-ab2f647d26a6" # !pip install openreview-py pandas matplotlib seaborn # + id="_qmSij2me5bX" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="cbea0cdf-3c4b-4934-8c4a-10ce2de2326a" # Code heavily borrowed from openreview-py's examples: https://openreview-py.readthedocs.io/en/latest/examples.html from collections import defaultdict, deque import openreview import io import os import json import statistics import seaborn as sns import matplotlib.pyplot as plt import pandas as pd guest_client = openreview.Client(baseurl='https://api.openreview.net') submissions = openreview.tools.iterget_notes( guest_client, invitation='NeurIPS.cc/2021/Track/Datasets_and_Benchmarks/Round2/-/Submission') submissions_by_forum = {n.forum: n for n in submissions} print('getting metadata...') # print(submissions_by_forum) # There should be 3 reviews per forum. reviews = openreview.tools.iterget_notes( guest_client, invitation='NeurIPS.cc/2021/Track/Datasets_and_Benchmarks/Round2/Paper.*/-/Official_Review') reviews_by_forum = defaultdict(list) for review in reviews: reviews_by_forum[review.forum].append(review) # Build a list of metadata. 
# For every paper (forum), get the review ratings
metadata = []
means, medians, all_scores = [], [], []
for forum in submissions_by_forum:
    forum_reviews = reviews_by_forum[forum]
    review_ratings = [n.content['rating'] for n in forum_reviews]
    # Ratings look like "7: ..."; keep only the numeric prefix before ':'.
    review_scores = []
    for score in review_ratings:
        idx = score.find(':')
        review_scores.append(int(score[:idx]))
    for s in review_scores:
        all_scores.append(s)
    mean = statistics.mean(review_scores)
    median = statistics.median(review_scores)
    means.append(mean)
    medians.append(median)
    forum_metadata = {
        'forum': forum,
        'url': 'https://openreview.net/forum?id=' + forum,
        'title': submissions_by_forum[forum].content['title'],
        'scores': review_scores,
        'avg_score': mean
    }
    metadata.append(forum_metadata)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
df = pd.DataFrame(metadata)
df = df.sort_values(by=['avg_score'], ascending=False)

print('Mean: ', statistics.mean(means))
print('Medians: ', statistics.mean(medians))

# NOTE(review): sns.distplot is deprecated in newer seaborn releases
# (histplot/displot are the replacements).
sns.distplot(means, kde=False, color='red').set_title('Distribution of Average Scores')
sns.despine()

# + colab={"base_uri": "https://localhost:8080/"} id="XY0cpG2zLacR" outputId="9ccb81ce-f1ae-4266-fe60-56563928216b"
# NOTE(review): `submissions` is an iterator that was already consumed when
# building submissions_by_forum above, so this rebuild may produce an empty
# dict on a straight top-to-bottom run -- verify.
submissions_by_forum = {n.forum: n for n in submissions}
# print('getting metadata...')
print(submissions_by_forum)

# + id="hDkBUogSfKAy"
def make_clickable(val):
    """Wrap a URL string in an HTML anchor for pandas Styler rendering."""
    # target _blank to open new window
    return '<a target="_blank" href="{}">{}</a>'.format(val, val)

# NOTE(review): both Styler results below are discarded (not assigned or
# displayed), so they have no effect on the frame shown later.
df.style.format({'url': make_clickable})
df.style.hide_index()
df = df.drop(columns=['forum'])
df = df.round(2)

# + [markdown] id="77fP4Hi_6USH"
# ### Sorted List of Papers

# + id="cko0RXj2fcBq" colab={"base_uri": "https://localhost:8080/"} outputId="82777030-9718-442b-f85f-a88244f0caea"
print(df.to_string())

# + id="_2nmWgXdh02g"


# + id="YlqkUgdb7M30" colab={"base_uri": "https://localhost:8080/"} outputId="c50c2e80-96e4-4b02-f3d9-f144e475a41f"
len(df)

# + id="B2zjDCwR7831" colab={"base_uri": "https://localhost:8080/"} outputId="c240ab58-0c95-4118-c3a0-809fc3d39dbb"
# Position of the paper with index label 48 in the sorted ranking.
df.index.get_loc(48)

# + id="QQrPrOZquPuK" colab={"base_uri": "https://localhost:8080/"} outputId="04fa3ae4-4b33-4be9-ed1b-fa0084fe46ea"
# Quick scratch ratio (presumably rank / total papers -- context unclear).
89/154

# + id="7WrCPDjjC8rY"
NeurIPSOpenReviewDatasetAndBenchmarksRound2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf

# Scratch cell: demonstrate tf.gather_nd under TF1-style eager execution.
tf.enable_eager_execution()

# A random 4x10 matrix to index into.
a = tf.random.uniform((4, 10))
a

# Stacking the row-index and column-index lists along axis=1 produces one
# (row, col) pair per output element, so this gathers the four scalars at
# positions (0,4), (1,5), (2,6) and (3,7).
row_ids = [0, 1, 2, 3]
col_ids = [4, 5, 6, 7]
tf.gather_nd(a, tf.stack([row_ids, col_ids], axis=1))
Untitled.ipynb