code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="30a1e2a3-b20e-4334-bc24-ead09d165447" _uuid="65c914bac4b79e4a8b86bbd161030efe84d16786" import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sw = pd.read_csv('../input/Steven Wilson.csv') # <NAME> pt = pd.read_csv('../input/Porcupine Tree.csv', nrows=len(sw)) # Porcupine Tree # remove useless columns ignore = ['analysis_url', 'id', 'track_href', 'uri', 'type', 'album', 'name', 'artist', 'lyrics'] sw.drop(ignore, axis=1, inplace=True) pt.drop(ignore, axis=1, inplace=True) sw.describe() # + _cell_guid="61013891-5681-4339-aeea-8303820abdfa" _uuid="4e05184403a8d08693405fc67203d27eb52cb9a1" # custom color palette red_blue = ['#19B5FE', '#EF4836'] palette = sns.color_palette(red_blue) sns.set_palette(palette) # + _cell_guid="26257f43-9784-4f7f-b3a1-c580a8c72fa1" _uuid="761232590122513e713157743c737448dd084fa4" # let's compare the songs of SW and PT using histograms fig = plt.figure(figsize=(15,15)) for i, feature in enumerate(sw): ax = plt.subplot(4,4,i+1) ax.set_title(feature) sw[feature].hist(alpha=0.7, label='<NAME>') pt[feature].hist(alpha=0.7, label='Porcupine Tree') plt.legend(loc='upper right') # + [markdown] _cell_guid="baaa640b-91a1-4ec2-a23d-b40587a72727" _uuid="c75b895f11e165f5684854f50f3eddb1e7b7bc46" # For full documentation on these features, go [here](https://developer.spotify.com/web-api/get-audio-features/). # # The reason I compared these two artists is because <NAME> used to be a member of Porcupine Tree, but he moved on and is now doing solo albums. However, their sound, style and genre are very similar. That's because <NAME> was Porcupine Tree's frontman, main writer and producer. He's really good at music production and his work is worshiped in the progressive rock scene. 
# # Anyways, I want to use a machine learning algorithm (specifically, a classifier) that tells me if a song is similar Wilson's style. So in order to build the classifier I have to train it with Wilson's songs. So, **The initial question was...** *should I use Porcupine Tree's songs as training data? * # # Apparently, yes. # # There are a few differences though. <NAME> tends to be sadder (see *valence*), less energetic (see *energy*) and quieter (see *loudness*). Porcupine Tree used to be a little more noisy and energetic, I guess you could call them more "danceable". # # Also, there are some tempo differences... PT is slower than SW. That's because early PT was more experimental. They had more psychodelic tracks, you know: ambient noises, echoes, electronics, people talking (see *speechiness*)... no instruments or lyrics at all. On the other hand, <NAME>'s solo work is more traditional, he has more classic influences like *King Crimson* and *Yes*. That's why most of his tempos fall in the safe-120-area. Some even say that he just follows the "progressive rock blueprints", but let's not deviate... # # These differences are not *that* significant though, it's normal for some songs to be kind of different from each other. Imagine an album where all the songs have the same valence, tempo and energy... that'd be very boring. # # There are major similarities between these two artists. Both are kind of sad and use chords in similar ways, i.e. minor and major chords (see *mode*). They like to use voiceovers in their songs (see *speechiness*), both follow the 4/4 "standard" measure. (see *time signature*). Both use the same musical notes in similar ways (see *key*). Finally, they are instrumentally related and you can *kind of* dance to both PT and SW.
notebooks/Comparing Steven Wilson and Porcupine Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Random Forest # + import numpy as np import pandas as pd from sklearn.model_selection import train_test_split,KFold from sklearn.utils import shuffle from sklearn.metrics import confusion_matrix,accuracy_score,precision_score,\ recall_score,roc_curve,auc #import expectation_reflection as ER #from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier #from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import GridSearchCV import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import MinMaxScaler from function import split_train_test,make_data_balance # - np.random.seed(1) # First of all, the processed data are imported. # + #data_list = ['1paradox'] #data_list = ['29parkinson','30paradox2','31renal','32patientcare','33svr','34newt','35pcos'] data_list = np.loadtxt('data_list.txt',dtype='str') print(data_list) # - def read_data(data_id): data_name = data_list[data_id] print('data_name:',data_name) #Xy = np.loadtxt('%s/data_processed.dat'%data_name) Xy = np.loadtxt('../data/%s/data_processed.dat'%data_name) X = Xy[:,:-1] y = Xy[:,-1] #print(np.unique(y,return_counts=True)) X,y = make_data_balance(X,y) print(np.unique(y,return_counts=True)) X, y = shuffle(X, y, random_state=1) X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.5,random_state = 1) sc = MinMaxScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) return X_train,X_test,y_train,y_test def measure_performance(X_train,X_test,y_train,y_test): model = RandomForestClassifier() # Number of trees in random forest #n_estimators = [int(x) for x in np.linspace(start = 10, stop = 100, num = 10)] n_estimators = [10,50,100] # Number of features to consider at 
every split max_features = ['auto'] # Maximum number of levels in tree #max_depth = [int(x) for x in np.linspace(1, 10, num = 10)] max_depth = [2,4,6,8,10] #max_depth.append(None) # Minimum number of samples required to split a node min_samples_split = [5, 10, 15, 20] # Minimum number of samples required at each leaf node min_samples_leaf = [int(x) for x in np.linspace(start = 1, stop = 5, num = 5)] # Method of selecting samples for training each tree #bootstrap = [True, False] bootstrap = [True] # Create the random grid hyper_parameters = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap} #random_search = RandomizedSearchCV(estimator = model, param_distributions = random_grid, n_iter = 100, # cv = 4, verbose=2, random_state=1, n_jobs = -1) # Create grid search using cross validation clf = GridSearchCV(model, hyper_parameters, cv=4, iid='deprecated') # Fit grid search best_model = clf.fit(X_train, y_train) # View best hyperparameters #print('Best Penalty:', best_model.best_estimator_.get_params()['penalty']) #print('Best C:', best_model.best_estimator_.get_params()['C']) # best hyper parameters print('best_hyper_parameters:',best_model.best_params_) # performance: y_test_pred = best_model.best_estimator_.predict(X_test) acc = accuracy_score(y_test,y_test_pred) #print('Accuracy:', acc) p_test_pred = best_model.best_estimator_.predict_proba(X_test) # prob of [0,1] p_test_pred = p_test_pred[:,1] # prob of 1 fp,tp,thresholds = roc_curve(y_test, p_test_pred, drop_intermediate=False) roc_auc = auc(fp,tp) #print('AUC:', roc_auc) precision = precision_score(y_test,y_test_pred) #print('Precision:',precision) recall = recall_score(y_test,y_test_pred) #print('Recall:',recall) return acc,roc_auc,precision,recall # + n_data = len(data_list) roc_auc = np.zeros(n_data) ; acc = np.zeros(n_data) precision = np.zeros(n_data) ; recall = 
np.zeros(n_data) #data_id = 0 for data_id in range(n_data): X_train,X_test,y_train,y_test = read_data(data_id) acc[data_id],roc_auc[data_id],precision[data_id],recall[data_id] =\ measure_performance(X_train,X_test,y_train,y_test) print(data_id,acc[data_id],roc_auc[data_id]) # - print('acc_mean:',acc.mean()) print('roc_mean:',roc_auc.mean()) print('precision:',precision.mean()) print('recall:',recall.mean()) np.savetxt('RF_result.dat',(roc_auc,acc,precision,recall),fmt='%f')
20.01.1400_GridSearchCV/RF_GridSearchCV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: classification # language: python # name: classification # --- # # Agglomerative Clustering # + import pandas as pd df_data = pd.read_csv('../data/2d_clustering_normal.csv') # - df_data.head() data = df_data.drop('label',axis=1) label = df_data['label'] # ### Run Agglomerative # + from sklearn.cluster import AgglomerativeClustering hier_clus = AgglomerativeClustering(n_clusters=3, affinity = 'euclidean', linkage = 'ward') predictions = hier_clus.fit_predict(data) # - df_data['pred_label'] = predictions # ## Dengdrogram import scipy.cluster.hierarchy as hier dendrogram = hier.dendrogram(hier.linkage(data, method='ward')) # + from matplotlib import pyplot as plt import numpy as np colors = {0:'b', 1:'g', 2:'r', 3:'c', 4:'m', 5:'y', 6:'k'} plt.figure() plt.figure(num=None, figsize=(16, 6), dpi=80, facecolor='w', edgecolor='k') plt.subplot(1,2,1) unq_labels = np.unique(df_data['label']) for i in unq_labels: df = df_data.loc[df_data['label'] == i][['x','y']] x = df['x'] y = df['y'] plt.scatter(x, y, c=colors[i], alpha=1) plt.title('Ground Truth') plt.subplot(1,2,2) unq_labels = np.unique(df_data['pred_label']) for i in unq_labels: df = df_data.loc[df_data['pred_label'] == i][['x','y']] x = df['x'] y = df['y'] plt.scatter(x, y, c=colors[i], alpha=1) plt.title('Predictions') plt.show() # - # ### Change the number of clusters # + from sklearn.cluster import AgglomerativeClustering hier_clus = AgglomerativeClustering(n_clusters=5, affinity = 'euclidean', linkage = 'ward') predictions = hier_clus.fit_predict(data) df_data['pred_label'] = predictions # + from matplotlib import pyplot as plt import numpy as np colors = {0:'b', 1:'g', 2:'r', 3:'c', 4:'m', 5:'y', 6:'k'} plt.figure(figsize=(8,6)) plt.figure(num=None, figsize=(16, 6), dpi=80, facecolor='w', edgecolor='k') plt.subplot(1,2,1) unq_labels = 
np.unique(df_data['label']) for i in unq_labels: df = df_data.loc[df_data['label'] == i][['x','y']] x = df['x'] y = df['y'] plt.scatter(x, y, c=colors[i], alpha=1) plt.title('Ground Truth') plt.subplot(1,2,2) unq_labels = np.unique(df_data['pred_label']) for i in unq_labels: df = df_data.loc[df_data['pred_label'] == i][['x','y']] x = df['x'] y = df['y'] plt.scatter(x, y, c=colors[i], alpha=1) plt.title('Predictions') plt.show() # -
clustering/notebooks/04 - Agglomerative Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python # name: python # --- # + deletable=false nbgrader={"cell_type": "code", "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", "grade": false, "grade_id": "set_a", "locked": false, "schema_version": 3, "solution": true} # YOUR CODE HERE raise NotImplementedError() # + deletable=false nbgrader={"cell_type": "code", "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", "grade": true, "grade_id": "foo", "locked": false, "points": 1.0, "schema_version": 3, "solution": false} print("Success!") # + [markdown] deletable=false nbgrader={"cell_type": "code", "checksum": "75d78cdf605a339809ceaace462c5f33", "grade": true, "grade_id": "bar", "locked": false, "points": 1.0, "schema_version": 3, "solution": false} # assert a == 1 # + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "9e51fd0022c24c4105e38369d2f9d751", "grade": true, "grade_id": "baz", "locked": false, "points": 2.0, "schema_version": 3, "solution": true} # YOUR ANSWER HERE # + nbgrader={"cell_type": "code", "checksum": "5a193c164d7b444efe9a3612bee09f4c", "grade": true, "grade_id": "quux", "locked": false, "points": 3.0, "schema_version": 3, "solution": true} # YOUR CODE HERE raise NotImplementedError() # + nbgrader={"cell_type": "code", "checksum": "86f5f877fe95faac003fcd4b8d43d093", "grade": false, "grade_id": "ro1", "locked": true, "schema_version": 3, "solution": false} print("Don't change this cell!") # + [markdown] nbgrader={"cell_type": "markdown", "checksum": "0122b50e5eaf367b9874d07ebaf80521", "grade": false, "grade_id": "ro2", "locked": true, "schema_version": 3, "solution": false} # This cell shouldn't be changed.
nbgrader/tests/nbextensions/files/submitted-grade-cell-type-changed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import configparser import csv import os import imageio import numpy as np import pandas as pd import configparser from utils import * from PIL import Image from sklearn.model_selection import train_test_split from matplotlib import pyplot as plt # + config = configparser.ConfigParser() config.read('config.INI') TRAIN_PATH_IMAGE = config['paths']['TRAIN_PATH_IMAGE'] TEST_PATH_IMAGE = config['paths']['TEST_PATH_IMAGE'] VALIDATION_PATH_IMAGE = config['paths']['VALIDATION_PATH_IMAGE'] TRAIN_TEST_PATH_CSV = config['paths']['TRAIN_TEST_PATH_CSV'] TRAIN_CSV_NAME = config['file_names']['TRAIN_CSV_NAME'] TEST_CSV_NAME = config['file_names']['TEST_CSV_NAME'] VALIDATION_CSV_NAME = config['file_names']['VALIDATION_CSV_NAME'] # - IMG_WIDTH = int(config['image_shape']['IMG_WIDTH']) IMG_HEIGHT = int(config['image_shape']['IMG_HEIGHT']) train = pd.read_csv(TRAIN_TEST_PATH_CSV + TRAIN_CSV_NAME) test = pd.read_csv(TRAIN_TEST_PATH_CSV + TEST_CSV_NAME) train.iloc[:1, 100:200] from data_loader import * data_loader = DataLoader(TRAIN_PATH_IMAGE, VALIDATION_PATH_IMAGE, TEST_PATH_IMAGE, 64, 64, 64, IMG_HEIGHT, IMG_WIDTH, 1, 10) X, y = data_loader.train_data_loader(0) # + # X = X / X.max() # + import os import glob from PIL import Image import random import numpy as np class DataLoader: def __init__(self, train_images_dir, val_images_dir, test_images_dir, train_batch_size, val_batch_size, test_batch_size, height_of_image, width_of_image, num_channels, num_classes): self.train_paths = glob.glob(os.path.join(train_images_dir, "**/*.png"), recursive=True) self.val_paths = glob.glob(os.path.join(val_images_dir, "**/*.png"), recursive=True) self.test_paths = glob.glob(os.path.join(test_images_dir, "**/*.png"), recursive=True) # 
random.shuffle(self.train_paths) # random.shuffle(self.val_paths) # random.shuffle(self.test_paths) self.train_batch_size = train_batch_size self.val_batch_size = val_batch_size self.test_batch_size = test_batch_size self.height_of_image = height_of_image self.width_of_image = width_of_image self.num_channels = num_channels self.num_classes = num_classes def load_image(self, path, is_flattened = False): im = np.asarray(Image.open(path)) lbl = np.eye(self.num_classes)[int(path.rsplit('\\', 2)[-2])] if is_flattened: im = im.reshape(self.height_of_image * self.width_of_image) return im, lbl def batch_data_loader(self, batch_size, file_paths, index, is_flattened = False, randomization = False): ims = [] lbls = [] if index == 0 or randomization: random.shuffle(file_paths) while batch_size >= 1 and (len(file_paths) - index > 0): im, lbl = self.load_image(file_paths[index], is_flattened) ims.append(im) lbls.append(lbl) batch_size -= 1 index += 1 imgs = np.array(ims) imgs = imgs.reshape(-1, self.height_of_image, self.width_of_image, self.num_channels) imgs = imgs / imgs.max() return imgs, np.array(lbls) def train_data_loader(self, index, randomization = False): return self.batch_data_loader(self.train_batch_size, self.train_paths, index, randomization = randomization) def val_data_loader(self, index, randomization = False): return self.batch_data_loader(self.val_batch_size, self.val_paths, index, randomization = randomization) def test_data_loader(self, index, randomization = False): return self.batch_data_loader(self.test_batch_size, self.test_paths, index, randomization = randomization) def get_train_data_size(self): return len(self.train_paths) def get_val_data_size(self): return len(self.val_paths) def get_test_data_size(self): return len(self.test_paths) def all_train_data_loader(self, is_flattened = False): return self.batch_data_loader(self.get_train_data_size(), self.train_paths, 0) def all_val_data_loader(self, is_flattened = False): return 
self.batch_data_loader(self.get_val_data_size(), self.val_paths, 0) def all_test_data_loader(self, is_flattened = False): return self.batch_data_loader(self.get_test_data_size(), self.test_paths, 0) # - X[0, :, :, 0] # + # plt.imshow(X[0, :, :, 0]) # -
Homeworks/3_mnist/Data Investigation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # + # %pylab inline import sys sys.path.append('../') sys.path.append('../support/') from glob import glob from os.path import join, isfile, basename from multiprocessing import Pool from scipy.ndimage.interpolation import rotate from IPython.display import clear_output from ct_reader import * from tqdm import tqdm from functools import partial from matplotlib.pyplot import * import pickle from paths import * from scipy.misc import imresize # - # BATCH_SIZE is an amount of patients showed per iteration BATCH_SIZE = 10 def read_ct(path, ret_xy_spacing=False): patient = read_ct_scan(path) image = get_pixels_hu(patient) image[image == image[0,0,0]] = 0 if ret_xy_spacing: try: return image, patient[0].PixelSpacing[0] except AttributeError: return image, scan.GetSpacing()[0] return image # """ # Structure: # 1. left lung 1 # 2. right lung 2 # 4. bronchi 0 # 8. trachea 0 # # 3. left overlapped by right 3 # # 5. bronchi overlapped by left 1 # 6. bronchi overlapped by right 2 # 7. bronchi overlapped by right, overlapped by left 3 # # 9. trachea overlapped by left 1 # 10. trachea overlapped by right 2 # 11. trachea overlapped by right, overlapped by left 3 # # 12. bronchi overlapped by trachea 0 # 13. bronchi overlapped by trachea, overlapped by left 1 # 14. bronchi overlapped by trachea, overlapped by right 2 # 15. 
bronchi overlapped by trachea, overlapped by right, overlapped by left 3 # """ def display(patient, mask): mask[(mask == 4) | (mask == 12) | (mask == 8)] = 0 mask[(mask == 1) | (mask == 5) | (mask == 9) | (mask == 13)] = 1 mask[(mask == 2) | (mask == 6) | (mask == 10) | (mask == 14)] = 2 mask[(mask == 3) | (mask == 7) | (mask == 15)] = 3 subplot(2, 2, 1) imshow(patient[patient.shape[0] // 2]) axis('off') subplot(2, 2, 2) imshow(imresize(clip(patient[:, patient.shape[1] // 2], -1000, 400), (patient.shape[0], patient.shape[0]))) axis('off') subplot(2, 2, 3) imshow(mask[patient.shape[0] // 2]) axis('off') subplot(2, 2, 4) imshow(imresize(mask[:, patient.shape[1] // 2], (patient.shape[0], patient.shape[0]))) axis('off') show() global_paths = glob(join(PATH['STAGE_MASKS'], "*[0-9a-f].npy")) global_paths = sorted([join(PATH['STAGE_DATA'], basename(path).split('.npy')[0]) for path in global_paths]) erroneus = list() upsides = list() checkpoint = 0 iterations = int(ceil(len(global_paths) / BATCH_SIZE)) erroneus = list() iterations = int(ceil(len(global_paths) / BATCH_SIZE)) for counter in range(checkpoint, iterations): paths = global_paths[BATCH_SIZE * counter: BATCH_SIZE * (counter + 1)] for i, path in enumerate(paths): patient = read_ct(path) mask = load(join(PATH['STAGE_MASKS'], basename(path) + '.npy')) print(i, iterations - counter, path) display(patient, mask) while True: try: print('Erroneus:') err = input() nomerus = list() if err != '': nomerus = list(map(int, err.split(' '))) print('Inverted:') ups = input() nomerus = [nomerus, []] if ups != '': nomerus[1] = list(map(int, ups.split(' '))) break except: pass for i in nomerus[0]: erroneus.append(basename(paths[abs(i)])) pickle.dump(erroneus, open(join(PATH['STAGE_MASKS'], 'still_erroneus_ncrash'), 'wb')) for i in nomerus[1]: upsides.append(basename(paths[abs(i)])) pickle.dump(upsides, open(join(PATH['STAGE_MASKS'], 'upsides'), 'wb')) clear_output() # To continue correction, load your checkpoints: erroneus = 
pickle.load(open(join(PATH['STAGE_MASKS'], 'still_erroneus_ncrash'), 'rb')) upsides = pickle.load(open(join(PATH['STAGE_MASKS'], 'upsides'), 'rb'))
IPython/Correction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Edge Computing with Intel(TM) Distribution of OpenVINO(TM) # # This notebook covers the instructions to setup a AWS Greengrass client and run applications utilizing computer vision and deep learning at the edge to communicate with AWS IoT Cloud. # # If you are using a workshop laptop, setup process is already done for you so you won't need to redo the installation & configuration steps. They are all for your reference for the further development process. # # All the instructions for this tutorial is intended for Ubuntu 16.04, certain instructions might change according to selected operating system or distribution. # # Edge Device Configuration # # Initial configuration of edge device requires complete setup of OpenVINO(TM) Toolkit. # # ## 1 - Install OpenVINO(TM) Toolkit # # Follow instructions from OpenVINO(TM) Toolkit documentation https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html # # ## 2 - Get Edge Analytics Samples # # ```bash # git clone https://github.com/intel/Edge-Analytics-FaaS # git clone https://github.com/intel/Edge-optimized-models # ``` # # ## 3 - Install AWS GG Client and Prequisites # # ```bash # sudo apt-get install python-pip # # pip2 install numpy boto3 awscli opencv-python # ``` # # AWS Greengrass Core Installation & Configurations # # This part includes instructions to install Greengrass software and complete required configurations for secure connection to AWS IoT Cloud. # # Most of the instructions can be found from # # ## 1 - Add GreenGrass Core User and User Group # # ```bash # sudo adduser --system ggc_user # sudo addgroup -- system ggc_group # ``` # # After setup, reboot your system. 
# # ## 2 - Download Required Files from AWS # # First Download the Greengrass Service from: # # https://docs.aws.amazon.com/greengrass/latest/developerguide/what-is-gg.html?icmpid=docs_gg_console#gg-core-download-tab # # Get GreenGrass SDK, which is required to deploy Lambda functions # # https://github.com/aws/aws-greengrass-core-sdk-python # # # ## 3 - Create a group and download the certificates as indicated # # Follow instructions from: https://docs.aws.amazon.com/greengrass/latest/developerguide/gg-device-start.html # # After completing the certificate installation: # # ```bash # # cd /greengrass/ggc/core # # sudo ./greengrassd start # # ``` # # You can navigate to Monitor section to see if you have completed the setup. # # Running Edge Device for Demo # # When GG is ready, you are ready to deploy your FaaS to Greengrass edge. # # See the instructions from: # # - https://github.com/intel/Edge-Analytics-FaaS # - https://github.com/intel/Edge-optimized-models # # Resources # # - OpenVINO Edge Computing Resources # # - https://github.com/intel/Edge-Analytics-FaaS # - https://github.com/intel/Edge-optimized-models # # - AWS Greengrass Documentation # # - https://console.aws.amazon.com/iot/home?#/greengrassIntro
Cloud Connectors & Edge Computing with OpenVINO(TM) - Lab 4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # theme {theme} : text only # + [markdown] slideshow={"slide_type": ""} # ## subtitle cell # - # text cell # # * item 1 # * item 2 # + [markdown] cell_style="center" slideshow={"slide_type": "slide"} # # theme {theme} : code cells # + cell_style="center" # simple stuff for now def fibo(n): return 1 if n <= 1 else fibo(n-1) + fibo(n-2) [fibo(x) for x in range(15)] # + [markdown] slideshow={"slide_type": "slide"} # ## some math # + cell_style="split" # %matplotlib notebook import numpy as np import matplotlib.pyplot as plt plt.ion() # + cell_style="split" X = np.linspace(-5, 5) Y = np.exp(X) plt.plot(X, Y); # + [markdown] slideshow={"slide_type": "slide"} # ## and now for a table # - # prior to 5.6 we were getting very small tables # # # | sample | table | # |--------|-------| # | men | 10% | # | women | 70% | # | children | 30% |
tests/themes/master.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:balance] * # language: python # name: conda-env-balance-py # --- import cvxpy as cp import numpy as np import cmath import hsbalance as hs import matplotlib.pyplot as plt # %matplotlib widget # # Introduction # # This notebook is aimed to give a quick presentation for `hsbalance` package. # `hsbalance` is Python based package that is meant to find the optimized solution of rotor balancing problem # # A. Independent Systems # # # Creating Model # <b>for independent systems where number of balancing planes are equal to the number of measuring points we do not need optimization process as number of equations are equal to the number of unknowns # 1. Enter the initial vibration column vector `A`: # - each row represents the vibration at certain measuring plane. # - vibration is to be represented in the form ('amplitude' @ 'phase(degrees)') # - Enter slow roll vibration column `A0` # + A_math = [['170@112'], ['53@78']] A0_math = [['12@30'], ['12@30']] # - # 2. Enter trial mass effect matrix `B` # B = [['B00', 'B01'] # ['B10', 'B11']] # where: # - B00: vibration at measuring point 1 when trial mass at balancing plane 1 # - B01: vibration at measuring point 1 when trial mass at balancing plane 2 # - B00: vibration at measuring point 2 when trial mass at balancing plane 1 # - B00: vibration at measuring point 2 when trial mass at balancing plane 2 # # As a general rule in this notebook columns will be for balancing planes and rows are for measuring points B_math = [['235@94', '189@115'], ['58@68', '77@104']] # 3. Enter the trial mass amounts in row vector `U`: U_math = ['1.15@0', '1.15@0'] # 4. 
Transform matrices to cartesian (complex number) form: A = hs.convert_matrix_to_cart(A_math) A0 = hs.convert_matrix_to_cart(A0_math) B = hs.convert_matrix_to_cart(B_math) U = hs.convert_matrix_to_cart(U_math) print('A=\n{}\n\nA0=\n{}\n\nB=\n{}\n\nU = {}'.format(A, A0, B, U)) # As in this example, this is an independent system where number of measuring points (M) are equal to the number of balancing planes (N). # we, thus, except an exact solution for balancing weights `W` that can be calculated first by find the Influence Coefficients matrix `ALPHA`: # \begin{align} # \tag {1} # \alpha = \frac{(B - A)}{U} \label{eq:test1} # \end{align} # \begin{align} # \tag {2} # W = - \alpha^{-1}(A - A_{0}) # \end{align} Alpha_CI = (B - A)/U W = -np.linalg.inv(Alpha_CI) @ (A - A0) hs.convert_matrix_to_math(Alpha_CI) W # Transform back to mathematical expression form ALPHA_math = hs.convert_matrix_to_math(Alpha_CI) W_math = hs.convert_matrix_to_math(W) print('ALPHA=\n{}\n\nW=\n{}'.format(ALPHA_math, W_math)) # >This means we need to put 2 grams at angel 57.4 degrees on balancing plane 1, and 1.1 grams at 301.1 degrees on plane 2. # - Lets Try out the same independent system problem using our optimization modeling code: # - first we will create a model of the system parameters: # - we will be using least squares model to solve the problem using optimization technique to minimize the squares of errors: alpha = hs.Alpha() # create an instance of alpha class alpha.add(A=A, B=B, U=U) # calculate alpha from parameters hs.convert_matrix_to_math(alpha.value) my_model = hs.LeastSquares(A-A0, alpha) W = my_model.solve() hs.convert_matrix_to_math(W) my_model.rmse() # Which is exactly as the exact solution # # A. Dependent Systems # ## Introduction # In dependent systems, number of measuring points are less than the number of balancing planes. # This will lead to a problem with infinite number of solutions as the number of unknowns are less than the number of equations. 
# We can use optimization technique here effectively to reduce the error and we can apply constraints to our model. # We will be example.[[1]](#1) which presents a 1150 MW nuclear power turbine-generator # balancing problem. The system consists of 11 measuring points and 5 balancing planes. (independent system) # In practical plane #4 was not accessible. # # # <a id="1">[1]</a> # <NAME>, <NAME>, and <NAME>. Balancing a 1150 MW turbine-generator. United Kingdom: N. p., 2000. Web. # # ## Parameters # + ALPHA_math=[ ['9.8@117', '17@124', '7.2@114', '38.5@77'], ['2.7@43', '14.3@317', '4.5@213', '14.3@270'], ['12.5@323', '25@261', '15.2@158', '30@238'], ['22.4@92', '32.6@45', '23.3@315', '27.8@210'], ['26@94', '40.3@9', '25@330', '34@213'], ['40.3@355', '43@144', '29.6@61', '65.4@322'], ['20.6@339', '32.3@152', '36.7@41', '61.8@322'], ['12.6@226', '37.6@52', '18.8@153', '26@176'], ['13.4@209', '26.9@76', '47.5@98', '71.7@312'], ['13.4@154', '22.4@307', '52@299', '102@165'], ['5.4@24', '7.2@199', '22.4@2', '27.8@99']] A_math=[ ['55@259'], ['45@118'], ['124@21'], ['138@349'], ['107@349'], ['90@280'], ['58@354'], ['108@201'], ['88@190'], ['56@48'], ['73@158']] # - # Convert to complex numbers (cartesian) form A = hs.convert_matrix_to_cart(A_math) ALPHA = hs.convert_matrix_to_cart(ALPHA_math) # A, ALPHA # Adding ALPHA alpha = hs.Alpha() alpha.add(direct_matrix=ALPHA) alpha.check() # ## Solving with Least squares: model_LeastSquares = hs.LeastSquares(A, alpha, name='Least_squares') # Instantiate least square model W_LeastSquares = model_LeastSquares.solve() #solve hs.convert_matrix_to_math(W_LeastSquares) residuals_LeastSquares = model_LeastSquares.expected_residual_vibration() hs.convert_matrix_to_math(residuals_LeastSquares) # Expected residule vibrations # Root mean square error: rmse_LeastSquares = model_LeastSquares.rmse() rmse_LeastSquares # ### Discussion # Least square has iterated over to get the minimum squares of errors (hence, the least `RMSE`) # Doing so, it 
does it blindly so we can see that it add huge amount of weight at plane 1 (3.8 kg!), meanwhile vibration on bearing #3 is expected to be 106 $\mu$ which is probably an alarm value!! # # # # # # ## Solving with MinMax: model_MinMax = hs.Min_max(A, alpha, name='MinMax') # Instantiate MinMax model W_MinMax = model_MinMax.solve() #solve hs.convert_matrix_to_math(W_MinMax) residuals_MinMax = model_MinMax.expected_residual_vibration() hs.convert_matrix_to_math(residuals_MinMax) # Expected residule vibrations # Root mean square error: rmse_MinMax = model_MinMax.rmse() rmse_MinMax # ### Discussion # `MinMax` is a great optimization tool that tends to, instead of unbiased `Least Squares`, level up the residuals to minimize the maximum. here we see that we have a great improvement in the residual vibrations (max 70$\mu\$). # The downside is putting more and more weights in the correction (4.4 grams in plane 1 now!) and ends up with higher RMSE. # In order to constraint the weight to a certain limit we can perform a Constrained Minmax` model weight_const ={0 : 3.402, 1 : 3.402, 2 : 3.402, 3 : 3.402} # limit weight to 120 oz model_MinMax_const = hs.Min_max(A, alpha, weight_const=weight_const, name='MinMax_const') # Instantiate MinMax model W_MinMax_const = model_MinMax_const.solve() #solve hs.convert_matrix_to_math(W_MinMax_const) residuals_MinMax_const = model_MinMax_const.expected_residual_vibration() hs.convert_matrix_to_math(residuals_MinMax_const) # Expected residule vibrations # Root mean square error: rmse_MinMax_const = model_MinMax_const.rmse() rmse_MinMax_const # ### Discussion # Constrained MinMAx` has done its job in minimizing the weights to 3.402 Kg (120 oz). # The downside is that we got more maximum vibration in residuals (73$\mu\$) # ## Solving with Linear Matrix Inequality (LMI) # In certain situations, instead of being unbiased ---> `Least Squares` or leveled ---> `MinMax`, we actually want to be BIASED to certain planes. 
In other words we want the optimzer to do its best to decrease certain planes (`critical planes`) and keep the others under a `lazy constrains` just below certain amount of vibration level. weight_const ={0 : 3.402, 1 : 3.402, 2 : 3.402, 3 : 3.402} # limit weight to 120 oz critical_planes = {1, 9} # setting the critical planes to be 2, 10 (note python start counting at 0) V_max = 76 # max vibration for non-critical planes model_LMI = hs.LMI(A, alpha, weight_const=weight_const, critical_planes=critical_planes, V_max=V_max , name='LMI') # Instantiate LMI model W_LMI = model_LMI.solve() #solve hs.convert_matrix_to_math(W_LMI) residuals_LMI = model_LMI.expected_residual_vibration() hs.convert_matrix_to_math(residuals_LMI) # Expected residule vibrations # Root mean square error: rmse_LMI = model_LMI.rmse() rmse_LMI # ### Discussion # LMI model has been biased to plane 2 with a slight enhancement and plane 10 which greatly decreased from 69.7$\mu$ to 45.1$\mu$ (35% decrease) but that was with the cost of increasing non critical planes to the limit we have assigned (76$\mu\$) # ## Plotting models = [model_LeastSquares, model_MinMax, model_MinMax_const, model_LMI] def plot_models(models): residule_vibration = {model.name:abs(model.expected_residual_vibration().ravel()) for model in models} rmse = {model.name:model.rmse() for model in models} fig, (ax0, ax1) = plt.subplots(2, 1) ax0.bar(rmse.keys(), rmse.values()) plt.xlabel('Models') plt.ylabel('Vibration'); models_number = len(residule_vibration.values()) measuring_points = max((len(array) for array in residule_vibration.values())) jet= plt.get_cmap('jet') colors = iter(jet(np.linspace(0,1,models_number))) step = 0 for array in residule_vibration.values(): ax1.bar(np.arange(len(array)) + step, array, color = next(colors), width = 1/models_number) step += 1 / (models_number+1) ax1.legend([model.name for model in models]) ax1.set_xticks(range(measuring_points), ['M.P '+ str(point) for point in range(1, 1+measuring_points)], 
rotation =45); plot_models(models)
hsbalance_Playground.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import os import IPython.display as ipd # + data_path = r"E:\personal\LiveProjects\speech_recognition\google_speech_new\google_speech\train" train_csv = os.path.join(data_path, r"train.csv") df = pd.read_csv(train_csv, encoding='utf-8') print(df) # - audio_path = os.path.join(data_path, os.path.join(r"audio", df.iloc[1]['file_path'])) print(audio_path) ipd.Audio(audio_path, autoplay=True)
preparation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Indian Companies Registration Data [1857 - 2020] # # ### Original source # #### https://www.kaggle.com/rowhitswami/all-indian-companies-registration-data-1900-2019 # ### Import libraries # + import pandas as pd import numpy as np import seaborn as sns import matplotlib from matplotlib import pyplot as plt from matplotlib import ticker as plticker import plotly.express as px # - # ### Read Dataset df = pd.read_csv('registered_companies.csv') df.head() print(f"There are {df.shape[0]} rows") # #### Seems like there is a lot of missing values. Let's remove rows with missing values since we can not derive missing values anyway. # + data = df.dropna() data.head() # - print(f"There are {data.shape[0]} rows") # ### 1. Find states that have the largest amount of registed companies # #### First plot is visualized normally, second with using log scale. # + matplotlib.rc('xtick', labelsize=22) matplotlib.rc('ytick', labelsize=22) state_count = data.groupby('REGISTERED_STATE').size().sort_values(ascending=True) plt.figure(figsize=(20,20)) sns.barplot(state_count.tolist(), state_count.keys()) plt.show() # - plt.figure(figsize=(20,20)) sns.barplot(state_count.tolist(), state_count.keys()).set(xscale="log") plt.show() # ### 2. Find number of registrations over the years data['YEAR_OF_REGISTRATION'] = pd.to_datetime(data['DATE_OF_REGISTRATION'], errors = 'coerce').dt.year # + year_count = data.groupby('YEAR_OF_REGISTRATION').size().to_frame().reset_index() px.bar(year_count, x = 'YEAR_OF_REGISTRATION', y = 0, labels={'YEAR_OF_REGISTRATION' : 'Year', '0': 'Number of registred companies' })
Indian Companies Registrations/Indian Companies Registrations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Table of contents
#
# 1.Understanding the Data
# 2.Reading the Data in
# 3.Multiple Regression Model
# 4.Prediction
# 5.Practice

# Importing Needed Package
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')

# Understanding the Data
#
# FuelConsumption.csv:
# We have downloaded a fuel consumption dataset, FuelConsumption.csv, which contains
# model-specific fuel consumption ratings and estimated carbon dioxide emissions for
# new light-duty vehicles for retail sale in Canada. Dataset source
#
# 1.MODELYEAR e.g. 2014
# 2.MAKE e.g. Acura
# 3.MODEL e.g. ILX
# 4.VEHICLE CLASS e.g. SUV
# 5.ENGINE SIZE e.g. 4.7
# 6.CYLINDERS e.g 6
# 7.TRANSMISSION e.g. A6
# 8.FUELTYPE e.g. z
# 9.FUEL CONSUMPTION in CITY(L/100 km) e.g. 9.9
# 10.FUEL CONSUMPTION in HWY (L/100 km) e.g. 8.9
# 11.FUEL CONSUMPTION COMB (L/100 km) e.g. 9.2
# 12.CO2 EMISSIONS (g/km) e.g. 182 --> low --> 0

# Reading the data in

# +
# Expects FuelConsumption.csv in the working directory (see dataset notes above).
df = pd.read_csv("FuelConsumption.csv")

# take a look at the dataset
df.head()
# -

# Keep only the numeric features used for modelling plus the target (CO2EMISSIONS).
cdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY','FUELCONSUMPTION_COMB','CO2EMISSIONS']]
cdf.head(9)

# Lets plot Emission values with respect to Engine size:
plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()

# Creating train and test dataset:
# Train/Test Split involves splitting the dataset into training and testing sets
# respectively, which are mutually exclusive. After which, you train with the training
# set and test with the testing set. This will provide a more accurate evaluation on
# out-of-sample accuracy because the testing dataset is not part of the dataset that
# have been used to train the data. It is more realistic for real world problems.
#
# This means that we know the outcome of each data point in this dataset, making it
# great to test with! And since this data has not been used to train the model, the
# model has no knowledge of the outcome of these data points. So, in essence, it's
# truly an out-of-sample testing.

# Random ~80/20 split via a boolean mask drawn per row. Not seeded, so the
# split (and all results below) change between runs -- TODO consider
# np.random.seed(...) for reproducibility.
msk=np.random.rand(len(df))<0.8
train=cdf[msk]
test=cdf[~msk]

# Train data distribution
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()

# Multiple Regression Model:
# In reality, there are multiple variables that predict the Co2emission. When more
# than one independent variable is present, the process is called multiple linear
# regression. For example, predicting co2emission using FUELCONSUMPTION_COMB,
# EngineSize and Cylinders of cars. The good thing here is that Multiple linear
# regression is the extension of simple linear regression model.

from sklearn import linear_model
regr=linear_model.LinearRegression()
# Fit on three predictors; y is kept 2-D (shape (n, 1)) as sklearn accepts.
x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit (x,y)
# The coefficients
print ('Coefficients: ', regr.coef_)

# As mentioned before, Coefficient and Intercept , are the parameters of the fit line.
# Given that it is a multiple linear regression, with 3 parameters, and knowing that
# the parameters are the intercept and coefficients of hyperplane, sklearn can
# estimate them from our data. Scikit-learn uses plain Ordinary Least Squares method
# to solve this problem.

# Ordinary Least Squares (OLS)
# OLS is a method for estimating the unknown parameters in a linear regression model.
# OLS chooses the parameters of a linear function of a set of explanatory variables by
# minimizing the sum of the squares of the differences between the target dependent
# variable and those predicted by the linear function. In other words, it tries to
# minimizes the sum of squared errors (SSE) or mean squared error (MSE) between the
# target variable (y) and our predicted output ($\hat{y}$) over all samples in the
# dataset.
#
# OLS can find the best parameters using of the following methods:
#
# - Solving the model parameters analytically using closed-form equations
# - Using an optimization algorithm (Gradient Descent, Stochastic Gradient Descent,
#   Newton's Method, etc.)

# Prediction

# +
x=np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y=np.asanyarray(test[['CO2EMISSIONS']])
y_hat=regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
# Mean squared error on the held-out test rows.
print("Residual sum of squares: %.2f" % np.mean((y_hat - y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(x, y))
# -

# explained variance regression score:
# If y(hat) is the estimated target output, y the corresponding(correct) target
# output, and Var is Variance, the square of the standard deviation, then the
# explained variance is estimated as follows:
#
#     explained_variance(y, y_hat) = 1 - Var(y - y_hat) / Var(y)
#
# The best possible score is 1.0, lower values are worse.
Multiple Linear Regression_Co2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jesuspablo888/daa_2021_1/blob/master/Tarea_7_Recursividad.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="LUF8d7yTKLQz"
class Pila:
    """Minimal stack (LIFO) ADT backed by a Python list."""

    def __init__(self):
        self.__data = []

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return len(self.__data) == 0

    def get_top(self):
        """Return (without removing) the top element; raises IndexError when empty."""
        return self.__data[len(self.__data) - 1]

    def pop(self):
        """Remove and return the top element; raises IndexError when empty."""
        return self.__data.pop()

    def push(self, value):
        """Place `value` on top of the stack."""
        self.__data.append(value)

    def lenght(self):
        """Number of stored elements (misspelled name kept for backward compatibility)."""
        return len(self.__data)

    def length(self):
        """Correctly-spelled alias of `lenght` -- prefer this in new code."""
        return len(self.__data)

    def to_string(self):
        """Print the elements top-first, one per line."""
        for i in self.__data[::-1]:
            print(i)

# + colab={"base_uri": "https://localhost:8080/"} id="4Rxi_aS9bilb" outputId="5efba31b-41a0-427a-dc8e-ab5a0cac04be"
# 1. Create a list of integers and sum it recursively;
#    the base case is the empty list.
def nose(l):
    """Recursively sum the integers in `l`.

    NOTE: the input list is consumed (emptied) as a side effect.
    """
    if len(l) == 0:
        return 0
    else:
        n = l.pop()
        n1 = nose(l)
        n += n1
        return n

lisInt = [1, 1, 1, 1, 1]
print(lisInt)
print(f"LA SUMA ES:", nose(lisInt))

# + colab={"base_uri": "https://localhost:8080/"} id="SbejZURqeGVN" outputId="9f21e395-9b0d-4c55-f6c2-c3fcfe4ab351"
# 2. Countdown using recursion.
def contador(x):
    """Print x, x-1, ..., 0; does nothing for negative `x`."""
    if x >= 0:
        print(x)
        contador(x - 1)

contador(10)

# + id="Syw7YbhZkjjU" colab={"base_uri": "https://localhost:8080/"} outputId="e3ba2710-60d9-4142-e7c4-d3d3b659e79f"
# 3. Extract the value at the middle position of a stack ADT.
def posicion_media(p, t):
    """Print and remove the middle element of stack `p`.

    `t` must be the stack's size at the first call; elements above the
    middle are popped on the way down and pushed back afterwards, so the
    rest of the stack is restored (minus the middle element itself).
    """
    if t // 2 == p.lenght() - 1:
        return print(f"Valor Medio:", p.pop())
    else:
        n = p.pop()
        posicion_media(p, t)
        p.push(n)

p = Pila()
p.push(5)
p.push(6)
p.push(10)
p.push(1)  # <--- middle value
p.push(2)
p.push(14)
p.push(12)
p.to_string()
print("-----")
posicion_media(p, p.lenght())
print("-----")
p.to_string()
Tarea_7_Recursividad.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp onnx # - # # ONNX # > Exporting models to `ONNX` format #hide from nbdev.showdoc import * #export from fastinference.soft_dependencies import SoftDependencies if not SoftDependencies.check()['onnxcpu']: raise ImportError("The onnxcpu or onnxgpu module is not installed.") # + # export from fastai.learner import Learner from fastcore.all import * import torch from torch import tensor, Tensor import onnxruntime as ort # - #e @patch def to_onnx(x:Learner, fname='export', path=Path('.')): "Export model to `ONNX` format" orig_bs = x.dls[0].bs x.dls[0].bs=1 dummy_inp = next(iter(x.dls[0])) x.dls[0].bs = orig_bs names = inspect.getfullargspec(x.model.forward).args[1:] dynamic_axes = {n:{0:'batch_size'} for n in names} dynamic_axes['output'] = {0:'batch_size'} torch.onnx.export(x.model, dummy_inp[:-1], path/f'{fname}.onnx', input_names=names, output_names=['output'], dynamic_axes=dynamic_axes) data_exp = x.dls.new_empty() data_exp.loss_func = x.loss_func torch.save(data_exp, path/f'{fname}.pkl', pickle_protocol=2) #slow show_doc(Learner.to_onnx) # Currently supports single-output models. 
See an example usage below: from fastai.tabular.all import * path = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') splits = RandomSplitter()(range_of(df)) cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'] cont_names = ['age', 'fnlwgt', 'education-num'] procs = [Categorify, FillMissing, Normalize] y_names = 'salary' dls = TabularPandas(df, procs=procs, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits).dataloaders() learn = tabular_learner(dls, layers=[200,100]) #slow learn.to_onnx('tabular') #slow #export from fastinference.inference.inference import _decode_loss #slow #export class fastONNX(): "ONNX wrapper for `Learner`" def __init__(self, fn): self.ort_session = ort.InferenceSession(fn+'.onnx') try: self.ort_session.set_providers(['CUDAExecutionProvider']) cpu = False except: self.ort_session.set_providers(['CPUExecutionProvider']) cpu = True self.dls = torch.load(fn+'.pkl') def to_numpy(self, t:tensor): return t.detach.cpu().numpy() if t.requires_grad else t.cpu().numpy() def predict(self, inps): "Predict a single numpy item" if isinstance(inps[0], Tensor): inps = [self.to_numpy(x) for x in inps] names = [i.name for i in self.ort_session.get_inputs()] xs = {name:x for name,x in zip(names,inps)} outs = self.ort_session.run(None, xs) return outs def get_preds(self, dl=None, raw_outs=False, decoded_loss=True, fully_decoded=False): "Get predictions with possible decoding" inps, outs, dec_out, raw = [], [], [], [] loss_func = self.dls.loss_func is_multi, n_inp = False, self.dls.n_inp if n_inp > 1: is_multi = true [inps.append([]) for _ in range(n_inp)] for batch in dl: batch_np = [] if is_multi: for i in range(n_inp): item = self.to_numpy(batch[i]) inps[i].append(item) batch_np.append(item) else: inps.append(self.to_numpy(batch[:n_inp])) if decoded_loss or fully_decoded: out = self.predict(batch_np) raw.append(out) dec_out.append(loss_func.decodes(tensor(out))) else: 
raw.append(self.predict(batch_np)) axis = 1 if len(dl) > 1 else 0 raw = np.concatenate(raw, axis=axis) if decoded_loss or fully_decoded: dec_out = np.concatenate(dec_out, axis=axis) if not raw_outs: try: outs.insert(0, loss_func.activation(tensor(raw)).numpy()) except: outs.insert(0, dec_out) else: outs.insert(0, raw) if decoded_loss: outs = _decode_loss(self.dls.vocab, dec_out, outs) return outs def test_dl(self, test_items, **kwargs): return self.dls.test_dl(test_items, **kwargs) #slow show_doc(fastONNX)
nbs/03_onnx.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.10 64-bit # language: python # name: python3 # --- # # Python 基本数据类型 # # Python 内置了一些基本数据类型,极大的方便描述的任务。 # # Python 中带有关键字的结构,用于表达特定的语义,被称为 **语句**。 # # 本节主要探讨 {token}`表达式语句 <python-grammar:expression_stmt>` 与 {token}`赋值语句 <python-grammar:assignment_stmt>`。 # # Python 的 {term}`表达式 <expression>` 表示那些可以被求值的对象。换句话说,一个表达式就是表达元素例如字面值、名称、属性访问、运算符或调用,它们最终都会返回一个值。本文主要介绍 Python 基本数据类型所代表的表达式。 # # 赋值语句用于将名称(重)绑定到特定值,以及修改属性或可变对象的成员项: # # ```md # 标识符(或者标识符列表)= 表达式 # ``` # # 此时的标识符一般被称为 `变量`。 # # ## 标识符 # # {ref}`标识符 <identifiers>` (也称为 **名称**)是由特定的 Unicode 字符(包含英文、汉字等,且英文字母区分大小写)、(十进制)数字和下划线组成,但不能以数字作为开头。 # # 示例: 长度 = 100 # 单位 m 宽度 = 400 # 单位 m 面积 = 长度 * 宽度 # 单位 m^2 面积 # 可以看出:标识符大大地简化了表达丰富语义的过程。 # # ```{warning} # 如果变量未定义(即,未赋值),使用该变量会提示错误(在 Python 中被称为 “异常”): # ``` 小马 # ## 数字 # # 数字字面值与运算符 `+`(正号,一般被省略)、`-`(负号) 组成了数学中的数字(名称为 {class}`numbers.Number`)。按照数域范围分为:整型(名称为 {class}`numbers.Integral` 表示)、分数型(名称为 {class}`numbers.Rational`)、浮点型(名称为 {class}`numbers.Real` 表示)以及复数型(名称为 {class}`numbers.Complex` 表示)。整型、分数型、浮点型以及复数型支持数学运算。 # # ### 数学运算 # # Python 提供有基本的数学运算: # # 1. `+`(加法)、`-`(减法)、`*`(乘法)、`/`(除法)、`//`(向下取整除法)、`%`(求余)、`**`(乘方); # 2. 
使用 `()` 分组表达式,用于改变运算的优先级。 # # 可看一些例子(以整数为例): 27 + 49 # 整数加法 100 - 1 # 整数减法 45 * 3 # 整数乘法 75 / 6 # 整数除法 75 / 15 # 除法返回的值总是浮点数 (3 + 7) * 5 # 使用 () 分组表达式 50 // 5, 53 // 5, 57 // 5 # 向下取整 73 % 9 # 求取余数 2 ** 10 # 乘方 # ### 复数 # # 看看 Python 是如何表达 {dfn}`复数` 的。 7j # 纯虚数字面量 3 + 5j # 复数字面量 # 复数的运算: # 定义复数变量 a = 3 + 4j b = 7 + 1j a + b, a - b, a * b, a / b, a ** b # ## 对象的属性、方法以及函数 # # 由于 Python 一切皆是对象,故而每个对象都可能绑定了一些属性(通过 `.` 获取其值)、{term}`方法 <method>` 以及 {term}`函数 <function>`。 # # 比如,复数 $z$ 有一些属性值: # + z = 6 + 8j z.real # 复数的实部 # - z.imag # 复数的虚部 # **方法** 可以理解为对象自有的行为,一般带有 `(参数列表)` 调用其行为。比如,复数 $z$ 有一个求取共轭复数的**方法**: a.conjugate() # Python 内置了许多函数用于处理对象。**函数** 可以理解为通用的行为,一般带有 `(参数列表)` 调用其行为。 # # 比如,求取复数 $z$ 的模: abs(z) # ## 序列 # # 本次仅仅讨论几个常用的序列:字符串(不可变)、元组(不可变)和列表(可变)。 # # ### 字符串 # # Python 使用 **字符串** 记录对象的“语义”信息。字符串通常使用双引号、单引号等表示。如: 晓明 = '小马' say = "小马,说 '你好!'" # 字符串可以使用函数 {func}`print` 显示信息: print(晓明) print(say) # 可能会有一些特殊字符,需要 `\` 进行转义: ss = '你好\n世界' ss # 直接显示,则换行符 `\n` 没有被转义 # 使用 print 显示转义 print(ss) # 还有一种情况,你不想将一些转义符号转义,可以使用原始字符串 `r"..."` 模式: st = r'你好\n世界' print(st) # 转义被忽略 # 其实原始字符串,等同于将 `\` “逃逸”: st # 有时,想要在字符串中嵌入变量,可以使用 `f"...{变量}"`(被称为 **格式字符串**,此类字符串是 **表达式**,而不是常量): h = 160 # 身高 小小 = f"小小的身高是 {h}" print(小小) # #### 字符串的运算 # # 字符串也支持一些特殊的运算。比如,拼接: 'A' 'a' 'A' + 'a' 'A' * 5 # 如果字符串很长很长,可以使用如下方法拼接(以空白符分隔的多个相邻字符串或字节串字面值,可用不同引号标注,等同于合并操作): ('a' 'b' 'c') # 还有一种特殊的字符串,叫做文档字符串,使用 `'''...'''` 表示: # + a = '''iter djkf fkd ''' print(a) # - # 可以看出,字符串是自动换行的。此类字符串常用于注释代码块。 # # ### 列表 & 元组 # # 使用 `[...]` 包裹的 Python 类型称为 **列表**;使用 `(...)` 包裹的 Python 类型称为 **元组**。不同项使用 `,` 隔开。 # # 例如: 四季 = ('春', '夏', '秋', '冬') # 元组 猫屋 = ['小猫1', '小猫2'] # 列表 print(四季) print(猫屋) # ```{hint} # 元组的定义真正起作用的是 `,`。 # ``` 2, 4, 's' # 这也是元组 # ## 集合 # # 集合 # : 此类对象表示由不重复且不可变对象组成的无序且有限的集合。因此它们不能通过下标来索引。但是它们可被迭代,也可用内置函数 {func}`len` 返回集合中的项数。集合常见的用处是快速成员检测,去除序列中的重复项,以及进行交、并、差和对称差等数学运算。 # # 集合分为两类: # # - 可变集合:{class}`set` # - 不可变集合:{class}`frozenset` # # 例如: 字母集 = {'a', 'w', 'a', 'e', 'r'} # 等价于 字母集 = set('a', 'w', 
'a', 'e', 'r') 字母集 # 可以看到,已经自动去除重复项。 # # ## 字典 # # Python 使用字典表示对象之间的关系。表示方法有: # # 1. `{key: value, key1: value2, ...}` 键值对的形式 # 2. {class}`dict`(key=value, key1=value2) # # 例如: # + 字母表 = { 'a': 'A', 'b': 'B' } 字母表2 = dict(a='A', b='B') 字母表 # - # ## 对象的编号与类型 # # 一个 Python 对象的编号和类型是唯一的,可以分别使用 {func}`id` 与 {class}`type` 获取其值。 # # 比如: h = 450 # 高度 地址 = id(h) # 获取标识符 h 的所在地址,即编号 地址 类型 = type(h) # h 的类型是整数 int 类型 # <iframe id="Python" # title="Python 基本数据类型与变量" # width="100%" # height="500" # src="https://developer.hs.net/thread/1600?nav=course"> # </iframe> #
doc/python-study/basic/intro/basic-type.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Importing the necessary libraries import pandas as pd from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline # - # Reading the data into a dataframe data = pd.read_csv("spam.txt") data.head() data.groupby('Category').describe() data['Spam'] = data['Category'].apply(lambda x: 1 if x=='spam' else 0) data.head() # Splitting the data into train and test sets X_train, X_test, y_train, y_test = train_test_split(data.Message, data.Spam, test_size=0.25) # ### Without a pipeline # Applying count vecorizer into the features v = CountVectorizer() X_train_count = v.fit_transform(X_train.values) X_train_count.toarray()[:3] # Applying Naive Bayes on data model = MultinomialNB() model.fit(X_train_count, y_train) emails = [ 'Hey mohan, can we get together to watch footbal game tomorrow?', 'Upto 20% discount on parking, exclusive offer just for you. Dont miss this reward!' ] emails_count = v.transform(emails) model.predict(emails_count) X_test_count = v.transform(X_test) model.score(X_test_count, y_test) # ### With sklearn pipeline clf = Pipeline([ ('vectorizer', CountVectorizer()), ('nb', MultinomialNB()) ]) clf.fit(X_train, y_train) clf.score(X_test,y_test) clf.predict(emails)
Supervised/Classification/NaiveBayes/Spam_Ham_Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Implementing the Calculation # # Generate some fake data first # Most of the code below is just to generate some fake data to visualize with - everything up until the actual plotting of the data below should be replaced with your algorithms from before in your own implementation! # + import numpy as np def generate_data(): ''' Generates fake data to use for calculating lane curvature. In your own project, you'll ignore this function and instead feed in the output of your lane detection algorithm to the lane curvature calculation. ''' # Set random seed number so results are consistent for grader # Comment this out if you'd like to see results on different random data! np.random.seed(0) # Generate some fake data to represent lane-line pixels ploty = np.linspace(0, 719, num=720)# to cover same y-range as image quadratic_coeff = 3e-4 # arbitrary quadratic coefficient # For each y position generate random x position within +/-50 pix # of the line base position in each case (x=200 for left, and x=900 for right) leftx = np.array([200 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51) for y in ploty]) rightx = np.array([900 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51) for y in ploty]) leftx = leftx[::-1] # Reverse to match top-to-bottom in y rightx = rightx[::-1] # Reverse to match top-to-bottom in y # Fit a second order polynomial to pixel positions in each fake lane line left_fit = np.polyfit(ploty, leftx, 2) right_fit = np.polyfit(ploty, rightx, 2) return ploty, left_fit, right_fit def measure_curvature_pixels(): ''' Calculates the curvature of polynomial functions in pixels. ''' # Start by generating our fake example data # Make sure to feed in your real data instead in your project! 
ploty, left_fit, right_fit = generate_data() # Define y-value where we want radius of curvature # We'll choose the maximum y-value, corresponding to the bottom of the image y_eval = np.max(ploty) ##### TO-DO: Implement the calculation of R_curve (radius of curvature) ##### left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0]) right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0]) return left_curverad, right_curverad # Calculate the radius of curvature in pixels for both lane lines left_curverad, right_curverad = measure_curvature_pixels() print(left_curverad, right_curverad) # Should see values of 1625.06 and 1976.30 here, if using # the default `generate_data` function with given seed number # - # ### From Pixels to Real-World # Great! You've now calculated the radius of curvature for our lane lines. But now we need to stop and think... We've calculated the radius of curvature based on pixel values, so the radius we are reporting is in pixel space, which is not the same as real world space. So we actually need to repeat this calculation after converting our x and y values to real world space. # # This involves measuring how long and wide the section of lane is that we're projecting in our warped image. We could do this in detail by measuring out the physical lane in the field of view of the camera, but for this project, you can assume that if you're projecting a section of lane similar to the images above, the lane is about 30 meters long and 3.7 meters wide. Or, if you prefer to derive a conversion from pixel space to world space in your own images, compare your images with U.S. regulations that require a minimum lane width of 12 feet or 3.7 meters, and the dashed lane lines are 10 feet or 3 meters long each. 
# # Let's say that our camera image has 720 relevant pixels in the y-dimension (remember, our image is perspective-transformed!), and we'll say roughly 700 relevant pixels in the x-dimension (our example of fake generated data above used from 200 pixels on the left to 900 on the right, or 700). Therefore, to convert from pixels to real-world meter measurements, we can use: # # Define conversions in x and y from pixels space to meters # ym_per_pix = 30/720 # meters per pixel in y dimension # xm_per_pix = 3.7/700 # meters per pixel in x dimension # # In the below quiz, you'll use the above conversions in order to adjust your calculation from before to give real-world lane curvature values. Once again, you'll focus on the left_curverad and right_curverad values within the new measure_curvature_real() function; however, you'll also need to adjust how you use np.polyfit() within generate_data() in order for this to work correctly. How do you need to change these to convert to meters? # + import numpy as np def generate_data(ym_per_pix, xm_per_pix): ''' Generates fake data to use for calculating lane curvature. In your own project, you'll ignore this function and instead feed in the output of your lane detection algorithm to the lane curvature calculation. ''' # Set random seed number so results are consistent for grader # Comment this out if you'd like to see results on different random data! 
np.random.seed(0) # Generate some fake data to represent lane-line pixels ploty = np.linspace(0, 719, num=720)# to cover same y-range as image quadratic_coeff = 3e-4 # arbitrary quadratic coefficient # For each y position generate random x position within +/-50 pix # of the line base position in each case (x=200 for left, and x=900 for right) leftx = np.array([200 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51) for y in ploty]) rightx = np.array([900 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51) for y in ploty]) leftx = leftx[::-1] # Reverse to match top-to-bottom in y rightx = rightx[::-1] # Reverse to match top-to-bottom in y # Fit a second order polynomial to pixel positions in each fake lane line ##### TO-DO: Fit new polynomials to x,y in world space ##### ##### Utilize `ym_per_pix` & `xm_per_pix` here ##### left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2) right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2) return ploty, left_fit_cr, right_fit_cr def measure_curvature_real(): ''' Calculates the curvature of polynomial functions in meters. ''' # Define conversions in x and y from pixels space to meters ym_per_pix = 30/720 # meters per pixel in y dimension xm_per_pix = 3.7/700 # meters per pixel in x dimension # Start by generating our fake example data # Make sure to feed in your real data instead in your project! 
ploty, left_fit_cr, right_fit_cr = generate_data(ym_per_pix, xm_per_pix) # Define y-value where we want radius of curvature # We'll choose the maximum y-value, corresponding to the bottom of the image y_eval = np.max(ploty) ##### TO-DO: Implement the calculation of R_curve (radius of curvature) ##### left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0]) right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0]) return left_curverad, right_curverad # Calculate the radius of curvature in meters for both lane lines left_curverad, right_curverad = measure_curvature_real() print(left_curverad, 'm', right_curverad, 'm') # Should see values of 533.75 and 648.16 here, if using # the default `generate_data` function with given seed number # -
Advanced Computer Vision/3 Measuring Curvature/Measuring Curvature.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## GRID SOFTWARE v6.0
# - Some Useful Functions
# - Communication Module
# - Tracking Module
# - Navigation Algorithm
# - Main Function
#
# *STATUS* : Running
#
# *PROBLEMS TO SOLVE* :
# 1. Time delay between command and execution ; preventing overshoot
# 2. Extending the code for entire arena
# 3. Getting an estimate of angular velocity and turn proportional to the angle
# 4. Reducing the frequent turning of the bot
# 5. Using Cam Calibration to determine Marker Pose (Optional)
# 6. Implement Reverse ( difficult)
# 7. Check for input video feed speed
#
# ## Some useful Functions

import cv2
from cv2 import aruco
import numpy as np
import math
import time
import socket
import os
import glob
import shutil
from os.path import isfile, join
import datetime

# +
# Physical ArUco marker side length in metres (kept for later pose work;
# unused by the pixel-space navigation below).
size = 0.127
bot_ids = [22, 1, 9, 22]  # Give the aruco code of the bot (index = bot number)


def distCalc(r1, r2):
    """Return the Euclidean distance (pixels) between two (x, y) points."""
    x1, y1 = r1
    x2, y2 = r2
    return math.dist((x1, y1), (x2, y2))


def centreCalc(r1, r2):
    """Return the integer midpoint of two (x, y) points.

    Used with diagonally opposite marker corners to get a marker centre,
    or with adjacent corners to get an edge midpoint.
    """
    return (int((r1[0] + r2[0]) / 2), int((r1[1] + r2[1]) / 2))


def angleCalc(r1, r2):
    """Return the signed angle (radians) rotating vector r1 onto vector r2.

    Positive is counter-clockwise (positive cross product), negative is
    clockwise.  Raises ZeroDivisionError if either vector has zero length.
    """
    x1, y1 = r1
    x2, y2 = r2
    inner_prod = x1 * x2 + y1 * y2
    len1 = math.hypot(x1, y1)
    len2 = math.hypot(x2, y2)
    angle = math.acos(inner_prod / (len1 * len2))
    # the sign of the 2D cross product fixes the rotation direction
    if (x1 * y2 - x2 * y1) < 0:
        angle = -angle
    return angle
# -

# ## Establishing Server Connection

# +
hostname = socket.gethostname()
my_ip = socket.gethostbyname(hostname)
print(my_ip)  # Give this ID in the ESP32 sketch


def startServer(port):
    """Open a TCP server on `port` and block until one client (bot) connects.

    Sets the module globals `serversocket`, `client` and `addr` used by
    sendCommand()/closeServer().
    """
    global serversocket
    serversocket = socket.socket()
    host = '0.0.0.0'
    try:
        serversocket.bind((host, port))
    except socket.error as e:
        print(str(e))
    print("Waiting for connection")
    serversocket.listen(5)
    global client, addr
    client, addr = serversocket.accept()
    print("Connection made with " + str(client))


def closeServer():
    """Close the accepted client connection and the listening socket."""
    client.close()
    serversocket.close()


def sendCommand(command, t=0):
    """Send a one-character drive command to the connected bot.

    t: optional delay in seconds after sending, to let the bot execute.
    NOTE(review): sendto() with an explicit address on an already-connected
    TCP socket is unusual; plain client.send() is the conventional call --
    confirm behaviour on the target platform before changing.
    """
    client.sendto(command.encode('UTF-8'), addr)
    time.sleep(t)
# -

# ## Bot Tracker

def arucoDetector(img):
    """Detect ArUco markers in a BGR frame.

    Returns (annotated image, corner boxes, marker ids); `ids` is None when
    no markers are detected.
    """
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # TODO(review): '<KEY>' is an anonymised placeholder -- restore the real
    # aruco dictionary attribute name (e.g. 'DICT_4X4_50') matching the
    # printed markers before running.
    key = getattr(aruco, f'<KEY>')
    arucoDict = aruco.Dictionary_get(key)
    arucoParam = aruco.DetectorParameters_create()
    bbox, ids, rejected = aruco.detectMarkers(imgGray, arucoDict, parameters=arucoParam)
    img = aruco.drawDetectedMarkers(img, bbox)
    return img, bbox, ids

# ## Navigation Algorithm

def arucoOrient(img, bbox, ids, bot, dest=0):
    """Compute heading error and distance from the bot marker to `dest`.

    bbox/ids come from arucoDetector(); `bot` indexes bot_ids; `dest` is the
    destination marker id.  Returns (annotated image, bot centre, dest
    centre, pixel distance, signed heading angle).  Raises ValueError when
    either marker is absent -- callers are expected to check membership in
    `ids` first (the main loop does).
    """
    bot_flag = 0
    dest_flag = 0
    for i in range(len(ids)):
        if ids[i] == dest:
            # marker centre = midpoint of diagonally opposite corners 0 and 2
            dest_coords = centreCalc(bbox[i][0][0], bbox[i][0][2])
            dest_flag = 1
        if ids[i] == bot_ids[bot]:
            bot_coords = centreCalc(bbox[i][0][0], bbox[i][0][2])
            bot_front = centreCalc(bbox[i][0][0], bbox[i][0][1])  # front edge midpoint
            bot_rear = centreCalc(bbox[i][0][2], bbox[i][0][3])   # rear edge midpoint
            bot_flag = 1
    # FIX: a missing marker previously fell through to an UnboundLocalError
    # on the drawing calls below; fail fast with a clear message instead.
    if not (bot_flag and dest_flag):
        raise ValueError("bot and/or destination marker not found in ids")
    img = cv2.arrowedLine(img, bot_rear, bot_front, (0, 0, 255), 2)
    img = cv2.arrowedLine(img, bot_coords, dest_coords, (0, 255, 0), 2)
    angle = angleCalc(np.subtract(np.array(bot_front), np.array(bot_rear)),
                      np.subtract(np.array(dest_coords), np.array(bot_coords)))
    distance = distCalc(bot_coords, dest_coords)
    return img, bot_coords, dest_coords, distance, angle


def commandfunc(distance, angle, r_thr=50, theta_thr=0.3):
    """Map (distance, angle) error to a one-character drive command.

    '5'/'7' rotate (opposite directions) while |angle| exceeds theta_thr;
    '1' drives forward; '0' stops.  flag=1 signals the waypoint has been
    reached (distance within r_thr pixels).
    """
    command = '0'
    flag = 0
    if abs(angle) > theta_thr:
        # heading error dominates: rotate towards the target first
        if angle > theta_thr:
            command = '5'
        else:
            command = '7'
    elif distance < r_thr:
        command = '0'
        flag = 1
    else:
        command = '1'
    return command, flag

# ## Save the video file

def convert_frames_to_video(pathIn, pathOut, fps):
    """Stitch the saved frame PNGs in pathIn into a video at pathOut.

    Frames are named frames.<k>.png and sorted numerically by k.  The
    ./data directory is deleted after the video is written.
    """
    frame_array = []
    files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]
    # for sorting the file names properly (frames.<k>.png -> sort by k)
    files = sorted(files, key=lambda x: int(x.split('.')[1]))
    # FIX: renamed from `size`, which shadowed the module-level marker size
    frame_size = None
    for i in range(len(files)):
        filename = pathIn + files[i]
        # reading each files
        img = cv2.imread(filename)
        height, width, layers = img.shape
        frame_size = (width, height)
        #print(filename)
        # inserting the frames into an image array
        frame_array.append(img)
    # FIX: an empty frame directory previously raised NameError on `size`
    if frame_size is None:
        return
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'DIVX'), fps, frame_size)
    for i in range(len(frame_array)):
        # writing to a image array
        out.write(frame_array[i])
    out.release()
    shutil.rmtree('./data')

# ## Sample Command

def sampleCommand():
    """Manual smoke test: stop, forward, stop."""
    sendCommand('0', 0)
    sendCommand('1', 0)
    sendCommand('0', 0)


'''
startServer()
sampleCommand()
closeServer()
'''

#startServer(8093)
#for i in range(0,6):
#    sendCommand('10')

# ## Main Function

# +
if __name__ == '__main__':
    p = 60  # preview-window display scale, percent
    cap = cv2.VideoCapture(1)
    # NOTE(review): VideoCapture() never returns None -- cap.isOpened() is
    # the meaningful check; this print is only a heartbeat.
    if cap is not None:
        print("GoingGood")
    dest = 0
    command = '0'
    angle = 0
    flag = 0   # 1 => current waypoint reached
    k = 0      # saved-frame counter
    i = 0      # index into the current bot's waypoint path (4 entries each)
    bot = 0    # index into bot_ids / path
    no_bots = 2
    # one waypoint sequence (marker ids) per bot
    path = [[23, 24, 23, 25], [24, 23, 25, 23], [7, 8, 7, 6], [11, 12, 11, 10]]
    flip = [25, 4, 8, 12]  # waypoints at which the bot must flip
    # FIX: `port` was incremented below without ever being assigned, which
    # raised NameError when switching to the next bot; track it explicitly.
    port = 8093
    startServer(port)
    try:
        os.mkdir('./data')
    except:
        # directory left over from a previous run: clear and recreate it
        shutil.rmtree('./data')
        os.mkdir('./data')
    try:
        os.mkdir('./TestingVideos')
    except:
        pass
    start = time.time()
    while True:
        success, frame = cap.read()
        if frame is not None:
            frame, bbox, ids = arucoDetector(frame)
            #print(ids)
            if ids is not None:
                if bot_ids[bot] in ids and path[bot][i] in ids:
                    if flag == 0:
                        frame, bot_coords, dest_coords, distance, angle = arucoOrient(frame, bbox, ids, bot, path[bot][i])
                        command, flag = commandfunc(distance, angle)
                        # FIX: sendCommand()'s second argument is a sleep time
                        # in seconds; the bot index was being passed there by
                        # mistake (a 1 s stall per frame for bot 1).
                        sendCommand(command)
                        frame = cv2.putText(frame, "Command : " + str(command), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        frame = cv2.putText(frame, "Distance : " + str(distance), (50, 75), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        frame = cv2.putText(frame, "Angle : " + str(angle), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    if flag == 1:
                        # waypoint reached: advance along the path, or hand
                        # over to the next bot on a fresh port
                        flag = 0
                        frame = cv2.putText(frame, "Next to Go " + str(path[bot][i]), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        if path[bot][i] in flip:
                            print("Flip Now")
                            frame = cv2.putText(frame, "Flipping Happening " + str(path[bot][i]), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        if i < 3:
                            i += 1
                        elif bot < no_bots:
                            bot += 1
                            i = 0
                            port += 1
                            closeServer()
                            startServer(port)
                            print("Closing and Startin the port number")
                            print(port)
                        else:
                            break
            else:
                frame = cv2.putText(frame, "No Aruco Detected", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.namedWindow("Live Tracking", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Live Tracking", int(frame.shape[1] * p / 100), int(frame.shape[0] * p / 100))
            cv2.imshow("Live Tracking", frame)
            cv2.imwrite('./data/frames.' + str(k) + '.png', frame)
            k += 1
        else:
            print("Error in reading frame")
        q = cv2.waitKey(1)
        if q == ord("q"):
            break
    end = time.time()
    laptime = end - start
    print(laptime)
    frame = cv2.putText(frame, "LAPTIME :" + str(laptime), (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.imwrite('./data/frames.' + str(k) + '.png', frame)
    k += 1
    cv2.destroyAllWindows()
    closeServer()
    # timestamped output name: <date>-<hh>-<mm>-<ss>.mp4
    ct = datetime.datetime.today()
    ct1 = str(ct).split()
    ct = ct1[1].split('.')[0].split(':')
    convert_frames_to_video('./data/', './TestingVideos/' + str(ct1[0]) + '-' + str(ct[0]) + '-' + str(ct[1]) + '-' + str(ct[2]) + '.mp4', 30.0)
# -

startServer(8093)
sendCommand('9')
closeServer()
centralnav.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # 2.18 Programming for Geoscientists class test - example paper based on 2012 2.18 exam # #Test instructions # # * This test contains **5** questions each of which should be answered. # * Write your program in a Python cell just under each question. # * You can write an explanation of your solution as comments in your code. # * In each case your solution program must fulfil all of the instructions - please check the instructions carefully and double check that your program fulfils all of the given instructions. # * Save your work regularly. # * At the end of the test you should email your IPython notebook document (i.e. this document) to [<NAME>](http://www.imperial.ac.uk/people/g.gorman) at <EMAIL> # **1.** Ignore this question from 2012 as it deals with material we have not discussed this year. # **2.** Write a program, that creates a *list* $t$ with 6 values, 0.1, 0.2, ..., 0.6. Compute a corresponding *list* $y$ of $y(t)$ values using the formula: # $$y(t) = v_0t − gt^2,$$ # where $v_0=6.0$ and $g=9.8$. # # * Store these two lists, t and y, in a new list t1. # * Write out a table with a column of t and a column of y values by traversing the data in the nested t1 list. # * You may use list or NumPy array for t and y. # * Print out a table header with the column names ’t’ and ’y’. # * For printing the table, iterate the nested list t1, do not access the previously computed t and y lists directly. # * Print out the table t1 using format specifiers so that the columns are aligned. 
# + t = [0.1*i for i in range(1, 7)] v0 = 6.0 g = 9.8 y = [v0*ti-g*ti**2 for ti in t] t1 = [] for ti, yi in zip(t, y): t1.append([ti, yi]) print "%8s%8s"%("t", "y") for pair in t1: print "%8g%8g"%(pair[0], pair[1]) # - # **3.** The factorial of n, written as n!, is defined as: # $$n! = n(n − 1)(n − 2)...2\cdot1,$$ # with the special cases # $$1! = 1, 0! = 1.$$ # For example, $4! = 4\cdot3\cdot2\cdot1 = 24$, and $2! = 2\cdot1 = 2$. # # * Write a function fact(n) that returns $n!$. You **must** not use the *fact* function from the math module. # * Return 1 immediately if $x$ is 1 or 0, otherwise use a loop to compute $n!$. # * The function must be called *fact* and take a single argument called n. # * The software should check that the supplied value is a non-negative integer. If it is not, raise a ValueError exception. # + def fact(n): if type(n) != int: raise ValueError("value must be of type int") if n < 0: raise ValueError("value must be positive") if n<2: return 1 product = 1 for i in range(1, n+1): product *= i return product # Let's show that it works for i in range(5): print "%d! = %d"%(i, fact(i)) # - # **4.** A table of temperatures and densities, in units of degrees ($C$) and $kg/m^3$, are given in the file *data/density\_air.dat* # Write a program that reads in the data from file into a list for temperature (first column) and density (second column) and plots the variation of density against temperature. # # * The input file contains blank lines and lines starting with a ’#’, which you must ignore when reading in the data. # * You may use list or NumPy array for temperature and density. # * Plot the variation of density against temperature. # * Label the x axis "Temperature (Celsius)" and the y axis "Density ($kg/m^3$)". # * Use the plot title "Density of air at different temperatures, at 1 atm pressure". # * Display a legend with the label ’Air’. 
# + # %pylab inline from pylab import * file = open("data/density_air.dat") tlist = [] dlist = [] for line in file: words = line.split() if len(words) != 2: continue try: t = float(words[0]) d = float(words[1]) except: continue tlist.append(t) dlist.append(d) tarray = array(tlist) darray = array(dlist) plot(tarray, darray) xlabel("Temperature (Celsius)") ylabel("Density ($kg/m^3$)") title("Density of air at different temperatures, at 1 atm pressure") legend(("Air", )) # - # **5.** Based on the data in the file *data/constants.txt*, make a dictionary where the keys are the names of the physical constant and the values are a tuple containing the numerical value and the units. # # * Use a Python dictionary to store the data. # * All numerical values should be of type float. # * Print out the dictionary without any formatting. # + file = open("data/constants.txt", "r") constants = {} for line in file: try: name = line[0:27].strip() words = line[27:].split() value = float(words[0]) units = words[1] except: continue constants[name] = (value, units) print constants # -
notebook/python_class_test_2012-solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Practical Optimisations for Pandas 🐼 # ## <NAME> # + [markdown] slideshow={"slide_type": "notes"} # # About Me 🙈 # # + [markdown] slideshow={"slide_type": "notes"} # - Software Engineer at Salesforce 👷 # + [markdown] slideshow={"slide_type": "notes"} # - Big passion for python, data and performance optimisations 🐍🤖 # + [markdown] slideshow={"slide_type": "notes"} # - Online at [medium](https://medium.com/@Eyaltra) | [twitter](https://twitter.com/eyaltra) 🌐 # + [markdown] slideshow={"slide_type": "slide"} # # Optimizing Your Pandas is not Rocket Science 🚀 # + [markdown] slideshow={"slide_type": "slide"} # # Optimization ?! Why ?🤨 # + [markdown] slideshow={"slide_type": "fragment"} # - Fast is better than slow 🐇 # # + [markdown] slideshow={"slide_type": "notes"} # - latency response time 200 milliseconds client roundtrip # - throughput successful traffic flow of 200 requests per seconds # # + [markdown] slideshow={"slide_type": "fragment"} # - Memory efficiency is good 💾 # # + [markdown] slideshow={"slide_type": "fragment"} # - Saving money is awesome [💸](https://aws.amazon.com/ec2/pricing/on-demand/) # # + [markdown] slideshow={"slide_type": "fragment"} # - Hardware will only take you so far 💻 # + [markdown] slideshow={"slide_type": "notes"} # - Ok now that i have got you attention, the next question i want to tackle is when should we optimize our code # + [markdown] slideshow={"slide_type": "slide"} # # Before We Optimize ⏰ # + [markdown] slideshow={"slide_type": "fragment"} # - It's actually needed 🚔 # + [markdown] slideshow={"slide_type": "notes"} # # #### remember optimized code is: # - harder to write and read # - less maintainable # - buggier, more brittle # # #### Optimize when # - gather 
requirements, there are some parts you won't be able to touch # - establish percentile SLAs: 50, 95, 99 max # + [markdown] slideshow={"slide_type": "fragment"} # - Our code is well tested 💯 # + [markdown] slideshow={"slide_type": "fragment"} # - [Focus on the bottlenecks](https://www.youtube.com/watch?v=9wfFXRCkkLE) 🍾 # + [markdown] slideshow={"slide_type": "notes"} # - I have a 45 minute talk on how to properly profile code, in this talk i give u a glimp # + [markdown] slideshow={"slide_type": "slide"} # # Profiling 📍 # + [markdown] slideshow={"slide_type": "fragment"} # - **timeit** - Benchmark multiple runs of the code snippet and measure CPU ⌛ # + [markdown] slideshow={"slide_type": "fragment"} # - **memit** - Measures process Memory 💾 # + [markdown] slideshow={"slide_type": "slide"} # # Dataset 📉 # + slideshow={"slide_type": "skip"} # ! pip install numba numexpr # + slideshow={"slide_type": "skip"} import math import time import warnings from dateutil.parser import parse import janitor import numpy as np import pandas as pd from numba import jit from sklearn import datasets from pandas.api.types import is_datetime64_any_dtype as is_datetime # + slideshow={"slide_type": "skip"} warnings.filterwarnings("ignore", category=pd.errors.DtypeWarning) pd.options.display.max_columns = 999 # + slideshow={"slide_type": "skip"} path = 'https://raw.githubusercontent.com/FBosler/you-datascientist/master/invoices.csv' def load_dataset(naivly=False): df = (pd.concat([pd.read_csv(path) .clean_names() .remove_columns(["meal_id", "company_id"]) for i in range(20)]) .assign(meal_tip=lambda x: x.meal_price.map(lambda x: x * 0.2)) .astype({"meal_price": int}) .rename(columns={"meal_price": "meal_price_with_tip"})) if naivly: for col in df.columns: df[col] = df[col].astype(object) return df # + slideshow={"slide_type": "fragment"} df = load_dataset() df.head() # + [markdown] slideshow={"slide_type": "slide"} # # How 👀 # + [markdown] slideshow={"slide_type": "slide"} # # Use What You 
Need 🧑
# + [markdown] slideshow={"slide_type": "fragment"}
# - Keep needed columns only
# + [markdown] slideshow={"slide_type": "fragment"}
# - Keep needed rows only
# + [markdown] slideshow={"slide_type": "slide"}
# # Dont Reinvent the Wheel 🎡
# + [markdown] slideshow={"slide_type": "fragment"}
# - Vast ecosystem
# + [markdown] slideshow={"slide_type": "fragment"}
# - Use existing solutions
# + [markdown] slideshow={"slide_type": "fragment"}
# - Fewer bugs
# + [markdown] slideshow={"slide_type": "fragment"}
# - Highly optimized
# + [markdown] slideshow={"slide_type": "slide"}
# # Avoid Loops ♾
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Bad Option 😈
# -
#

# + slideshow={"slide_type": "skip"}
import warnings
warnings.filterwarnings("ignore")

# + slideshow={"slide_type": "fragment"}
def iterrows_original_meal_price(df):
    # Deliberately-slow demo: a Python-level loop over rows.
    # NOTE(review): df.loc[i][...] = ... is chained assignment, which may
    # write to a temporary copy rather than df itself -- kept as-is because
    # this cell is the intentional "bad" example for the slide.
    for i, row in df.iterrows():
        df.loc[i]["original_meal_price"] = row["meal_price_with_tip"] - row["meal_tip"]
    return df

# + slideshow={"slide_type": "fragment"}
# %%timeit -r 1 -n 1
iterrows_original_meal_price(df)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Better Option 🤵
# -
#

# + slideshow={"slide_type": "fragment"}
def apply_original_meal_price(df):
    # Row-wise apply: still a Python-level call per row, but avoids iterrows.
    df["original_meal_price"] = df.apply(lambda x: x['meal_price_with_tip'] - x['meal_tip'], axis=1)
    return df

# + slideshow={"slide_type": "fragment"}
# %%timeit
apply_original_meal_price(df)
# -
#

# + [markdown] slideshow={"slide_type": "fragment"}
# ### 150x Improvement In Execution Time ⌛
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Best Option 👼
# -
#

# + slideshow={"slide_type": "fragment"}
def vectorized_original_meal_price(df):
    # Fully vectorised column arithmetic: the whole subtraction runs in C.
    df["original_meal_price"] = df["meal_price_with_tip"] - df["meal_tip"]
    return df

# + slideshow={"slide_type": "fragment"}
# %%timeit
vectorized_original_meal_price(df)
# -
#

# + [markdown] slideshow={"slide_type": "fragment"}
# ### Another 2000x Improvement In Execution Time ⌛
# + [markdown] slideshow={"slide_type": "subslide"}
# - 
**pandas vectorized functions**: +, -, .str.lower(), .str.strip(), .dt.second and more # + [markdown] slideshow={"slide_type": "fragment"} # - **numpy vectorized functions**: np.log, np.log, np.divide, np.subtract, np.where, and more # + [markdown] slideshow={"slide_type": "fragment"} # - **scipy vectorized functions**: scipy.special.gamma, scipy.special.beta and more # + [markdown] slideshow={"slide_type": "fragment"} # - **np.vectorize** # + [markdown] slideshow={"slide_type": "slide"} # # Picking the Right Type 🌈 # + [markdown] slideshow={"slide_type": "-"} # # + [markdown] slideshow={"slide_type": "fragment"} # ## Motivation 🏆 # + [markdown] slideshow={"slide_type": "-"} # # + slideshow={"slide_type": "fragment"} ones = np.ones(shape=5000) ones # + slideshow={"slide_type": "fragment"} types = ['object', 'complex128', 'float64', 'int64', 'int32', 'int16', 'int8', 'bool'] df = pd.DataFrame(dict([(t, ones.astype(t)) for t in types])) df.memory_usage(index=False, deep=True) # + [markdown] slideshow={"slide_type": "subslide"} # ## Supported Types 🌈 # + [markdown] slideshow={"slide_type": "fragment"} # - int64 / float64 # # + [markdown] slideshow={"slide_type": "fragment"} # - bool # + [markdown] slideshow={"slide_type": "fragment"} # - objects # # + [markdown] slideshow={"slide_type": "fragment"} # - datetime64 / timedelta # # + [markdown] slideshow={"slide_type": "fragment"} # - [Category](https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html) # + [markdown] slideshow={"slide_type": "fragment"} # - [Sparse Types](https://pandas.pydata.org/docs/user_guide/sparse.html) # # + [markdown] slideshow={"slide_type": "fragment"} # - [Nullable Integer](https://pandas.pydata.org/docs/user_guide/integer_na.html)/[Nullable Bolean](https://pandas.pydata.org/docs/user_guide/boolean.html) # # + [markdown] slideshow={"slide_type": "fragment"} # - [Your Own Types](https://www.youtube.com/watch?v=xx7H5EkzQH0) # + [markdown] slideshow={"slide_type": "notes"} # - 
Open Sourced Types like [cyberpandas](https://github.com/ContinuumIO/cyberpandas) and [geopandas](https://github.com/geopandas/geopandas) # + [markdown] slideshow={"slide_type": "subslide"} # ## Where We Stand 🌈 # + slideshow={"slide_type": "skip"} df = load_dataset(naivly=True) # + [markdown] slideshow={"slide_type": "-"} # # + slideshow={"slide_type": "fragment"} df.memory_usage(deep=True).sum() # + slideshow={"slide_type": "fragment"} df.memory_usage(deep=True) # + slideshow={"slide_type": "skip"} df.dtypes # + [markdown] slideshow={"slide_type": "subslide"} # ## Optimized Types 🌈 # + [markdown] slideshow={"slide_type": "-"} # # + slideshow={"slide_type": "fragment"} optimized_df = df.astype({'order_id': 'category', 'date': 'category', 'date_of_meal': 'category', 'participants': 'category', 'meal_price_with_tip': 'int16', 'type_of_meal': 'category', 'heroes_adjustment': 'bool', 'meal_tip': 'float32'}) # + [markdown] slideshow={"slide_type": "-"} # # + slideshow={"slide_type": "fragment"} optimized_df.memory_usage(deep=True).sum() # - # # + [markdown] slideshow={"slide_type": "fragment"} # ### 13x Improvement In Memory ⌛ # + [markdown] slideshow={"slide_type": "subslide"} # ## Optimized Types 🌈 # # + [markdown] slideshow={"slide_type": "fragment"} # - Improved operation performance 🧮 # + slideshow={"slide_type": "fragment"} # %%timeit df["meal_price_with_tip"].astype(object).mean() # + slideshow={"slide_type": "fragment"} # %%timeit df["meal_price_with_tip"].astype(float).mean() # - # # + [markdown] slideshow={"slide_type": "fragment"} # ### 2.5x Performance Improvement⌛ # + [markdown] slideshow={"slide_type": "slide"} # # Recommended Installation 👨‍🏫 # + [markdown] slideshow={"slide_type": "fragment"} # - [numexpr](https://pypi.org/project/numexpr/) - Fast numerical expression evaluator for NumPy # + [markdown] slideshow={"slide_type": "fragment"} # - [bottleneck](https://github.com/pydata/bottleneck) - uses specialized nan aware Cython routines to achieve large 
speedups. # + [markdown] slideshow={"slide_type": "fragment"} # - Better for medium to big datasets # + [markdown] slideshow={"slide_type": "slide"} # # Compiled Code 🤯 # + [markdown] slideshow={"slide_type": "fragment"} # - Python dynamic nature # + [markdown] slideshow={"slide_type": "fragment"} # - No compilation optimization # + [markdown] slideshow={"slide_type": "fragment"} # - Pure Python can be slow # + [markdown] slideshow={"slide_type": "-"} # # + slideshow={"slide_type": "fragment"} def foo(N): accumulator = 0 for i in range(N): accumulator = accumulator + i return accumulator # + slideshow={"slide_type": "fragment"} # %%timeit df.meal_price_with_tip.map(foo) # + [markdown] slideshow={"slide_type": "subslide"} # ## Cython and Numba for the rescue 👨‍🚒 # + [markdown] slideshow={"slide_type": "subslide"} # ## Cython 🤯 # + [markdown] slideshow={"slide_type": "fragment"} # - Up to 100x speedup from pure python 👍 # # + [markdown] slideshow={"slide_type": "fragment"} # - Learning Curve 👎 # + [markdown] slideshow={"slide_type": "fragment"} # - Separated Compilation Step 👎 👍 # # + slideshow={"slide_type": "skip"} # %load_ext Cython # + [markdown] slideshow={"slide_type": "subslide"} # ## Example # + slideshow={"slide_type": "fragment"} language="cython" # def cython_foo(long N): # cdef long accumulator # accumulator = 0 # # cdef long i # for i in range(N): # accumulator += i # # return accumulator # + slideshow={"slide_type": "fragment"} # %%timeit df.meal_price_with_tip.map(cython_foo) # - # # + [markdown] slideshow={"slide_type": "fragment"} # ### 100x Performance Improvement⌛ # + [markdown] slideshow={"slide_type": "subslide"} # ## Numba 🤯 # + [markdown] slideshow={"slide_type": "fragment"} # - Up to 200x speedup from pure python 👍 # # + [markdown] slideshow={"slide_type": "fragment"} # - Easy 👍 # # + [markdown] slideshow={"slide_type": "notes"} # using numba is really easy its simply adding a decorator to a method # + [markdown] slideshow={"slide_type": 
"fragment"} # - Highly Configurable - fastmath, parallel, nogil 👍 # + [markdown] slideshow={"slide_type": "fragment"} # - Mostly Numeric 👎 # + [markdown] slideshow={"slide_type": "subslide"} # ## Example # + slideshow={"slide_type": "fragment"} @jit(nopython=True) def numba_foo(N): accumulator = 0 for i in range(N): accumulator = accumulator + i return accumulator # + slideshow={"slide_type": "fragment"} # %%timeit df.meal_price_with_tip.map(numba_foo) # - # # + [markdown] slideshow={"slide_type": "fragment"} # ### 65x Performance Improvement⌛ # + [markdown] slideshow={"slide_type": "subslide"} # ### 1️⃣ Vectorized methods # + [markdown] slideshow={"slide_type": "fragment"} # ### 2️⃣ Numba # + [markdown] slideshow={"slide_type": "fragment"} # ### 3️⃣ Cython # + [markdown] slideshow={"slide_type": "slide"} # # General Python Optimizations 🐍 # + [markdown] slideshow={"slide_type": "subslide"} # ## Caching 🏎 # # + [markdown] slideshow={"slide_type": "fragment"} # - Avoid unnecessary work/computation. 
# + [markdown] slideshow={"slide_type": "fragment"} # - Faster code # + [markdown] slideshow={"slide_type": "fragment"} # - functools.lru_cache # + [markdown] slideshow={"slide_type": "subslide"} # ## Intermediate Variables👩‍👩‍👧‍👧 # + [markdown] slideshow={"slide_type": "fragment"} # - Intermediate calculations # + [markdown] slideshow={"slide_type": "fragment"} # - Memory foot print of both objects # + [markdown] slideshow={"slide_type": "fragment"} # - Smarter variables allocation # + slideshow={"slide_type": "skip"} def another_foo(data): return data * 2 def foo(data): return data + 10 # + [markdown] slideshow={"slide_type": "subslide"} # ## Example # + slideshow={"slide_type": "skip"} # %reload_ext memory_profiler # + slideshow={"slide_type": "fragment"} def load_data(): return np.ones((2 ** 30), dtype=np.uint8) # + slideshow={"slide_type": "fragment"} # %%memit def proccess(): data = load_data() data2 = foo(data) data3 = another_foo(data2) return data3 proccess() # + slideshow={"slide_type": "fragment"} # %%memit def proccess(): data = load_data() data = foo(data) data = another_foo(data) return data proccess() # + [markdown] slideshow={"slide_type": "subslide"} # ## Concurrency And Parallelism 🎸🎺🎻🎷 # + [markdown] slideshow={"slide_type": "fragment"} # - pandas methods use a single process # + [markdown] slideshow={"slide_type": "fragment"} # - CPU-bound can benefit parallelism # + [markdown] slideshow={"slide_type": "fragment"} # - IO-bound can benefit either parallelism or concurrency # + [markdown] slideshow={"slide_type": "slide"} # # How 👀 # + [markdown] slideshow={"slide_type": "fragment"} # - Use What You Need 💾⌛ # + [markdown] slideshow={"slide_type": "fragment"} # - Dont Reinvent the Wheel ⌛💾 # + [markdown] slideshow={"slide_type": "fragment"} # - Avoid Loops ⌛ # + [markdown] slideshow={"slide_type": "fragment"} # - Picking the Right Types 💾⌛ # + [markdown] slideshow={"slide_type": "fragment"} # - Recommended Installation ⌛💾 # + [markdown] 
slideshow={"slide_type": "fragment"} # - Compiled Code ⌛ # + [markdown] slideshow={"slide_type": "fragment"} # - General Python Optimizations ⌛💾 # + [markdown] slideshow={"slide_type": "slide"} # # Optimizing Your Pandas is not Rocket Science 🚀 # + [markdown] slideshow={"slide_type": "slide"} # ![](https://i.pinimg.com/originals/b9/0a/79/b90a79b4c361d079144597d0bcdd61de.jpg)
Lectures/practical_optimisations_for_pandas/PyconIL2021-Optimizing Pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="jN6kc1s1D_O7" outputId="621805a6-9bd0-4255-9577-5d7defc0e497" from google.colab import drive drive.mount('/content/drive', force_remount = True) # %tensorflow_version 2.x # + colab={"base_uri": "https://localhost:8080/"} id="RafgGAGyD_O-" outputId="dcf013ec-a026-4b2f-fff1-8b1916419606" # !pip install tiffile # !pip install vollseg # + id="CuRh3s3OD_O_" import numpy as np from tifffile import imread from pathlib import Path from glob import glob from tqdm import tqdm from skimage.measure import label import matplotlib.pyplot as plt from stardist import fill_label_holes from skimage.metrics import structural_similarity as ssim from skimage.metrics import normalized_root_mse as mse import numpy as np from skimage.measure import regionprops import csv from numba import jit from tqdm import tqdm from scipy.optimize import linear_sum_assignment from collections import namedtuple from csbdeep.utils import _raise import seaborn as sns # + id="43_D29CoD_O_" def MakeBinary(labelimage): Binary = labelimage > 0 return Binary # + colab={"base_uri": "https://localhost:8080/"} id="M_xnhq6nD_PA" outputId="40ae3383-fffb-4f78-fbdb-3d3d39dc0605" GT = sorted(glob('/content/drive/My Drive/Jakub_Mari_Varun_Analysis/Stardist3D/training/RealMask/*.tif')) NoAugSeg = sorted(glob('/content/drive/My Drive/Jakub_Mari_Varun_Analysis/Stardist3D/training/Raw/UnAugmentedResults/SmartSeedsMask/*.tif')) AugSeg = sorted(glob('/content/drive/My Drive/Jakub_Mari_Varun_Analysis/Stardist3D/training/Raw/UnAugmentedResults/SmartSeedsMask/*.tif')) Results = '/content/drive/My Drive/Jakub_Mari_Varun_Analysis/Stardist3D/Metrics/' Path(Results).mkdir(exist_ok=True) GT = list(map(imread,GT)) NoAugSeg = list(map(imread,NoAugSeg)) AugSeg = 
list(map(imread,AugSeg)) GT = [y for y in tqdm(GT)] NoAugSeg = [y for y in tqdm(NoAugSeg)] AugSeg = [y for y in tqdm(AugSeg)] print(np.asarray(GT).shape) # + id="DlOuoKe1D_PA" # + id="7uFLqdLwD_PA" NoAugSegmse = [] AugSegmse = [] NoAugcsvname = Results + "/" + "NoAugSegmse" NoAugSegwriter = csv.writer(open(NoAugcsvname + ".csv", "a")) NoAugSegwriter.writerow(["ssim"]) AugSegcsvname = Results + "/" + "AugSegmse" AugSegwriter = csv.writer(open(AugSegcsvname + ".csv", "a")) AugSegwriter.writerow(["ssim"]) for i in range(len(AugSeg)): NoAugSegsimilarity = mse(MakeBinary(NoAugSeg[i]),MakeBinary(GT[i])) NoAugSegmse.append(NoAugSegsimilarity) AugSegsimilarity = mse(MakeBinary(AugSeg[i]),MakeBinary(GT[i])) AugSegmse.append(AugSegsimilarity) NoAugSegwriter.writerow([NoAugSegsimilarity]) AugSegwriter.writerow([AugSegsimilarity]) # + id="IAgWZxzVD_PB" matching_criteria = dict() def label_are_sequential(y): """ returns true if y has only sequential labels from 1... """ labels = np.unique(y) return (set(labels)-{0}) == set(range(1,1+labels.max())) def is_array_of_integers(y): return isinstance(y,np.ndarray) and np.issubdtype(y.dtype, np.integer) def _check_label_array(y, name=None, check_sequential=False): err = ValueError("{label} must be an array of {integers}.".format( label = 'labels' if name is None else name, integers = ('sequential ' if check_sequential else '') + 'non-negative integers', )) is_array_of_integers(y) or _raise(err) if check_sequential: label_are_sequential(y) or _raise(err) else: y.min() >= 0 or _raise(err) return True def label_overlap(x, y, check=True): if check: _check_label_array(x,'x',True) _check_label_array(y,'y',True) x.shape == y.shape or _raise(ValueError("x and y must have the same shape")) return _label_overlap(x, y) @jit(nopython=True) def _label_overlap(x, y): x = x.ravel() y = y.ravel() overlap = np.zeros((1+x.max(),1+y.max()), dtype=np.uint) for i in range(len(x)): overlap[x[i],y[i]] += 1 return overlap def get_ssim(x,y): similarity = 
mse(x,y) return similarity def intersection_over_union(overlap): _check_label_array(overlap,'overlap') if np.sum(overlap) == 0: return overlap n_pixels_pred = np.sum(overlap, axis=0, keepdims=True) n_pixels_true = np.sum(overlap, axis=1, keepdims=True) return overlap / (n_pixels_pred + n_pixels_true - overlap) matching_criteria['iou'] = intersection_over_union def intersection_over_true(overlap): _check_label_array(overlap,'overlap') if np.sum(overlap) == 0: return overlap n_pixels_true = np.sum(overlap, axis=1, keepdims=True) return overlap / n_pixels_true matching_criteria['iot'] = intersection_over_true def intersection_over_pred(overlap): _check_label_array(overlap,'overlap') if np.sum(overlap) == 0: return overlap n_pixels_pred = np.sum(overlap, axis=0, keepdims=True) return overlap / n_pixels_pred matching_criteria['iop'] = intersection_over_pred def precision(tp,fp,fn): return tp/(tp+fp) if tp > 0 else 0 def recall(tp,fp,fn): return tp/(tp+fn) if tp > 0 else 0 def accuracy(tp,fp,fn): # also known as "average precision" (?) # -> https://www.kaggle.com/c/data-science-bowl-2018#evaluation return tp/(tp+fp+fn) if tp > 0 else 0 def f1(tp,fp,fn): # also known as "dice coefficient" return (2*tp)/(2*tp+fp+fn) if tp > 0 else 0 def _safe_divide(x,y): return x/y if y>0 else 0.0 def matching(y_true, y_pred, thresh=0.5, criterion='iot', report_matches=False): """Calculate detection/instance segmentation metrics between ground truth and predicted label images. 
    Currently, the following metrics are implemented:

    'fp', 'tp', 'fn', 'precision', 'recall', 'accuracy', 'f1', 'criterion', 'thresh', 'n_true', 'n_pred', 'mean_true_score', 'mean_matched_score', 'panoptic_quality'

    Corresponding objects of y_true and y_pred are counted as true positives (tp), false positives (fp), and false negatives (fn) whether their intersection over union (IoU) >= thresh (for criterion='iou', which can be changed)

    * mean_matched_score is the mean IoUs of matched true positives

    * mean_true_score is the mean IoUs of matched true positives but normalized by the total number of GT objects

    * panoptic_quality defined as in Eq. 1 of Kirillov et al. "Panoptic Segmentation", CVPR 2019

    Parameters
    ----------
    y_true: ndarray
        ground truth label image (integer valued)
    y_pred: ndarray
        predicted label image (integer valued)
    thresh: float
        threshold for matching criterion (default 0.5)
    criterion: string
        matching criterion (default IoU)
    report_matches: bool
        if True, additionally calculate matched_pairs and matched_scores (note, that this returns even gt-pred pairs whose scores are below  'thresh')

    Returns
    -------
    Matching object with different metrics as attributes

    Examples
    --------
    >>> y_true = np.zeros((100,100), np.uint16)
    >>> y_true[10:20,10:20] = 1
    >>> y_pred = np.roll(y_true,5,axis = 0)

    >>> stats = matching(y_true, y_pred)
    >>> print(stats)
    Matching(criterion='iou', thresh=0.5, fp=1, tp=0, fn=1, precision=0, recall=0, accuracy=0, f1=0, n_true=1, n_pred=1, mean_true_score=0.0, mean_matched_score=0.0, panoptic_quality=0.0)

    """
    # Validate inputs; `expr or _raise(...)` raises unless the condition holds.
    _check_label_array(y_true,'y_true')
    _check_label_array(y_pred,'y_pred')
    y_true.shape == y_pred.shape or _raise(ValueError("y_true ({y_true.shape}) and y_pred ({y_pred.shape}) have different shapes".format(y_true=y_true, y_pred=y_pred)))
    criterion in matching_criteria or _raise(ValueError("Matching criterion '%s' not supported." % criterion))
    if thresh is None: thresh = 0
    # NOTE(review): for a non-scalar thresh this creates a single-use `map`
    # iterator; it is consumed exactly once by tuple(map(_single, thresh)) at
    # the bottom of this function, so this works, but it cannot be re-iterated.
    thresh = float(thresh) if np.isscalar(thresh) else map(float,thresh)

    # Relabel both images to consecutive labels 1..n; keep inverse maps so
    # matched pairs can be reported in the original label space.
    y_true, _, map_rev_true = relabel_sequential(y_true)
    y_pred, _, map_rev_pred = relabel_sequential(y_pred)

    overlap = label_overlap(y_true, y_pred, check=False)
    # NOTE(review): variable is named normalized_root_mse but the helper is
    # called get_ssim — confirm which similarity measure this actually returns.
    normalized_root_mse = get_ssim(y_true, y_pred)
    scores = matching_criteria[criterion](overlap)
    assert 0 <= np.min(scores) <= np.max(scores) <= 1

    # ignoring background
    scores = scores[1:,1:]
    n_true, n_pred = scores.shape
    n_matched = min(n_true, n_pred)

    def _single(thr):
        # Compute all metrics for a single threshold `thr`.
        not_trivial = n_matched > 0 and np.any(scores >= thr)
        if not_trivial:
            # compute optimal matching with scores as tie-breaker
            costs = -(scores >= thr).astype(float) - scores / (2*n_matched)
            true_ind, pred_ind = linear_sum_assignment(costs)
            assert n_matched == len(true_ind) == len(pred_ind)
            match_ok = scores[true_ind,pred_ind] >= thr
            tp = np.count_nonzero(match_ok)
        else:
            tp = 0
        fp = n_pred - tp
        fn = n_true - tp
        # assert tp+fp == n_pred
        # assert tp+fn == n_true

        # the score sum over all matched objects (tp)
        sum_matched_score = np.sum(scores[true_ind,pred_ind][match_ok]) if not_trivial else 0.0

        # the score average over all matched objects (tp)
        mean_matched_score = _safe_divide(sum_matched_score, tp)
        # the score average over all gt/true objects
        mean_true_score = _safe_divide(sum_matched_score, n_true)
        panoptic_quality = _safe_divide(sum_matched_score, tp+fp/2+fn/2)

        stats_dict = dict (
            criterion = criterion,
            thresh = thr,
            fp = fp,
            tp = tp,
            fn = fn,
            precision = precision(tp,fp,fn),
            recall = recall(tp,fp,fn),
            accuracy = accuracy(tp,fp,fn),
            f1 = f1(tp,fp,fn),
            n_true = n_true,
            n_pred = n_pred,
            mean_true_score = mean_true_score,
            mean_matched_score = mean_matched_score,
            panoptic_quality = panoptic_quality,
            normalized_root_mse = normalized_root_mse,
        )
        if bool(report_matches):
            if not_trivial:
                stats_dict.update (
                    # int() to be json serializable
                    matched_pairs = tuple((int(map_rev_true[i]),int(map_rev_pred[j])) for i,j in zip(1+true_ind,1+pred_ind)),
                    matched_scores = tuple(scores[true_ind,pred_ind]),
                    matched_tps = tuple(map(int,np.flatnonzero(match_ok))),
                )
            else:
                stats_dict.update (
                    matched_pairs = (),
                    matched_scores = (),
                    matched_tps = (),
                )
        return namedtuple('Matching',stats_dict.keys())(*stats_dict.values())

    return _single(thresh) if np.isscalar(thresh) else tuple(map(_single,thresh))



def matching_dataset(y_true, y_pred, thresh=0.5, criterion='iou', by_image=False, show_progress=True, parallel=False):
    """matching metrics for list of images, see `stardist.matching.matching`
    """
    len(y_true) == len(y_pred) or _raise(ValueError("y_true and y_pred must have the same length."))
    return matching_dataset_lazy (
        tuple(zip(y_true,y_pred)), thresh=thresh, criterion=criterion, by_image=by_image, show_progress=show_progress, parallel=parallel,
    )



def matching_dataset_lazy(y_gen, thresh=0.5, criterion='iou', by_image=False, show_progress=True, parallel=False):
    """Accumulate `matching` statistics over a sequence of (y_true, y_pred) pairs."""

    # NOTE(review): the first element '<KEY> appears to be a corrupted/anonymized
    # placeholder (likely originally the literal keys 'fp', 'tp', 'fn') — restore
    # it from upstream stardist before relying on the key check below.
    expected_keys = set(('<KEY> 'normalized_root_mse', 'precision', 'recall', 'accuracy', 'f1', 'criterion', 'thresh', 'n_true', 'n_pred', 'mean_true_score', 'mean_matched_score', 'panoptic_quality'))

    # Allow a scalar threshold; normalize to a tuple internally.
    single_thresh = False
    if np.isscalar(thresh):
        single_thresh = True
        thresh = (thresh,)

    tqdm_kwargs = {}
    tqdm_kwargs['disable'] = not bool(show_progress)
    if int(show_progress) > 1:
        tqdm_kwargs['total'] = int(show_progress)

    # compute matching stats for every pair of label images
    if parallel:
        from concurrent.futures import ThreadPoolExecutor
        fn = lambda pair: matching(*pair, thresh=thresh, criterion=criterion, report_matches=False)
        with ThreadPoolExecutor() as pool:
            stats_all = tuple(pool.map(fn, tqdm(y_gen,**tqdm_kwargs)))
    else:
        stats_all = tuple (
            matching(y_t, y_p, thresh=thresh, criterion=criterion, report_matches=False)
            for y_t,y_p in tqdm(y_gen,**tqdm_kwargs)
        )

    # accumulate results over all images for each threshold separately
    n_images, n_threshs = len(stats_all), len(thresh)
    accumulate = [{} for _ in range(n_threshs)]
    for stats in stats_all:
        for i,s in enumerate(stats):
            acc = accumulate[i]
            for k,v in s._asdict().items():
                if k == 'mean_true_score' and not bool(by_image):
                    # convert mean_true_score to "sum_matched_score"
                    acc[k] = acc.setdefault(k,0) + v * s.n_true
                else:
                    # non-numeric fields (e.g. criterion string) raise TypeError
                    # on += and are skipped here; they are re-set below
                    try:
                        acc[k] = acc.setdefault(k,0) + v
                    except TypeError:
                        pass

    # normalize/compute 'precision', 'recall', 'accuracy', 'f1'
    for thr,acc in zip(thresh,accumulate):
        set(acc.keys()) == expected_keys or _raise(ValueError("unexpected keys"))
        acc['criterion'] = criterion
        acc['thresh'] = thr
        acc['by_image'] = bool(by_image)
        if bool(by_image):
            # per-image averaging of the already-computed metrics
            for k in ('precision', 'recall', 'accuracy', 'f1', 'mean_true_score', 'mean_matched_score', 'panoptic_quality'):
                acc[k] /= n_images
        else:
            # pool tp/fp/fn over the whole dataset and recompute the metrics
            tp, fp, fn, n_true, normalized_root_mse = acc['tp'], acc['fp'], acc['fn'], acc['n_true'], acc['normalized_root_mse']
            sum_matched_score = acc['mean_true_score']

            mean_matched_score = _safe_divide(sum_matched_score, tp)
            mean_true_score = _safe_divide(sum_matched_score, n_true)
            panoptic_quality = _safe_divide(sum_matched_score, tp+fp/2+fn/2)

            acc.update(
                precision = precision(tp,fp,fn),
                recall = recall(tp,fp,fn),
                accuracy = accuracy(tp,fp,fn),
                f1 = f1(tp,fp,fn),
                mean_true_score = mean_true_score,
                mean_matched_score = mean_matched_score,
                panoptic_quality = panoptic_quality,
                normalized_root_mse = normalized_root_mse,
            )

    accumulate = tuple(namedtuple('DatasetMatching',acc.keys())(*acc.values()) for acc in accumulate)

    return accumulate[0] if single_thresh else accumulate



# copied from scikit-image master for now (remove when part of a release)
def relabel_sequential(label_field, offset=1):
    """Relabel arbitrary labels to {`offset`, ... `offset` + number_of_labels}.

    This function also returns the forward map (mapping the original labels to
    the reduced labels) and the inverse map (mapping the reduced labels back
    to the original ones).

    Parameters
    ----------
    label_field : numpy array of int, arbitrary shape
        An array of labels, which must be non-negative integers.
    offset : int, optional
        The return labels will start at `offset`, which should be
        strictly positive.

    Returns
    -------
    relabeled : numpy array of int, same shape as `label_field`
        The input label field with labels mapped to
        {offset, ..., number_of_labels + offset - 1}.
        The data type will be the same as `label_field`, except when
        offset + number_of_labels causes overflow of the current data type.
    forward_map : numpy array of int, shape ``(label_field.max() + 1,)``
        The map from the original label space to the returned label
        space. Can be used to re-apply the same mapping. See examples
        for usage. The data type will be the same as `relabeled`.
    inverse_map : 1D numpy array of int, of length offset + number of labels
        The map from the new label space to the original space. This
        can be used to reconstruct the original label field from the
        relabeled one. The data type will be the same as `relabeled`.

    Notes
    -----
    The label 0 is assumed to denote the background and is never remapped.

    The forward map can be extremely big for some inputs, since its
    length is given by the maximum of the label field. However, in most
    situations, ``label_field.max()`` is much smaller than
    ``label_field.size``, and in these cases the forward map is
    guaranteed to be smaller than either the input or output images.

    Examples
    --------
    >>> from skimage.segmentation import relabel_sequential
    >>> label_field = np.array([1, 1, 5, 5, 8, 99, 42])
    >>> relab, fw, inv = relabel_sequential(label_field)
    >>> relab
    array([1, 1, 2, 2, 3, 5, 4])
    >>> fw
    array([0, 1, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5])
    >>> inv
    array([ 0,  1,  5,  8, 42, 99])
    >>> (fw[label_field] == relab).all()
    True
    >>> (inv[relab] == label_field).all()
    True
    >>> relab, fw, inv = relabel_sequential(label_field, offset=5)
    >>> relab
    array([5, 5, 6, 6, 7, 9, 8])
    """
    offset = int(offset)
    if offset <= 0:
        raise ValueError("Offset must be strictly positive.")
    if np.min(label_field) < 0:
        raise ValueError("Cannot relabel array that contains negative values.")
    max_label = int(label_field.max()) # Ensure max_label is an integer
    if not np.issubdtype(label_field.dtype, np.integer):
        new_type = np.min_scalar_type(max_label)
        label_field = label_field.astype(new_type)
    labels = np.unique(label_field)
    labels0 = labels[labels != 0]
    new_max_label = offset - 1 + len(labels0)
    new_labels0 = np.arange(offset, new_max_label + 1)
    output_type = label_field.dtype
    required_type = np.min_scalar_type(new_max_label)
    # widen the dtype only if the new maximum label would overflow it
    if np.dtype(required_type).itemsize > np.dtype(label_field.dtype).itemsize:
        output_type = required_type
    forward_map = np.zeros(max_label + 1, dtype=output_type)
    forward_map[labels0] = new_labels0
    inverse_map = np.zeros(new_max_label + 1, dtype=output_type)
    inverse_map[offset:] = labels0
    relabeled = forward_map[label_field]
    return relabeled, forward_map, inverse_map


# + colab={"base_uri": "https://localhost:8080/"} id="dp0zjdnlD_PI" outputId="d36ae22b-043b-4071-f38f-6989dc00307d"
# Sweep of IoU thresholds at which the dataset-level metrics are evaluated.
taus = [0.05,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
statsAugSeg = [matching_dataset(GT, AugSeg, thresh=t,
show_progress=False) for t in tqdm(taus)] statsNoAugSeg = [matching_dataset(GT, NoAugSeg, thresh=t, show_progress=False) for t in tqdm(taus)] # + [markdown] id="JiXCul_BD_PK" # # Augmented prediction and Unaugmented prediction metrics # + [markdown] id="H45ZqdLHD_PM" # # With Augmentation # + colab={"base_uri": "https://localhost:8080/", "height": 556} id="PRqjj6JOD_PN" outputId="68fc5efc-ea94-4d08-ae1a-cd6dbc73b734" fig, (ax1,ax2) = plt.subplots(1,2, figsize=(25,10)) for m in ('precision', 'recall', 'accuracy', 'f1', 'mean_true_score', 'panoptic_quality'): ax1.plot(taus, [s._asdict()[m] for s in statsAugSeg], '.-', lw=2, label=m) ax1.set_xlabel(r'IoU threshold $\tau$') ax1.set_ylabel('Metric value') ax1.grid() #ax1.legend(loc="lower left") for m in ('fp', 'tp', 'fn'): ax2.plot(taus, [s._asdict()[m] for s in statsAugSeg], '.-', lw=2, label=m) ax2.set_xlabel(r'IoU threshold $\tau$') ax2.set_ylabel('Number #') ax2.grid() ax2.legend(); plt.savefig(Results + 'AugSeg', dpi=300) # + [markdown] id="-Jk7GlE_D_PN" # # Without Augmentation # + id="OGk1M43BD_PN" fig, (ax1,ax2) = plt.subplots(1,2, figsize=(25,10)) for m in ('precision', 'recall', 'accuracy', 'f1', 'mean_true_score', 'panoptic_quality'): ax1.plot(taus, [s._asdict()[m] for s in statsNoAugSeg], '.-', lw=2, label=m) ax1.set_xlabel(r'IoU threshold $\tau$') ax1.set_ylabel('Metric value') ax1.grid() #ax1.legend(loc="lower left") for m in ('fp', 'tp', 'fn'): ax2.plot(taus, [s._asdict()[m] for s in statsNoAugSeg], '.-', lw=2, label=m) ax2.set_xlabel(r'IoU threshold $\tau$') ax2.set_ylabel('Number #') ax2.grid() ax2.legend(); plt.savefig(Results + 'AugSeg', dpi=300) # + id="M-qBuuRUD_PO" import pandas as pd df = pd.DataFrame(list(zip(AugSegmse,NoAugSegmse )), index = None, columns =["Augmented", "NoAugmentation"]) print(df) sns.set(style="whitegrid") g = sns.violinplot(data=df, orient ='v') fig = g.get_figure() fig.savefig(Results + "AugCOmpare.png", dpi=300) # + id="7GtuBZC4D_PO" # + id="89_A0LvND_PO"
examples/Predict/SegmentationMetrics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mishraoct786/MachineLearning-/blob/main/heroku_model_deployment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="umteftyripCZ"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn

# + id="nd5aiP3sjHox"
# Load the hiring data set (experience, test score, interview score -> salary).
df = pd.read_csv('hiring.csv')

# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="uQDj_pSSlzI0" outputId="f051af8d-a394-4e17-fda9-faa4c413add4"
df

# + colab={"base_uri": "https://localhost:8080/"} id="6MHmfIeFmCgd" outputId="5debde92-ba3e-46c4-d002-bde561fabfec"
df.isnull().sum()

# + id="_Pg6skG3msPa"
# Impute missing test scores with the column mean.
df['test_score(out of 10)'].fillna(df['test_score(out of 10)'].mean(), inplace=True)

# + id="uHW3r-lNnel7"
# Missing experience is treated as zero years.
df['experience'].fillna(0, inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="BEnmhh73pCur" outputId="b073763e-d749-4106-db1d-15192ea977be"
df

# + id="Wx7EU_CppEZQ"
# Mapping from spelled-out number words to integers. The integer key 0 handles
# the fillna(0) value above. All keys of the original mapping are preserved
# with the same values; the remaining small number words are added so that
# unseen-but-valid rows do not crash the pipeline. (The original local name
# `dict` also shadowed the builtin — moved to a module-level constant.)
_WORD_TO_NUM = {
    0: 0, 'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
    'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11,
    'twelve': 12,
}


def stringToNum(word):
    """Convert a spelled-out number word (or the literal int 0) to an int.

    Raises KeyError for values outside the known mapping, exactly like the
    original dict lookup did.
    """
    return _WORD_TO_NUM[word]


# + id="H0f0y_6vpWaO"
df['experience'] = df['experience'].apply(lambda x: stringToNum(x))

# + id="PnwuDxlGqKe5"
# Features: first three columns; target: last column (salary).
x = df.iloc[:, :3]
y = df.iloc[:, -1]

# + id="Q6iQiN0erDFR"
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=5)

# + colab={"base_uri": "https://localhost:8080/"} id="PcI_MknCzpDg" outputId="f0242f33-9e4c-4fd5-955b-095f7edc9433"
from sklearn.linear_model import LinearRegression

mymodel = LinearRegression()
mymodel.fit(x_train, y_train)

# + id="eq2PTFFJGl44"
y_pred = mymodel.predict(x_test)

# + id="Gt96FF_oHEju"
# Sanity-check prediction: 5 years experience, test score 8, interview score 7.
y = mymodel.predict([[5, 8, 7]])

# + colab={"base_uri": "https://localhost:8080/"} id="B8JDfZTOHcLz" outputId="138fcaa0-2c64-442a-c46a-2dab7c0a80dc"
y

# + id="WH1gZVHQHfVo"
import pickle

# Use a context manager so the file is flushed and closed (the original
# pickle.dump(mymodel, open("model.pkl", "wb")) leaked the file handle).
with open("model.pkl", "wb") as f:
    pickle.dump(mymodel, f)

# + id="zpYcW_pNH274"
heroku_model_deployment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import libraries

# +
# Imports
import re
import string
import json
from datetime import datetime
from collections import defaultdict, Counter

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm

import torch
import torch.nn as nn
from torch.nn import Module
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence

from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score

from nltk.corpus import stopwords

device = 'cpu'

# Fix all RNG seeds for reproducibility.
import random
random.seed(26)
np.random.seed(62)
torch.manual_seed(2021)
# -

# ## Load data

# +
ben_train_df = pd.read_csv('../../data/bengali_hatespeech_sample_train_preprocessed.csv')
ben_test_df = pd.read_csv('../../data/bengali_hatespeech_sample_test_preprocessed.csv')

display(ben_train_df.head())

# +
# remove empty texts
ben_train_df = ben_train_df[ben_train_df.sentence.str.len() > 0]

# extract sentences and labels
train_sentences = [text.split() for text in ben_train_df['sentence']]
train_labels = ben_train_df['hate'].to_numpy()

# remove empty texts
ben_test_df = ben_test_df[ben_test_df.sentence.str.len() > 0]

# extract sentences and labels
test_sentences = [text.split() for text in ben_test_df['sentence']]
test_labels = ben_test_df['hate'].to_numpy()

print('Train data:')
print(train_sentences[:3])
print(train_labels)
print()
print('Test data:')
print(test_sentences[:3])
print(test_labels)
# -

# ## Prepare vocab set

# +
# load mapping {word -> id} and {id -> word}
with open('save/word_to_int_dict.json') as f:
    word_to_int = json.load(f)
with open('save/int_to_word_dict.json') as f:
    int_to_word = json.load(f)

# get vocab_size
vocab_size = len(word_to_int)
print(f'vocab_size: {vocab_size}')
# -

# Map words to ids. NOTE(review): the train mapping assumes every train word is
# in the vocab (no `if word in word_to_int` guard as for test) — confirm the
# vocab was built from exactly this train split.
train_sentences = [[word_to_int[word] for word in sentence] for sentence in train_sentences]
test_sentences = [[word_to_int[word] for word in sentence if word in word_to_int] for sentence in test_sentences]

# ## Hyper-parameters

embedding_path = 'save/embedding_weights.pt'
embedding_size = 300
att_dim = 150  # NOTE(review): unused below
learning_rate = 1e-4
batch_size = 32
epochs = 30

# ## Build datasets

# +
class HOFDataset(Dataset):
    """Dataset of (token-id tensor, float label) pairs; drops empty sentences."""

    def __init__(self, sentences, labels):
        self.data = []
        for sentence, label in zip(sentences, labels):
            if len(sentence):
                self.data.append(
                    (torch.tensor(sentence, dtype=torch.long), torch.tensor(label, dtype=torch.float))
                )

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

def preprocess_batch(batch):
    # Collate: pad to a (seq_len, batch) tensor and sort the batch by
    # decreasing length (texts is sequence-first since batch_first defaults
    # to False in pad_sequence).
    texts, labels = list(zip(*batch))
    seq_lens = torch.tensor([len(text) for text in texts], dtype=torch.long)
    texts = pad_sequence(texts, padding_value=0)
    labels = torch.tensor(labels).unsqueeze(1)

    seq_lens, sorted_idx = seq_lens.sort(descending=True)
    texts = texts[:,sorted_idx]
    labels = labels[sorted_idx]

    return texts, seq_lens, labels

train_dataset = HOFDataset(train_sentences, train_labels)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=preprocess_batch)
test_dataset = HOFDataset(test_sentences, test_labels)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=preprocess_batch)
# -

# ## Network architecture

def mask_seq(seq_lens):
    # Boolean key-padding mask of shape (batch, max_len): True marks padding
    # positions past each sequence's length.
    mask = torch.zeros((len(seq_lens), max(seq_lens))).bool()
    for i, seq_len in enumerate(seq_lens):
        mask[i, seq_len:] = True
    return mask

# +
# get hindi_vocab_size
with open('../../Task_1/save/word_to_int_dict.json') as f:
    hindi_word_to_int = json.load(f)
hindi_vocab_size = len(hindi_word_to_int)

# define classifier
class Classifier(Module):
    """Embedding + single multi-head self-attention block + linear scorer;
    the per-token scores are mean-pooled over each sequence."""

    def __init__(self):
        super(Classifier, self).__init__()
        self.embed = nn.Embedding(hindi_vocab_size, embedding_size)
        self.attention = nn.MultiheadAttention(embed_dim=embedding_size, num_heads=10, dropout=0.5,)
        # NOTE(review): setting requires_grad on an nn.Module just adds an
        # attribute — it does NOT freeze the module's parameters. To actually
        # train only the embedding, set p.requires_grad = False on
        # self.attention.parameters() and self.fc.parameters() — confirm intent.
        self.attention.requires_grad = False # fix all layers except embedding.
        self.fc = nn.Linear(embedding_size, 1)
        self.fc.requires_grad = False # fix all layers except embedding.

    def forward(self, inp, seq_lens):
        out = self.embed(inp)
        pad_mask = mask_seq(seq_lens)
        att_out, _ = self.attention(out, out, out, key_padding_mask=pad_mask)
        # residual connection + layer norm over the embedding dimension
        out = F.layer_norm(out + att_out, (out.size(2), ))
        out = self.fc(out).squeeze(2)

        # mean-pool the per-token logits over the unpadded positions
        pred = torch.zeros((out.size(1), 1))
        for i, seq_len in enumerate(seq_lens):
            pred[i, 0] = out[:seq_len, i].mean()
        return pred

# load pre-trained hindi classifier
hindi_clf = Classifier().to(device)
hindi_model_weight_path = '../hindi_hindi/save/hindi_clf.pt'
hindi_clf.load_state_dict(torch.load(hindi_model_weight_path, map_location=torch.device(device)))
print('Hindi classifier:')
print(hindi_clf.eval())

# replace the embedding layer to make it a bengali classifier
# (note: bengali_clf aliases hindi_clf — the swap mutates the same object)
bengali_embed = nn.Embedding(vocab_size, embedding_size)
bengali_clf = hindi_clf
bengali_clf._modules['embed'] = bengali_embed
print('Bengali classifier:')
print(bengali_clf.eval())
# -

# ## Loss function and optimizer

criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(bengali_clf.parameters(), lr=learning_rate)

# ## Measure performance on test data

def predict_test():
    """Evaluate on the test loader; returns (loss, accuracy, macro F1, weighted F1).

    NOTE(review): `len(texts)` is the padded sequence length (texts is
    sequence-first), while `cnt` counts texts.size(1) = batch size, so the
    loss average is weighted by sequence length — confirm this is intended.
    """
    losses = 0
    acc_cnt = 0
    cnt = 0
    preds = []
    true_labels = []
    for texts, seq_lens, labels in test_loader:
        pred = bengali_clf(texts.to(device), seq_lens).detach().to('cpu')
        loss = criterion(pred, labels)
        losses += loss.detach().item() * len(texts)
        # logits > 0 corresponds to sigmoid(p) > 0.5
        acc_cnt += sum((pred > 0) == (labels > 0)).item()
        preds.extend(pred.view(-1))
        true_labels.extend(labels.view(-1))
        cnt += texts.size(1)
    preds = np.array(preds) > 0
    macro_f1 = f1_score(true_labels, preds, average='macro')
    weighted_f1 = f1_score(true_labels, preds, average='weighted')
    return losses / cnt, acc_cnt / cnt, macro_f1, weighted_f1

# ## Training

# +
list_test_acc = []
early_stop = 5  # stop if test accuracy has not improved for this many epochs
for epoch in range(1, epochs + 1):
    losses = 0.
    acc_cnt = 0
    cnt = 0
    bengali_clf.train()
    for texts, seq_lens, labels in tqdm(train_loader):
        optimizer.zero_grad()
        pred = bengali_clf(texts.to(device), seq_lens)
        loss = criterion(pred, labels)
        loss.backward()
        optimizer.step()

        losses += loss.detach().item() * len(texts)
        acc_cnt += sum((pred.to('cpu') > 0) == (labels > 0)).item()
        cnt += texts.size(1)
    epoch_loss = losses / cnt
    epoch_acc = acc_cnt / cnt

    test_loss, test_acc, test_macro_f1, test_weighted_f1 = predict_test()
    print(f'Epoch {epoch:2}: Train loss: {epoch_loss:.4f}, acc: {epoch_acc:.4f}. '
          f'Test loss: {test_loss:.4f}, acc: {test_acc:.4f}, '
          f'macro_f1: {test_macro_f1:.4f}, weighted_f1: {test_weighted_f1:.4f}', flush=True)

    list_test_acc.append(test_acc)
    if len(list_test_acc) > early_stop and max(list_test_acc[-early_stop:]) <= max(list_test_acc[:-early_stop]):
        print(f'Early stopping: test accuracy does not increase after {early_stop} epochs')
        break
# -
Task_2/hindi_bengali/.ipynb_checkpoints/Task-2.3_bengali_classification-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# ## How to read a fid file
#
# To read a fid file, just simply pass its path to a constructor of the `Dataset` class.
#
# The aim of `Dataset` class is to wrap around all the complexity of data and meta data storage. It takes the **fid**
# specified by user, the **method** and **acqp** files located in the same folder and provides unified access both to
# the data array and parameters.
#
# Data for this example is downloaded from:
#
# https://gitlab.com/naveau/bruker2nifti_qa
#

# + pycharm={"name": "#%%\n", "is_executing": false}
from brukerapi.dataset import Dataset
from pathlib import Path

# path to data cloned from the repository above (adjust to your machine)
data_path = Path('C:/data/bruker2nifti_qa')

# both constructors are possible: pass the fid file itself, or its folder
dataset = Dataset(data_path / Path('raw/Cyceron_DWI/20170719_075627_Lego_1_1/2/fid'))
# dataset = Dataset(data_path / 'raw/Cyceron_DWI/20170719_075627_Lego_1_1/2')
# -

# Once dataset is created, it is possible to access the data array.

# + pycharm={"name": "#%%\n", "is_executing": false}
data = dataset.data
print(data.shape)
# -

# The `dim_type` property of `Dataset` class can be used in order to make sense out of individual dimensions of the data
# array.

# + pycharm={"name": "#%%\n", "is_executing": false}
print(dataset.dim_type)
# -

# Several most wanted properties of MR measurement, such as the echo time, the flip angle, etc. are implemented as
# dataset properties.

# + pycharm={"name": "#%% \n", "is_executing": false}
print(dataset.TE)
print(dataset.TR)
print(dataset.flip_angle)
print(dataset.sw)
print(dataset.transmitter_freq)
# -

# Other parameters can be accessed using the dot notation. It is only possible to access parameters stored in jcamp-dx
# files related to given data set type. A **fid** data set only allows to access parameters from the **method** and
# **acqp** files.

# + pycharm={"name": "#%%\n", "is_executing": false}
# get parameter object
print(dataset.PVM_EncSteps1)
# -

# It is possible to augment `Dataset` by other parameter files, but only files in the same reco, or respective scan
# folders are allowed. Please bear in mind, that this augmentation might lead to a loss of uniqueness in the parameter
# space.
#
# You can use it for instance to access reconstruction parameters, or the shape of excitation pulse.

# + pycharm={"name": "#%%\n", "is_executing": false}
dataset.add_parameters('reco')
print(dataset.RECO_inp_size)

dataset.add_parameters('spnam40')
print(dataset.XYPOINTS)
examples/read_fid.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# This notebook provides instructions for training ProtoCNN on your own data.
# Let's assume that our data is in `data/amazon/data.csv`. Let's visualize them:

# + pycharm={"name": "#%%\n"}
import string
import warnings

import pandas as pd
import torch
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor, ModelCheckpoint
from sklearn.model_selection import train_test_split
from torchtext import data
from torchtext.data import BucketIterator
from torchtext.vocab import GloVe

from dataframe_dataset import DataFrameDataset
from models.protoconv.data_visualizer import DataVisualizer
from models.protoconv.lit_module import ProtoConvLitModule
from utils import plot_html

warnings.simplefilter("ignore")
seed_everything(0)

# + pycharm={"name": "#%%\n"}
# !head ../data/amazon/data.csv

# + [markdown] pycharm={"name": "#%% md\n"}
# We will start by loading the data:

# + pycharm={"name": "#%%\n"}
df_dataset = pd.read_csv(f'../data/amazon/data.csv')
df_dataset.head()

# + [markdown] pycharm={"name": "#%% md\n"}
# We will divide the collection into training and testing

# + pycharm={"name": "#%%\n"}
# stratify keeps the label distribution identical in both splits
train_df, valid_df = train_test_split(df_dataset, test_size=0.2, stratify=df_dataset['label'])
train_df.shape, valid_df.shape

# + [markdown] pycharm={"name": "#%% md\n"}
# Now we will create a `torchtext` dataset, you can use any input format.
# We will use a dataset created from a table in pandas.

# + pycharm={"name": "#%%\n"}
# Text field: spacy tokenization, lower-cased, punctuation removed via stop_words.
TEXT = data.Field(init_token='<START>', eos_token='<END>', tokenize='spacy', tokenizer_language='en',
                  batch_first=True, lower=True, stop_words=set(string.punctuation))
LABEL = data.Field(dtype=torch.float, is_target=True, unk_token=None, sequential=False, use_vocab=False)

train_dataset = DataFrameDataset(train_df, { 'text': TEXT, 'label': LABEL })
val_dataset = DataFrameDataset(valid_df, { 'text': TEXT, 'label': LABEL })

# Bucket batches by text length to minimize padding within a batch.
train_loader, val_loader = BucketIterator.splits(
    (train_dataset, val_dataset),
    batch_size=32,
    sort_key=lambda x: len(x.text),
    device='cuda'
)

# Build the vocabulary from the training split only, with GloVe vectors.
TEXT.build_vocab(train_dataset.text, vectors=GloVe('42B', cache='../.vector_cache/'))

# + [markdown] pycharm={"name": "#%% md\n"}
# We will add saving the best model, stopping training early if there is no improvement in loss,
# and decreasing the learning rate. We will load the model with the parameters used in the publication.

# + pycharm={"name": "#%%\n"}
model_checkpoint = ModelCheckpoint(filepath='../checkpoints/{epoch_0:02d}-{val_loss_0:.4f}-{val_acc_0:.4f}',
                                   save_weights_only=True, save_top_k=1, monitor='val_acc_0', period=1)
callbacks = [
    LearningRateMonitor(logging_interval='epoch'),
    EarlyStopping(monitor=f'val_loss_0', patience=10, verbose=True, mode='min', min_delta=0.005),
    model_checkpoint
]

model = ProtoConvLitModule(vocab_size=len(TEXT.vocab), embedding_dim=TEXT.vocab.vectors.shape[1],
                           fold_id=0, lr=1e-3, itos=TEXT.vocab.itos, verbose_proto=False)

# + [markdown] pycharm={"name": "#%% md\n"}
# Start training

# + pycharm={"name": "#%%\n"}
trainer = Trainer(max_epochs=30, callbacks=callbacks, gpus=1, deterministic=True, num_sanity_val_steps=0)
trainer.fit(model, train_dataloader=train_loader, val_dataloaders=val_loader)

# + [markdown] pycharm={"name": "#%% md\n"}
# The result of the best model is stored in the model checkpointer

# + pycharm={"name": "#%%\n"}
'Best accuracy: ', model_checkpoint.best_model_score.tolist()

# + [markdown] pycharm={"name": "#%% md\n"}
# We will now load the weights of the best model and visualize the prototypes along with the random prediction explanations

# + pycharm={"name": "#%%\n"}
best_model = ProtoConvLitModule.load_from_checkpoint(model_checkpoint.best_model_path)
data_visualizer = DataVisualizer(best_model)

# + pycharm={"name": "#%%\n"}
plot_html(data_visualizer.visualize_prototypes())

# + pycharm={"name": "#%%\n"}
plot_html(data_visualizer.visualize_random_predictions(val_loader, n=5))

# + pycharm={"name": "#%%\n"}
src/minimal_train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import urllib3
import certifi
import re
from collections import defaultdict
from copy import copy, deepcopy

# HTTPS pool with certificate verification for all Math Genealogy requests.
http = urllib3.PoolManager(
    cert_reqs='CERT_REQUIRED',
    ca_certs=certifi.where())

# +
# Collect the starting set of Math Genealogy ids from local files.
# NOTE(review): the result of the first file (numery_mini_ang.txt) is
# immediately overwritten by the plain reassignment from numery_mini.txt —
# if those ids were meant to be kept, the second statement should use `+`
# like the ones below. Confirm intent.
numery = []
with open('numery_mini_ang.txt') as f:
    numery = [int(n) for n in f.read().splitlines()]
with open('numery_mini.txt') as f:
    numery = [int(n) for n in f.read().splitlines()]
with open('numery_lwow.txt') as f:
    numery = numery + [int(n) for n in f.read().splitlines()]
with open('numery_mim.txt') as f:
    numery = numery + [int(n) for n in f.read().splitlines()]
# -

def link_from_num(idd):
    # Build the Math Genealogy profile URL for a given id.
    return "http://mathgenealogy.org/id.php?id=" + str(idd)

# +
# testing: scrape a single profile to check the CSS selectors still match
for idd in numery:
    link = link_from_num(idd)
    page = http.request('GET', link, preload_content=False)
    soup = BeautifulSoup(page.read(), 'html.parser')
    mat_name = soup.find('h2')
    name = str(mat_name.string)
    st = mat_name.fetchNextSiblings()
    print(idd)
    ss = str(soup.select("#paddingWrapper > div:nth-child(6) > span"))
    print(re.search(r".*([1-3][0-9]{3})", ss).group(1))
    school = soup.select_one("#paddingWrapper > div:nth-child(6) > span > span")
    print(school.string)
    break
# -

def scrap_id(idd):
    """Scrape one Math Genealogy profile.

    Returns a dict with keys 'advisors' (list of int ids, -1 for unknown),
    'name', 'school', 'thesis' and 'year' (-1 when no 4-digit year is found).

    NOTE(review): if the page layout is unexpected, the broad `except` below
    only prints the error, leaving `name`/`st`/`school`/`thesis` unbound and
    causing a NameError further down — consider re-raising or returning early.
    """
    link = link_from_num(idd)
    page = http.request('GET', link, preload_content=False)
    soup = BeautifulSoup(page.read(), 'html.parser')
    mat_name = soup.find('h2')
    try:
        name = str(mat_name.string).strip()
        st = mat_name.fetchNextSiblings()
        name_s = soup.select("#paddingWrapper > div:nth-child(6) > span")
        # first 4-digit number in 1000-3999 is taken as the Ph.D. year
        year = re.search(r".*([1-3][0-9]{3})", str(name_s))
        if year is not None:
            year = year.group(1)
        else:
            year = -1
            print("No year for:", name, idd)
        school = soup.select_one("#paddingWrapper > div:nth-child(6) > span > span").string
        thesis = soup.select_one('#thesisTitle').string
    except Exception as exc:
        print(exc)
        print(idd)
        print(mat_name)
        print(soup)

    # Extract advisor ids from the sibling elements after the name header.
    advisors = []
    for e in st:
        if str(e).find("Advisor") != -1:
            res = re.search(r"id=(\d*)", str(e))
            try:
                advisors.append(int(res.group(1)))
            except Exception as exc:
                # "Unknown" advisors are recorded with the sentinel -1
                if str(e).find("Unknown") != -1:
                    advisors.append(-1)
                else:
                    print("ERRRRROR")
                    print(exc)
                    print(e)
                    print(idd)

    d = dict()
    d['advisors'] = copy(advisors)
    d['name'] = name
    d['school'] = str(school)
    d['thesis'] = str(thesis)
    d['year'] = int(year)
    return d

# +
# Scrape the initial set of mathematicians.
db = defaultdict(dict)
for idd in numery:
    db[idd] = deepcopy(scrap_id(idd))

# +
#db_hist = deepcopy(db)

# +
#db = deepcopy(db_hist)
# -

# Breadth-first expansion: follow advisor links up to `depth` generations.
depth = 10
db_last = db
db_new = defaultdict(dict)
for i in range(depth):
    for k, d in db_last.items():
        for adv_id in d['advisors']:
            if not adv_id in db and not adv_id in db_new and adv_id != -1:
                db_new[adv_id] = deepcopy(scrap_id(adv_id))
    db = {**db, **db_new}
    db_last = deepcopy(db_new)

import networkx as nx
import matplotlib.pyplot as plt
import pygraphviz as pgv
import pickle

# Build the advisor -> student graph, dropping any edge that would create a
# cycle so that the result stays a forest.
graph = nx.DiGraph()
for mat_id, mat in db.items():
    for adv in mat['advisors']:
        if adv != -1:
            g_tmp = graph  # NOTE(review): unused alias, left from debugging?
            graph.add_edge(adv, mat_id)
            if nx.is_forest(graph) == False:
                graph.remove_edge(adv, mat_id)

# +
# nx.write_gpickle(graph, 'graf.pkl')
# -

# Prune weakly-connected nodes (fewer than 12 reachable nodes when undirected).
un_graph = graph.to_undirected()
nodes_conections = {node : len(nx.descendants(un_graph, node)) for node in graph.nodes()}
small_nodes = [key for key, val in nodes_conections.items() if val < 12]
graph.remove_nodes_from(small_nodes)

graph_lab = nx.relabel_nodes(graph, {mat_id : mat['name'] for mat_id, mat in db.items()})

# School -> fill color (RGBA hex) for the rendered graph.
col_converter = {
    'Politechnika Warszawska' : '#a29bfe8f',
    'Technical University of Warsaw' : '#a29bfe8f',
    'Uniwersytet Warszawski' : '#81ecec8f',
    'University of Lwów' : '#74b9ff8f'
}

# +
# Render the genealogy tree with graphviz (fdp layout) to an SVG file.
G = nx.nx_agraph.to_agraph(graph)

bad_nodes = set()
for node in G.nodes():
    ie = int(node)
    if ie in db:
        node.attr['color'] = col_converter.get(db[ie]['school'], '#dfe6e98f')
        if db[ie]['year'] != -1:
            node.attr['label'] = "Ph.D. " + db[ie]['name'] + " " + str(db[ie]['year']) + "\n" + str(db[ie]['school']) # + "\n" + str(db[ie]['thesis'])
        else:
            node.attr['label'] = db[ie]['name']
    else:
        bad_nodes.add(ie)

G.remove_nodes_from(bad_nodes)

for e in G.edges():
    e.attr['color'] = "#b2bec38f"

G.node_attr['fontname'] = 'monospace'
G.node_attr['style'] = 'filled'
G.node_attr['opacity'] = '0.5'
#G.node_attr['color']='blue'
G.layout(prog='fdp')
G.draw(format='svg', path='g.svg')
# -

# Count advisor -> student transitions between different schools.
res_db = defaultdict(int)
for idd, mat in db.items():
    for adv in mat['advisors']:
        if adv in db:
            if mat['school'] != db[adv]['school']:
                direction = db[adv]['school'] + " -> " + mat['school']
                res_db[direction] += 1

[(name, count) for name, count in res_db.items() if count > 4]

def get_mat_json(graph, idd, db):
    # Export the subtree rooted at `idd` (names as node ids) in the
    # networkx tree_data JSON format.
    reachable_nodes = nx.descendants(graph, idd)
    reachable_nodes.add(idd)
    res_g = nx.relabel_nodes(nx.subgraph(graph, reachable_nodes), {mat_id : mat['name'] for mat_id, mat in db.items()})
    return nx.tree_data(res_g , db[idd]['name'])

get_mat_json(graph, 12681, db)

for i, e in enumerate(db.values()):
    print(e['name'], e['school'])
    if i > 10:
        break
scrapping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare unbinned and binned maximum-likelihood fits of a
# uniform-background + Gaussian-peak model as a function of the number of
# histogram bins.  Toy experiments are fitted both ways, then the bias and
# the coverage probability of the parameter estimates are plotted.

from iminuit import Minuit
import numpy as np
from scipy.stats import norm, uniform, poisson
from matplotlib import pyplot as plt
import progressbar

# +
# True model: a fraction z of events from N(mu, sigma), the rest uniform
# on xrange.
xrange = (0., 1.)
z = 0.2
mu = 0.5
sigma = 0.05
truth = (z, mu, sigma)


# +
# NOTE(review): these cost functions read the sample `x`, the bin edges
# `xe` and the bin contents `w` from module scope -- those globals must be
# assigned before the fits run (they are, in the cells below).
def unbinned(z, mu, sigma):
    """Negative log-likelihood of the unbinned sample `x`."""
    probs = (1.0-z) / (xrange[1] - xrange[0]) + z * norm(mu, sigma).pdf(x)
    return -np.sum(np.log(probs))


def expected(z, mu, sigma):
    """Return expected counts per bin for the histogram with edges `xe`."""
    b = uniform(*xrange).cdf(xe)
    s = norm(mu, sigma).cdf(xe)
    m = (1-z) * b + z * s
    return len(x) * (m[1:] - m[:-1])


def binned(z, mu, sigma):
    """Binned negative log-likelihood for Poisson counts `w`.

    Constant terms (log w!) are dropped; they do not affect the minimum.
    """
    m = expected(z, mu, sigma)
    return np.sum(m - w * np.log(m))


# +
# Visual check of `expected` against one toy data set for several binnings.
np.random.seed(1)
s = norm(mu, sigma).rvs(int(z * 1000))
b = uniform(*xrange).rvs(1000 - len(s))
x = np.append(s, b)
assert len(x) == 1000

fig, ax = plt.subplots(1, 3, figsize=(15, 4), sharex=True)
for axi, bins in zip(ax, (5, 50, 100)):
    plt.sca(axi)
    w, xe, *_ = plt.hist(x, bins=bins, range=xrange, label="data")
    m = expected(*truth)
    plt.step(xe, np.append(0, m), label="expected")
    plt.legend()
    plt.xlabel("x")

# +
# Run nmc toy experiments; fit each toy unbinned and with every binning.
np.random.seed(1)
bins = (5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 20, 30, 40, 50, 100)
ntot = 1000
nmc = 100

pars_unbinned = []
pars_binned = []
covs_unbinned = []
covs_binned = []

minuit_kwargs = {
    "z": 0.5,
    "mu": 0.5,
    "sigma": 0.05,
    "pedantic": False,
    "errordef": 0.5,  # 0.5 is the correct errordef for a negative log-likelihood
    "limit_z": (0, 1),
    "limit_mu": xrange,
    "limit_sigma": (0, None),
}

for imc in progressbar.progressbar(range(nmc)):
    xs = norm(mu, sigma).rvs(int(z * ntot))
    xb = uniform(*xrange).rvs(ntot - len(xs))
    x = np.append(xs, xb)

    m_un = Minuit(unbinned, **minuit_kwargs)
    m_un.migrad()
    m_un.migrad()  # second call refines a marginally-converged first pass
    valid = m_un.get_fmin().is_valid
    if not valid:
        continue

    pars_b = []
    covs_b = []
    for b in bins:
        w, xe = np.histogram(x, bins=b, range=xrange)
        m = Minuit(binned, **minuit_kwargs)
        m.migrad()
        m.migrad()
        valid = m.get_fmin().is_valid
        if not valid:
            break
        pars_b.append(m.np_values())
        covs_b.append(m.np_covariance())
    if not valid:
        # Keep a toy only if *every* fit converged, so that the result
        # arrays below stay rectangular.
        continue

    pars_unbinned.append(m_un.np_values())
    covs_unbinned.append(m_un.np_covariance())
    pars_binned.append(pars_b)
    covs_binned.append(covs_b)

pars_unbinned = np.array(pars_unbinned)
pars_binned = np.array(pars_binned)
covs_unbinned = np.array(covs_unbinned)
covs_binned = np.array(covs_binned)
# -

# bias
fig, ax = plt.subplots(1, 3, figsize=(16, 4), sharex=True)
plt.subplots_adjust(wspace=0.3)
# BUG FIX: `n` was previously assigned *after* its first use inside the
# loop (`s = np.std(y) / n ** 0.5` raised NameError).  Hoisted here.
n = len(pars_unbinned)
for ipar, name in enumerate(("z", "mu", "sigma")):
    plt.sca(ax[ipar])
    y = pars_unbinned[:, ipar]
    m = np.mean(y)
    s = np.std(y) / n ** 0.5  # standard error of the mean
    plt.axhline(m, label="unbinned")
    plt.axhspan(m-s, m+s, alpha=0.2)
    y = pars_binned[:, :, ipar]
    m = np.mean(y, axis=0)
    s = np.std(y, axis=0) / n ** 0.5
    plt.errorbar(bins, m, s, fmt="o", color="C1", label="binned")
    plt.legend()
    plt.xlabel("bins")
    plt.semilogx()
    plt.ylabel(name)
    # Guide lines: bin width equal to sigma (dashed) and 2*sigma (dotted).
    plt.axvline((xrange[1] - xrange[0])/sigma, ls="--", color="0.5")
    plt.axvline((xrange[1] - xrange[0])/(2 * sigma), ls=":", color="0.5")
    plt.axhline(truth[ipar], color="k", zorder=0)

# coverage probability
fig, ax = plt.subplots(1, 3, figsize=(16, 4), sharex=True)
plt.subplots_adjust(wspace=0.3)
for ipar, name in enumerate(("z", "mu", "sigma")):
    plt.sca(ax[ipar])
    y = pars_unbinned[:, ipar]
    dy = covs_unbinned[:, ipar, ipar] ** 0.5
    prob = np.mean((truth[ipar] < y + dy) & (truth[ipar] > y - dy))
    plt.axhline(prob, label="unbinned")
    y = pars_binned[:, :, ipar]
    dy = covs_binned[:, :, ipar, ipar] ** 0.5
    prob = np.mean((truth[ipar] < y + dy) & (truth[ipar] > y - dy), axis=0)
    plt.plot(bins, prob, "o-", color="C1", label="binned")
    plt.ylim(0, 1)
    plt.xlabel("bins")
    plt.semilogx()
    plt.ylabel(name)
    plt.axhline(0.68, ls="-", color="k")  # nominal 1-sigma coverage
    plt.axvline((xrange[1] - xrange[0])/sigma, ls="--", color="0.5")
    plt.axvline((xrange[1] - xrange[0])/(2 * sigma), ls=":", color="0.5")
    plt.legend(loc="lower right")
Binning loss.ipynb
# ---
# title: "Replacing Values In pandas"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "Replacing values in pandas."
# type: technical_note
# draft: false
# aliases:
# - /python/pandas_replace_values.html
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### import modules

import pandas as pd
import numpy as np

# ### Create dataframe

# The test-score columns use -999 as a "missing value" sentinel.
columns = ['first_name', 'last_name', 'age', 'preTestScore', 'postTestScore']
records = [
    ('Jason', 'Miller', 42, -999, 2),
    ('Molly', 'Jacobson', 52, -999, 2),
    ('Tina', 'Ali', 36, -999, -999),
    ('Jake', 'Milner', 24, 2, 2),
    ('Amy', 'Cooze', 73, 1, -999),
]
# Transpose the row records into the same column-oriented dict as before.
raw_data = {col: list(vals) for col, vals in zip(columns, zip(*records))}
df = pd.DataFrame(raw_data, columns=columns)
df

# ### Replace all values of -999 with NAN

# `replace` returns a new frame; `df` itself still holds the sentinels.
df.replace(-999, np.nan)
docs/python/data_wrangling/pandas_replace_values.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Q# # language: qsharp # name: iqsharp # --- # # Graph Coloring # # The **"Graph Coloring"** quantum kata is a series of exercises designed # to teach you the basics of using Grover search to solve constraint # satisfaction problems, using graph coloring problem as an example. # # * You can read more about graph coloring problems [here](https://en.wikipedia.org/wiki/Graph_coloring). # * It is strongly recommended to complete the [Grover's Algorithm kata](./../GroversAlgorithm/GroversAlgorithm.ipynb) before proceeding to this one. You can also refer to its [README.md](./../GroversAlgorithm/README.md) for the list of resources on Grover's algorithm. # * [SolveSATWithGrover](./../SolveSATWithGrover/SolveSATWithGrover.ipynb) is another kata covering oracle implementation for solving constraint satisfaction problems. # # # Each task is wrapped in one operation preceded by the description of the task. # Your goal is to fill in the blank (marked with the `// ...` comments) # with some Q# code that solves the task. To verify your answer, run the cell using Ctrl/⌘+Enter. # # Within each section, tasks are given in approximate order of increasing difficulty; # harder ones are marked with asterisks. # To begin, first prepare this notebook for execution (if you skip this step, you'll get "Syntax does not match any known patterns" error when you try to execute Q# code in the next cells): %package Microsoft.Quantum.Katas::0.8.1907.1701 # > The package versions in the output of the cell above should always match. If you are running the Notebooks locally and the versions do not match, please install the IQ# version that matches the version of the `Microsoft.Quantum.Katas` package. 
# > <details> # > <summary><u>How to install the right IQ# version</u></summary> # > For example, if the version of `Microsoft.Quantum.Katas` package above is 0.1.2.3, the installation steps are as follows: # > # > 1. Stop the kernel. # > 2. Uninstall the existing version of IQ#: # > dotnet tool uninstall microsoft.quantum.iqsharp -g # > 3. Install the matching version: # > dotnet tool install microsoft.quantum.iqsharp -g --version 0.1.2.3 # > 4. Reinstall the kernel: # > dotnet iqsharp install # > 5. Restart the Notebook. # > </details> # ## Part I. Colors Representation and Manipulation # ### Task 1.1. Initialize register to a color # # **Inputs:** # # 1. An integer $C$ ($0 \leq C \leq 2^{N} - 1$). # # 2. An array of $N$ qubits in the $|0...0\rangle$ state. # # **Goal:** # # Prepare the array in the basis state which represents the binary notation of $C$. # Use little-endian encoding (i.e., the least significant bit should be stored in the first qubit). # # **Example:** For $N = 2$ and $C = 2$ the state should be $|01\rangle$. # + %kata T11_InitializeColor_Test operation InitializeColor (C : Int, register : Qubit[]) : Unit is Adj { // ... } # - # ### Task 1.2. Read color from a register # # **Input:** An array of $N$ qubits which are guaranteed to be in one of the $2^{N}$ basis states. # # **Output:** # # An $N$-bit integer that represents this basis state, in little-endian encoding. # The operation should not change the state of the qubits. # # **Example:** For $N = 2$ and the qubits in the state $|01\rangle$ return 2 (and keep the qubits in $|01\rangle$). # + %kata T12_MeasureColor_Test operation MeasureColor (register : Qubit[]) : Int { // ... return -1; } # - # ### Task 1.3. Read coloring from a register # # **Inputs:** # # 1. The number of elements in the coloring $K$. # # 2. An array of $K * N$ qubits which are guaranteed to be in one of the $2^{KN}$ basis states. # # **Output:** # # An array of $K$ $N$-bit integers that represent this basis state. 
# $i$-th integer of the array is stored in qubits with indices $i * N$, $i * N + 1$, ..., $i * N + N - 1$ in little-endian format. # The operation should not change the state of the qubits. # # **Example:** # For $N = 2$, $K = 2$ and the qubits in the state $|0110\rangle$ return `[2, 1]`. # + %kata T13_MeasureColoring_Test operation MeasureColoring (K : Int, register : Qubit[]) : Int[] { // ... return new Int[0]; } # - # ### Task 1.4. 2-bit color equality oracle # # **Inputs:** # # 1. An array of 2 qubits in an arbitrary state $|c_{0}\rangle$ representing the first color. # # 2. An array of 2 qubits in an arbitrary state $|c_{1}\rangle$ representing the second color. # # 3. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|c_{0}\rangle|c_{1}\rangle|y\rangle$ into state $|c_{0}\rangle|c_{1}\rangle|y \oplus f(c_{0},c_{1}\rangle$ ($\oplus$ is addition modulo 2), # where $f(x) = 1$ if $c_{0}$ and $c_{1}$ are in the same state, and 0 otherwise. # Leave the query register in the same state it started in. # # In this task you are allowed to allocate extra qubits. # + %kata T14_ColorEqualityOracle_2bit_Test operation ColorEqualityOracle_2bit (c0 : Qubit[], c1 : Qubit[], target : Qubit) : Unit is Adj+Ctl { // ... } # - # ### Task 1.5. N-bit color equality oracle (no extra qubits) # # This task is the same as task 1.4, but in this task you are NOT allowed to allocate extra qubits. # + %kata T15_ColorEqualityOracle_Nbit_Test operation ColorEqualityOracle_Nbit (c0 : Qubit[], c1 : Qubit[], target : Qubit) : Unit is Adj+Ctl { // ... } # - # ## Part II. Vertex coloring problem # ### Task 2.1. Classical verification of vertex coloring # # **Inputs:** # # 1. The number of vertices in the graph $V$ ($V \leq 6$). # # 2. An array of $E$ tuples of integers, representing the edges of the graph ($E \leq 12$). # Each tuple gives the indices of the start and the end vertices of the edge. # The vertices are indexed $0$ through $V - 1$. # # 3. 
An array of $V$ integers, representing the vertex coloring of the graph. # $i$-th element of the array is the color of the vertex number $i$. # # **Output:** # # True if the given vertex coloring is valid (i.e., no pair of vertices connected by an edge have the same color), and false otherwise. # # **Example:** # # Graph 0 -- 1 -- 2 would have $V = 3$ and `edges = [(0, 1), (1, 2)]`. # Some of the valid colorings for it would be `[0, 1, 0]` and `[-1, 5, 18]`. # + %kata T21_IsVertexColoringValid_Test function IsVertexColoringValid (V : Int, edges: (Int, Int)[], colors: Int[]) : Bool { // ... return true; } # - # ### Task 2.2. Oracle for verifying vertex coloring # # **Inputs:** # # 1. The number of vertices in the graph $V$ ($V \leq 6$). # # 2. An array of $E$ tuples of integers, representing the edges of the graph (E $\leq$ 12). # Each tuple gives the indices of the start and the end vertices of the edge. # The vertices are indexed $0$ through $V - 1$. # # 3. An array of $2V$ qubits `colorsRegister` that encodes the color assignments. # # 4. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|x, y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2), # where $f(x) = 1$ if the given vertex coloring is valid, and 0 otherwise. # Leave the query register in the same state it started in. # # Each color in `colorsRegister` is represented as a 2-bit integer in little-endian format. # See task 1.3 for a more detailed description of color assignments. # + %kata T22_VertexColoringOracle_Test operation VertexColoringOracle (V : Int, edges : (Int, Int)[], colorsRegister : Qubit[], target : Qubit) : Unit is Adj+Ctl { // ... } # - # ### Task 2.3. Using Grover's search to find vertex coloring # # **Inputs:** # # 1. The number of vertices in the graph $V$ ($V \leq 6$). # # 2. A marking oracle which implements vertex coloring verification, as implemented in task 2.2.
# # **Output:** # # A valid vertex coloring for the graph, in a format used in task 2.1. # + %kata T23_GroversAlgorithm_Test operation GroversAlgorithm (V : Int, oracle : ((Qubit[], Qubit) => Unit is Adj)) : Int[] { // ... return new Int[V]; }
GraphColoring/GraphColoring.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (ox) # language: python # name: ox # --- # # Simplify network topology and consolidate intersections # # Author: [<NAME>](https://geoffboeing.com/) # # - [Overview of OSMnx](http://geoffboeing.com/2016/11/osmnx-python-street-networks/) # - [GitHub repo](https://github.com/gboeing/osmnx) # - [Examples, demos, tutorials](https://github.com/gboeing/osmnx-examples) # - [Documentation](https://osmnx.readthedocs.io/en/stable/) # - [Journal article/citation](http://geoffboeing.com/publications/osmnx-complex-street-networks/) # + import networkx as nx import osmnx as ox # %matplotlib inline ox.config(log_console=True) ox.__version__ # - # ## 1. Complex intersection consolidation # # Many real-world street networks feature complex intersections and traffic circles, resulting in a cluster of graph nodes where there is really just one true intersection, as we would think of it in transportation or urban design. Similarly, divided roads are often represented by separate centerline edges: the intersection of two divided roads thus creates 4 nodes, representing where each edge intersects a perpendicular edge, but these 4 nodes represent a single intersection in the real world. Traffic circles similarly create a cluster of nodes where each street's edge intersects the roundabout. # # OSMnx can consolidate nearby intersections and optionally rebuild the graph's topology. # get a street network and plot it with all edge intersections point = 37.858495, -122.267468 G = ox.graph_from_point(point, network_type="drive", dist=500) fig, ax = ox.plot_graph(G, node_color="r") # Notice the complex intersections and traffic circles creating clusters of nodes. # # We'll specify that any nodes with 15 meter buffers of each other in this network are part of the same intersection. 
Adjust this tolerance based on the street design standards in the community you are examining, and use a projected graph to work in meaningful units like meters. We'll also specify that we do not want dead-ends returned in our list of consolidated intersections. # get a GeoSeries of consolidated intersections G_proj = ox.project_graph(G) intersections = ox.consolidate_intersections( G_proj, rebuild_graph=False, tolerance=15, dead_ends=False ) len(intersections) # compare to number of nodes in original graph len(G) # Note that these cleaned up intersections give us more accurate intersection counts and densities, but do not alter or integrate with the network's topology. # # To do that, we need to **rebuild the graph**. # consolidate intersections and rebuild graph topology # this reconnects edge geometries to the new consolidated nodes G2 = ox.consolidate_intersections(G_proj, rebuild_graph=True, tolerance=15, dead_ends=False) len(G2) fig, ax = ox.plot_graph(G2, node_color="r") # Notice how the traffic circles' many nodes are merged into a new single centroid node, with edge geometries extended to connect to it. Similar consolidation occurs at the intersection of the divided roads. # # Running `consolidate_intersections` with `rebuild_graph=True` may yield somewhat (but not very) different intersection counts/densities compared to `rebuild_graph=False`. The difference lies in that the latter just merges buffered node points that overlap, whereas the former checks the topology of the overlapping node buffers before merging them. # # This prevents topologically remote but spatially proximate nodes from being merged. For example: # # - A street intersection may lie directly below a freeway overpass's intersection with an on-ramp. We would not want to merge these together and connnect their edges: they are distinct junctions in the system of roads. 
# - In a residential neighborhood, a bollarded street may create a dead-end immediately next to an intersection or traffic circle. We would not want to merge this dead-end with the intersection and connect their edges. # # These examples illustrate (two-dimensional) geometric proximity, but topological remoteness. Accordingly, in some situations we may expect higher intersection counts when using `rebuild_graph=True` because it is more cautious with merging in these cases. The trade-off is that it has higher time complexity than `rebuild_graph=False`. # # ## 2. Graph simplification # # Use simplification to clean-up nodes that are not intersections or dead-ends while retaining the complete edge geometry. OSMnx does this automatically by default when constructing a graph. # create a network around some (lat, lng) point and plot it location_point = (33.299896, -111.831638) G = ox.graph_from_point(location_point, dist=500, simplify=False) fig, ax = ox.plot_graph(G, node_color="r") # show which nodes we'd remove if we simplify it (yellow) nc = ["r" if ox.simplification._is_endpoint(G, node) else "y" for node in G.nodes()] fig, ax = ox.plot_graph(G, node_color=nc) # simplify the network G2 = ox.simplify_graph(G) # plot the simplified network and highlight any self-loop edges loops = [edge[0] for edge in nx.selfloop_edges(G2)] nc = ["r" if node in loops else "y" for node in G2.nodes()] fig, ax = ox.plot_graph(G2, node_color=nc) # turn off strict mode and see what nodes we'd remove nc = ["r" if ox.simplification._is_endpoint(G, node, strict=False) else "y" for node in G.nodes()] fig, ax = ox.plot_graph(G, node_color=nc) # simplify network with strict mode turned off G3 = ox.simplify_graph(G.copy(), strict=False) fig, ax = ox.plot_graph(G3, node_color="r") # ## 3. Cleaning up the periphery of the network # # This is related to simplification. 
OSMnx by default (with clean_periphery parameter equal to True) buffers the area you request by 0.5km, and then retrieves the street network within this larger, buffered area. Then it simplifies the topology so that nodes represent intersections of streets (rather than including all the interstitial OSM nodes). Then it calculates the (undirected) degree of each node in this larger network. Next it truncates this network by the actual area you requested (either by bounding box, or by polygon). Finally it saves a dictionary of node degree values as a graph attribute. # # This has two primary benefits. First, it cleans up stray false edges around the periphery. If clean_periphery=False, peripheral non-intersection nodes within the requested area appear to be cul-de-sacs, as the rest of the edge leading to an intersection outside the area is ignored. If clean_periphery=True, the larger graph is first created, allowing simplification of such edges to their true intersections, allowing their entirety to be pruned after truncating down to the actual requested area. Second, it gives accurate node degrees by both a) counting node neighbors even if they fall outside the retained network (so you don't claim a degree-4 node is degree-2 because only 2 of its neighbors lie within the area), and b) not counting all those stray false edges' terminus nodes as cul-de-sacs that otherwise grossly inflate the count of nodes with degree=1, even though these nodes are really just interstitial nodes in the middle of a chopped-off street segment between intersections. # # See two examples below. 
# get some bbox bbox = ox.utils_geo.bbox_from_point((45.518698, -122.679964), dist=300) north, south, east, west = bbox G = ox.graph_from_bbox(north, south, east, west, network_type="drive", clean_periphery=False) fig, ax = ox.plot_graph(G, node_color="r") # the node degree distribution for this graph has many false cul-de-sacs k = dict(G.degree()) {n: list(k.values()).count(n) for n in range(max(k.values()) + 1)} # Above, notice all the peripheral stray edge stubs. Below, notice these are cleaned up and that the node degrees are accurate with regards to the wider street network that may extend beyond the limits of the requested area. G = ox.graph_from_bbox(north, south, east, west, network_type="drive") fig, ax = ox.plot_graph(G, node_color="r") # the streets per node distribution for this cleaned up graph is more accurate # dict keys = count of streets emanating from the node (ie, intersections and dead-ends) # dict vals = number of nodes with that count k = nx.get_node_attributes(G, "street_count") {n: list(k.values()).count(n) for n in range(max(k.values()) + 1)} # A final example. Compare the network below to the ones in the section above. It has the stray peripheral edges cleaned up. Also notice toward the bottom left, two interstitial nodes remain in that east-west street. Why? These are actually intersections, but their (southbound) edges were removed because these edges' next intersections were south of the requested area's boundaries. However, OSMnx correctly kept these nodes in the graph because they are in fact intersections and should be counted in measures of intersection density, etc. location_point = (33.299896, -111.831638) G = ox.graph_from_point(location_point, dist=500, simplify=True) fig, ax = ox.plot_graph(G, node_color="r")
notebooks/04-simplify-graph-consolidate-nodes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Get System Metadata # # [System metadata](https://dataone-architecture-documentation.readthedocs.io/en/latest/apis/Types.html#Types.SystemMetadata) is low level metadata associated with every object synchronized with DataONE. # # Retrieving the system metadata for an object is accomplished by the [`getSystemMetadata(id)`](https://dataone-architecture-documentation.readthedocs.io/en/latest/apis/MN_APIs.html#MNRead.getSystemMetadata) call which accepts a single parameter, the identifier of the object to retrieve. # # + # include some utility data and methods import notebook_utils as nbu # Import the library and create a client instance from d1_client import baseclient_2_0 cn_base_url = "https://cn.dataone.org/cn" client = baseclient_2_0.DataONEBaseClient_2_0(cn_base_url) identifier = "urn:uuid:f310f0cb-1f3d-464c-9274-5036fb8eb471" sysmeta = client.getSystemMetadata(identifier) # System metadata object is returned print(sysmeta) # Show the xml representation of the system metadata print(nbu.asXml(sysmeta, max_lines=-1)) # - # ## Accessing properties of the system metadata # # Properties of the system metadata can be accessed directly from the system metadata instance which is a [PyXB](https://pypi.org/project/PyXB/) object resulting from the deserialization of the XML message. Note some properties are complex (e.g. `identifier`) that are accessed through a `.value()` accessor. 
For example: # print(f"Identifier: {sysmeta.identifier.value()}") print(f"Series Identifier: {nbu.propertyStr(sysmeta.seriesId)}") print(f"Modified: {sysmeta.dateSysMetadataModified}") print(f"Uploaded: {sysmeta.dateUploaded}") print(f"Format ID: {sysmeta.formatId}") print(f"Size: {sysmeta.size}") print(f"Checksum: hash://{sysmeta.checksum.algorithm.lower()}/{sysmeta.checksum.value()}") print(f"Origin Member Node: {nbu.propertyStr(sysmeta.originMemberNode)}") print(f"Authoritative Member Node: {nbu.propertyStr(sysmeta.authoritativeMemberNode)}") print(f"Obsoletes: {nbu.propertyStr(sysmeta.obsoletes)}") print(f"Obsoleted By: {nbu.propertyStr(sysmeta.obsoletedBy)}") print("Access policy rules:") for rule in sysmeta.accessPolicy.allow: print(f" {', '.join(map(lambda S: S.value(), rule.subject))} can {', '.join(rule.permission)}") print(f"Replication allowed: {sysmeta.replicationPolicy.replicationAllowed}") print("Replicas of this object:") for replica in sysmeta.replica: print(f" {replica.replicaMemberNode.value():15} {replica.replicationStatus:10} {replica.replicaVerified}") # ## Error Handling # # Errors resulting from a call to a DataONE API will normally result in a `DataONEException` which can include detail about the cause of the error. # + # Try and retrieve non-existing system metadata import d1_common.types.exceptions identifier = "something_bogus" try: sysmeta = client.getSystemMetadata(identifier) except d1_common.types.exceptions.DataONEException as e: print(e) # -
python_examples/04_getSystemMetadata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/krisivarga/deep_learning_HW_big/blob/main/Different_class_number_testing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="zLfvRd_eOaHR" outputId="cff38272-c082-4a42-fb2d-149e64030ae5" import requests, zipfile, io, os import bz2,shutil import xml.etree.ElementTree as ET import numpy as np import pandas as pd # !pip install sentencepiece # !pip install transformers from transformers import XLNetTokenizer from transformers import BertTokenizer import re from sklearn.model_selection import train_test_split import tensorflow from tensorflow import keras from tensorflow.keras import Sequential from tensorflow.keras.utils import to_categorical from tensorflow.keras.optimizers import Adam from tensorflow.keras.layers import Embedding, LSTM, Dropout, Dense, SpatialDropout1D, Conv1D, MaxPooling1D # + id="RfFDp7ofOiDv" zip_file_url = "http://groups.di.unipi.it/~gulli/newsspace200.xml.bz" filename = zip_file_url.split("/")[-1] with open(filename, "wb") as f: r = requests.get(zip_file_url) f.write(r.content) # + colab={"base_uri": "https://localhost:8080/"} id="ieFYKERSOm_k" outputId="b8064dfe-514a-486e-cf2a-8ea39de187f3" dirName = 'data' try: # Create target Directory os.mkdir(dirName) print("Directory " , dirName , " Created ") except FileExistsError: print("Directory " , dirName , " already exists") with bz2.BZ2File("newsspace200.xml.bz") as fr, open("./data/input.xml","wb") as fw: shutil.copyfileobj(fr,fw) # + id="xRqVucKnOqZe" tree = ET.parse("./data/input.xml") root = tree.getroot() titles = [] categories = [] descriptions = [] sources = [] for title 
in tree.findall('title'): titles.append(title.text) for category in tree.findall('category'): categories.append(category.text) for description in tree.findall('description'): descriptions.append(description.text) for source in tree.findall('source'): sources.append(source.text) # + id="timDRl0OOsPS" titles = np.array(titles).T categories = np.array(categories).T descriptions = np.array(descriptions).T sources = np.array(sources).T # + id="QWX6Mg27PhVv" data = np.dstack((sources,titles,descriptions,categories)).reshape(len(titles),4) # + id="4REoDGCuOyQE" df = pd.DataFrame(data) df.columns = ['source','title', 'desc', 'cat'] # + id="M9qRb9IpPaSZ" def tr(bt,reg,title,sz): a = bt.tokenize(title + " " + sz) r = [] for s in a: if reg.match(s) is not None: r.append(s) return r def bertenc(bt,title,sz): text_to_encode = title + " " + sz a = bt.encode_plus( text_to_encode, max_length= 64, add_special_tokens=True, return_token_type_ids=False, padding="max_length", truncation = True, return_attention_mask=False ) ids = a['input_ids'] return ids def dict_creater(arr): for a in arr: if a != 0: if a in bertencidvoc_dict: count = bertencidvoc_dict[a] + 1 bertencidvoc_dict.update({a:count}) else: bertencidvoc_dict.update({a:1}) return def dict_creater2(arr): for a in arr: if a != 0: if a in bertvoc_dict: count = bertvoc_dict[a] + 1 bertvoc_dict.update({a:count}) else: bertvoc_dict.update({a:1}) return def create_model(i): loaded_model = keras.models.load_model("best_LSTM_CONV1D_model.h5") model = tensorflow.keras.Sequential([ keras.layers.Embedding( input_dim = len(bertvoc_dict), output_dim = loaded_model.layers[0].output_dim, input_length = 64 ), tensorflow.keras.layers.Conv1D( filters=loaded_model.layers[1].filters, kernel_size=loaded_model.layers[1].kernel_size, padding='same', activation='relu' ), tensorflow.keras.layers.Dropout( rate = loaded_model.layers[2].rate ), tensorflow.keras.layers.LSTM( units=loaded_model.layers[3].units ), tensorflow.keras.layers.Dropout( rate = 
loaded_model.layers[4].rate ), tensorflow.keras.layers.Dense( units = loaded_model.layers[5].units, activation = 'relu' ), tensorflow.keras.layers.Dense( units = i, activation = 'softmax' ) ]) model.compile(optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy']) return model # + colab={"base_uri": "https://localhost:8080/", "height": 241, "referenced_widgets": ["8d751294945a424cad191585670d8f87", "<KEY>", "7ae2176b53b941ba9b149e46f4285875", "8b47eabb9b4742adb23c7c4860b52eff", "be6f10828fdd402792cf0f7fed684394", "<KEY>", "<KEY>", "4083bd8e7f7a416da5f706def2d76d29", "<KEY>", "f8680f6162c843dbb37e38c437169d2a", "ede4636ddb654b81a2ed29c925c5b151", "8a64c430a1684934b4f2a2c0015be0ea", "<KEY>", "e2ac8ec0787542eea9427cbce57e6bf9", "<KEY>", "ede44dc3d250444296b5f7ab5c6caf81", "0e27e76de88c4d45847e7048113bea4e", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "61c0ae4ca8ae4e69b51e01b256afdc86", "7bd632ede95c43cfa537b8450c0a8858", "91f5d3c4b8354812956aafcc116eacef", "c8fd6aba73be4c12a624df6b9c76929a", "3e452637ec2d447dba3539261031e3a8", "55be365ab4ec4ef185419cfab62e1b17", "ccfab7fceed2434b91527eff7e95278f", "<KEY>", "<KEY>", "be86dede21ac4b1eaeb778978f21da2d", "dc91f29ddd624e048d3fbcad45181b69", "<KEY>", "33819751eea144948c3c44dc2ff3e157", "<KEY>", "<KEY>", "dcf256719e1346909a0de9942e8b6331", "20e30cb07c2247cc933e383093a9043b", "e3839cef7a7141a9a07bb89ef6436243", "c6e30a79452a417a9b2101b9a9c42352", "<KEY>", "3cd879f1a7ce49949bc14adad5e17205", "ff9332ac45614273bd5a6ab8847d0e70", "e58a558c346f42e6b683e3f1a9aa375a", "<KEY>", "e8a53d8f347a44bcb09754bee9f54b56", "<KEY>", "<KEY>", "b2fa4965686e4d99beb2fea49045bea5", "<KEY>", "9142d31c6a6e426f96aeffba983f8e3b", "67631ad6bd00422d9af704e5ee70c26e", "049b96ec3efc4deb86b3e1e12a419e02", "<KEY>", "5acdf6f0158145cab63250f9afabe2c2", "<KEY>", "60dfb88a22f948e7bbd147e1a7a905b6", "<KEY>", "<KEY>", "34ded994262a4caf809e04b0035df2ec", "f3cc22fd4e9d4fde858e36d0d82547de", "<KEY>", 
"<KEY>", "<KEY>", "320248212ec14da1b4a9b45b79ec261b", "<KEY>", "<KEY>", "0073a6c6cdfe4634a8b118adc1eba469", "<KEY>", "42ad6d4d01f0427d91f5409b8acd2ddb", "0cc335f488a64911b706e6305d74ef2b", "<KEY>", "cc788ed36bd1433aa490214aedc79238", "<KEY>", "9132a63b0e21435c93ee52a900a48b67", "ce08785d4e3b45c99ebe97985141517d"]} id="Objzb7f3O3Pv" outputId="f9e25439-f27f-4d27-98c6-2ab52977dc76" res = list() for i in range(5,11): #select top i classes selected_cats = df['cat'].value_counts()[:i].index.tolist() df_selected = df.loc[df['cat'].isin(selected_cats)] #drop empty desc rows df_selected = df_selected.dropna() #load tokenizer regex = re.compile('^[a-zA-Z]{3,}$') xltokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased") btokenizer = BertTokenizer.from_pretrained("bert-base-uncased") #tokenize df_selected['berttoken'] = df_selected.apply(lambda row: tr(btokenizer, regex, str(row['title']), str(row['desc'])), axis=1) df_selected['bertencodeplus'] = df_selected.apply(lambda row: bertenc(btokenizer, str(row['title']), str(row['desc'])), axis=1) #voc creation bertencidvoc_dict = {} bertvoc_dict = {} df_selected.apply(lambda row: dict_creater(row['bertencodeplus']), axis=1) df_selected.apply(lambda row: dict_creater2(row['berttoken']), axis=1) #label to numeric c = 0 category_dictionary = {} cat_rev_dict= {} for s in selected_cats: category_dictionary.update({c:s}) cat_rev_dict.update({s:i}) c = c + 1 df_selected['cat_enc'] = df_selected.apply(lambda row: cat_rev_dict[str(row['cat'])] , axis=1) #split data to X and y X = np.array(df_selected["bertencodeplus"].values.tolist()) y = df_selected['cat_enc'].to_numpy() #split data to train, test, valid X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, shuffle = True) X_test, X_valid, y_test, y_valid = train_test_split(X_test, y_test, test_size=0.5, random_state=42, shuffle = True) #cat to onehotencode Y_train = to_categorical(y_train, num_classes=5, dtype='float32') Y_test = 
to_categorical(y_test, num_classes=5, dtype='float32') Y_valid = to_categorical(y_valid, num_classes=5, dtype='float32') #load model model = create_model(i) #fit model print('Model for {0} classes started to fit'.format(i)) model.fit(X_train,Y_train, epochs=10, validation_data=(X_valid,Y_valid)) #evaluate model results = model.evaluate(X_test,Y_test) res.append(i,results)
Different_class_number_testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -

# Exploratory playground for a "minimum meeting rooms" scheduling problem:
# builds a toy meeting table, a per-minute room-availability grid, and then
# tries an overlap-counting approach with a self-merge.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# Six sample meetings as HH:MM strings.
d = {'start': ['08:00', '13:20', '10:00', '13:55', '14:00', '14:05'],
     'end': ['09:15', '15:20', '14:00', '16:25', '17:45', '17:45']}
mtgs = pd.DataFrame(data=d)
mtgs.head()

# Parse the HH:MM strings into timestamps (pandas fills in today's date).
mtgs['start'] = pd.to_datetime(mtgs['start'])
mtgs['end'] = pd.to_datetime(mtgs['end'])
mtgs.head()

# One row per minute of the day; each room column is an availability flag.
rooms = pd.DataFrame(columns=['room_1'])
rooms['time'] = pd.date_range(start='5/13/2019 00:00', end='5/14/2019 00:00', freq='T')
rooms.head()

rooms.info()

# Index the availability grid by timestamp.
rooms.index = rooms['time']
rooms.drop(columns='time', inplace=True)

# All rooms start out available (extra rooms kept commented out for now).
rooms['room_1'] = True
# rooms['room_2'] = True
# rooms['room_3'] = True
# rooms['room_4'] = True
# rooms['room_5'] = True
# rooms['room_6'] = True
# rooms['room_7'] = True
# rooms['room_8'] = True
rooms.head()

mtgs

# Sanity-print every meeting's start and end.
for i in range(len(mtgs)):
    print(mtgs.iloc[i]['start'], "is the start time of meeting {}".format(i+1))
    print(mtgs.iloc[i]['end'], "is the end time of meeting {}".format(i+1))
    print()
    print('-'*25)
    print()

# +
# rooms.loc[rooms.index > '01:00'] = False
# -

# Grab the first meeting's bounds (break after one iteration on purpose).
for i in range(len(mtgs)):
    start = mtgs.iloc[i]['start']
    end = mtgs.iloc[i]['end']
    print(start, end)
    break

# Mark two rooms busy to exercise the availability printout below.
# NOTE(review): room_4/room_8 columns were never created above (they are
# commented out), so these lines add new all-False columns.
rooms.room_4 = False
rooms.room_8 = False
rooms.head(10)

# Walk the first ten minutes and report each room's availability.
for i in rooms.loc[rooms.index <= '00:10'].index:
    print("At time {}".format(i))
    for room in rooms.columns:
        if rooms.loc[i][room] == True:
            print("{} is available".format(room))
        else:
            print("{} is unavailable".format(room))
    print()

# Collect every minute covered by the first meeting.
start = mtgs.iloc[0]['start']
end = mtgs.iloc[0]['end']
times = []
for i in rooms.index:
    if i >= start and i <= end:
        times.append(i)
    else:
        pass
times[:5], times[-5:]

# Count how many of those minutes room_1 is free.
count = 0
for i in range(len(times)):
    print(times[i])
    value = times[i]
    print()
    print(rooms.loc[value].room_1)
    if rooms.loc[value].room_1 == True:
        count += 1
        print(count)
        pass
    if value == times[-1]:
        print("end")
        break

# Second approach: count pairwise overlaps via a filtered self-merge.
data = [[1, '08:00', '09:15'], [2, '13:20', '15:20'], [3, '10:00', '14:00'],
        [4, '13:55', '16:25'], [5, '14:00', '17:45'], [6, '14:05', '17:45']]
df = pd.DataFrame(data, columns=['ID', 'START', 'END'])
df['START'] = pd.to_datetime(df.START)
df['END'] = pd.to_datetime(df.END)
df

df1 = df.sort_values(by=['START'])
df2 = df.sort_values(by=['START'])
print(df1)
print()
print(df2)

# NOTE(review): df1 and df2 are identical and the boolean mask compares them
# row-by-row (aligned), not as a cross join — this does not enumerate all
# overlapping meeting pairs; verify the intended algorithm.
df = df1.merge(df1[(df2.START > df1.START) & (df2.START < df1.END)], on='ID', how='inner')
df

df = df.groupby('ID').count()
df

# Max overlap count + 1 as the candidate answer for rooms needed.
minMeetingRooms = df.max()+1
print(minMeetingRooms)

# Unrelated scratch work below: DataFrame column-assignment experiments.
# NOTE(review): requires a local 'employee-attrition.csv'; will raise
# FileNotFoundError elsewhere.
df = pd.read_csv('employee-attrition.csv')
df.head()

columns = ['Name', 'Age', 'Height', 'Weight']
df = pd.DataFrame(columns=columns)
print(df.shape)
df.head()

# Assigning a full-length array to a column of an empty frame grows it to 100 rows.
df.Name = np.ones(100)
print(df.shape)
df.head()

df.Age = np.random.randint(0, 100, 100)
print(df.shape)
df.head()

df.Height = np.random.randint(0, 100, 100)
print(df.shape)
df.head()

df.Weight = np.random.randint(0, 100, 100)
print(df.shape)
df.head()

# NOTE(review): chained assignment — triggers SettingWithCopyWarning;
# df.loc[1, 'Name'] = 20 is the supported form.
df.Name[1] = 20
df.head()

df['Time'] = np.nan
df.head()

df.Time = pd.to_datetime(df.Time)
df.head()

# Empty table with datetime-typed columns.
table = pd.DataFrame(columns=['StartTime', 'EndTime'])
table.head()

table['StartTime'] = pd.to_datetime(table['StartTime'])
table['EndTime'] = pd.to_datetime(table['EndTime'])
table.info()
sql_wayfair_challenge_playground.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.3 64-bit
#     name: python38364bit55709c7191ff44b1b648f99363c3d6d1
# ---

# +
# FIX: the notebook used np and plt without importing them.
import numpy as np
import matplotlib.pyplot as plt


class ChessBoard:
    """An 8x8 board of RGB cells (values in [0, 1]), rendered with matplotlib."""

    # Named colors as (R, G, B) triples in [0, 1].
    white = (1, 1, 1)
    red = (1, .2, 0)
    blue = (.1, .1, 1)

    def __init__(self):
        # 8x8 grid of RGB triples, all zeros (black) to start.
        self.grid = np.zeros((8, 8, 3))

    def render(self):
        # FIX: parameter was misspelled 'sefl' while the body used 'self',
        # so every call raised NameError.
        plt.imshow(self.grid)

    def color_cell(self, row, col, color):
        """Set the cell at (row, col) to the given RGB color."""
        self.grid[row, col] = color


# +
# Demo: paint the top-left cell white and show the board.
board = ChessBoard()
white = (1, 1, 1)
board.grid[0, 0] = white
board.render()
test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# CycleGAN building blocks: image replay buffer, weight init, conv/deconv/
# residual modules, ResNet generator, PatchGAN discriminator, and the
# training hyper-parameters.

# +
from __future__ import division
import gc
import os
import time
import sys
import random
from tqdm.notebook import tqdm
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import clear_output
from collections import namedtuple, deque
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import torchvision.utils as vutils
import torchvision.transforms as transforms
# -

class ImageBuffer:
    """Fixed-size buffer to store image tuples.

    Used to replay previously generated fake images to the discriminators
    (CycleGAN's history trick). Oldest entries are dropped by the deque and
    sample() consumes from the front (FIFO), so each stored tuple is used once.
    """

    def __init__(self, buffer_size):
        """Initialize a ImageBuffer object.

        Params
        ======
            buffer_size (int): maximum size of buffer
        """
        self.memory = deque(maxlen=buffer_size)  # internal memory (deque)
        # Named tuple type holding one (real X, fake X, real Y, fake Y) group.
        self.images = namedtuple("Images", field_names=['real_image_X', 'fake_image_X',
                                                        'real_image_Y', 'fake_image_Y'])

    def add(self, real_image_X, fake_image_X, real_image_Y, fake_image_Y):
        """Add a new images to memory.

        The fake images are cloned so later in-place graph updates don't
        mutate the stored copies.
        """
        image_pair = self.images(real_image_X, fake_image_X.clone(),
                                 real_image_Y, fake_image_Y.clone())
        self.memory.append(image_pair)

    def sample(self):
        """Return (and remove) the oldest image tuple from memory."""
        return self.memory.popleft()

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)


# +
# custom weights initialization called on netG and netD
# (DCGAN-style: N(0, 0.02) for conv/linear weights, N(1, 0.02) for norm scales)
def weights_init(m):
    classname = m.__class__.__name__
    if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


def model_summary(model):
    """Print a model's layers plus total/trainable parameter counts."""
    print('=================================================================')
    print(model)
    print('=================================================================')
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Total params: {:,}'.format(total_params))
    print('Trainable params: {:,}'.format(trainable_params))
    print('Non-trainable params: {:,}'.format(total_params - trainable_params))


class ConvBlock(nn.Module):
    """Conv2d -> InstanceNorm2d -> activation."""

    def __init__(self, in_channels, filters, kernel_size=7, strides=1, padding=0,
                 activation=nn.ReLU):
        super(ConvBlock, self).__init__()
        self.blocks = nn.Sequential(
            nn.Conv2d(in_channels, out_channels=filters, kernel_size=kernel_size,
                      stride=strides, padding=padding),
            nn.InstanceNorm2d(num_features=filters),
            activation(inplace=True))

    def forward(self, input_tensor):
        x = self.blocks(input_tensor)
        return x


class DeConvBlock(nn.Module):
    """ConvTranspose2d -> InstanceNorm2d -> activation (upsampling block)."""

    def __init__(self, in_channels, filters, kernel_size=7, strides=1, padding=0,
                 output_padding=1, activation=nn.ReLU):
        super(DeConvBlock, self).__init__()
        self.blocks = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels=filters, kernel_size=kernel_size,
                               stride=strides, padding=padding, output_padding=output_padding),
            nn.InstanceNorm2d(num_features=filters),
            activation(inplace=True))

    def forward(self, input_tensor):
        x = self.blocks(input_tensor)
        return x


class ResidualBlock(nn.Module):
    """Two conv+norm stages with a skip connection (output = f(x) + x).

    NOTE(review): ReflectionPad2d is applied AFTER each 3x3 conv here, not
    before as in the reference CycleGAN; the spatial size still works out
    (conv shrinks by 2, pad restores 2) but border behavior differs — confirm
    this ordering is intentional.
    """

    def __init__(self, in_channels, filters, kernel_size=3, strides=1, padding=0,
                 activation=nn.ReLU):
        super(ResidualBlock, self).__init__()
        self.conv_blocks = nn.Sequential(
            nn.Conv2d(in_channels, out_channels=filters, kernel_size=kernel_size,
                      stride=strides, padding=padding),
            nn.ReflectionPad2d(1),
            nn.InstanceNorm2d(num_features=filters),
            activation(inplace=True),
            nn.Conv2d(in_channels, out_channels=filters, kernel_size=kernel_size,
                      stride=strides, padding=padding),
            nn.ReflectionPad2d(1),
            nn.InstanceNorm2d(num_features=filters))

    def forward(self, input_tensor):
        x = self.conv_blocks(input_tensor)
        # Residual skip connection.
        x = x + input_tensor
        return x


class BaseModel(nn.Module):
    """Base class adding a summary() helper over self.model."""

    def __init__(self):
        super(BaseModel, self).__init__()
        # Subclasses assign an nn.Sequential here.
        self.model = None

    def forward(self, x):
        pass

    def summary(self):
        """Print layers and parameter counts, or a notice if model is unset."""
        if self.model != None:
            print('=================================================================')
            print(self.model)
            print('=================================================================')
            total_params = sum(p.numel() for p in self.model.parameters())
            trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
            print('Total params: {:,}'.format(total_params))
            print('Trainable params: {:,}'.format(trainable_params))
            print('Non-trainable params: {:,}'.format(total_params - trainable_params))
        else:
            print('Model not created')


class ResNetGenerator(BaseModel):
    """CycleGAN ResNet generator: downsample x2, n residual blocks, upsample x2, tanh."""

    def __init__(self, input_channel=3, output_channel=3, filters=64, n_blocks=9):
        super(ResNetGenerator, self).__init__()
        # Downsample layers
        layers = [
            nn.ReflectionPad2d(3),
            ConvBlock(in_channels=input_channel, filters=filters, kernel_size=7,
                      strides=1, activation=nn.LeakyReLU),
            ConvBlock(in_channels=filters, filters=filters * 2, kernel_size=3,
                      strides=2, padding=1, activation=nn.LeakyReLU),
            ConvBlock(in_channels=filters * 2, filters=filters * 4, kernel_size=3,
                      strides=2, padding=1, activation=nn.LeakyReLU)
        ]
        # Residual layers
        for i in range(n_blocks):
            layers.append(ResidualBlock(in_channels=filters * 4, filters=filters * 4,
                                        kernel_size=3, strides=1, activation=nn.LeakyReLU))
        # Upsample layers
        layers += [
            DeConvBlock(in_channels=filters * 4, filters=filters * 2, kernel_size=3,
                        strides=2, padding=1, output_padding=1, activation=nn.LeakyReLU),
            DeConvBlock(in_channels=filters * 2, filters=filters, kernel_size=3,
                        strides=2, padding=1, output_padding=1, activation=nn.LeakyReLU),
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels=filters, out_channels=output_channel, kernel_size=7,
                      stride=1, padding=0)
        ]
        # Output layer: squash to [-1, 1] to match normalized images.
        layers += [nn.Tanh()]
        # Create model
        self.model = nn.Sequential(*layers)

    def forward(self, input_tensor):
        x = self.model(input_tensor)
        return x


class PatchGANDiscriminator(BaseModel):
    """70x70-style PatchGAN: outputs a map of per-patch real/fake scores."""

    def __init__(self, input_channel, filters=64):
        super(PatchGANDiscriminator, self).__init__()
        layers = [
            nn.Conv2d(in_channels=input_channel, out_channels=filters, kernel_size=4,
                      stride=2, padding=1),
            nn.LeakyReLU(inplace=True),
            ConvBlock(in_channels=filters, filters=filters * 2, kernel_size=4,
                      strides=2, padding=1, activation=nn.LeakyReLU),
            ConvBlock(in_channels=filters * 2, filters=filters * 4, kernel_size=4,
                      strides=2, padding=1, activation=nn.LeakyReLU),
            ConvBlock(in_channels=filters * 4, filters=filters * 8, kernel_size=4,
                      strides=1, padding=1, activation=nn.LeakyReLU),
        ]
        # Output layer: single-channel patch score map (no sigmoid; LSGAN loss).
        layers += [nn.Conv2d(in_channels=filters * 8, out_channels=1, kernel_size=4,
                             stride=1, padding=1)]
        # Create model
        self.model = nn.Sequential(*layers)

    def forward(self, input_tensor):
        x = self.model(input_tensor)
        return x


# +
# Training hyper-parameters and paths.
INPUT_SHAPE = 256
SCALE_WIDTH = 256
DATASET = 'horse2zebra'
DATASET_PATH = os.path.join("datasets", DATASET)  # Dataset path
OUTPUT_PATH = 'outputs'
base_logdir = os.path.join("logs", 'pytorch')  # Sets up a log directory.

USE_BUFFER = True  # Use image buffer to train discriminator
REPLAY_PROB = 0.5  # The probability of using previous fake images to train discriminator
BUFFER_SIZE = 50  # The maximum size of image buffer

BATCH_SIZE = 1
EPOCHs = 200
CURRENT_EPOCH = 1  # Epoch start from
SAVE_EVERY_N_EPOCH = 5  # Save checkpoint at every n epoch
DISCRIMINATOR_LOSS_WEIGHT = 0.5  # Discriminator loss will be multiplied by this weight
SOFT_FAKE_LABEL_RANGE = [0.0, 0.3]  # The label of fake label will be generated within this range.
SOFT_REAL_LABEL_RANGE = [0.7, 1.2]  # The label of real label will be generated within this range.
LR = 0.0002
LR_DECAY_EPOCH = 100  # Epoch after which the learning rate decays linearly to 0
LAMBDA = 10  # loss weight for cycle consistency
ngpu = 1

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
writer = SummaryWriter(base_logdir)

# +
# Build the two generators (X->Y and Y->X).
G_XtoY = ResNetGenerator(input_channel=3, output_channel=3, filters=64, n_blocks=9).to(device)
G_YtoX = ResNetGenerator(input_channel=3, output_channel=3, filters=64, n_blocks=9).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    G_XtoY = nn.DataParallel(G_XtoY, list(range(ngpu)))
    G_YtoX = nn.DataParallel(G_YtoX, list(range(ngpu)))

# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.2.
G_XtoY.apply(weights_init)
G_YtoX.apply(weights_init)
# -

G_XtoY.summary()

# +
# Build the two PatchGAN discriminators.
Dx = PatchGANDiscriminator(input_channel=3, filters=64).to(device)
Dy = PatchGANDiscriminator(input_channel=3, filters=64).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    Dx = nn.DataParallel(Dx, list(range(ngpu)))
    Dy = nn.DataParallel(Dy, list(range(ngpu)))

# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.2.
Dx.apply(weights_init)
Dy.apply(weights_init)
# -

Dx.summary()


# +
def denormalize(images, std=0.5, mean=0.5):
    """Undo Normalize((0.5,...), (0.5,...)): map [-1, 1] back to [0, 1] for plotting."""
    images = (images * std) + mean
    return images


def deprocess(input_tensor):
    """Move a CHW or NCHW tensor to CPU, denormalize, and transpose to HWC for imshow."""
    if len(input_tensor.shape) == 3:
        return np.transpose(denormalize(input_tensor.to(device).cpu()), (1, 2, 0))
    elif len(input_tensor.shape) == 4:
        return np.transpose(denormalize(input_tensor.to(device).cpu()), (0, 2, 3, 1))


# +
class GeneratorFolderDataset(data.Dataset):
    """Load images from folder for generator."""

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied on a sample.
        """
        self.root_dir = root_dir
        self.filenames = os.listdir(root_dir)
        self.transform = transform

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.filenames[idx]
        img_path = os.path.join(self.root_dir, img_name)
        sample = Image.open(img_path)
        if self.transform:
            sample = self.transform(sample)
        return sample


class GeneratorDataset(data.Dataset):
    """Load images first for generator.

    Same as GeneratorFolderDataset but forces RGB (drops alpha / grayscale).
    """

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied on a sample.
        """
        self.root_dir = root_dir
        self.filenames = os.listdir(root_dir)
        self.transform = transform

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.filenames[idx]
        img_path = os.path.join(self.root_dir, img_name)
        sample = Image.open(img_path).convert('RGB')
        if self.transform:
            sample = self.transform(sample)
        return sample


# +
# Train-time augmentation: crop + random flip; test-time: tensor + normalize only.
preprocess_train_transformations = transforms.Compose([
    transforms.CenterCrop(INPUT_SHAPE),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

preprocess_test_transformations = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

train_data_X = GeneratorDataset(root_dir=os.path.join(DATASET_PATH, "trainA"),
                                transform=preprocess_train_transformations)
train_data_Y = GeneratorDataset(root_dir=os.path.join(DATASET_PATH, "trainB"),
                                transform=preprocess_train_transformations)
test_data_X = GeneratorDataset(root_dir=os.path.join(DATASET_PATH, "testA"),
                               transform=preprocess_test_transformations)
test_data_Y = GeneratorDataset(root_dir=os.path.join(DATASET_PATH, "testB"),
                               transform=preprocess_test_transformations)

train_image_loader_X = torch.utils.data.DataLoader(train_data_X, batch_size=BATCH_SIZE,
                                                   shuffle=True, num_workers=0)
train_image_loader_Y = torch.utils.data.DataLoader(train_data_Y, batch_size=BATCH_SIZE,
                                                   shuffle=True, num_workers=0)
test_image_loader_X = torch.utils.data.DataLoader(test_data_X, batch_size=BATCH_SIZE,
                                                  shuffle=False, num_workers=0)
test_image_loader_Y = torch.utils.data.DataLoader(test_data_Y, batch_size=BATCH_SIZE,
                                                  shuffle=False, num_workers=0)

print("Found {} images in {}".format(len(train_data_X), 'trainA'))
print("Found {} images in {}".format(len(train_data_Y), 'trainB'))
print("Found {} images in {}".format(len(test_data_X), 'testA'))
print("Found {} images in {}".format(len(test_data_Y), 'testB'))
# -

iter_train_image_X = iter(train_image_loader_X)
iter_train_image_Y = iter(train_image_loader_Y)
iter_test_image_X = iter(test_image_loader_X)
iter_test_image_Y = iter(test_image_loader_Y)

sample_X = next(iter_test_image_X)
sample_Y = next(iter_test_image_Y)

plt.subplot(121)
plt.title('X')
plt.imshow(deprocess(sample_X)[0])
# FIX: was plt.subplot(121) again, which overdrew the X panel.
plt.subplot(122)
plt.title('Y')
plt.imshow(deprocess(sample_Y)[0])

image_buffer = ImageBuffer(buffer_size=BUFFER_SIZE)

# One "epoch" covers the smaller of the two unpaired datasets.
training_steps = min(len(train_data_X), len(train_data_Y))
print(training_steps)

# +
Dx_optimizer = optim.Adam(Dx.parameters(), lr=0.0002, betas=(0.5, 0.999))
Dy_optimizer = optim.Adam(Dy.parameters(), lr=0.0002, betas=(0.5, 0.999))
G_XtoY_optimizer = optim.Adam(G_XtoY.parameters(), lr=0.0002, betas=(0.5, 0.999))
G_YtoX_optimizer = optim.Adam(G_YtoX.parameters(), lr=0.0002, betas=(0.5, 0.999))


def lambda_rule(epoch):
    """Linear LR decay: constant until LR_DECAY_EPOCH, then linearly to 0."""
    lr = 1.0 - max(0, epoch + 1 - LR_DECAY_EPOCH) / float(LR_DECAY_EPOCH + 1)
    return lr


Dx_optimizer_scheduler = lr_scheduler.LambdaLR(Dx_optimizer, lr_lambda=lambda_rule)
Dy_optimizer_scheduler = lr_scheduler.LambdaLR(Dy_optimizer, lr_lambda=lambda_rule)
G_XtoY_optimizer_scheduler = lr_scheduler.LambdaLR(G_XtoY_optimizer, lr_lambda=lambda_rule)
G_YtoX_optimizer_scheduler = lr_scheduler.LambdaLR(G_YtoX_optimizer, lr_lambda=lambda_rule)


# +
# LSGAN losses with soft labels. The *_test variants are MSE/L1 reference
# implementations of the same quantities and are not used by the training loop.
def discriminator_loss(real_image, generated_image):
    """LSGAN discriminator loss with soft real/fake labels."""
    real_loss = (real_image - torch.FloatTensor(real_image.size()).uniform_(
        SOFT_REAL_LABEL_RANGE[0], SOFT_REAL_LABEL_RANGE[1]).to(device)).pow(2).mean()
    fake_loss = (generated_image - torch.FloatTensor(generated_image.size()).uniform_(
        SOFT_FAKE_LABEL_RANGE[0], SOFT_FAKE_LABEL_RANGE[1]).to(device)).pow(2).mean()
    total_loss = real_loss + fake_loss
    return total_loss * 0.5


def discriminator_loss_test(real_image, generated_image):
    mse = torch.nn.MSELoss()
    real_loss = mse(real_image, torch.FloatTensor(real_image.size()).uniform_(
        SOFT_REAL_LABEL_RANGE[0], SOFT_REAL_LABEL_RANGE[1]).to(device))
    fake_loss = mse(generated_image, torch.FloatTensor(generated_image.size()).uniform_(
        SOFT_FAKE_LABEL_RANGE[0], SOFT_FAKE_LABEL_RANGE[1]).to(device))
    total_loss = real_loss + fake_loss
    return total_loss * 0.5


def generator_loss(generated_image):
    """LSGAN generator loss: push discriminator output toward soft real labels."""
    loss = (generated_image - torch.FloatTensor(generated_image.size()).uniform_(
        SOFT_REAL_LABEL_RANGE[0], SOFT_REAL_LABEL_RANGE[1]).to(device)).pow(2).mean()
    return loss


def generator_loss_test(generated_image):
    mse = torch.nn.MSELoss()
    loss = mse(generated_image, torch.FloatTensor(generated_image.size()).uniform_(
        SOFT_REAL_LABEL_RANGE[0], SOFT_REAL_LABEL_RANGE[1]).to(device))
    return loss


def cycle_consistency_loss(real_image, cycled_image):
    """L1 cycle-consistency loss, weighted by LAMBDA."""
    loss = (real_image - cycled_image).abs().mean()
    return loss * LAMBDA


def cycle_consistency_loss_test(real_image, cycled_image):
    mae = torch.nn.L1Loss()
    loss = mae(real_image, cycled_image).abs().mean()
    return loss * LAMBDA


def identity_loss(real_image, generated_image):
    """L1 identity loss (G should leave same-domain images unchanged)."""
    loss = (real_image - generated_image).abs().mean()
    return loss * 0.5 * LAMBDA


def identity_loss_test(real_image, generated_image):
    mae = torch.nn.L1Loss()
    loss = mae(real_image, generated_image).abs().mean()
    return loss * 0.5 * LAMBDA


# +
checkpoint_path = os.path.join("checkpoints", 'pytorch', DATASET, )
if not os.path.exists(checkpoint_path):
    os.makedirs(checkpoint_path)


def save_training_checkpoint(epoch):
    """Save all model/optimizer/scheduler states plus the current epoch."""
    state_dict = {
        'G_XtoY': G_XtoY.state_dict(),
        'G_YtoX': G_YtoX.state_dict(),
        'Dx': Dx.state_dict(),
        'Dy': Dy.state_dict(),
        'G_XtoY_optimizer': G_XtoY_optimizer.state_dict(),
        'G_YtoX_optimizer': G_YtoX_optimizer.state_dict(),
        'Dx_optimizer': Dx_optimizer.state_dict(),
        'Dy_optimizer': Dy_optimizer.state_dict(),
        'Dx_optimizer_scheduler': Dx_optimizer_scheduler.state_dict(),
        'Dy_optimizer_scheduler': Dy_optimizer_scheduler.state_dict(),
        'G_XtoY_optimizer_scheduler': G_XtoY_optimizer_scheduler.state_dict(),
        'G_YtoX_optimizer_scheduler': G_YtoX_optimizer_scheduler.state_dict(),
        'epoch': epoch
    }
    save_path = os.path.join(checkpoint_path, 'training-checkpoint')
    torch.save(state_dict, save_path)


def save_models():
    """Save the two whole generator modules for inference."""
    state_dict = {
        'G_XtoY': G_XtoY,
        'G_YtoX': G_YtoX
    }
    save_path = os.path.join(checkpoint_path, 'model')
    # FIX: was torch.save(state_dict, checkpoint_path) — that targets the
    # checkpoint *directory* and fails; save_path was computed but unused.
    torch.save(state_dict, save_path)


# if a checkpoint exists, restore the latest checkpoint.
if os.path.isfile(os.path.join(checkpoint_path, 'training-checkpoint')):
    checkpoint = torch.load(os.path.join(checkpoint_path, 'training-checkpoint'))
    G_XtoY.load_state_dict(checkpoint['G_XtoY'])
    G_YtoX.load_state_dict(checkpoint['G_YtoX'])
    Dx.load_state_dict(checkpoint['Dx'])
    Dy.load_state_dict(checkpoint['Dy'])
    G_XtoY_optimizer.load_state_dict(checkpoint['G_XtoY_optimizer'])
    G_YtoX_optimizer.load_state_dict(checkpoint['G_YtoX_optimizer'])
    Dx_optimizer.load_state_dict(checkpoint['Dx_optimizer'])
    Dy_optimizer.load_state_dict(checkpoint['Dy_optimizer'])
    Dx_optimizer_scheduler.load_state_dict(checkpoint['Dx_optimizer_scheduler'])
    Dy_optimizer_scheduler.load_state_dict(checkpoint['Dy_optimizer_scheduler'])
    G_XtoY_optimizer_scheduler.load_state_dict(checkpoint['G_XtoY_optimizer_scheduler'])
    G_YtoX_optimizer_scheduler.load_state_dict(checkpoint['G_YtoX_optimizer_scheduler'])
    CURRENT_EPOCH = checkpoint['epoch']
    print('Latest checkpoint of epoch {} restored!!'.format(CURRENT_EPOCH))


# +
def generate_images(model, test_input, img_name='img', step=0):
    """Run model on test_input, log the prediction to tensorboard, and plot both."""
    prediction = model(test_input.to(device)).cpu().detach()
    plt.figure(figsize=(12, 12))
    display_list = [test_input, prediction]
    title = ['Input Image', 'Predicted Image']
    # Using the file writer, log the reshaped image.
    writer.add_image(os.path.join('train', img_name), denormalize(prediction)[0].numpy(), step)
    writer.flush()
    for i in range(2):
        plt.subplot(1, 2, i + 1)
        plt.title(title[i])
        # getting the pixel values between [0, 1] to plot it.
        plt.imshow(deprocess(display_list[i])[0])
        plt.axis('off')
    plt.show()


def generate_test_images(model1, model2, test_input, img_name='img', step=0, show_result=False):
    '''
    Generate images and cycled images, then save them to tensorboard
    '''
    with torch.no_grad():
        prediction1 = model1(test_input.to(device))
        prediction2 = model2(prediction1)
        test_input = test_input.cpu()
        prediction1 = prediction1.cpu()
        prediction2 = prediction2.cpu()
    display_list = [test_input, prediction1, prediction2]
    title = ['Input Image', 'Predicted Image', 'Cycled Image']
    writer.add_image(os.path.join('test', img_name, ' original'),
                     denormalize(test_input)[0].numpy(), step)
    writer.add_image(os.path.join('test', img_name, ' predicted'),
                     denormalize(prediction1)[0].numpy(), step)
    writer.add_image(os.path.join('test', img_name, ' cycled'),
                     denormalize(prediction2)[0].numpy(), step)
    writer.flush()
    if show_result:
        plt.figure(figsize=(12, 12))
        for i in range(3):
            plt.subplot(1, 3, i + 1)
            plt.title(title[i])
            # getting the pixel values between [0, 1] to plot it.
            plt.imshow(deprocess(display_list[i])[0])
            plt.axis('off')
        plt.show()


def save_test_images(model1, model2, test_input, folder_name='img', step=0, save=False,
                     show_result=False):
    '''
    Generate images and cycled images, then save them as jpg
    '''
    with torch.no_grad():
        prediction1 = model1(test_input.to(device))
        prediction2 = model2(prediction1)
        test_input = test_input.cpu()
        prediction1 = prediction1.cpu()
        prediction2 = prediction2.cpu()
    display_list = [test_input, prediction1, prediction2]
    title = ['original', 'predicted', 'cycled']
    figure_title = ['Input Image', 'Predicted Image', 'Cycled Image']
    base_folder = os.path.join(OUTPUT_PATH, folder_name)
    if not os.path.exists(base_folder):
        os.makedirs(base_folder)
    if save:
        # FIX: loop variable no longer shadows the `title` list.
        for img, sub_title in zip(display_list, title):
            save_folder = os.path.join(base_folder, sub_title)
            if not os.path.exists(save_folder):
                os.makedirs(save_folder)
            img = deprocess(img)[0]
            plt.imsave(os.path.join(save_folder, '{}.jpg'.format(step)), img)
    if show_result:
        plt.figure(figsize=(12, 12))
        for i in range(3):
            plt.subplot(1, 3, i + 1)
            plt.title(figure_title[i])
            # getting the pixel values between [0, 1] to plot it.
            plt.imshow(deprocess(display_list[i])[0])
            plt.axis('off')
        plt.show()
# -

# Write graph
writer.add_graph(G_XtoY, sample_X.to(device))
writer.add_graph(G_YtoX, sample_Y.to(device))
writer.add_graph(Dx, sample_X.to(device))
writer.add_graph(Dy, sample_Y.to(device))
writer.flush()


class TimeCounter():
    """Simple wall-clock stopwatch that prints elapsed time per task."""

    def __init__(self):
        self.init_time = time.time()
        self.stored_time = self.init_time

    def count(self, task_name="task"):
        current_time = time.time()
        consumed_time = current_time - self.stored_time
        self.stored_time = current_time
        print('Task {}, consume {} sec'.format(task_name, consumed_time))


# +
current_learning_rate = LR
trace = True

for epoch in range(CURRENT_EPOCH, EPOCHs + 1):
    start = time.time()
    print('Start of epoch %d' % (epoch,))
    # Reset dataloader
    iter_train_image_X = iter(train_image_loader_X)
    iter_train_image_Y = iter(train_image_loader_Y)
    # Average the losses of an epoch and write them.
    G_XtoY_loss_mean = 0
    G_YtoX_loss_mean = 0
    Dx_loss_mean = 0
    Dy_loss_mean = 0
    for step in range(training_steps):
        # The probability to use previous images to train discriminators
        replay_previous = True if REPLAY_PROB > random.random() else False
        # FIX: was iter_train_image_X.next() — the Python-2 iterator protocol;
        # raises AttributeError on Python 3 / current PyTorch loaders.
        real_image_X = next(iter_train_image_X).to(device)
        real_image_Y = next(iter_train_image_Y).to(device)

        # Generate fake images for discriminators (detached: no G gradients here).
        fake_image_X = G_YtoX(real_image_Y).detach()
        fake_image_Y = G_XtoY(real_image_X).detach()
        image_buffer.add(real_image_X, fake_image_X, real_image_Y, fake_image_Y)

        Dx_optimizer.zero_grad()
        Dy_optimizer.zero_grad()
        if USE_BUFFER and replay_previous:
            # Get previous generated fake images
            buffered_images = image_buffer.sample()
            # Compute the discriminator loss using buffered images
            real_buffer_image_X = buffered_images.real_image_X
            fake_buffer_image_X = buffered_images.fake_image_X
            real_buffer_image_Y = buffered_images.real_image_Y
            fake_buffer_image_Y = buffered_images.fake_image_Y
            Dx_real_buffer = Dx(real_buffer_image_X)
            Dx_fake_buffer = Dx(fake_buffer_image_X)
            Dx_loss = discriminator_loss(Dx_real_buffer, Dx_fake_buffer)
            Dy_real_buffer = Dy(real_buffer_image_Y)
            Dy_fake_buffer = Dy(fake_buffer_image_Y)
            Dy_loss = discriminator_loss(Dy_real_buffer, Dy_fake_buffer)
        else:
            # Compute the discriminator loss using the latest fake images
            Dx_real = Dx(real_image_X)
            Dx_fake = Dx(fake_image_X)
            Dy_real = Dy(real_image_Y)
            Dy_fake = Dy(fake_image_Y)
            Dx_loss = discriminator_loss(Dx_real, Dx_fake)
            Dy_loss = discriminator_loss(Dy_real, Dy_fake)

        # ============================
        # Update discriminators
        # ============================
        Dx_loss.backward()
        Dy_loss.backward()
        Dx_optimizer.step()
        Dy_optimizer.step()

        # ============================
        # Compute the generator loss
        # ============================
        G_XtoY_optimizer.zero_grad()
        G_YtoX_optimizer.zero_grad()
        fake_image_Y = G_XtoY(real_image_X)
        fake_image_X = G_YtoX(real_image_Y)
        dis_fake_image_Y = Dy(fake_image_Y)
        dis_fake_image_X = Dx(fake_image_X)
        G_XtoY_loss = generator_loss(dis_fake_image_Y)
        G_YtoX_loss = generator_loss(dis_fake_image_X)

        # ============================
        # Compute the cycle consistency loss
        # ============================
        cycled_XtoYtoX = G_YtoX(fake_image_Y)
        cycled_YtoXtoY = G_XtoY(fake_image_X)
        cycled_XtoY_loss = cycle_consistency_loss(real_image_X, cycled_XtoYtoX)
        cycled_YtoX_loss = cycle_consistency_loss(real_image_Y, cycled_YtoXtoY)
        total_cycle_loss = cycled_XtoY_loss + cycled_YtoX_loss

        # ============================
        # Compute the identity loss
        # ============================
        same_image_Y = G_XtoY(real_image_Y)
        same_image_X = G_YtoX(real_image_X)
        identity_loss_for_YtoX = identity_loss(real_image_X, same_image_X)
        identity_loss_for_XtoY = identity_loss(real_image_Y, same_image_Y)

        # ============================
        # Combine all generator losses
        # ============================
        total_G_XtoY_loss = G_XtoY_loss + identity_loss_for_XtoY
        total_G_YtoX_loss = G_YtoX_loss + identity_loss_for_YtoX
        total_G_losses = total_G_XtoY_loss + total_G_YtoX_loss + total_cycle_loss

        # ============================
        # Update generators
        # ============================
        total_G_losses.backward()
        G_XtoY_optimizer.step()
        G_YtoX_optimizer.step()

        # Add losses
        G_XtoY_loss_mean = G_XtoY_loss_mean + total_G_XtoY_loss.item() + total_cycle_loss.item()
        G_YtoX_loss_mean = G_YtoX_loss_mean + total_G_YtoX_loss.item() + total_cycle_loss.item()
        Dx_loss_mean += Dx_loss.item()
        Dy_loss_mean += Dy_loss.item()

        if step % 10 == 0:
            print('.', end='')

    # ============================
    # Write scalars at the end of an epoch
    # ============================
    writer.add_scalar('Loss/total_G_XtoY_loss', G_XtoY_loss_mean / training_steps, epoch)
    writer.add_scalar('Loss/total_G_YtoX_loss', G_YtoX_loss_mean / training_steps, epoch)
    writer.add_scalar('Loss/Dx_loss', Dx_loss_mean / training_steps, epoch)
    writer.add_scalar('Loss/Dy_loss', Dy_loss_mean / training_steps, epoch)
    writer.flush()

    # ============================
    # Update schedulers
    # ============================
    Dx_optimizer_scheduler.step()
    Dy_optimizer_scheduler.step()
    G_XtoY_optimizer_scheduler.step()
    G_YtoX_optimizer_scheduler.step()

    clear_output(wait=True)
    # Using a consistent image (sample_X) so that the progress of the model
    # is clearly visible.
    generate_images(G_XtoY, sample_X, img_name='sample_X', step=epoch)
    generate_images(G_YtoX, sample_Y, img_name='sample_Y', step=epoch)

    if epoch % SAVE_EVERY_N_EPOCH == 0:
        save_training_checkpoint(epoch)
        print('Saving checkpoint for epoch {} at {}'.format(epoch, checkpoint_path))

    print('Time taken for epoch {} is {} sec\n'.format(epoch, time.time() - start))
    gc.collect()
# -

# Evaluate on the X test set (show the plot only for the last batch).
for step, image in enumerate(tqdm(test_image_loader_X)):
    show_result = True if (step + 1) >= len(test_data_X) else False
    # Save to tensorboard
    generate_test_images(G_XtoY, G_YtoX, image, img_name='test_XtoY', step=step,
                         show_result=show_result)
    # Save image
    # save_test_images(G_XtoY, G_YtoX, image, folder_name='test_XtoY', step=step, save=True, show_result=show_result)

# +
# Evaluate on the Y test set.
for step, image in enumerate(tqdm(test_image_loader_Y)):
    show_result = True if (step + 1) >= len(test_data_Y) else False
    # Save to tensorboard
    generate_test_images(G_YtoX, G_XtoY, image, img_name='test_YtoX', step=step,
                         show_result=show_result)
    # Save image
    # save_test_images(G_YtoX, G_XtoY, image, folder_name='test_YtoX', step=step, save=True, show_result=show_result)
pytorch_cycle_gan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # # Trial by hx # need to make completely new dataframe of peak values with: # - No averages # - Trial numbers # Then, sort by trial and look at peaks based on previous trial drive_path = 'c:/' import numpy as np import pandas as pd import os import sys import matplotlib.pyplot as plt from scipy.stats import ks_2samp from scipy.stats import anderson_ksamp from scipy.stats import kruskal from scipy.stats import variation # %matplotlib import seaborn as sns from scipy.stats import zscore from scipy.stats import nanmean from scipy.stats import nanstd filename='C:\Users\Annie\Documents\Data\Ca_Imaging\GoodFiles\\fullpeak.csv' comp=pd.read_csv(filename) comp_sorted=comp.reindex_axis(comp.mean().sort_values().index, axis=1) comp_labels=pd.DataFrame(comp.Mouse) comp_group=pd.DataFrame(comp.Group) tmp=[comp_group,comp_labels,comp_sorted] composite_full=pd.concat(tmp,axis=1) cfull=pd.melt(composite_full,['Group','Mouse'],var_name="Odor") test=pd.read_csv('C:\Users\Annie\Documents\Data\Ca_Imaging\GoodFiles\\160325_1\\9dt.txt') trials=pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv') composite_full.head() # + #Order all the trials df=pd.DataFrame([]) for a in trials.File.unique(): temp=trials[trials.File==a] tone=temp[['Odor','T1']] tone.columns=['Odor','Trial'] ttwo=temp[['Odor','T2']] ttwo.columns=['Odor','Trial'] tthree=temp[['Odor','T3']] tthree.columns=['Odor','Trial'] tfour=temp[['Odor','T4']] tfour.columns=['Odor','Trial'] trialsdf=pd.concat([tone,ttwo,tthree,tfour],axis=0,ignore_index=True) sortedtrials=trialsdf.sort_values('Trial') sortedtrials=sortedtrials[sortedtrials.Trial!=0] sortedtrials=sortedtrials.reset_index(drop=True) sortedtrials['Mouse']=a 
df=df.append(sortedtrials,ignore_index=True) #Get Odor1 and Odor2 # TH=pd.DataFrame([]) # full=pd.DataFrame([]) # for x in df.Mouse.unique(): # if <len(sortedtrials.Trial.unique()): # Odor1=sortedtrials[sortedtrials.Trial==x]['Odor'].values # Odor2=sortedtrials[sortedtrials.Trial==x+1]['Odor'].values # tmp=({'Mouse':a,'Odor1':Odor1,'Odor2':Odor2,'Trial1':x,'Trial2':x+1}) # TH=TH.append(tmp,ignore_index=True) # - df[df.Mouse==x] for x in df.Mouse.unique(): for y in df[df.Mouse==x]['Trial'].unique(): if y<len(df[df.Mouse==x]['Trial']): Odor1=df[df.Mouse==x] # + #WHY IS X NOT CHANGING BACK TO INT& WHY CAN I NOT GET ODOR 1 valuess # -
TrialHistory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Smoke tests for md_table.malloc: check the returned memory map's metadata
# keys, address range, element count, and that cells are assignable.

# +
import md_table as mt

base_addr = 0x60000000  # start of the simulated allocation
size_t = 4              # bytes per cell
no_cell = 20            # number of cells to allocate

# +
memory_map = mt.malloc(size_t, no_cell, base_addr)

# The map must carry its rendering metadata.
assert 'header' in memory_map, "'header' missing"
assert 'content_formatter' in memory_map, "'content_formatter' missing"

# Integer keys are the cell addresses; check the covered range.
address_list = list(filter(lambda key: isinstance(key, int), memory_map.keys()))
min_addr, max_addr = min(address_list), max(address_list)
# FIX: these two messages were plain strings with f-string placeholders,
# so assertion failures printed literal '{min_addr:x}' instead of values.
assert min_addr == base_addr, f'Base address = {min_addr:x} != expected {base_addr:x}'
assert max_addr == (base_addr + size_t * no_cell), \
    f'Last address = {max_addr:x} != expected {(base_addr + size_t * no_cell):x}'
assert len(address_list) > no_cell, f"# elements {len(address_list)} smaller than desired {no_cell}"

# +
# Cells must accept assignment and read back the stored value.
value = 'zzz'
one_addr = base_addr + no_cell // 2
memory_map[one_addr] = value
assert memory_map[one_addr] == value, 'Assignment error'
# -
test_md_table.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python # -*- coding: utf-8 -*- import sys sys.path.append('../') from loglizer.models import SVM from loglizer import dataloader, preprocessing import numpy as np struct_log = '../data/HDFS/HDFS_100k.log_structured.csv' # The structured log file label_file = '../data/HDFS/anomaly_label.csv' # The anomaly label file if __name__ == '__main__': (x_train, y_train), (x_test, y_test) = dataloader.load_HDFS(struct_log, label_file=label_file, window='session', train_ratio=0.5, split_type='uniform') feature_extractor = preprocessing.FeatureExtractor() x_train = feature_extractor.fit_transform(x_train, term_weighting='tf-idf') x_test = feature_extractor.transform(x_test) print(np.array(x_train).shape) model = SVM() model.fit(x_train, y_train) print(np.array(x_train).shape) # print('Train validation:') # precision, recall, f1 = model.evaluate(x_train, y_train) # print('Test validation:') # precision, recall, f1 = model.evaluate(x_test, y_test) # + # #!/usr/bin/env python # -*- coding: utf-8 -*- import sys sys.path.append('../') from loglizer.models import PCA from loglizer import dataloader, preprocessing struct_log = '../data/HDFS/HDFS_100k.log_structured.csv' # The structured log file label_file = '../data/HDFS/anomaly_label.csv' # The anomaly label file if __name__ == '__main__': (x_train, y_train), (x_test, y_test) = dataloader.load_HDFS(struct_log, label_file=label_file, window='session', train_ratio=0.5, split_type='uniform') feature_extractor = preprocessing.FeatureExtractor() x_train = feature_extractor.fit_transform(x_train, term_weighting='tf-idf', normalization='zero-mean') x_test = feature_extractor.transform(x_test) # print("输入后的训练数据:",x_train) # print("尺寸:",x_train.shape) # print("输入后的测试数据:",x_test) # 
print("尺寸:",x_test.shape)

model = PCA()
model.fit(x_train)

# print('Train validation:')
# precision, recall, f1 = model.evaluate(x_train, y_train)
# print('Test validation:')
# precision, recall, f1 = model.evaluate(x_test, y_test)
# -

# BUG FIX: help() must be given the function *object*; the original
# `help(model.fit())` called fit with no arguments, raising TypeError
# before help() ever ran.
help(model.fit)
demo/PCA_SVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ##### Set up the environment import matplotlib.pyplot as plt import numpy as np import random import math from collections import namedtuple # %matplotlib inline # Optimization for mathplotlib import matplotlib as mpl import matplotlib.style as mplstyle mpl.rcParams['path.simplify'] = True mpl.rcParams['path.simplify_threshold'] = 1.0 mpl.rcParams['agg.path.chunksize'] = 10000 mplstyle.use('fast') # # Models # #### updateing # $du = \frac{(-\Delta u + R.I_t)}{\tau}dt$, $u_{new} = u + du $ # # #### Threshhold # $ u(t) = \theta \implies \text{Fire + Rest } (u = u_{rest}) $ # ## Leaky Integrate & Fire Model # #### formulate # $\tau.\frac{du}{dt} = -(u - u_{rest}) + R.I(t)$, $\Delta u = u - u_{rest}$ ,$I_t = I(t)$ # + class LIF: def __init__(self, config: namedtuple): self.τ = config.tau self.θ = config.threshold self.R = config.resistor self.u_rest = config.uRest self.dt = config.dt self.u = self.u_rest self.spike_trace = [] def integrate(self, It, t): self.u += self.update(It) if self.u >= self.θ: self.u = self.u_rest self.spike_trace.append(t) return self.u def update(self, It): return self.right_hand_side(It) * (self.dt / self.τ) def right_hand_side(self, It): return -(self.u - self.u_rest) + self.R * It # - # ## Exponential Leaky Integrate & Fire Model # #### formulate # $\tau.\frac{du}{dt} = -(u - u_{rest}) + R.I(t) + \Delta_{T}exp(\frac{u - \theta_{rh}}{\Delta_{T}})$ # # First part describe the leak of a passive membrane same as LIF model. # # $\Delta_{T}$ is the sharpness parameter. # # $\theta_{rh}$ is the firing threshold. 
class ELIF: def __init__(self, config: namedtuple): self.τ = config.tau self.θ = config.threshold self.R = config.resistor self.u_rest = config.uRest self.dt = config.dt self.ΔT = config.DeltaT self.u = self.u_rest self.spike_trace = [] def integrate(self, It, t): self.u += self.update(It) if self.u >= self.θ: self.u = self.u_rest self.spike_trace.append(t) return self.u def update(self, It): return self.right_hand_side(It) * (self.dt / self.τ) def right_hand_side(self, It): return -(self.u - self.u_rest) + self.R * It + self.ΔT * math.exp((self.u - self.θ)/self.ΔT) # #### How new term looks # + configs = [(5,1), (10,4)] x = np.linspace(0, 100, 100) y = lambda config: config[0] * np.exp((x - config[1]) / config[0]) plt.figure() plt.subplot(211) plt.plot(x, y(configs[0]), label='EL term') plt.xticks([]) plt.text(20, 1 * 10**9, r'$\Delta_{T}=%d, \theta_{rh}=%d$' %(*configs[0], ), fontsize=16) plt.title(r"$\Delta_T * e^{\frac{u-\theta_{rh}}{\Delta_T}}$", fontsize=20) plt.subplot(212) plt.plot(x, y(configs[1]), label='EL term') plt.text(20, 100000, r'$\Delta_{T}=%d, \theta_{rh}=%d$' %(*configs[1], ), fontsize=16) # - # ## Addaptive Exponential Leaky Integrate & Fire Model # #### formulate # $\tau.\frac{du}{dt} = -(u - u_{rest}) + R.I(t) + \Delta_{T}exp(\frac{u - \theta_{rh}}{\Delta_{T}}) - Rw$ # # $\tau.\frac{dw}{dt} = a(u - u_{rest}) - w + b \tau_w \sum\limits_{t^f}\sigma(t - t^f)$ # # - At each threshold crossing, the adaptation variable w is increased by an amount b # - Adaptation is characterized by two parameters: # - the parameter a is the source of subthreshold adaptation, because it couples adaptation to the voltage # - spike-triggered adaptation is controlled by a combination of a and b. 
class AELIF: def __init__(self, config: namedtuple): self.τm = config.tauM self.θ = config.threshold self.R = config.resistor self.u_rest = config.uRest self.dt = config.dt self.ΔT = config.DeltaT self.a = config.a self.b = config.b self.w = config.w self.τw = config.tauW self.u = self.u_rest self.spike_trace = [] def integrate(self, It, t): self.u += self.update(It) self.w += self.update_w(t) if self.u >= self.θ: self.u = self.u_rest self.spike_trace.append(t) return self.u def update(self, It): return self.right_hand_side(It) * (self.dt / self.τm) def update_w(self, t): return self.a * (self.u - self.u_rest) - self.w + self.b * self.τw * self.spike_trace.count(t) def right_hand_side(self, It): return -(self.u - self.u_rest) + self.R * (It - self.w) + self.ΔT * math.exp((self.u - self.θ) / self.ΔT) # ### Configs # # ```python # params_creator.get('LIF') = namedtuple('LIFParams', 'dt resistor tau threshold uRest') # params_creator.get('ELIF') = namedtuple('ELIFParams', 'DeltaT dt resistor tau threshold uRest'), # params_creator.get('AELIF') = namedtuple('AdaptiveELIFParams', 'DeltaT a b dt resistor tauM tauW threshold uRest w'), # params_creator.get('Env') = namedtuple('Enviroment', 'current_variation currents_params time_window') # ``` params_creator = { 'LIF': namedtuple('LIFParams', 'dt resistor tau threshold uRest'), 'ELIF': namedtuple('ELIFParams', 'DeltaT dt resistor tau threshold uRest'), 'AELIF': namedtuple('AdaptiveELIFParams', 'DeltaT a b dt resistor tauM tauW threshold uRest w'), 'Env': namedtuple('Enviroment', 'current_variation currents_params time_window') } # ### Plotters def plot_fi_curve(spike_trace, time_window, currents): frequencies = [len(spikes) / time_window for spikes in spike_trace] plt.plot(currents, frequencies, 'r') plt.xlabel(xlabel='I(t)') plt.ylabel(ylabel='f = 1/T') plt.title("frequency-current relation") plt.show() def plot_firing_pattern(plotter, params): fig, axs = plt.subplots(2) axs[0].plot(plotter['times'], 
plotter['potentials'], 'g-') axs[0].plot(plotter['times'], [params.uRest] * len(plotter['times']), 'k--') axs[0].plot(plotter['times'], [params.threshold] * len(plotter['times']), 'b--') axs[0].set(xlabel='time', ylabel='u (t)') axs[1].plot(plotter['times'], plotter['currents'], 'b-') axs[1].set(xlabel='time', ylabel='I (t)') for ax in axs.flat: ax.label_outer() plt.show() # ### Constant Current class CurrentManager: def __init__(self, start, stop, step): self.arrange_params = (start, stop, step) self.currents = None def __enter__(self): self.currents = np.arange(*self.arrange_params) return self.currents def __exit__(self, type, value, traceback): del self.currents def constant_current(Model, model_params, env): with CurrentManager(*env.currents_params) as currents: spike_trace = [] for current in currents: plotter = { "potentials": [], "currents": [], "times": [] } model = Model(model_params) # Model # time sequences starting_t = 0 time_intervals = np.arange(starting_t, starting_t + env.time_window, model_params.dt) # integrations for t in time_intervals: u = model.integrate(current, t) plotter['potentials'].append(u) plotter['currents'].append(current) plotter['times'].append(t) spike_trace.append(model.spike_trace) # Firing pattern plot_firing_pattern(plotter, model_params) plot_fi_curve(spike_trace, env.time_window, currents) def noisy_current(Model, model_params, env): with CurrentManager(*env.currents_params) as currents: for current in currents: plotter = { "potentials": [], "currents": [], "times": [] } model = Model(model_params) # Model # time sequences starting_t = 0 time_intervals = np.arange(starting_t, starting_t + env.time_window, model_params.dt) # integrations for t in time_intervals: It = current + np.random.uniform(*env.current_variation) u = model.integrate(It, t) plotter['potentials'].append(u) plotter['currents'].append(It) plotter['times'].append(t) # Firing pattern plot_firing_pattern(plotter, model_params) # #### Enviroments Parameters # 
current_variation = (lower, uperbound) for noise value # # currents_params = (start, stop, step) # # time_window = interval gap # current_variation currents_params time_window env = params_creator.get('Env')((-1,1), (0, 50, 5), 10) # #### Leaky Integrate & Fire Model Parameters # + # 'dt resistor tau threshold uRest' lif_params = [ params_creator.get('LIF')(0.01, 10, 10, -50, -70), params_creator.get('LIF')(0.01, 10, 5, -50, -70), # τ = 5 params_creator.get('LIF')(0.01, 10, 20, -50, -70), # τ = 20 params_creator.get('LIF')(0.01, 20, 10, -50, -70), # R = 20 params_creator.get('LIF')(0.01, 10, 10, -20, -70), # threshhold = -20 params_creator.get('LIF')(0.01, random.random()*10, random.random()*10, -20, -70), # Random ] # - # #### Exponential Leaky Integrate & Fire Model Parameters # DeltaT dt resistor tau threshold uRest elif_params = [ params_creator.get('ELIF')(1, 0.01, 10, 10, -50, -70), params_creator.get('ELIF')(1, 0.01, 10, 20, -50, -70), # τ = 20 params_creator.get('ELIF')(5, 0.01, 5, 10, -50, -65), # delta_t = 5, r = 5, uRest=-65 params_creator.get('ELIF')(2, 0.01, 10, 5, -55, -70), # τ = 20, threshhold=-55 params_creator.get('ELIF')(1, 0.1, 10, 15, -50, -70), # τ = 15, dt=0.1 params_creator.get('ELIF')(random.random()*1, 0.1, random.random()*10, random.random()*15, -50, -70), # Random ] # #### Adaptive Exponential Leaky Integrate & Fire Model Parameters # DeltaT a b dt resistor tauM tauW threshold uRest w aelif_params = [ params_creator.get('AELIF')(1, 1, 0.5, 0.01, 10, 5, 5, -50, -70, 2), params_creator.get('AELIF')(5, 0.2, 0.5, 0.01, 10, 5, 5, -50, -70, 2), params_creator.get('AELIF')(1, 1, 0.5, 0.01, 10, 10, 5, -50, -70, 0.2), params_creator.get('AELIF')(1, 1, 0.5, 0.01, 10, 5, 15, -50, -70, 2), params_creator.get('AELIF')(1, 1, 1.5, 0.01, 5, 5, 10, -50, -70, 2), params_creator.get('AELIF')(1, 1, random.random(), 0.01, random.random()*10, random.random()*5, random.random()*5, -50, -70, 2), ] # ## Start Here config_number = 5 assert 0 <= config_number <= 
min([len(lif_params), len(elif_params), len(aelif_params)]), 'config_number is not in acceptable range' print(lif_params[config_number]) print(elif_params[config_number]) print(aelif_params[config_number]) constant_current(LIF, lif_params[config_number], env) noisy_current(LIF, lif_params[config_number], env) constant_current(ELIF, elif_params[config_number], env) noisy_current(ELIF, elif_params[config_number], env) constant_current(AELIF, aelif_params[config_number], env) noisy_current(AELIF, aelif_params[config_number], env) constant_current(AELIF, aelif_params[-1], env)
notebooks/Project 1/SNN Models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

# Ten evenly spaced samples across two full periods of sine.
sample_x = np.linspace(0, 4 * np.pi, 10)
sample_x

sample_f = np.sin(sample_x)
sample_f

plt.plot(sample_x, sample_f, marker='o')
plt.xlabel('x')
plt.ylabel('f(x)');

from scipy.interpolate import interp1d

# Build a cubic interpolant through the coarse samples and evaluate it
# on a ten-times-denser grid.
sample_x = np.linspace(0, 4 * np.pi, 10)
sample_f = np.sin(sample_x)
sin_approx = interp1d(sample_x, sample_f, kind='cubic')

dense_x = np.linspace(0, 4 * np.pi, 100)
dense_f = sin_approx(dense_x)

plt.plot(sample_x, sample_f, marker='o', linestyle='', label='original data')
plt.plot(dense_x, dense_f, marker='.', label='interpolated')
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');

# How far the interpolant strays from the true sine on the dense grid.
plt.plot(dense_x, np.abs(np.sin(dense_x) - sin_approx(dense_x)))
plt.xlabel('x')
plt.ylabel('Absolute error');

# Repeat with 15 randomly placed (unsorted, unevenly spaced) samples;
# evaluation is restricted to the sampled range to avoid extrapolation.
sample_x = 4 * np.pi * np.random.rand(15)
sample_f = np.sin(sample_x)
sin_approx = interp1d(sample_x, sample_f, kind='cubic')

dense_x = np.linspace(np.min(sample_x), np.max(sample_x), 100)
dense_f = sin_approx(dense_x)

plt.plot(sample_x, sample_f, marker='o', linestyle='', label='original data')
plt.plot(dense_x, dense_f, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');

plt.plot(dense_x, np.abs(np.sin(dense_x) - sin_approx(dense_x)))
plt.xlabel('x')
plt.ylabel('Absolute error');
days/day11/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Figure(s) in the manuscript created by this notebook: Fig. 6C, 6D. # # This notebook takes the manually annotated cell outlines from the IRE1 photoconversion experiment and plots some basic statistics from them. # + # User-defined parameters for analysis: # Plotting and figure saving params save_figs = False plot_settings = '../src/plotting_settings.py' save_dir = '../reports/figures/Fig6_IRE1_photoconversion' # Parameters for loading data from images manually measured in ImageJ. # Each csv file represents one cell and should contain # four measurements in the following order: # 1) Cell outline at the start of the experiment, # 2) Cell outline before clusters begin to dissolve, # 3) Cell outline right after clusters dissolve, # 4) A region of comparable area to the cell to be used for background correction. # Path to csv data data_dir = '../data/processed/Fig6_IRE1_photoconv_2019-04-10/ManualOverlays' frame_time = 10 # Duration of each frame, in minutes. 
Used for time scaling later stress_start_min = 150 # time, in minutes, of ER stressor addition by the first frame # + # load the built-in and custom modules # uncomment for debugging """ %load_ext autoreload %autoreload 2 """ import os, sys, inspect import glob import matplotlib import matplotlib.pylab as plt import numpy as np from scipy import stats import pandas as pd import seaborn as sns # Add source code directory (src) to path to enable module import module_dir = '../src' os.sys.path.insert(0, module_dir) # - # Set up figure save dirs and load plotting style if save_figs: # %matplotlib # %run $plot_settings save # Make directory for saving figures save_dir_pdf = os.path.join(save_dir, 'pdf') if not os.path.exists(save_dir_pdf): os.makedirs(save_dir_pdf) else: # %matplotlib inline # %run $plot_settings plot_only # + # Load data result_files = sorted(glob.glob(os.path.join(data_dir,'*.csv'))) filenames_no_ext = [] data_by_file = [] # Go file by file and read data for i, file in enumerate(result_files): # Read data from the provided source file data = pd.read_csv(file) #, index_col=0) data.rename(columns={ data.columns[0]: "Meas_Type" }, inplace=True) filename_no_ext = os.path.split(os.path.splitext(file)[0])[1] data['file_name'] = filename_no_ext data['Cell_ID'] = i data_by_file.append(data) filenames_no_ext.append(filename_no_ext) raw_data = pd.concat(data_by_file) raw_data.reset_index(inplace=True, drop=True) raw_data['Meas_Type'].replace(to_replace=1, value ="Start", inplace=True) raw_data['Meas_Type'].replace(to_replace=2, value ="Pre_dis", inplace=True) raw_data['Meas_Type'].replace(to_replace=3, value ="Post_dis", inplace=True) raw_data['Meas_Type'].replace(to_replace=4, value ="Bkgnd", inplace=True) # + # Create new dataframe where relevant properties are grouped by cell cells = raw_data.loc[raw_data['Meas_Type'] == 'Start', ['Cell_ID', 'file_name']].copy() cells.reset_index(inplace=True, drop=True) # Subtract background and perform basic measurements 
for measurement in raw_data['Meas_Type'].unique(): data = raw_data.loc[raw_data['Meas_Type'] == measurement].copy() bkgnd = raw_data.loc[raw_data['Meas_Type'] == 'Bkgnd'].copy() data.reset_index(inplace=True, drop=True) bkgnd.reset_index(inplace=True, drop=True) corrected_intensity = data['Mean'] - bkgnd['Mean'] int_col = 'Mean_' + measurement + '_bc' area_col = 'Area_' + measurement sum_col = 'SumI_' + measurement time_col = 'Time_m_' + measurement cells[int_col] = corrected_intensity cells[area_col] = data['Area'] cells[sum_col] = data['Area'] * corrected_intensity cells[time_col] = (data['Slice']-1) * frame_time + stress_start_min # + # Analyze and plot the data # Create summary figure fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(3.4, 1.8)) fig.tight_layout(pad=2) cells['NormI_Start'] = 1 cells['NormI_Pre_dis'] = cells['SumI_Pre_dis'] / cells['SumI_Start'] cells['NormI_Post_dis'] = cells['SumI_Post_dis'] / cells['SumI_Start'] cells['Frac_dissolved'] = cells['SumI_Post_dis'] / cells['SumI_Pre_dis'] cells['Dissol_duration'] = cells['Time_m_Post_dis'] - cells['Time_m_Pre_dis'] times = pd.melt(cells, id_vars=['Cell_ID'], value_vars=['Time_m_Start', 'Time_m_Pre_dis', 'Time_m_Post_dis'], var_name='Condition', value_name='Time_min') intensities = pd.melt(cells, id_vars=['Cell_ID'], value_vars=['NormI_Start', 'NormI_Pre_dis', 'NormI_Post_dis'], var_name = 'Int_condition', value_name='Total_Intensities') timecourse = pd.concat([times, intensities], axis=1) timecourse = timecourse.loc[:,~timecourse.columns.duplicated()] timecourse['Cell_ID_str'] = 'Cell ID ' + timecourse['Cell_ID'].astype(str) sns.lineplot(x="Time_min", y="Total_Intensities", hue="Cell_ID_str", data=timecourse, ax=axes[0], legend=False) sns.scatterplot(x="Time_min", y="Total_Intensities", hue="Cell_ID_str", data=timecourse, ax=axes[0], size=5, legend=False, style='Condition') axes[0].set_xlabel('Stress duration (min)') axes[0].set_ylabel('Photoconverted IRE1 fluorescence') axes[0].set_ylim(0,) 
sns.boxplot(x='Condition', y='Total_Intensities', data=timecourse, linewidth=0.5, showfliers = False, boxprops={'facecolor':'None'}, ax=axes[1]) sns.swarmplot(x='Condition', y='Total_Intensities', data=timecourse, zorder=0.5, ax=axes[1], size=3) axes[1].set_ylim(0,) axes[1].set_ylabel(None) plt.show() if save_figs: fig_filename_pdf = os.path.join(save_dir_pdf, 'Photoconversion_summary.pdf') plt.savefig(fig_filename_pdf) plt.show() # + # Calculate fraction of retained IRE1 intensity after dissolution frac_left = [] for cell in timecourse['Cell_ID'].unique(): row_pre_dis = (timecourse['Cell_ID'] == cell) & (timecourse['Int_condition'] == 'NormI_Pre_dis') pre_dis = float(timecourse.loc[row_pre_dis,'Total_Intensities']) row_post_dis = (timecourse['Cell_ID'] == cell) & (timecourse['Int_condition'] == 'NormI_Post_dis') post_dis = float(timecourse.loc[row_post_dis,'Total_Intensities']) frac_l = post_dis/pre_dis frac_left.append(frac_l) print(np.mean(frac_left), stats.sem(frac_left))
notebooks/06_Photoconversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="http://vision.skills.network/logo-light.png" width="400" alt="CV Studio logo" /> # # <h2>Transfer Learning with Convolutional Neural Networks For Classification with PyTorch and <a href="https://vision.skills.network/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-cvstudio-2021-01-01"> Computer Vision Learning # Studio # (CV Studio)</a></h2> <p><b> V 0.2</b></p> # <h4>Project: Final_project_stop_signs</h4> # <h4>Training Run: Transfer learning for Stop Sign Classification</h4> # # Estimated time needed: **40** minutes # # In this lab, you will train a deep neural network for image classification using <a href="https://cs231n.github.io/transfer-learning/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-cvstudio-2021-01-01">transfer learning</a>, the image dataset will automatically be download from your <a href="https://vision.skills.network/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-cvstudio-2021-01-01">CV Studio</a> account. Experiment with different hyperparameters. # # ## Objectives # # In this lab you will train a state of the art image classifier using and <a href="https://vision.skills.network/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-cvstudio-2021-01-01">CV Studio</a>, CV Studio is a fast, easy and collaborative open source image annotation tool for teams and individuals. 
In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset in the lab, then use this Network to train your model. We will use the Convolutional Network as a feature generator, only training the output layer. In general, 100-200 images will give you a good starting point, and it only takes about half an hour. Usually, the more images you add, the better your results, but it takes longer and the rate of improvement will decrease. # # - Import Libraries and Define Auxiliary Functions # - Create Dataset Object # - Load Model and Train # # * * * # # ## Import Libraries and Define Auxiliary Functions # # + # #! conda install -c pytorch torchvision # #! pip install skillsnetwork tqdm # #!pip install skillsnetwork # - # Libraries for OS and Cloud # import os import uuid import shutil import json from botocore.client import Config import ibm_boto3 import copy from datetime import datetime from skillsnetwork import cvstudio # Libraries for Data Processing and Visualization # from PIL import Image import matplotlib.pyplot as plt import numpy as np import pandas as pd import math from matplotlib.pyplot import imshow from tqdm import tqdm from ipywidgets import IntProgress import time # Deep Learning Libraries # import torch import torchvision.models as models from torch.utils.data import Dataset, DataLoader,random_split from torch.optim import lr_scheduler from torchvision import transforms import torch.nn as nn torch.manual_seed(0) # Plot train cost and validation accuracy: # def plot_stuff(COST,ACC): fig, ax1 = plt.subplots() color = 'tab:red' ax1.plot(COST, color = color) ax1.set_xlabel('Iteration', color = color) ax1.set_ylabel('total loss', color = color) ax1.tick_params(axis = 'y', color = color) ax2 = ax1.twinx() color = 'tab:blue' ax2.set_ylabel('accuracy', color = color) # we already 
handled the x-label with ax1 ax2.plot(ACC, color = color) ax2.tick_params(axis = 'y', color = color) fig.tight_layout() # otherwise the right y-label is slightly clipped plt.show() # Plot the transformed image: # def imshow_(inp, title=None): """Imshow for Tensor.""" inp = inp .permute(1, 2, 0).numpy() print(inp.shape) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) plt.show() # Compare the prediction and actual value: # def result(model,x,y): #x,y=sample z=model(x.unsqueeze_(0)) _,yhat=torch.max(z.data, 1) if yhat.item()!=y: text="predicted: {} actual: {}".format(str(yhat.item()),y) print(text) # Define our device as the first visible cuda device if we have CUDA available: # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("the device type is", device) # ## Load Data # # In this section we will preprocess our dataset by changing the shape of the image, converting to tensor and normalizing the image channels. These are the default preprocessing steps for image data. In addition, we will perform data augmentation on the training dataset. The preprocessing steps for the test dataset is the same, but W do not prform data augmentation on the test dataset. # # <code> # <p>mean = [0.485, 0.456, 0.406]</p> # <p>std = [0.229, 0.224, 0.225]</p> # <p>composed = transforms.Compose([transforms.Resize((224, 224)), # transforms.RandomHorizontalFlip(),transforms.RandomRotation(degrees=5) # , transforms.ToTensor() # , transforms.Normalize(mean, std)])</p> # </code> # # Download the data: # # + # Get the Dataset # Initialize the CV Studio Client cvstudioClient = cvstudio.CVStudio() # # Download All Images cvstudioClient.downloadAll() # - # We need to get our training and validation dataset. 90% of the data will be used for training. 
# percentage_train=0.9 train_set=cvstudioClient.getDataset(train_test='train',percentage_train=percentage_train) val_set=cvstudioClient.getDataset(train_test='test',percentage_train=percentage_train) # We can plot some of our dataset: # # + i=0 for x,y in val_set: imshow_(x,"y=: {}".format(str(y.item()))) i+=1 if i==3: break # - # ## Hyperparameters # # Experiment with different hyperparameters: # # <b>Epoch</b> indicates the number of passes of the entire training dataset, here we will set the number of epochs to 10: # n_epochs=10 # <b>Batch size</b> is the number of training samples utilized in one iteration. If the batch size is equal to the total number of samples in the training set, then every epoch has one iteration. In Stochastic Gradient Descent, the batch size is set to one. A batch size of 32--512 data points seems like a good value, for more information check out the following <a href="https://arxiv.org/abs/1609.04836?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-cvstudio-2021-01-01">link</a>. # batch_size=32 # <b>Learning rate</b> is used in the training of neural networks. Learning rate is a hyperparameter with a small positive value, often in the range between 0.0 and 1.0. # lr=0.000001 # <b>Momentum</b> is a term used in the gradient descent algorithm to improve training results: # momentum=0.9 # If you set to <code>lr_scheduler=True</code> for every epoch use a learning rate scheduler changes the range of the learning rate from a maximum or minimum value. The learning rate usually decays over time. 
# lr_scheduler=True base_lr=0.001 max_lr=0.01 # # Load Model and Train # # This function will train the model # def train_model(model, train_loader,validation_loader, criterion, optimizer, n_epochs,print_=True): loss_list = [] accuracy_list = [] correct = 0 #global:val_set n_test = len(val_set) accuracy_best=0 best_model_wts = copy.deepcopy(model.state_dict()) # Loop through epochs # Loop through the data in loader print("The first epoch should take several minutes") for epoch in tqdm(range(n_epochs)): loss_sublist = [] # Loop through the data in loader for x, y in train_loader: x, y=x.to(device), y.to(device) model.train() z = model(x) loss = criterion(z, y) loss_sublist.append(loss.data.item()) loss.backward() optimizer.step() optimizer.zero_grad() print("epoch {} done".format(epoch)) scheduler.step() loss_list.append(np.mean(loss_sublist)) correct = 0 for x_test, y_test in validation_loader: x_test, y_test=x_test.to(device), y_test.to(device) model.eval() z = model(x_test) _, yhat = torch.max(z.data, 1) correct += (yhat == y_test).sum().item() accuracy = correct / n_test accuracy_list.append(accuracy) if accuracy>accuracy_best: accuracy_best=accuracy best_model_wts = copy.deepcopy(model.state_dict()) if print_: print('learning rate',optimizer.param_groups[0]['lr']) print("The validaion Cost for each epoch " + str(epoch + 1) + ": " + str(np.mean(loss_sublist))) print("The validation accuracy for epoch " + str(epoch + 1) + ": " + str(accuracy)) model.load_state_dict(best_model_wts) return accuracy_list,loss_list, model # Load the pre-trained model resnet18. Set the parameter pretrained to true. # model = models.resnet18(pretrained=True) # We will only train the last layer of the network set the parameter <code>requires_grad</code> to <code>False</code>, the network is a fixed feature extractor. 
# for param in model.parameters(): param.requires_grad = False # Number of classes # n_classes=train_set.n_classes n_classes # Replace the output layer model.fc of the neural network with a nn.Linear object, to classify <code>n_classes</code> different classes. For the parameters in_features remember the last hidden layer has 512 neurons. # # Type your code here model.fc = nn.Linear(512, n_classes) # Set device type # model.to(device) # Cross-entropy loss, or log loss, measures the performance of a classification model combines LogSoftmax in one object class. It is useful when training a classification problem with C classes. # criterion = nn.CrossEntropyLoss() # Create a training loader and validation loader object. # train_loader = torch.utils.data.DataLoader(dataset=train_set , batch_size=batch_size,shuffle=True) validation_loader= torch.utils.data.DataLoader(dataset=val_set , batch_size=1) # Use the optim package to define an Optimizer that will update the weights of the model for us. 
# optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum) # We use <a href='https://arxiv.org/pdf/1506.01186.pdf?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-cvstudio-2021-01-01'>Cyclical Learning Rates</a> # if lr_scheduler: scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.001, max_lr=0.01,step_size_up=5,mode="triangular2") # Now we are going to train model,for 500 images this take 25 minutes, depending on your dataset # # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-CV0101EN-Coursera/images/this_make_take_time.gif" alt="this may take some time"> # # + start_datetime = datetime.now() start_time=time.time() accuracy_list,loss_list, model=train_model(model,train_loader , validation_loader, criterion, optimizer, n_epochs=n_epochs) end_datetime = datetime.now() current_time = time.time() elapsed_time = current_time - start_time print("elapsed time", elapsed_time ) # - # Now run the following to report back the results of the training run to CV Studio # # + parameters = { 'epochs': n_epochs, 'learningRate': lr, 'momentum':momentum, 'percentage used training':percentage_train, "learningRatescheduler": {"lr_scheduler":lr_scheduler,"base_lr":base_lr, "max_lr" :max_lr} } result = cvstudioClient.report(started=start_datetime, completed=end_datetime, parameters=parameters, accuracy={ 'accuracy': accuracy_list, 'loss': loss_list }) if result.ok: print('Congratulations your results have been reported back to CV Studio!') # - # Save the model to model.pt # # + # Save the model to model.pt torch.save(model.state_dict(), 'model.pt') # Save the model and report back to CV Studio result = cvstudioClient.uploadModel('model.pt', {'numClasses': n_classes}) # - # Plot train cost and validation accuracy, you can improve results by getting more data. 
# plot_stuff(loss_list,accuracy_list) # Load the model that performs best: # model = models.resnet18(pretrained=True) model.fc = nn.Linear(512, n_classes) model.load_state_dict(torch.load( "model.pt")) model.eval() # ## What's Next # # You can also deploy your model via Web Application or Web App . This allows users to interact with your model like a website. They can upload the image with a user interface and view the results. Let's see how we can deploy a web app in CV Studio. In CV Studio, go to the use model section and select New Application. Fill out the window as follows, giving your model a name and selecting the Model in this project, select **TEST - 1-click Deploy your Model to Cloud (Code Engine)** and select the model from the training run as shown here: # # <p> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-CV0101EN-SkillsNetwork/images/Image_2021-05-20_at_8.04_AM.jpeg" alt="popup" width="400" height="500"> # </p> # # Then once the window is filled out press the Create Application button and your model will begin deploying. # # <p> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-CV0101EN-SkillsNetwork/images/Image_2021-05-20_at_8.07_AM.jpeg" alt="popup" width="500" height="100"> # </p> # # Wait until the status changes from "deploying" to "ready". Once the status changes to ready, your application is ready for you to use! # # <p> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-CV0101EN-SkillsNetwork/images/Image_2021-05-20_at_8.08_AM.jpeg" alt="popup" width="500" height="100"> # </p> # # You can press the URL to go to your web application. 
#
# <p>
# <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-CV0101EN-SkillsNetwork/images/Image_2021-05-20_at_3.12_PM.jpeg" alt="popup" width="500" height="400">
# </p>
#
# ## Authors
#
# <NAME> has a PhD in Electrical Engineering; his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description      |
# | ----------------- | ------- | ---------- | ----------------------- |
# | 2021-05-25        | 0.3     | Yasmine    | Modified Multiple Areas |
# | 2021-05-25        | 0.3     | Kathy      | Modified Multiple Areas |
# | 2021-03-08        | 0.2     | Joseph     | Modified Multiple Areas |
# | 2021-02-01        | 0.1     | Joseph     | Modified Multiple Areas |
#
# Copyright © 2021 IBM Corporation. All rights reserved.
#
Introduction-to-computer-vision- final-project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="AJPfSOKPTvEa" colab_type="text" # # Welcome to hent-AI colab! # # This colab can utilize Googles vast resources for super fast decensoring using this project. All you need is a Google Drive and a good amount of free space on it. # # hent-AI git project page: https://github.com/natethegreate/hentAI # # # Prereqs # In your Google Drive, make a folder called hent-AI. Inside that folder, make a folder called videos. # # Don't worry about getting the weights or models. This repo will auto download them to this Google Drive folder for you, so make sure your drive isn't full. # # # Tutorial: # Now, you can start running this notebook. # # * Under the runtime option above, hit '**Change runtime type**' and make sure Hardware Accelerator is set to **GPU**. # # * Then, start running the code cells below by hitting the play buttons on their top left. (Or hit Runtime->Run all). They each have comments and instructions if needed. # # * *Some of the cells will require input, as a y/n box. Make sure to do those or else nothing will continue.* # # * When you mount the google drive and have to authorize it, be sure to select your google account that you wish to place the models and videos on. # # * When decensoring finished, the output video will be called `(video name)_decensored.avi` # # * The filesystem is the window looking button on the left. Click on it, and you'll see the local hent-AI folder dropdown, and the drive folder above it. # # * Expand the hent-AI folder, then the expand drive / My Drive folders # # * Simply drag the decensored video avi from the hent-AI folder to the drive/My Drive folder. This will transfer the decensored video from this instance to your actual Google Drive, and is the fastest way to get the video. 
# # * Or, you can right-click the video and download from here, but it will be much slower. # # * Images will save into your drive in the /decensor_input and /decensor_input_original folders, for use with DeepCreamPy. # # # Notes # Colab **automatically disconnects** if you dont do anything for 30 minutes, and sessions last **at most** 12 hours. Whenever you launch a new instance, You will have to run all these steps again. But, everything on your Google drive can stay and it does not have to be repeated. # # So, its best if you have all the clips you want to decensor ready so you can run them all at once. # # Also, there are some memory-related issues that happen with high-resolution and longer videos. 480 to 720p is reccommended for videos. # + id="jZRvYf7xS1ER" colab_type="code" colab={} # !nvidia-smi #First check what GPU have. Tesla T4 will not work. P100 and K80s are confirmed working. # + id="XHWQ0UBKS4n7" colab_type="code" colab={} # Install conda and python 3.5.2 # !pip3 install conda # !wget https://repo.anaconda.com/archive/Anaconda3-5.2.0-Linux-x86_64.sh && bash Anaconda3-5.2.0-Linux-x86_64.sh -bfp /usr/local # + id="G2Y8mbOITAvo" colab_type="code" colab={} # You will need to confirm the yes/no prompt when installing python 3.5.2 # !conda list python # !conda install python=3.5.2 # + id="k0w19wiOTIQ9" colab_type="code" colab={} # Get to cuda 9 # !wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb # !dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb # !apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub # !apt-get update # !apt-get install cuda=9.0.176-1 # + id="F8FMJ2XqTM0y" colab_type="code" colab={} # Get hent-AI repo # %cd /content/ # !git clone https://github.com/natethegreate/hent-AI.git # + id="5zExTS0JTTsJ" colab_type="code" colab={} # %cd hent-AI/ # !git checkout master # + id="AxlSZOPs3CF2" colab_type="code" colab={} # Get ffmpeg just in case # !pip install 
ffmpeg-python # !add-apt-repository ppa:jon-severinsson/ffmpeg # !apt-get update # !apt-get install ffmpeg # + id="HpUVONY0TU_t" colab_type="code" colab={} # Mount Google Drive. Follow authentication below from google.colab import drive drive.mount('/content/drive', force_remount=True) # + id="GiNm4Kw3TmTf" colab_type="code" colab={} # Create directories, you'll only need to do this if you dont already have them in your drive # !mkdir /content/drive/My\ Drive/hent-AI/ # !mkdir /content/drive/My\ Drive/hent-AI/videos # !mkdir /content/drive/My\ Drive/hent-AI/images # !mkdir /content/drive/My\ Drive/hent-AI/images/decensor_input # !mkdir /content/drive/My\ Drive/hent-AI/images/decensor_input_original # + id="gWg2-tsnDNV2" colab_type="code" colab={} # Get models, if you dont already have them. # %cd "/content/drive/My Drive/" # !wget --no-check-certificate "https://de-next.owncube.com/index.php/s/mDGmi7NgdyyQRXL/download?path=%2F&files=4x_FatalPixels_340000_G.pth&downloadStartSecret=r4q3aw60ijm" -O hent-AI/4x_FatalPixels_340000_G.pth # !wget --no-check-certificate "https://www.dropbox.com/s/zvf6vbx3hnm9r31/weights268.zip?dl=0" -O hent-AI/weights.zip # + id="Kb2_SOflUF0f" colab_type="code" colab={} # Get requirements. This will take some time and lots of disk space. MAKE SURE TO PRESS THE "RESTART RUNTIME" BUTTON AT THE BOTTOM OF THE OUTPUT HERE # %cd /content/hent-AI/ # !pip install --upgrade pip # !pip install -r requirements-gpu.txt # + id="xTQ0tIzxUIQ8" colab_type="code" colab={} # %cd /content/hent-AI/ # !git checkout master # + id="vWyKzajTUPTN" colab_type="code" colab={} # Install mask rcnn # !python setup.py install # + id="AzCLbJSKdx2e" colab_type="code" colab={} # Create folders if they are not already made. Ignore errors if they show up here. 
# !mkdir ESR_temp/ # !mkdir ESR_temp/temp/ # !mkdir ESR_temp/ESR_out/ # !mkdir ESR_temp/temp2/ # + id="jqXdGKwYUQhm" colab_type="code" colab={} # Extract both the hent-AI weights and the ESRGAN weights # !unzip /content/drive/My\ Drive/hent-AI/weights.zip # !7z x /content/drive/My\ Drive/hent-AI/4x_FatalPixels_340000_G.7z # !cp /content/drive/My\ Drive/hent-AI/4x_FatalPixels_340000_G.pth . # Auto downloader will download .pth, so no need to extract it # + id="dDcNhEI5ZL8R" colab_type="code" colab={} # !ls # Verify models are inside this hent-AI folder # + id="GYWKXoRkVgna" colab_type="code" colab={} # Ignore this cell # Remove tensorflow normal to operate on GPU only? NOTE: You will need to authorize both uninstalls. MAKE SURE TO PRESS THE "RESTART RUNTIME" BUTTON AT THE BOTTOM OF THE OUTPUT HERE # # !pip uninstall tensorflow # # !pip uninstall protobuf # # !pip install tensorflow==1.8.0 # # !pip install --force-reinstall tensorflow-gpu==1.9.0 # + id="ibV9Nbp-V_GA" colab_type="code" colab={} # Runtime may be restarted. Ignore any errors here # %cd hent-AI/ # !git checkout master # + id="nJIO0YaphQ6A" colab_type="code" colab={} # !git pull #ignore me # + id="TgE4GkjwWEIp" colab_type="code" colab={} # Make sure videos are in the videos folder inside hent-AI. You may need to confirm y/n if a video will be overwritten. # # !python samples/hentai/hentai.py inference --weights=weights.h5 --sources=/content/drive/My\ Drive/hent-AI/videos/ --dtype=esrgan # Use this if you want to detect bars on images for use with DCP. Make sure to comment-out all other lines. # # !python samples/hentai/hentai.py inference --weights=weights.h5 --sources=/content/drive/My\ Drive/hent-AI/images/ --dtype=bar --dcpdir=/content/drive/My\ Drive/hent-AI/images/ # Use this if you want to detect mosaics on images for use with DCP. Make sure to comment-out all other lines. 
# !python samples/hentai/hentai.py inference --weights=weights.h5 --sources=/content/drive/My\ Drive/hent-AI/images/ --dtype=mosaic --dcpdir=/content/drive/My\ Drive/hent-AI/images/ # + [markdown] id="xRkRAQBsdmNW" colab_type="text" # Now, use the filesystem on the left to manually drag decensored videos back into your drive folder. Then they will show up in your Google drive. #
hent_AI_COLAB_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# - Problem: There's a chess board. For each square, the amount of rice doubles. So for the first square, there is one grain of rice. In the second square, there are two grains of rice. ETC. Find the total number of rice grains.

# - This is the iterative function.

def rice(number_blocks):
    """Total grains of rice on the first *number_blocks* squares (iterative).

    Square k holds 2**(k-1) grains, so the running total after n squares
    is 2**n - 1.
    """
    total = 0
    grains_on_square = 1
    for _ in range(number_blocks):
        total += grains_on_square
        grains_on_square *= 2
    return total


# - This is the recursive function.

def rice_r(number_blocks):
    """Total grains of rice on the first *number_blocks* squares (recursive).

    Recurrence: T(0) = 0, T(1) = 1, T(n) = 1 + 2 * T(n - 1).
    """
    if number_blocks == 0:
        return 0
    if number_blocks == 1:
        return 1
    return 1 + 2 * rice_r(number_blocks - 1)


# - This will test two functions using preset n's.

n0 = 0  # This should return 0
n1 = 1  # This should return 1
n2 = 2  # This should return 3
n3 = 4  # This should return 15

# Same output as printing each call individually: iterative results first,
# then the recursive ones, for n0..n3 in order.
for grain_total in (rice, rice_r):
    for squares in (n0, n1, n2, n3):
        print(grain_total(squares))
07_DoublingNumbers/07_DoublingNumbers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ### Question1
# Create a function that takes a list of strings and integers, and filters out the list so that it
# returns a list of integers only.
# Examples
# filter_list([1, 2, 3, 'a', 'b', 4]) ➞ [1, 2, 3, 4]
# filter_list(['A', 0, 'Edabit', 1729, 'Python', '1729']) ➞ [0, 1729]
# filter_list(['Nothing', 'here']) ➞ []

# +
def filter_list(lst: list) -> list:
    """Return only the genuine integers from *lst*, preserving order.

    ``isinstance`` is the idiomatic type check, but ``bool`` is a subclass
    of ``int`` in Python, so booleans are excluded explicitly — this keeps
    the behaviour of the original ``type(i) == int`` comparison, which
    also rejected True/False.
    """
    return [item for item in lst
            if isinstance(item, int) and not isinstance(item, bool)]


print(filter_list([1, 2, 3, 'a', 'b', 4]))
print(filter_list(['A', 0, 'Edabit', 1729, 'Python', '1729']))
print(filter_list(['Nothing', 'here']))
# -

# ### Question2
# Given a list of numbers, create a function which returns the list but with each element's
# index in the list added to itself. This means you add 0 to the number at index 0, add 1 to the
# number at index 1, etc...
# Examples
# add_indexes([0, 0, 0, 0, 0]) ➞ [0, 1, 2, 3, 4]
# add_indexes([1, 2, 3, 4, 5]) ➞ [1, 3, 5, 7, 9]
# add_indexes([5, 4, 3, 2, 1]) ➞ [5, 5, 5, 5, 5]

def add_indexes(lst: list) -> list:
    """Return a new list where each element has its index added to it.

    The argument is left unmodified: the previous version mutated the
    caller's list in place, a surprising side effect for a function whose
    contract is to *return* the result.
    """
    return [value + index for index, value in enumerate(lst)]


print(add_indexes([0, 0, 0, 0, 0]))
print(add_indexes([1, 2, 3, 4, 5]))
print(add_indexes([5, 4, 3, 2, 1]))

# ### Question3
# Create a function that takes the height and radius of a cone as arguments and returns the
# volume of the cone rounded to the nearest hundredth. See the resources tab for the formula.
#
# Examples
# cone_volume(3, 2) ➞ 12.57
#
# cone_volume(15, 6) ➞ 565.49
# cone_volume(18, 0) ➞ 0

# +
import math


def cone_volume(height: int, radius: int) -> float:
    """Return the cone volume V = (1/3)·π·r²·h, rounded to 2 decimals."""
    return round((1 / 3) * math.pi * (radius ** 2) * height, 2)


print(cone_volume(3, 2))
print(cone_volume(15, 6))
print(cone_volume(18, 0))
# -

# ### Question4
# This Triangular Number Sequence is generated from a pattern of dots that form a triangle.
# The first 5 numbers of the sequence, or dots, are:
# 1, 3, 6, 10, 15
# This means that the first triangle has just one dot, the second one has three dots, the third one
# has 6 dots and so on.
# Write a function that gives the number of dots with its corresponding triangle number of the
# sequence.
# Examples
# triangle(1) ➞ 1
# triangle(6) ➞ 21
# triangle(215) ➞ 23220
# A Rule
# We can make a "Rule" so we can calculate any triangular number.
#
# First, rearrange the dots like this:
#
# triangular numbers 1 to 5
#
# Then double the number of dots, and form them into a rectangle:
#
# triangular numbers when doubled become n by n+1 rectangles
#
# Now it is easy to work out how many dots: just multiply n by n+1
#
# Dots in rectangle = n(n+1)
#
# But remember we doubled the number of dots, so
#
# Dots in triangle = n(n+1)/2
#
# We can use xn to mean "dots in triangle n", so we get the rule:
#
# Rule: xn = n(n+1)/2

# +
def triangle(n: int) -> int:
    """Return the n-th triangular number, n·(n+1)/2.

    Floor division is exact because n·(n+1) is always even; it also makes
    the function return an int (triangle(1) == 1), matching the examples,
    instead of the float the old ``* 0.5`` version produced.
    """
    return n * (n + 1) // 2


if __name__ == "__main__":
    # Guarded so importing this module never blocks waiting on input().
    n = int(input('Enter the triangle number :'))
    print("The {}th triangle has {} dots ".format(n, int(triangle(n))))
# -

# ### Question5
# Create a function that takes a list of numbers between 1 and 10 (excluding one number) and
# returns the missing number.
# Examples
# missing_num([1, 2, 3, 4, 6, 7, 8, 9, 10]) ➞ 5
# missing_num([7, 2, 3, 6, 5, 9, 1, 4, 8]) ➞ 10
# missing_num([10, 5, 1, 2, 4, 6, 8, 3, 9]) ➞ 7

# +
def missing_num(lst: list) -> int:
    """Return the single number from 1..10 that is absent from *lst*.

    The spec promises exactly one missing value, so the answer is simply
    sum(1..10) - sum(lst) in O(n).  The previous version returned a
    one-element *list* (e.g. [5]) instead of the number itself shown in
    the examples.
    """
    return sum(range(1, 11)) - sum(lst)


print(missing_num([1, 2, 3, 4, 6, 7, 8, 9, 10]))
print(missing_num([7, 2, 3, 6, 5, 9, 1, 4, 8]))
print(missing_num([10, 5, 1, 2, 4, 6, 8, 3, 9]))
assignments/PythonBasicsProgramming/Programming_Assignment_20.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# required module
# # !pip install protobuf numpy

# required for this example
# # !pip install onnx chainer

# +
from graphviewer.proto.attr_value_pb2 import AttrValue
from graphviewer.proto.graph_pb2 import GraphDef
from graphviewer.proto.node_def_pb2 import NodeDef
from graphviewer.proto.tensor_shape_pb2 import TensorShapeProto
from graphviewer.proto.versions_pb2 import VersionDef


def get_graphdef_from_file(path):
    """Load an ONNX model from *path* and convert its graph to a GraphDef."""
    import onnx
    model = onnx.load(path)
    return parse(model.graph)


def parse(graph):
    """Convert an ``onnx.GraphProto`` into a TensorBoard-style ``GraphDef``.

    Graph inputs/outputs become 'Variable' nodes carrying dtype and shape;
    operator nodes are keyed by their first output name, with attributes
    flattened into one human-readable string.
    """
    nodes_proto, nodes = [], []
    import itertools
    for node in itertools.chain(graph.input, graph.output):
        nodes_proto.append(node)

    for node in nodes_proto:
        shapeproto = TensorShapeProto(
            dim=[TensorShapeProto.Dim(size=d.dim_value)
                 for d in node.type.tensor_type.shape.dim])
        nodes.append(NodeDef(
            name=node.name.encode(encoding='utf_8'),
            op='Variable',
            input=[],
            attr={
                'dtype': AttrValue(type=node.type.tensor_type.elem_type),
                'shape': AttrValue(shape=shapeproto),
            }
        ))

    for node in graph.node:
        attr = []
        for s in node.attribute:
            attr.append(' = '.join([str(f[1]) for f in s.ListFields()]))
        attr = ', '.join(attr).encode(encoding='utf_8')
        nodes.append(NodeDef(
            name=node.output[0].encode(encoding='utf_8'),
            op=node.op_type,
            input=node.input,
            attr={'parameters': AttrValue(s=attr)},
        ))

    # NOTE(review): the original built a name -> "op_name" mapping dict here
    # that was never used; removed as dead code.
    return GraphDef(node=nodes, versions=VersionDef(producer=22))


# +
from IPython.display import display
from IPython.display import HTML
import numpy

from graphviewer.proto.graph_pb2 import GraphDef


def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
    strip_def = GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                # tensor_content is a protobuf *bytes* field: assigning a
                # plain str raises TypeError on Python 3, so the placeholder
                # must be encoded.
                tensor.tensor_content = \
                    '<stripped {:d} bytes>'.format(size).encode('utf_8')
    return strip_def


def show_graph(graph_def, max_const_size=32):
    """Visualize TensorFlow graph."""
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph' + str(numpy.random.rand()))

    iframe = """
        <iframe seamless style="width:960px;height:720px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# -

gdef = get_graphdef_from_file('model.onnx')

show_graph(gdef)

# +
from collections import Counter

import chainer
from chainer.computational_graph import build_computational_graph

from graphviewer.parser.dtypes import convert_dtype


def parse(outputs):
    """Convert a Chainer computational graph (built from *outputs*) to a GraphDef.

    NOTE: intentionally redefines the ONNX ``parse`` above; the notebook
    uses one backend at a time.
    """
    cgraph = build_computational_graph([outputs])
    nodes = []
    input_dict = {}
    for head, tail in cgraph.edges:
        input_dict.setdefault(id(tail), []).append(head)

    name_cnt = Counter()
    id_to_name = {}

    def name_resolver(node):
        # Assign (and memoize) a stable, unique display name per graph object.
        name = id_to_name.get(id(node), None)
        if name is not None:
            return name
        if isinstance(node, chainer.variable.VariableNode):
            name = 'Variable{:d}'.format(name_cnt['Variable'])
            name_cnt['Variable'] += 1
        else:
            name = '{}{:d}'.format(node.label, name_cnt[node.label])
            name_cnt[node.label] += 1
        id_to_name[id(node)] = name
        return name

    for node in cgraph.nodes:
        assert isinstance(node, (
            chainer.variable.VariableNode,
            chainer.function_node.FunctionNode))
        if id(node) not in input_dict:
            # Leaf variables (no incoming edges) become 'Variable' nodes
            # carrying dtype and shape.
            shape_proto = TensorShapeProto(
                dim=[TensorShapeProto.Dim(size=s) for s in node.shape])
            nodes.append(NodeDef(
                name=name_resolver(node).encode(encoding='utf_8'),
                op='Variable',
                input=[],
                attr={
                    'dtype': AttrValue(type=convert_dtype(node.dtype)),
                    # Fixed attr key: was misspelled 'shpae', which silently
                    # hid shape information from the viewer (the ONNX parse
                    # above uses 'shape').
                    'shape': AttrValue(shape=shape_proto),
                }
            ))
        else:
            inputs = [name_resolver(n).encode(encoding='utf_8')
                      for n in input_dict[id(node)]]
            attr = node.label.encode(encoding='utf_8')  # TODO
            nodes.append(NodeDef(
                name=name_resolver(node).encode(encoding='utf_8'),
                op=node.__class__.__name__,
                input=inputs,
                attr={'parameters': AttrValue(s=attr)},
            ))
    return GraphDef(node=nodes, versions=VersionDef(producer=22))


# +
import chainer.functions as F
import chainer.links as L


# Network definition
class Net(chainer.Chain):
    """Small LeNet-style conv net; layer input sizes are inferred lazily."""

    def __init__(self):
        super(Net, self).__init__()
        with self.init_scope():
            # the size of the inputs to each layer will be inferred
            self.conv1 = L.Convolution2D(None, 10, ksize=5)
            self.conv2 = L.Convolution2D(None, 20, ksize=5)
            self.l1 = L.Linear(None, 50)
            self.l2 = L.Linear(None, 10)

    def forward(self, x):
        x = F.relu(F.max_pooling_2d(self.conv1(x), 2))
        x = F.relu(F.max_pooling_2d(self.conv2(x), 2))
        x = F.relu(self.l1(x))
        x = F.dropout(x)
        return self.l2(x)


model = L.Classifier(Net())
# -

x = chainer.Variable(numpy.random.rand(1, 1, 28, 28).astype(numpy.float32))
t = chainer.Variable(numpy.random.rand(1).astype(numpy.int32))
y = model(x, t)

gdef = parse(y)

show_graph(gdef)
example_for_code_explain.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] id="UrWJ4QBTJRsv" # # Trabalhando com coordenadas celestes no WCS 1: Especificando, lendo e plotando # # ## Autores # # <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # ## Tradução # # <NAME> # # ## Objetivos de aprendizagem # # 1. Demonstrar duas maneiras de construir o objeto `astropy.wcs.WCS` # 2. Mostre uma imagem da nebulosa Helix com RA e DEC rotulados # 3. Plotar uma escala de barras em uma imagem com informações WCS # # ## Palavras chaves # # WCS, coordenadas, matplotlib # # ## Conteúdo complementar # 1. "An Introduction to Modern Astrophysics" ([Carroll & Ostlie](https://ui.adsabs.harvard.edu/abs/2006ima..book.....C/abstract)) # 2. [FITS WCS page at GSFC](https://fits.gsfc.nasa.gov/fits_wcs.html) # # ## Sumário # # Esta série de tutoriais tem como objetivo mostrar como o conteúdo do Capítulo 1 de "An Introduction to Modern Astrophysics" de Carroll e Ostlie pode ser aplicado a situações de pesquisa em astrofísica da vida real, usando ferramentas do ecossistema Astropy. Apresentaremos duas abordagens diferentes para construir um objeto `astropy.wcs.WCS`, que contém metadados que (neste caso) definem um mapeamento entre as coordenadas da imagem e as coordenadas do céu. O subpacote `astropy.wcs` está em conformidade com os padrões do FITS World Coordinate System (WCS) usado extensivamente pela comunidade de pesquisa em astronomia. Criaremos um WCS 2D para uma imagem da icônica nebulosa Helix (uma nebulosa planetária) e exibiremos uma imagem da nebulosa com as coordenadas do céu (aqui, equatorial, ICRS RA e Dec) rotuladas. Finalmente, sobreporemos uma barra de escala na imagem da nebulosa Helix usando WCS para dar ao leitor uma noção do tamanho angular da imagem. 
# + id="ApMUBsVVJRsw"
from astropy.wcs import WCS
from astropy.io import fits
import matplotlib.pyplot as plt

# + [markdown] id="AbDZ0uWTJRsz"
# ## Seção 1: Duas maneiras de criar um objeto `astropy.wcs.WCS`
#
# *Coordenadas mundiais* servem para localizar uma medição em algum espaço de parâmetros multidimensional. Um Sistema de Coordenadas Mundiais (WCS) especifica as coordenadas físicas, ou mundiais, a serem anexadas a cada pixel ou voxel de uma imagem ou matriz N-dimensional. Um [conjunto elaborado de padrões e convenções](https://fits.gsfc.nasa.gov/fits_wcs.html) foi desenvolvido para o formato Flexible Image Transport System (FITS) ([Wells et al. 1981](https://ui.adsabs.harvard.edu/abs/1981A&AS...44..363W/abstract)). Um exemplo típico do WCS é especificar a Ascensão Reta (RA) e a Declinação (Dec) no céu associada a uma determinada localização de pixel ou spaxel em uma imagem celestial bidimensional ([Greisen & Calabretta 2002](https://ui.adsabs.harvard.edu/abs/2002A&A...395.1061G/abstract); [Calabretta e Greisen 2002](https://ui.adsabs.harvard.edu/abs/2002A&A...395.1077C/abstract)).
#
# O [subpacote `astropy.wcs`](https://docs.astropy.org/en/stable/wcs/) implementa padrões e convenções FITS para Sistemas de Coordenadas Mundiais. Usando o objeto `astropy.wcs.WCS` e `matplotlib`, podemos gerar imagens do céu que possuem eixos rotulados com coordenadas como ascensão reta (RA) e declinação (Dec). Isso requer selecionar as projeções apropriadas para `matplotlib` e fornecer um objeto `astropy.visualization.WCSAxes`.
#
# Existem duas maneiras principais de inicializar um objeto `WCS`: com um dicionário Python (ou objeto semelhante a um dicionário, como um cabeçalho de arquivo FITS) ou com listas Python. Neste conjunto de exemplos, inicializaremos um objeto `astropy.wcs.WCS` com duas dimensões, como seria necessário para representar uma imagem.
# # O padrão WCS define um conjunto de palavras-chave que são usadas para representar o sistema de coordenadas mundiais para um determinado conjunto de dados (por exemplo, imagem). Aqui está uma lista das palavras-chave essenciais do WCS e seus usos; Em cada caso, o inteiro $n$ denota o eixo dimensional (começando com 1) ao qual a palavra-chave está sendo aplicada. Em nossos exemplos abaixo, teremos duas dimensões de imagem (eixos), então $n$ será 1 ou 2. # * **CRVALn**: o valor da coordenada em um ponto de referência (por exemplo, valor RA e DEC em graus) # * **CRPIXn**: a localização do pixel do ponto de referência (por exemplo, CRPIX1=1, CRPIX2=1 descreve o centro de um pixel de canto) # * **CDELTn**: o incremento de coordenadas no ponto de referência (por exemplo, a diferença no valor 'RA' do pixel de referência para seu vizinho ao longo do eixo RA) # * **CTYPEn**: uma sequência de 8 caracteres que descreve o tipo de eixo (por exemplo, 'RA---TAN' e 'DEC---TAN' descrevem a projeção do céu do plano tangente típica que os astrônomos usam) # * **CUNITn**: uma string descrevendo a unidade para cada eixo (se não for especificada, a unidade padrão é graus.) # * **NAXISn**: um inteiro que define o número de pixels em cada eixo # # Algumas boas referências do padrão WCS podem ser encontradas [aqui](https://fits.gsfc.nasa.gov/fits_wcs.html). # + [markdown] id="A_uD9fzbUlet" # ### Método 1: Construindo um objeto WCS com um dicionário # # Uma maneira de definir um objeto Astropy `WCS` é construir um dicionário contendo todas as informações essenciais (ou seja, especificando valores para as palavras-chave WCS listadas acima) que mapeiam o espaço de coordenadas de pixel para o espaço de coordenadas mundiais. 
# # Neste exemplo, definimos dois eixos de coordenadas com: # * Uma projeção Gnomônica (plano tangente), que corresponde ao sistema de coordenadas RA/Dec # * Um local de referência de (RA,DEC) = (337,52, -20,83), conforme definido pelas chaves **CRVALn** # * O pixel no valor da coordenada (1,1) como local de referência (chaves **CRPIXn**) # * Unidades de graus (**CUNITn = 'deg'**) # * Tamanhos de pixel de 1 x 1 segundo de arco (**CDELTn = 0,002778** em graus) # * Um tamanho de imagem de 1024 x 1024 pixels (chave **NAXISn**) # + id="3fRBbYmbJRs0" wcs_input_dict = { 'CTYPE1': 'RA---TAN', 'CUNIT1': 'deg', 'CDELT1': -0.0002777777778, 'CRPIX1': 1, 'CRVAL1': 337.5202808, 'NAXIS1': 1024, 'CTYPE2': 'DEC--TAN', 'CUNIT2': 'deg', 'CDELT2': 0.0002777777778, 'CRPIX2': 1, 'CRVAL2': -20.833333059999998, 'NAXIS2': 1024 } wcs_helix_dict = WCS(wcs_input_dict) # + [markdown] id="AA4wMjYqJRs2" # Agora vamos imprimir o objeto `WCS` definido com um dicionário Python para verificar seu conteúdo: # + id="_3_wPImmJRs3" wcs_helix_dict # To check output # + [markdown] id="bP4tySTaWcgx" # Nesta demonstração (abaixo), assumimos que conhecemos todos os valores de palavras-chave relevantes do WCS a serem especificados. Normalmente, no entanto, contaremos com software para produzir esses valores para nós. Por exemplo, as informações WCS são mais frequentemente incluídas automaticamente em arquivos FITS produzidos por software usado para tirar imagens com a maioria dos instrumentos em observatórios astronômicos. Nos casos em que as informações do WCS são fornecidas para nós em um arquivo FITS, elas normalmente são incluídas em um cabeçalho FITS, que, quando lido em Python, atua como um objeto de dicionário. Demonstramos isso mais adiante neste tutorial. 
# + [markdown] id="YlwK4xHQU6Un" # ### Método 2: Crie um objeto WCS vazio antes de atribuir valores # # Alternativamente, poderíamos inicializar o objeto `astropy.wcs.WCS` e atribuir os valores da palavra-chave com listas correspondentes a cada eixo respectivo. # + id="bGI2DpWdJRs6" wcs_helix_list = WCS(naxis=2) wcs_helix_list.wcs.crpix = [1, 1] wcs_helix_list.wcs.crval = [337.5202808, -20.833333059999998] wcs_helix_list.wcs.cunit = ["deg", "deg"] wcs_helix_list.wcs.ctype = ["RA---TAN", "DEC--TAN"] wcs_helix_list.wcs.cdelt = [-0.0002777777778, 0.0002777777778] # + [markdown] id="ZjCbtC-bAtMr" # Vamos imprimir o objeto `WCS` mais uma vez para verificar como nossos valores foram atribuídos. # + id="iwx8iECxJRs8" wcs_helix_list # To check output # + [markdown] id="rL7-Qut6AGDM" # Observe que quando inicializamos o objeto WCS desta forma, os valores `NAXIS` são definidos como 0. Para atribuir coordenadas à nossa imagem, precisaremos corrigir a forma do array de objetos `WCS` para que corresponda à nossa imagem. Podemos fazer isso atribuindo um valor ao atributo `array_shape` do objeto `WCS`: # + id="6PnSCW0_-pQs" wcs_helix_list.array_shape = [1024, 1024] # + [markdown] id="AszZGXxNBDoz" # Agora, quando imprimimos o objeto `WCS`, podemos ver que os valores `NAXIS` foram atualizados do tamanho padrão de 0 a 1024. # + id="qdg6jLCa_Sck" wcs_helix_list # + [markdown] id="qKvxquTdEOo5" # ## Seção 2: Mostre uma imagem da nebulosa Helix com RA e Dec rotulados # # Na maioria das vezes, podemos obter o objeto `astropy.wcs.WCS` necessário do cabeçalho do arquivo FITS de um telescópio ou banco de dados astronômico. Este processo é descrito abaixo. # # ### Passo 1: Leia o arquivo FITS # # Vamos ler o arquivo FITS contendo uma imagem da nebulosa Helix do repositório `astropy-data` GitHub usando o subpacote `astropy.io.fits`. A função `astropy.io.fits.open()` carregará o conteúdo de um arquivo FITS no Python e aceita um caminho de arquivo local ou uma URL (como demonstrado aqui). 
Esta imagem (arquivo FITS) foi originalmente acessada do [Digitized Sky Survey](https://archive.eso.org/dss/dss), mas é fornecida no repositório `astropy-data` por conveniência: # + id="MRVbLwvcE__Y" header_data_unit_list = fits.open('https://github.com/astropy/astropy-data/raw/6d92878d18e970ce6497b70a9253f65c925978bf/tutorials/celestial-coords1/tailored_dss.22.29.38.50-20.50.13_60arcmin.fits') # + [markdown] id="tTKAPz2pJzH0" # Os arquivos FITS são um formato de arquivo binário usado principalmente por astrônomos e podem conter informações organizadas em muitas "extensões", que contêm informações de cabeçalho (por exemplo, metadados) e dados (por exemplo, dados de imagem). Podemos verificar quantas extensões existem em um arquivo FITS, bem como visualizar um resumo do conteúdo de cada extensão, imprimindo as informações do objeto FITS. # + id="n5T4-QCzJVKa" header_data_unit_list.info() # + [markdown] id="we-RjJ8PJo1w" # Isso nos mostra que nosso arquivo FITS contém apenas uma extensão, rotulada 'PRIMARY' (ou extensão número 0). Copiaremos os dados da imagem desta extensão para a variável `image`, e os dados do cabeçalho para a variável `header`: # + id="w4W_HiR-Ji6g" image = header_data_unit_list[0].data header = header_data_unit_list[0].header # + [markdown] id="EUVTT2WdKCGd" # Podemos imprimir o cabeçalho da imagem FITS na tela para que todo o seu conteúdo possa ser verificado ou utilizado. Observe que as informações do WCS para essas informações podem ser encontradas na parte inferior do cabeçalho impresso, abaixo. # + id="XcerK_cZJ30W" header # + [markdown] id="g3FyYZ-bQ5b7" # Observe que o cabeçalho *original* (conforme baixado do DSS) viola os padrões FITS WCS (porque inclui palavras-chave CDELTn e uma matriz de valores de CD; incluindo palavras-chave de matriz PC obsoletas). O cabeçalho foi limpo para estar em conformidade com os padrões existentes. 
# + [markdown] id="FzAtb_LWLyWn" # ### Etapa 2: Leia no sistema de coordenadas da imagem FITS com astropy.wcs.WCS # # Como o cabeçalho contém informações WCS e age como um dicionário Python, um objeto Astropy `WCS` pode ser criado diretamente do cabeçalho FITS. # + id="3Wl5J0HBLoIp" wcs_helix = WCS(header) # + [markdown] id="Akv472vTZFLp" # Vamos imprimir o objeto `WCS` para ver quais valores foram extraídos do cabeçalho. # + id="1C7y709IZNvl" wcs_helix # + [markdown] id="O10H9rmtQEnM" # ### Etapa 3: Plote a nebulosa Helix com eixos de coordenadas do céu (RA e DEC) # # Os dados da imagem, `image`, são uma matriz 2D de valores e, por si só, não contêm informações sobre as coordenadas do céu dos pixels. Assim, se plotarmos a imagem por si só, os eixos de plotagem mostrariam valores de pixel. (Estaremos usando a biblioteca `matplotlib` para a plotagem.) # + id="gV0MXRKrWchC" fig = plt.figure(figsize=(10, 10)) plt.imshow(image, origin='lower', cmap='cividis') # + [markdown] id="7F8oKtmyWchD" # Todas as informações que mapeiam esses valores de pixel para as coordenadas do céu vêm dos metadados WCS, que carregamos no objeto `wcs_helix` (do cabeçalho do arquivo FITS). Este objeto `WCS` é construído para que possa ser fornecido ao `matplotlib` com a palavra-chave `projection`, como mostrado na chamada para `matplotlib.pyplot.subplot` abaixo, a fim de produzir eixos que mostrem informações de coordenadas do céu. de valores de pixel. Também sobreporemos uma grade de coordenadas em coordenadas equatoriais ICRS passando o nome do quadro de coordenadas do céu (aqui, "icrs") para o método `ax.get_coords_overlay()`. 
# + id="fnv48KDbOkxB" fig = plt.figure(figsize=(10, 10)) ax = plt.subplot(projection=wcs_helix) plt.imshow(image, origin='lower', cmap='cividis', aspect='equal') plt.xlabel(r'RA') plt.ylabel(r'Dec') overlay = ax.get_coords_overlay('icrs') overlay.grid(color='white', ls='dotted') # + [markdown] id="DVVT552iQYn4" # ## Exercício # # Copie o bloco de código acima e, em vez disso, sobreponha uma grade de coordenadas em coordenadas galácticas. # + id="XPA-2LOLQLnB" fig = plt.figure(figsize=(10, 10)) ax = plt.subplot(projection=wcs_helix) plt.imshow(image, origin='lower', cmap='cividis', aspect='equal') plt.xlabel(r'RA') plt.ylabel(r'Dec') overlay = ax.get_coords_overlay('galactic') overlay.grid(color='white', ls='dotted') # + [markdown] id="FexGrb2hXj85" # ## Seção 3: Plote um marcador de escala em uma imagem com WCS # # Para adicionar um marcador de escala (ou seja, uma linha de um tamanho *angular* específico) à imagem da nebulosa Helix, usaremos o método matplotlib `Axes.arrow` para desenhar uma linha. # # Primeiro, precisamos decidir onde colocar a barra de escala. No exemplo abaixo, definimos o centro do marcador de escala em `(RA, Dec) = (337 graus, -21,2 graus)`. # # Em seguida, usamos o atributo `transform` de `Axes.arrow` para desenhar nossas barras de escala em graus (em vez de coordenadas de pixel). Nesse caso, desenhamos um marcador de escala com um comprimento de 0,1 graus. As entradas do método de seta são `ax.arrow(x, y, dx, dy, **kwargs)`, com `x` e `y` sendo o `RA` e o `Dec` do início da linha. Usamos `dx=0` para que não haja componente horizontal na barra, e `dy=0.1`, que dá o comprimento da seta na direção vertical. Para garantir que a seta seja desenhada no quadro de coordenadas J2000 ICRS, passamos `ax.get_transform('icrs')` para a palavra-chave `transform`. # # Finalmente, usamos `matplotlib.pyplot.text` para marcar o comprimento do marcador de escala. 
# + id="OkH12BWESpGp" fig = plt.figure(figsize=(10, 10), frameon=False) ax = plt.subplot(projection=wcs_helix) ax.arrow(337, -21.2, 0, 0.1, head_width=0, head_length=0, fc='white', ec='white', width=0.003, transform=ax.get_transform('icrs')) plt.text(337.05, -21.18, '0.1 deg', color='white', rotation=90, transform=ax.get_transform('icrs')) plt.imshow(image, origin='lower', cmap='cividis', aspect='equal') plt.xlabel(r'RA') plt.ylabel(r'Dec') # + [markdown] id="N_CWLccBPXvu" # ## Exercício # # Faça uma barra horizontal com o mesmo comprimento. Tenha em mente que o ângulo de 1 hora = 15 graus. # + id="bIePbBar8uFv"
tutorials/wcs-celestial-coords1/celestial_coords1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cv101 # language: python # name: cv101 # --- # ## More image processing # We need to include the home directory in our path, so we can read in our own module. import os import sys sys.path.append(os.path.join("..")) import cv2 import numpy as np from utils.imutils import jimshow from utils.imutils import jimshow_channel import matplotlib.pyplot as plt # __Load image__ image = cv2.imread(os.path.join("..", "data", "img", "terasse.jpeg")) jimshow(image, "Original image") # ## Plot colour historgrams # # ```cv2.calcHist([image], channels, mask, histSize, ranges[, hist[, accumulate]])``` # # - images : it is the source image of type uint8 or float32 represented as “[img]”. # - channels : it is the index of channel for which we calculate histogram. # - For grayscale image, its value is [0] and # - color image, you can pass [0], [1] or [2] to calculate histogram of blue, green or red channel respectively. # - mask : mask image. To find histogram of full image, it is given as “None”. # - histSize : this represents our BIN count. For full scale, we pass [256]. # - ranges : this is our RANGE. Normally, it is [0,256]. 
# __Only blue channel__ hist = cv2.calcHist([image], [0], None, [256], [0,256]) plt.plot(hist, color="Blue") hist = cv2.calcHist([image], [1], None, [256], [0,256]) plt.plot(hist, color="Green") hist = cv2.calcHist([image], [2], None, [256], [0,256]) plt.plot(hist, color="Red") # __Split channels__ (BLUE, GREEN, RED) = cv2.split(image) hist = cv2.calcHist([RED], [0], None, [256], [0,256]) plt.plot(hist, color="r") # ## Plot multiple channels at the same time # + # split channels channels = cv2.split(image) # names of colours color_names = ["b", "g", "r"] # create figure plt.figure() # add title plt.title("Multiple channels") # add xlabel plt.xlabel("Bins") # add ylabel plt.ylabel("Number of pixels") # loop over our channels for channel, color_name in zip(channels, color_names): # Create a histogram hist = cv2.calcHist([channel], [0], None, [256], [0,256]) # Plot histogram plt.plot(hist, color = color_name) # Set limit of x-axis plt.xlim([0, 256]) # show the plot plt.show() # - # __Comparing histograms__ image2 = cv2.imread(os.path.join("..", "data", "img", "the_bedroom.jpg")) jimshow(image, "Image 1") jimshow(image2, "Image 2") # + # split channels channels = cv2.split(image2) # names of colours color_names = ["b", "g", "r"] # create figure plt.figure() # add title plt.title("Multiple channels") # add xlabel plt.xlabel("Bins") # add ylabel plt.ylabel("Number of pixels") # loop over our channels for channel, color_name in zip(channels, color_names): # Create a histogram hist = cv2.calcHist([channel], [0], None, [256], [0,256]) # Plot histogram plt.plot(hist, color = color_name) # Set limit of x-axis plt.xlim([0, 256]) # show the plot plt.show() # - # ## Multidimensional histograms # Now that we are working with multi-dimensional histograms, we need to keep in mind the number of bins we are using. # # In previous examples, I’ve used 256 bins for demonstration purposes. 
However, if we used a 256 bins for each dimension in a 2D histogram, our resulting histogram would have 256 × 256 = 65,536 separate pixel counts. Not only is this wasteful of resources, it’s not practical. # # Most applications use somewhere between 8 and 64 bins when computing multi-dimensional histograms. So we use 32 instead of 256. # Create matplotlib figure fig = plt.figure(figsize=(20,10)) # plot a 2D color histogram for green and blue ax = fig.add_subplot(131) hist = cv2.calcHist([image[1], image[0]], [0, 1], None, [32, 32], [0, 256, 0, 256]) p = ax.imshow(hist) ax.set_title("2D Color Histogram for Green and Blue") plt.colorbar(p) # __Adding multiple plots to one visualisation__ # + # Create matplotlib figure fig = plt.figure(figsize=(20,10)) # plot a 2D color histogram for green and blue ax = fig.add_subplot(131) hist = cv2.calcHist([image[1], image[0]], [0, 1], None, [32, 32], [0, 256, 0, 256]) p = ax.imshow(hist) ax.set_title("2D Color Histogram for Green and Blue") plt.colorbar(p) # plot a 2D color histogram for green and red ax = fig.add_subplot(132) hist = cv2.calcHist([image[1], image[2]], [0, 1], None, [32, 32], [0, 256, 0, 256]) p = ax.imshow(hist) ax.set_title("2D Color Histogram for Green and Red") plt.colorbar(p) # plot a 2D color histogram for blue and red ax = fig.add_subplot(133) hist = cv2.calcHist([image[0], image[2]], [0, 1], None, [32, 32], [0, 256, 0, 256]) p = ax.imshow(hist) ax.set_title("2D Color Histogram for Blue and Red") plt.colorbar(p) # - # ## Comparing histograms # __Extract histograms__ hist1 = cv2.calcHist([image], [0,1,2], None, [8,8,8], [0,256, 0,256, 0,256]) hist2 = cv2.calcHist([image2], [0,1,2], None, [8,8,8], [0,256, 0,256, 0,256]) # __MinMax normalization__ # Before comparing historgrams, we first have to normalise them to occur within a narrower ranger of values. Why do we need to do this? 
# # We're doing to use MinMax normalisation, which we calculate in the following way for each value: # # ```norm_pixel = (value-min) / (max-min)``` hist1 = cv2.normalize(hist1, hist1, 0,255, cv2.NORM_MINMAX) hist2 = cv2.normalize(hist2, hist2, 0,255, cv2.NORM_MINMAX) # __Comparing the first two histograms__ round(cv2.compareHist(hist1, hist2, cv2.HISTCMP_CHISQR), 2) # __Load a new image__ image3 = cv2.imread(os.path.join("..", "data", "img", "florence.jpg")) jimshow(image3, "Image 3") # __Compare with image 1__ hist3 = cv2.calcHist([image3], [0,1,2], None, [8,8,8], [0,256, 0,256, 0,256]) hist3 = cv2.normalize(hist3, hist3, 0,255, cv2.NORM_MINMAX) round(cv2.compareHist(hist1, hist1, cv2.HISTCMP_CHISQR), 2) round(cv2.compareHist(hist1, hist2, cv2.HISTCMP_CHISQR), 2) round(cv2.compareHist(hist1, hist3, cv2.HISTCMP_CHISQR), 2)
notebooks/session3_inclass_rdkm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Decision-tree classification of Titanic survival, comparing three feature sets.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the data set and drop rows with any missing value.
# NOTE(review): dropna() with no subset removes a row if *any* column is NaN,
# which can discard most of the data — confirm this is intended.
data=pd.read_excel('titanic.xls')
data=data.dropna()
data['age']=data['age'].astype(int)
data.head()

data.head()

# Encode the categorical columns as integers.
data.sex=data.sex.map(dict(female=1,male=0))
data.embarked=data.embarked.map(dict(C=0,Q=1,S=2))
data['embarked']=data['embarked'].astype(int)
data=data.drop('name',axis=1)
data.head()

# NOTE(review): this is an alias, not a copy — `newdata` and `data` are the
# same DataFrame object, so the mutations made for Case 3 below affect both.
# (Cases 1 and 2 take their train/test splits before those mutations happen,
# so the evaluation itself is unaffected — but the ordering is load-bearing.)
newdata=data
newdata.head()

from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report

def splitdata1(data):
    """80/20 train/test split: column 0 is the target, columns 1-7 the features."""
    X=data.values[:,1:8]
    Y=data.values[:,0]
    X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=100)
    return X_train,X_test,y_train,y_test

X_train1,X_test1,y_train1,y_test1=splitdata1(newdata)
X_train1

# +
# Using all features:
# Here, we use all the features of the given dataset to create the decision tree and check the accuracy
# -

def generateGiniClassifierObject(X_train,X_test,y_train):
    """Fit and return a depth-3 Gini decision tree on the training data.

    NOTE(review): the X_test parameter is accepted but never used.
    """
    clf_gini = DecisionTreeClassifier(criterion="gini",random_state=100,max_depth=3,min_samples_leaf=5)
    clf_gini.fit(X_train, y_train)
    return clf_gini

clf1=generateGiniClassifierObject(X_train1,X_test1,y_train1)

def findPredictionForY(X_test, clf_object):
    """Predict labels for X_test with the fitted classifier, print and return them."""
    y_pred = clf_object.predict(X_test)
    print("Predicted values:")
    print(y_pred)
    return y_pred

# Function to calculate accuracy
def findAccuracyOfModel(y_test, y_pred):
    """Print the confusion matrix, accuracy (in %) and classification report."""
    print("Confusion Matrix: ",confusion_matrix(y_test, y_pred))
    print ("Accuracy : ",accuracy_score(y_test,y_pred)*100)
    print("Report : ",classification_report(y_test, y_pred))

# +
# Using a subset of features:
# Here, we use a subset of the features of the given dataset to create the decision tree and check the accuracy
# Selected subset attributes: survived, pclass, age, sex, sibsp, parch
# Ignored attributes: fare, embarked
# -

# drop() returns a new frame, so `newdata1` really is independent of `data`.
newdata1=data.drop(['fare','embarked'],axis=1)
newdata1.head()

def splitdata2(data):
    """80/20 train/test split over the reduced feature set (columns 1-5)."""
    X=data.values[:,1:6]
    Y=data.values[:,0]
    X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=100)
    return X_train,X_test,y_train,y_test

X_train2,X_test2,y_train2,y_test2=splitdata2(newdata1)
X_train2

clf2=generateGiniClassifierObject(X_train2,X_test2,y_train2)

# +
# Using a modified subset of features (Changing the values of features and adding new attributes):
# Here, we use a modified subset of the features of the given dataset to create the decision tree and check the accuracy
# Modification criteria is:
# new feature family = sibsp + parch
# new feature weighted_class = pclass*2 if pclass =1 ; pclass*3 if pclass =2 ; pclass*4 if pclass =3, etc
# -

# Alias again: adding columns below also mutates `data` and `newdata`.
newdata2=data
newdata2.head()

newdata2['family']=newdata2['sibsp']+newdata2['parch']

newdata2['pclass'].unique()

# NOTE(review): attribute assignment sets a Python attribute on the DataFrame
# object, not a column; the real 'weighted_class' column is created by the
# .loc assignments that follow (2 = 1*2, 6 = 2*3, 12 = 3*4).
newdata2.weighted_class=int(0)
newdata2.loc[newdata2.pclass==1,'weighted_class']=2
newdata2.loc[newdata2.pclass==2,'weighted_class']=6
newdata2.loc[newdata2.pclass==3,'weighted_class']=12

newdata2['weighted_class'].unique()

newdata2.head()

def splitdata3(data):
    """80/20 train/test split over columns 1-9, including the engineered features."""
    X=data.values[:,1:10]
    Y=data.values[:,0]
    X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=100)
    return X_train,X_test,y_train,y_test

X_train3,X_test3,y_train3,y_test3=splitdata3(newdata2)
X_train3

clf3=generateGiniClassifierObject(X_train3,X_test3,y_train3)

def main():
    """Evaluate the three classifiers on their respective held-out test splits."""
    print('Decision Tree Classifier:\n')
    print('Case 1: Using all attributes:\n')
    y_pred1=findPredictionForY(X_test1,clf1)
    findAccuracyOfModel(y_test1,y_pred1)
    print('--------------------------------\n')
    print('Case 2: Using a subset of attributes:\n')
    y_pred2=findPredictionForY(X_test2,clf2)
    findAccuracyOfModel(y_test2,y_pred2)
    print('--------------------------------\n')
    # NOTE(review): typo "atttributes" in the output string left unchanged here.
    print('Case 3: Using modified atttributes:\n')
    y_pred3=findPredictionForY(X_test3,clf3)
    findAccuracyOfModel(y_test3,y_pred3)
    print('--------------------------------\n')

if __name__=='__main__':
    main()
decision tree/DecisionTree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Pandemic Basic Data (2018-2020) import numpy as np import pandas as pd import os import glob import ntpath def Pandemic_20182020(datasets_address,export_address): path = os.getcwd() csv_files = glob.glob(os.path.join(datasets_address, "*.csv")) j=0 col = columns=['Date','Year','Month','Number_of_Trips','Number_of_Trips_by_Subscribers','Number_of_Trips_by_Customers','Number_of_Bikes','Number_of_Used_Stations','MAN','WOMAN','Unknown_Gender'] final_data = pd.DataFrame(columns = col) for i in csv_files: year = ntpath.basename(i)[0:4] month = ntpath.basename(i)[4:6] data = pd.read_csv(i) final_data.loc[j,'Date'] = year +'-' + month final_data.loc[j,'Year'] = year final_data.loc[j,'Month'] = month final_data.loc[j,'Number_of_Trips'] = len(data) final_data.loc[j,'Number_of_Trips_by_Subscribers'] = len(data[data.loc[:,'usertype']=='Subscriber']) final_data.loc[j,'Number_of_Trips_by_Customers'] = len(data) - len(data[data.loc[:,'usertype']=='Subscriber']) final_data.loc[j,'Number_of_Bikes'] = len(data.loc[:,'bikeid'].unique()) final_data.loc[j,'Number_of_Used_Stations'] = len(np.unique(np.vstack((data.loc[:,'start station id'],data.loc[:,'end station id'])))) final_data.loc[j,'MAN'] = len(data[data.loc[:,'gender']==1]) final_data.loc[j,'WOMAN'] = len(data[data.loc[:,'gender']==2]) final_data.loc[j,'Unknown_Gender'] = len(data) - len(data[data.loc[:,'gender']==1]) - len(data[data.loc[:,'gender']==2]) j+=1 final_data.to_csv(export_address,index=False) Pandemic_20182020('C:/Users/adria/OneDrive/Desktop/CSV_files','C:/Users/adria/OneDrive/Desktop/Pandemic_20182020.csv')
Pouya-tasks/4- Pandemic Necessary Data/Pandemic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyTorch Training and using checkpointing on SageMaker Managed Spot Training # The example here is almost the same as [PyTorch Cifar10 local training](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/pytorch_cnn_cifar10/pytorch_local_mode_cifar10.ipynb). # # This notebook tackles the exact same problem with the same solution, but it has been modified to be able to run using SageMaker Managed Spot infrastructure. SageMaker Managed Spot uses [EC2 Spot Instances](https://aws.amazon.com/ec2/spot/) to run Training at a lower cost. # # Please read the original notebook and try it out to gain an understanding of the ML use-case and how it is being solved. We will not delve into that here in this notebook. # ## Overview # # The **SageMaker Python SDK** helps you deploy your models for training and hosting in optimized, productions ready containers in SageMaker. The SageMaker Python SDK is easy to use, modular, extensible and compatible with TensorFlow, MXNet, PyTorch and Chainer. This tutorial focuses on how to create a convolutional neural network model to train the [Cifar10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) using **PyTorch in local mode**. # # ### Set up the environment # # This notebook was created and tested on a single ml.p2.xlarge notebook instance. # # Let's start by specifying: # # - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. # - The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. 
Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the sagemaker.get_execution_role() with appropriate full IAM role arn string(s). # + import sagemaker import uuid sagemaker_session = sagemaker.Session() print('SageMaker version: ' + sagemaker.__version__) bucket = sagemaker_session.default_bucket() prefix = 'sagemaker/DEMO-pytorch-cnn-cifar10' role = sagemaker.get_execution_role() checkpoint_suffix = str(uuid.uuid4())[:8] checkpoint_s3_path = 's3://{}/checkpoint-{}'.format(bucket, checkpoint_suffix) print('Checkpointing Path: {}'.format(checkpoint_s3_path)) # + import os import subprocess instance_type = 'local' if subprocess.call('nvidia-smi') == 0: ## Set type to GPU if one is present instance_type = 'local_gpu' print("Instance type = " + instance_type) # - # ### Download the Cifar10 dataset # + from utils_cifar import get_train_data_loader, get_test_data_loader, imshow, classes trainloader = get_train_data_loader() testloader = get_test_data_loader() # - # ### Data Preview # + import numpy as np import torchvision, torch # get some random training images dataiter = iter(trainloader) images, labels = dataiter.next() # show images imshow(torchvision.utils.make_grid(images)) # print labels print(' '.join('%9s' % classes[labels[j]] for j in range(4))) # - # ### Upload the data # We use the ```sagemaker.Session.upload_data``` function to upload our datasets to an S3 location. The return value inputs identifies the location -- we will use this later when we start the training job. inputs = sagemaker_session.upload_data(path='data', bucket=bucket, key_prefix='data/cifar10') # # Construct a script for training # Here is the full code for the network model: # !pygmentize source_dir/cifar10.py # ## Script Functions # # SageMaker invokes the main function defined within your training script for training. 
When deploying your trained model to an endpoint, the model_fn() is called to determine how to load your trained model. The model_fn() along with a few other functions list below are called to enable predictions on SageMaker. # # ### [Predicting Functions](https://github.com/aws/sagemaker-pytorch-containers/blob/master/src/sagemaker_pytorch_container/serving.py) # * model_fn(model_dir) - loads your model. # * input_fn(serialized_input_data, content_type) - deserializes predictions to predict_fn. # * output_fn(prediction_output, accept) - serializes predictions from predict_fn. # * predict_fn(input_data, model) - calls a model on data deserialized in input_fn. # # The model_fn() is the only function that doesn't have a default implementation and is required by the user for using PyTorch on SageMaker. # ## Create a training job using the sagemaker.PyTorch estimator # # The `PyTorch` class allows us to run our training function on SageMaker. We need to configure it with our training script, an IAM role, the number of training instances, and the training instance type. For local training with GPU, we could set this to "local_gpu". In this case, `instance_type` was set above based on your whether you're running a GPU instance. # # After we've constructed our `PyTorch` object, we fit it using the data we uploaded to S3. Even though we're in local mode, using S3 as our data source makes sense because it maintains consistency with how SageMaker's distributed, managed training ingests data. # # + from sagemaker.pytorch import PyTorch hyperparameters = {'epochs': 2} cifar10_estimator = PyTorch(entry_point='source_dir/cifar10.py', role=role, framework_version='1.7.1', py_version='py3', hyperparameters=hyperparameters, instance_count=1, instance_type=instance_type) cifar10_estimator.fit(inputs) # - # ## Run a baseline training job on SageMaker # # Now we run training jobs on SageMaker, starting with our baseline training job. 
# # Once again, we create a PyTorch estimator, with a couple key modfications from last time: # # * `instance_type`: the instance type for training. We set this to `ml.p3.2xlarge` because we are training on SageMaker now. For a list of available instance types, see [the AWS documentation](https://aws.amazon.com/sagemaker/pricing/instance-types). # * `metric_definitions`: the metrics (defined above) that we want sent to CloudWatch. # + from sagemaker.pytorch import PyTorch hyperparameters = {'epochs': 10} cifar10_estimator = PyTorch(entry_point='source_dir/cifar10.py', role=role, framework_version='1.7.1', py_version='py3', hyperparameters=hyperparameters, instance_count=1, instance_type='ml.p3.2xlarge', base_job_name='cifar10-pytorch') cifar10_estimator.fit(inputs) # - # # Managed Spot Training with a PyTorch Estimator # # For Managed Spot Training using a PyTorch Estimator we need to configure two things: # 1. Enable the `train_use_spot_instances` constructor arg - a simple self-explanatory boolean. # 2. Set the `train_max_wait` constructor arg - this is an int arg representing the amount of time you are willing to wait for Spot infrastructure to become available. Some instance types are harder to get at Spot prices and you may have to wait longer. You are not charged for time spent waiting for Spot infrastructure to become available, you're only charged for actual compute time spent once Spot instances have been successfully procured. # # Normally, a third requirement would also be necessary here - modifying your code to ensure a regular checkpointing cadence - however, PyTorch Estimators already do this, so no changes are necessary here. Checkpointing is highly recommended for Manage Spot Training jobs due to the fact that Spot instances can be interrupted with short notice and using checkpoints to resume from the last interruption ensures you don't lose any progress made before the interruption. 
# # Feel free to toggle the `use_spot_instances` variable to see the effect of running the same job using regular (a.k.a. "On Demand") infrastructure. # # Note that `max_wait` can be set if and only if `use_spot_instances` is enabled and **must** be greater than or equal to `max_run`. use_spot_instances = True max_run=600 max_wait = 1200 if use_spot_instances else None # ## Simulating Spot interruption after 5 epochs # # Our training job should run on 10 epochs. # # However, we will simulate a situation that after 5 epochs a spot interruption occurred. # # The goal is that the checkpointing data will be copied to S3, so when there is a spot capacity available again, the training job can resume from the 6th epoch. # # Note the `checkpoint_s3_uri` variable which stores the S3 URI in which to persist checkpoints that the algorithm persists (if any) during training. # # The `debugger_hook_config` parameter must be set to `False` to enable checkpoints to be copied to S3 successfully. # + hyperparameters = {'epochs': 5} spot_estimator = PyTorch(entry_point='source_dir/cifar10.py', role=role, framework_version='1.7.1', py_version='py3', instance_count=1, instance_type='ml.p3.2xlarge', base_job_name='cifar10-pytorch-spot-1', hyperparameters=hyperparameters, checkpoint_s3_uri=checkpoint_s3_path, debugger_hook_config=False, use_spot_instances=use_spot_instances, max_run=max_run, max_wait=max_wait) spot_estimator.fit(inputs) # - # ### Savings # Towards the end of the job you should see two lines of output printed: # # - `Training seconds: X` : This is the actual compute-time your training job spent # - `Billable seconds: Y` : This is the time you will be billed for after Spot discounting is applied. # # If you enabled the `use_spot_instances` var then you should see a notable difference between `X` and `Y` signifying the cost savings you will get for having chosen Managed Spot Training. 
# This should be reflected in an additional line:
# - `Managed Spot Training savings: (1-Y/X)*100 %`

# ### View the job training Checkpoint configuration
# We can now view the Checkpoint configuration from the training job directly in the SageMaker console.
#
# Log into the [SageMaker console](https://console.aws.amazon.com/sagemaker/home), choose the latest training job, and scroll down to the Checkpoint configuration section.
#
# Choose the S3 output path link and you'll be directed to the S3 bucket where the checkpointing data is saved.
#
# You can see there is one file there:
#
# ```python
# checkpoint.pth
# ```
#
# This is the checkpoint file that contains the epoch, model state dict, optimizer state dict, and loss.

# ### Continue training after Spot capacity is resumed
#
# Now we simulate a situation where Spot capacity is resumed.
#
# We will start a training job again, this time with 10 epochs.
#
# What we expect is that the training job will start from the 6th epoch.
#
# This is done when the training job starts. It checks the checkpoint S3 location for checkpoint data. If any is found, it is copied to `/opt/ml/checkpoints` on the training container.
# # In the code you can see the function to load the checkpoints data: # # ```python # def _load_checkpoint(model, optimizer, args): # print("--------------------------------------------") # print("Checkpoint file found!") # print("Loading Checkpoint From: {}".format(args.checkpoint_path + '/checkpoint.pth')) # checkpoint = torch.load(args.checkpoint_path + '/checkpoint.pth') # model.load_state_dict(checkpoint['model_state_dict']) # optimizer.load_state_dict(checkpoint['optimizer_state_dict']) # epoch_number = checkpoint['epoch'] # loss = checkpoint['loss'] # print("Checkpoint File Loaded - epoch_number: {} - loss: {}".format(epoch_number, loss)) # print('Resuming training from epoch: {}'.format(epoch_number+1)) # print("--------------------------------------------") # return model, optimizer, epoch_number # ``` # # + hyperparameters = {'epochs': 10} spot_estimator = PyTorch(entry_point='source_dir/cifar10.py', role=role, framework_version='1.7.1', py_version='py3', instance_count=1, instance_type='ml.p3.2xlarge', base_job_name='cifar10-pytorch-spot-2', hyperparameters=hyperparameters, checkpoint_s3_uri=checkpoint_s3_path, debugger_hook_config=False, use_spot_instances=use_spot_instances, max_run=max_run, max_wait=max_wait) spot_estimator.fit(inputs) # - # ### Analyze training job logs # # Analyzing the training job logs, we can see that now, the training job starts from the 6th epoch. # # We can see the output of `_load_checkpoint` function: # # ``` # -------------------------------------------- # Checkpoint file found! # Loading Checkpoint From: /opt/ml/checkpoints/checkpoint.pth # Checkpoint File Loaded - epoch_number: 5 - loss: 0.8455273509025574 # Resuming training from epoch: 6 # -------------------------------------------- # ``` # ### View the job training Checkpoint configuration after job completed 10 epochs # # We can now view the Checkpoint configuration from the training job directly in the SageMaker console. 
# # Log into the [SageMaker console](https://console.aws.amazon.com/sagemaker/home), choose the latest training job, and scroll down to the Checkpoint configuration section. # # Choose the S3 output path link and you'll be directed to the S3 bucket were checkpointing data is saved. # # You can see there is still that one file there: # # ```python # checkpoint.pth # ``` # # You'll be able to see that the date of the checkpoint file was updated to the time of the 2nd Spot training job. # # Deploy the trained model to prepare for predictions # # The deploy() method creates an endpoint which serves prediction requests in real-time. # + from sagemaker.pytorch import PyTorchModel predictor = spot_estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge') # - # # Invoking the endpoint # + # get some test images dataiter = iter(testloader) images, labels = dataiter.next() # print images imshow(torchvision.utils.make_grid(images)) print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4))) outputs = predictor.predict(images.numpy()) _, predicted = torch.max(torch.from_numpy(np.array(outputs)), 1) print('Predicted: ', ' '.join('%4s' % classes[predicted[j]] for j in range(4))) # - # # Clean-up # # To avoid incurring extra charges to your AWS account, let's delete the endpoint we created: predictor.delete_endpoint()
Session09_AWSSagemakerAndLargeScaleModelTraining/pytorch_managed_spot_training_checkpointing/pytorch_managed_spot_training_checkpointing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MrLeeking21/CPEN-21A-ECE-2-3/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="zrQhqMg_zNmK"
# ##"Midterm Exam"

# + [markdown] id="GWAzt2x0zWOW"
# #####Problem Statement 1

# Each cell below prints one line of the student profile. Note the implicit
# string-literal concatenation: "Full name:"" " is joined into "Full name: "
# before `+x` appends the value. The variable `x` is reused by every cell.
# NOTE(review): "<NAME>" looks like a redaction placeholder — confirm.

# + id="UrWtjIw6zkLW" colab={"base_uri": "https://localhost:8080/"} outputId="259b85c7-7bbc-4c5c-bf20-3cc55f28eed4"
x="<NAME>"
print("Full name:"" "+x)

# + id="eglYcMrhzt8-" colab={"base_uri": "https://localhost:8080/"} outputId="d3bca56f-793b-45df-9395-c3dda59645cf"
x="202016619"
print("Student number:"" "+x)

# + id="VlrADIadzue6" colab={"base_uri": "https://localhost:8080/"} outputId="35a1fc0c-d8d3-4a22-e32d-5f70e81db9f3"
x="21"
print("Age:"" "+x)

# + id="WhniZIX10hc2" colab={"base_uri": "https://localhost:8080/"} outputId="f55e9986-942b-4d0c-ef8a-754419ad223d"
x="March 23,2000"
print("Birthday:"" "+x)

# + id="cxRDVqm70p1a" colab={"base_uri": "https://localhost:8080/"} outputId="a14c686a-613b-4e73-db66-cae0b73baa40"
x="Southville Phase 1-A,Brgy.Inocencio,Trece Martires Cavite"
print("Address:"" "+x)

# + id="qy3KJ7Jv1Fic" colab={"base_uri": "https://localhost:8080/"} outputId="02ad0440-b3be-47b7-db64-4e27955f8973"
x="BSECE 2-3"
print("Course:"" "+x)

# + id="K8YXOjj41R1-" colab={"base_uri": "https://localhost:8080/"} outputId="4aa8fc8d-2965-4668-ba51-a7001ee62ddb"
x="82"
print("Last Sem GWA:"" "+x)

# + [markdown] id="x6Szo1cOzcLL"
# #####Problem Statement 2

# Boolean/comparison drill evaluated with n = 4 and answ = "Y".
# + colab={"base_uri": "https://localhost:8080/"} id="H-zE6Ryo8H-X" outputId="fee61c72-0f9a-46d3-8dc3-f9edb6a4dc67"
n = 4
answ = "Y"
print(2<n and n<6)
print(2<n or n==6)
print(not(2<n or n==6))
print(not(n<6))
# NOTE(review): the next two statements are identical (likely a copy-paste
# in the original notebook); both are kept to preserve the recorded output.
print(answ=="Y" and answ=="y")
print(answ=="Y" and answ=="y")
print(not(answ=="y"))
print((2<n and n==5 + 1) or answ=="No")
print((n==2 and n==7) or answ=="Y")
print(n==2 and (n==7 or answ=="Y"))

# + [markdown] id="O9jYQVtu8guw"
# #####Problem Statement 3

# Arithmetic-operator drill (/, //, %, **, precedence) on signed integers.
# + id="AFqE0Bk3_X8G" colab={"base_uri": "https://localhost:8080/"} outputId="befa9f63-5232-407d-b3a8-02a9d5156dc7"
x = 2
y = -3
w = 7
z = -10
print(x/y)
print(w/y/x)
print(z/y%x)
print(x%-y*w)
print(x%y)
print(z%w-y/x*5+5)
print(9-x%(2+y))
print(z//w)
print((2+y)**2)
print(w/x*2)
Midterm_Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pyspark import pandas as pd from pyspark.sql import SparkSession pd.set_option('display.max_columns',500) pd.set_option('display.width',1000) sp = SparkSession.builder.appName("read-and-write").getOrCreate() path_file = 'hdfs://namenode1.live.fiinlab.local:9000/staging/datos_input.csv' df_load = sp.read.csv(path_file, header=True) df_load.toPandas()
testing/jaak-it_demo/08_JaakIt_hdfs_read.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Configuration notebook for an active-learning AutoML run (ResNet-18 on CIFAR10).
import torch
import os, sys
import numpy as np

# +
# Work from the repository root so the relative data/config paths below resolve.
HOME_DIRECTORY=os.path.abspath(os.path.join(os.getcwd(), os.pardir))
os.chdir(HOME_DIRECTORY)
# -

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # sync ids with nvidia-smi
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
os.environ["MKL_SERVICE_FORCE_INTEL"]="1"

# +
# script params
port=5219
sampling_fn="random"
lSet_partition=1
base_seed=1
num_GPU=1
al_iterations=4
num_aml_trials=5 #50
budget_size=5000 #2500

dataset="CIFAR10"
init_partition=10
step_partition=10
clf_epochs=5 #150
num_classes=10

swa_lr=5e-4
swa_freq=50
swa_epochs=5 #50

log_iter=40

#Data arguments
train_dir=f"{HOME_DIRECTORY}/data/{dataset}/train-{dataset}/"
test_dir=f"{HOME_DIRECTORY}/data/{dataset}/test-{dataset}/"
lSetPath=f"{HOME_DIRECTORY}/data/{dataset}/partition_{lSet_partition}/lSet_{dataset}.npy"
uSetPath=f"{HOME_DIRECTORY}/data/{dataset}/partition_{lSet_partition}/uSet_{dataset}.npy"
valSetPath=f"{HOME_DIRECTORY}/data/{dataset}/partition_{lSet_partition}/valSet_{dataset}.npy"

out_dir=f"{HOME_DIRECTORY}/sample_results_aml_rn18"

# BUGFIX: these were bare names (`resnet_style`, `resnet_2`), which raises
# NameError at runtime — they are meant to be the literal path components
# substituted into the --cfg template
# `configs/$dataset/$model_style/$model_type/R-18_4gpu_unreg.yaml` below.
model_style="resnet_style"
model_type="resnet_2"
model_depth=18
# -

# !python3 $HOME_DIRECTORY/tools/main_aml.py --n_GPU $num_GPU \
# --port $port --sampling_fn $sampling_fn --lSet_partition $lSet_partition \
# --seed_id $base_seed \
# --init_partition $init_partition --step_partition $step_partition \
# --dataset $dataset --budget_size $budget_size \
# --out_dir $out_dir \
# --num_aml_trials $num_aml_trials --num_classes $num_classes \
# --al_max_iter $al_iterations \
# --model_type $model_type --model_depth $model_depth \
# --clf_epochs $clf_epochs \
# --eval_period 1 --checkpoint_period 1 \
# --lSetPath $lSetPath --uSetPath $uSetPath --valSetPath $valSetPath \
# --train_dir $train_dir --test_dir $test_dir \
# --dropout_iterations 25 \
# --cfg configs/$dataset/$model_style/$model_type/R-18_4gpu_unreg.yaml \
# --vaal_z_dim 32 --vaal_vae_bs 64 --vaal_epochs 2 \
# --vaal_vae_lr 5e-4 --vaal_disc_lr 5e-4 --vaal_beta 1.0 --vaal_adv_param 1.0
notebooks/.ipynb_checkpoints/automl_rsb_resnet18-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # PyCon 2019: Data Science Best Practices with pandas ([video](https://www.youtube.com/watch?v=dPwLlJkSHLo))
#
# ### GitHub repository: https://github.com/justmarkham/pycon-2019-tutorial
#
# ### Instructor: <NAME>
#
# - Website: https://www.dataschool.io
# - YouTube: https://www.youtube.com/dataschool
# - Patreon: https://www.patreon.com/dataschool
# - Twitter: https://twitter.com/justmarkham
# - GitHub: https://github.com/justmarkham

import jovian

# Snapshot this notebook (and the conda environment) to jovian.ml.
jovian.commit(secret=False, nb_filename=None, files=[], capture_env=True,
              env_type='conda', notebook_id=259, create_new=None,
              artifacts=[], do_git_commit=True,
              git_commit_msg='jovian commit')

# BUG FIX: this was a bare shell command pasted into a code cell, which is a
# Python SyntaxError. Run it via the "!" shell escape (or from a terminal).
# !git add pycon2019_official py/pycon2019_official.ipynb

# ## 1. Introduction to the TED Talks dataset
#
# https://www.kaggle.com/rounakbanik/ted-talks

import pandas as pd
pd.__version__

import matplotlib.pyplot as plt
# %matplotlib inline

# Load the TED talks dataset (expects ted.csv in the working directory).
ted = pd.read_csv('ted.csv')

# each row represents a single talk
ted.head()

# rows, columns
ted.shape

# object columns are usually strings, but can also be arbitrary Python objects (lists, dictionaries)
ted.dtypes

# count the number of missing values in each column
ted.isna().sum()

# ## 2. Which talks provoke the most online discussion?
# sort by the number of first-level comments, though this is biased in favor of older talks ted.sort_values('comments').tail() # correct for this bias by calculating the number of comments per view ted['comments_per_view'] = ted.comments / ted.views # interpretation: for every view of the same-sex marriage talk, there are 0.002 comments ted.sort_values('comments_per_view').tail() # make this more interpretable by inverting the calculation ted['views_per_comment'] = ted.views / ted.comments # interpretation: 1 out of every 450 people leave a comment ted.sort_values('views_per_comment').head() # Lessons: # # 1. Consider the limitations and biases of your data when analyzing it # 2. Make your results understandable # ## 3. Visualize the distribution of comments # line plot is not appropriate here (use it to measure something over time) ted.comments.plot() # histogram shows the frequency distribution of a single numeric variable ted.comments.plot(kind='hist') # modify the plot to be more informative ted[ted.comments < 1000].comments.plot(kind='hist') # check how many observations we removed from the plot ted[ted.comments >= 1000].shape # can also write this using the query method ted.query('comments < 1000').comments.plot(kind='hist') # can also write this using the loc accessor ted.loc[ted.comments < 1000, 'comments'].plot(kind='hist') # increase the number of bins to see more detail ted.loc[ted.comments < 1000, 'comments'].plot(kind='hist', bins=20) # boxplot can also show distributions, but it's far less useful for concentrated distributions because of outliers ted.loc[ted.comments < 1000, 'comments'].plot(kind='box') # Lessons: # # 1. Choose your plot type based on the question you are answering and the data type(s) you are working with # 2. Use pandas one-liners to iterate through plots quickly # 3. Try modifying the plot defaults # 4. Creating plots involves decision-making # ## 4. 
# Plot the number of talks that took place each year
#
# Bonus exercise: calculate the average delay between filming and publishing

# event column does not always include the year
ted.event.sample(10)

# dataset documentation for film_date says "Unix timestamp of the filming"
ted.film_date.head()

# results don't look right
pd.to_datetime(ted.film_date).head()

# [pandas documentation for `to_datetime`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html)

# now the results look right (unit='s' interprets the integers as Unix seconds)
pd.to_datetime(ted.film_date, unit='s').head()

ted['film_datetime'] = pd.to_datetime(ted.film_date, unit='s')

# verify that event name matches film_datetime for a random sample
ted[['event', 'film_datetime']].sample(5)

# new column uses the datetime data type (this was an automatic conversion)
ted.dtypes

# datetime columns have convenient attributes under the dt namespace
ted.film_datetime.dt.year.head()

# similar to string methods under the str namespace
ted.event.str.lower().head()

# count the number of talks each year using value_counts()
ted.film_datetime.dt.year.value_counts()

# points are plotted and connected in the order you give them to pandas
ted.film_datetime.dt.year.value_counts().plot()

# need to sort the index before plotting
ted.film_datetime.dt.year.value_counts().sort_index().plot()

# we only have partial data for 2017
ted.film_datetime.max()

# Lessons:
#
# 1. Read the documentation
# 2. Use the datetime data type for dates and times
# 3. Check your work as you go
# 4. Consider excluding data if it might not be relevant

# ## 5. What were the "best" events in TED history to attend?
# count the number of talks (great if you value variety, but they may not be great talks) ted.event.value_counts().head() # use views as a proxy for "quality of talk" ted.groupby('event').views.mean().head() # find the largest values, but we don't know how many talks are being averaged ted.groupby('event').views.mean().sort_values().tail() # show the number of talks along with the mean (events with the highest means had only 1 or 2 talks) ted.groupby('event').views.agg(['count', 'mean']).sort_values('mean').tail() # calculate the total views per event ted.groupby('event').views.agg(['count', 'mean', 'sum']).sort_values('sum').tail() # Lessons: # # 1. Think creatively for how you can use the data you have to answer your question # 2. Watch out for small sample sizes # ## 6. Unpack the ratings data # previously, users could tag talks on the TED website (funny, inspiring, confusing, etc.) ted.ratings.head() # two ways to examine the ratings data for the first talk ted.loc[0, 'ratings'] ted.ratings[0] # this is a string not a list type(ted.ratings[0]) # convert this into something useful using Python's ast module (Abstract Syntax Tree) import ast # literal_eval() allows you to evaluate a string containing a Python literal or container ast.literal_eval('[1, 2, 3]') # if you have a string representation of something, you can retrieve what it actually represents type(ast.literal_eval('[1, 2, 3]')) # unpack the ratings data for the first talk ast.literal_eval(ted.ratings[0]) # now we have a list (of dictionaries) type(ast.literal_eval(ted.ratings[0])) # define a function to convert an element in the ratings Series from string to list def str_to_list(ratings_str): return ast.literal_eval(ratings_str) # test the function str_to_list(ted.ratings[0]) # Series apply method applies a function to every element in a Series and returns a Series ted.ratings.apply(str_to_list).head() # lambda is a shorter alternative ted.ratings.apply(lambda x: ast.literal_eval(x)).head() # an even 
# shorter alternative is to apply the function directly (without lambda)
ted.ratings.apply(ast.literal_eval).head()

ted['ratings_list'] = ted.ratings.apply(lambda x: ast.literal_eval(x))

# check that the new Series looks as expected
ted.ratings_list[0]

# each element in the Series is a list
type(ted.ratings_list[0])

# data type of the new Series is object
ted.ratings_list.dtype

# object is not just for strings
ted.dtypes

# Lessons:
#
# 1. Pay attention to data types in pandas
# 2. Use apply any time it is necessary

# ## 7. Count the total number of ratings received by each talk
#
# Bonus exercises:
#
# - for each talk, calculate the percentage of ratings that were negative
# - for each talk, calculate the average number of ratings it received per day since it was published

# expected result (for each talk) is sum of count
ted.ratings_list[0]

# NOTE: get_num_ratings is deliberately redefined three times below — this is
# a teaching device showing iterative refinement; only the final version is used.

# start by building a simple function
def get_num_ratings(list_of_dicts):
    return list_of_dicts[0]

# pass it a list, and it returns the first element in the list, which is a dictionary
get_num_ratings(ted.ratings_list[0])

# modify the function to return the vote count
def get_num_ratings(list_of_dicts):
    return list_of_dicts[0]['count']

# pass it a list, and it returns a value from the first dictionary in the list
get_num_ratings(ted.ratings_list[0])

# modify the function to get the sum of count
def get_num_ratings(list_of_dicts):
    """Return the total vote count across all rating dicts for one talk."""
    num = 0
    for d in list_of_dicts:
        num = num + d['count']
    return num

# looks about right
get_num_ratings(ted.ratings_list[0])

# check with another record
ted.ratings_list[1]

# looks about right
get_num_ratings(ted.ratings_list[1])

# apply it to every element in the Series
ted.ratings_list.apply(get_num_ratings).head()

# another alternative is to use a generator expression
sum((d['count'] for d in ted.ratings_list[0]))

# use lambda to apply this method
ted.ratings_list.apply(lambda x: sum((d['count'] for d in x))).head()

# another alternative is to use pd.DataFrame()
pd.DataFrame(ted.ratings_list[0])['count'].sum() # use lambda to apply this method ted.ratings_list.apply(lambda x: pd.DataFrame(x)['count'].sum()).head() ted['num_ratings'] = ted.ratings_list.apply(get_num_ratings) # do one more check ted.num_ratings.describe() # Lessons: # # 1. Write your code in small chunks, and check your work as you go # 2. Lambda is best for simple functions # ## 8. Which occupations deliver the funniest TED talks on average? # # Bonus exercises: # # - for each talk, calculate the most frequent rating # - for each talk, clean the occupation data so that there's only one occupation per talk # ### Step 1: Count the number of funny ratings # "Funny" is not always the first dictionary in the list ted.ratings_list.head() # check ratings (not ratings_list) to see if "Funny" is always a rating type ted.ratings.str.contains('Funny').value_counts() # write a custom function def get_funny_ratings(list_of_dicts): for d in list_of_dicts: if d['name'] == 'Funny': return d['count'] # examine a record in which "Funny" is not the first dictionary ted.ratings_list[3] # check that the function works get_funny_ratings(ted.ratings_list[3]) # apply it to every element in the Series ted['funny_ratings'] = ted.ratings_list.apply(get_funny_ratings) ted.funny_ratings.head() # check for missing values ted.funny_ratings.isna().sum() # ### Step 2: Calculate the percentage of ratings that are funny ted['funny_rate'] = ted.funny_ratings / ted.num_ratings # "gut check" that this calculation makes sense by examining the occupations of the funniest talks ted.sort_values('funny_rate').speaker_occupation.tail(20) # examine the occupations of the least funny talks ted.sort_values('funny_rate').speaker_occupation.head(20) # ### Step 3: Analyze the funny rate by occupation # calculate the mean funny rate for each occupation ted.groupby('speaker_occupation').funny_rate.mean().sort_values().tail() # however, most of the occupations have a sample size of 1 
ted.speaker_occupation.describe() # ### Step 4: Focus on occupations that are well-represented in the data # count how many times each occupation appears ted.speaker_occupation.value_counts() # value_counts() outputs a pandas Series, thus we can use pandas to manipulate the output occupation_counts = ted.speaker_occupation.value_counts() type(occupation_counts) # show occupations which appear at least 5 times occupation_counts[occupation_counts >= 5] # save the index of this Series top_occupations = occupation_counts[occupation_counts >= 5].index top_occupations # ### Step 5: Re-analyze the funny rate by occupation (for top occupations only) # filter DataFrame to include only those occupations ted_top_occupations = ted[ted.speaker_occupation.isin(top_occupations)] ted_top_occupations.shape # redo the previous groupby ted_top_occupations.groupby('speaker_occupation').funny_rate.mean().sort_values() # Lessons: # # 1. Check your assumptions about your data # 2. Check whether your results are reasonable # 3. Take advantage of the fact that pandas operations often output a DataFrame or a Series # 4. Watch out for small sample sizes # 5. Consider the impact of missing data # 6. Data scientists are hilarious
PyCon_2019.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Operations and Expressions in Python (Colab teaching notebook).
# Demonstrates boolean, arithmetic, bitwise, assignment, logical and
# identity operators with small printed examples.
#
# <a href="https://colab.research.google.com/github/therealsun10/Elective-Lab/blob/main/Operations_and_Expressions_in_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# ## Boolean Operators
x = 10
y = 9
x > y    # True
x == y   # False
x < y    # False (only the last expression is echoed in a notebook cell)

# ## Bool Function
print(bool("GJ"))   # non-empty string -> True
print(bool(17))     # non-zero int    -> True
print(bool(None))   # None            -> False
print(bool())       # no argument     -> False
print(bool([]))     # empty list      -> False
print(bool(0))      # zero            -> False

# ## Defining Function
def myFunction():
    return False

print(myFunction())

def myFunction():
    return False

if myFunction():
    print("Yes")
else:
    print("No")

# ## Application 1
print(10 > 9)
a = 6
b = 7
print(a == b)
print(a != a)

# ## Arithmetic Operators
print(a + b)    # addition
print(a - b)    # subtraction
print(a * b)    # multiplication
print(a ** b)   # exponentiation
print(a / b)    # true division
print(10 % 5)   # modulo

# ## Bitwise Operators
c = 60
d = 13
print(c & d)    # AND
print(c | d)    # OR
print(c << 1)   # left shift
print(c << 2)
print(c >> 1)   # right shift

# ## Assignment Operators
## c+=3 same as c = c+3
c += 3
c %= 3
print(c)

# ## Logical Operators
c = True
d = True
(c and d)

# ## Identity
c is d
c is not d

# ## Application 2
e = 10
f = 5
# Implement the operations +, //, bit shift right >> twice
# FIX: the exercise cell was left unimplemented; completed below.
sum_ef = e + f         # 15
floordiv_ef = e // f   # 2
rshift_ef = e >> 2     # shift right twice: 10 -> 5 -> 2
print(sum_ef, floordiv_ef, rshift_ef)
Operations_and_Expressions_in_Python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Download Images from tweet ids
#
# This notebook aims to download images using the api, based on the tweets ids
# that were collected with DMI TCAT. Tweets are fetched 100 at a time from the
# Twitter v2 API; each tweet is saved as JSON and attached photos are stored
# under OUTPUT_FOLDER/media named by the SHA-1 of their content (dedup).

import requests
import pandas as pd
import json
import os
import tqdm
import time
import hashlib
import concurrent

# Fields and expansions requested for every tweet lookup.
params_search = {
    "tweet.fields": "public_metrics,referenced_tweets,possibly_sensitive,created_at,source,reply_settings,withheld",
    "expansions": "author_id,in_reply_to_user_id,attachments.media_keys",
    "media.fields": "url,public_metrics,type,alt_text",
    "user.fields": "id,verified,name"
}

ROOT_FOLDER = "/home/tyra/Documents/CERES/resources"
OUTPUT_FOLDER = "/home/tyra/Documents/CERES/PMA/MPT"
CREDENTIALS_FILES = r"/home/tyra/Documents/CERES/credentials_pro.json"


def generate_token():
    """Return the HTTP Authorization value built from the credentials file."""
    with open(CREDENTIALS_FILES, 'r') as f:
        return f"Bearer {json.load(f)['token']}"


s = requests.Session()
s.headers.update({"Authorization": generate_token()})

# open the json containing all the ids that we want to fetch
with open(os.path.join(ROOT_FOLDER, 'pma.json'), 'r') as f:
    ids = json.load(f)
ids = [str(i) for i in ids]
len(ids)


def download_media(media_key=None, url=None, **kwargs):
    """Download one media file and record media_key -> file name in sha1.json.

    The file is stored as OUTPUT_FOLDER/media/<sha1>.<ext>, so identical
    content downloaded twice reuses a single file on disk.

    Raises
    ------
    ValueError : if media_key/url are missing or the download fails.
    """
    if not media_key or not url:
        raise ValueError("Missing field when trying to save media")
    file_type = url.split('.')[-1]

    # download the file
    try:
        res = requests.get(url)
    except requests.RequestException:
        raise ValueError(f"There was an error when downloading the media with following url: {url}, please check your connection or url")

    # calculate signature of content; equal signatures collapse to one file
    buffer = res.content
    signature = hashlib.sha1(buffer).hexdigest()
    file_name = f"{signature}.{file_type}"
    with open(os.path.join(OUTPUT_FOLDER, 'media', file_name), 'wb') as f:
        f.write(res.content)

    # update the media_key -> file_name index
    with open(os.path.join(OUTPUT_FOLDER, 'sha1.json'), 'r') as f:
        sha1 = json.load(f)
    sha1[media_key] = file_name
    with open(os.path.join(OUTPUT_FOLDER, 'sha1.json'), 'w') as f:
        json.dump(sha1, f)


# Save all the tweets that were already fetched so in case of error, we can
# just continue from where we stopped.

# +
try:
    with open(os.path.join(OUTPUT_FOLDER, 'fetched.json'), 'r') as f:
        fetched = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
    # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # hid unrelated bugs; only "no/invalid checkpoint" should reset progress.
    fetched = []
already_parsed = len(fetched)
nb_calls = round((len(ids) - already_parsed) / 100)

already_parsed
# -

# FIX: `missing_media` was referenced by the summary cell at the bottom but
# only ever assigned inside commented-out code, raising NameError. Initialise
# it here; the (disabled) re-check code below would populate it.
missing_media = []

# +
for occ in tqdm.tqdm(range(nb_calls)):
    # fetch ids 100 by 100 (the v2 lookup endpoint maximum)
    ids_to_fetch = ids[already_parsed + occ * 100: already_parsed + occ * 100 + 100]
    res = s.get('https://api.twitter.com/2/tweets?ids=' + ','.join(ids_to_fetch), params=params_search)

    # we made too many calls, lets wait 15min (the rate-limit window)
    if 'data' not in res.json():
        print(res.json())
        print('making a break')
        time.sleep(900)
        res = s.get('https://api.twitter.com/2/tweets?ids=' + ','.join(ids_to_fetch), params=params_search)

    # get results
    tweets = res.json()['data']
    media = res.json()['includes'].get('media', None)
    for tweet in tweets:
        # save all medium info in the tweet json (inline each media object in
        # place of its media_key so the saved JSON is self-contained)
        for index, key in enumerate(tweet.get('attachments', {}).get('media_keys', [])):
            for medium in media:
                if medium['media_key'] == key:
                    tweet['attachments']['media_keys'][index] = medium
                    break
        try:
            with open(os.path.join(OUTPUT_FOLDER, f"{tweet['id']}.json"), 'w') as f:
                json.dump(tweet, f)
        except Exception as e:
            print(e)

    if media:
        for medium in media:
            if medium['type'] == 'photo':
                download_media(medium['media_key'], medium['url'])

    # write fetch only if everything was written
    fetched = [*fetched, *ids_to_fetch]
    with open(os.path.join(OUTPUT_FOLDER, 'fetched.json'), 'w') as f:
        json.dump(fetched, f)
# -

# Summary of media that could not be downloaded (empty unless the disabled
# re-check pass above is re-enabled to populate missing_media).
missing_videos = [m for m in missing_media if m['type'] == 'video']
missing_gif = [m for m in missing_media if m['type'] == 'animated_gif']
missing_photos = [m for m in missing_media if m['type'] == 'photo']
print(len(missing_videos), len(missing_gif), len(missing_photos))
Download Twitter Images.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# GBM / BurstCube GRB sensitivity study: imports, interpolation helpers and
# catalog loading.
import numpy as np
import matplotlib.pylab as plot
from astropy.io import ascii, fits
from scipy import interpolate
import grb_catalogs_copy
from BurstCube.LocSim.Detector import *
from BurstCube.LocSim.Spacecraft import *
from astropy.coordinates import SkyCoord
from astropy import units as u
from scipy.optimize import curve_fit
import math
from astropy.table import Table
import pandas as pd


## code to use when reading in GBM effective area in order to get data into the desired format
def getGBMdata(gbmfile=None):
    """Reads the GBM NaI effective area file and returns a numpy array
    with two columns ``energy`` and ``aeff``.

    Parameters
    ----------
    gbmfile : string
       Name of file that contains the GBM data.

    Returns
    ----------
    gbmdata : array
       numpy array with two columns ``energy`` and ``aeff``
    """
    return np.genfromtxt(gbmfile, skip_header=2, names=('energy', 'aeff'))


# +
## bit of useful code for interpolating in log space
def loginterpol(x, y, x1):
    """Interpolate y(x) linearly in log-log space and evaluate at x1.

    Extrapolates outside the sampled range (fill_value="extrapolate").
    """
    f = interpolate.interp1d(np.log10(x), np.log10(y), bounds_error=False,
                             fill_value="extrapolate", kind='linear')
    y1 = 10**f(np.log10(x1))
    return y1


def loginterpol2d(x, y, z, x1, y1):
    """2-D interpolation of z over (x, y) in log10 space, evaluated at (x1, y1).

    Zeros in z are replaced by 1 so that log10 is defined.
    BUG FIX: the interpolated surface ``z1`` was computed but never returned,
    so the function always returned None.
    """
    wz = np.where(z == 0)[0]
    zz = z
    zz[wz] = 1.
    f = interpolate.interp2d(x, y, np.log10(zz), bounds_error=False,
                             fill_value="extrapolate", kind='linear')
    z1 = 10**f(x1, y1)
    return z1
# -

# +
# read in GBM Trigger Catalog
trigfit = fits.open('gbmtrigcat.fits')
trig = trigfit[1].data

# read in the GBM burst catalog (local copy)
gbmfit = fits.open('gbmgrbcat_copy.fits')
gbm = gbmfit[1].data

# select the GRBs I am interested in.
# I can connect these together into one statement
grb1 = gbm['Name'] == 'GRB120817168'
grbs1 = gbm[grb1]
#gbm[grb1]
grb2 = gbm['Name'] == 'GRB170817529'
grb1708 = gbm[grb2]
#grbs = np.concatenate([gbm[grb1],gbm[grb2]])
#print(grbs)
# -

## generate random positions on the sky with equal area probability
def random_sky(n=1):
    """Draw n (ra, dec) pairs uniformly distributed over the sphere (degrees)."""
    u=np.random.rand(n)
    v=np.random.rand(n)
    phi=2*np.pi*u
    theta=np.arccos(2*v-1.)
    dec=-np.degrees(theta-np.pi/2.)
    ra=np.degrees(np.pi*2-phi)
    return ra,dec

## read in the GBM Aeff
aeff_gbm = getGBMdata('/home/alyson/NASA/Simulation/BurstCube/Users/ajoens/gbm_effective_area.dat')
#print(aeff_gbm)

# +
#Integrating the best fit spectrum for each GRB in the energy range of 50-300 KeV to get max. observed photon flux.
#This will give us the photon flux in units of ph/cm^2/s. Currently only doing this for GBM and will then add in BurstCube
mo=grb1708['PFLX_BEST_FITTING_MODEL']
m1 = grbs1['PFLX_BEST_FITTING_MODEL']
#f=np.zeros([len(s),nsims]) # produces an array of zeros with the given shape and type
pf1708=np.zeros(len(grb1708))
gbmcr1708=np.zeros(len(grb1708))
pf1=np.zeros(len(grb1708))
gbmcr1=np.zeros(len(grb1708))
outE=np.logspace(np.log10(50),np.log10(300),100) # returns numbers spaced evenly on a log scale

# For each selected burst, pick the catalog's best-fit peak-flux spectral model
# and integrate it over outE twice: weighted by the GBM effective area
# (-> count rate, gbmcr*) and unweighted (-> photon flux, pf*).
for i in range(len(grb1708)):
    #for j in range(nsims):
    #E=np.array(eng[w[j]+1:w[j+1]+1])
    #AeffBC=loginterpol(E,aeffs['aeff'][w[j]+1:w[j+1]+1],outE)
    # NOTE(review): AeffGBM is loop-invariant and could be hoisted out of the loop.
    AeffGBM=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],outE) #eng[w[j]+1:w[j+1]+1])
    #Aratio=(AeffBC/AeffGBM)
    # not sure what *grb_catalogs_copy.pl(outE,gbm['PFLX_PLAW_INDEX'][s[i]] is and why we need it. I think we only need the model photon flux times the aeffGBM and we want it integrated over the energy range provided in outE
    # this should give us an array of the maximum observed photon flux for GBM
    if mo[i]=='PFLX_PLAW':
        gbmcr1708[i]=np.trapz(grb1708['PFLX_PLAW_AMPL']*grb_catalogs_copy.pl(outE,grb1708['PFLX_PLAW_INDEX'])*AeffGBM,outE)
        pf1708[i]=np.trapz(grb1708['PFLX_PLAW_AMPL']*grb_catalogs_copy.pl(outE,grb1708['PFLX_PLAW_INDEX']),outE)
        #pf[i]=gbm['PFLX_PLAW_PHTFLUX'][s[i]]
    if mo[i]=='PFLX_COMP':
        gbmcr1708[i]=np.trapz(grb1708['PFLX_COMP_AMPL']*grb_catalogs_copy.comp(outE,grb1708['PFLX_COMP_INDEX'],grb1708['PFLX_COMP_EPEAK'])*AeffGBM,outE)
        pf1708[i]=np.trapz(grb1708['PFLX_COMP_AMPL']*grb_catalogs_copy.comp(outE,grb1708['PFLX_COMP_INDEX'],grb1708['PFLX_COMP_EPEAK']),outE)
        #pf[i]=gbm['PFLX_COMP_PHTFLUX'][s[i]]
    if mo[i]=='PFLX_BAND':
        gbmcr1708[i]=np.trapz(grb1708['PFLX_BAND_AMPL']*grb_catalogs_copy.band(outE,grb1708['PFLX_BAND_ALPHA'],grb1708['PFLX_BAND_EPEAK'],grb1708['PFLX_BAND_BETA'])*AeffGBM,outE)
        pf1708[i]=np.trapz(grb1708['PFLX_BAND_AMPL']*grb_catalogs_copy.band(outE,grb1708['PFLX_BAND_ALPHA'],grb1708['PFLX_BAND_EPEAK'],grb1708['PFLX_BAND_BETA']),outE)
        #pf[i]=gbm['PFLX_BAND_PHTFLUX'][s[i]]
    if mo[i]=='PFLX_SBPL':
        gbmcr1708[i]=np.trapz(grb1708['PFLX_SBPL_AMPL']*grb_catalogs_copy.sbpl(outE,grb1708['PFLX_SBPL_INDX1'],grb1708['PFLX_SBPL_BRKEN'],grb1708['PFLX_SBPL_INDX2'])*AeffGBM,outE)
        pf1708[i]=np.trapz(grb1708['PFLX_SBPL_AMPL']*grb_catalogs_copy.sbpl(outE,grb1708['PFLX_SBPL_INDX1'],grb1708['PFLX_SBPL_BRKEN'],grb1708['PFLX_SBPL_INDX2']),outE)
        #pf[i]=gbm['PFLX_SBPL_PHTFLUX'][s[i]]
    # Same calculation for the second selected burst (GRB120817168).
    if m1[i]=='PFLX_PLAW':
        gbmcr1[i]=np.trapz(grbs1['PFLX_PLAW_AMPL']*grb_catalogs_copy.pl(outE,grbs1['PFLX_PLAW_INDEX'])*AeffGBM,outE)
        pf1[i]=np.trapz(grbs1['PFLX_PLAW_AMPL']*grb_catalogs_copy.pl(outE,grbs1['PFLX_PLAW_INDEX']),outE)
        #pf[i]=gbm['PFLX_PLAW_PHTFLUX'][s[i]]
    if m1[i]=='PFLX_COMP':
        gbmcr1[i]=np.trapz(grbs1['PFLX_COMP_AMPL']*grb_catalogs_copy.comp(outE,grbs1['PFLX_COMP_INDEX'],grbs1['PFLX_COMP_EPEAK'])*AeffGBM,outE)
        pf1[i]=np.trapz(grbs1['PFLX_COMP_AMPL']*grb_catalogs_copy.comp(outE,grbs1['PFLX_COMP_INDEX'],grbs1['PFLX_COMP_EPEAK']),outE)
        #pf[i]=gbm['PFLX_COMP_PHTFLUX'][s[i]]
    if m1[i]=='PFLX_BAND':
        gbmcr1[i]=np.trapz(grbs1['PFLX_BAND_AMPL']*grb_catalogs_copy.band(outE,grbs1['PFLX_BAND_ALPHA'],grbs1['PFLX_BAND_EPEAK'],grbs1['PFLX_BAND_BETA'])*AeffGBM,outE)
        pf1[i]=np.trapz(grbs1['PFLX_BAND_AMPL']*grb_catalogs_copy.band(outE,grbs1['PFLX_BAND_ALPHA'],grbs1['PFLX_BAND_EPEAK'],grbs1['PFLX_BAND_BETA']),outE)
        #pf[i]=gbm['PFLX_BAND_PHTFLUX'][s[i]]
    if m1[i]=='PFLX_SBPL':
        gbmcr1[i]=np.trapz(grbs1['PFLX_SBPL_AMPL']*grb_catalogs_copy.sbpl(outE,grbs1['PFLX_SBPL_INDX1'],grbs1['PFLX_SBPL_BRKEN'],grbs1['PFLX_SBPL_INDX2'])*AeffGBM,outE)
        pf1[i]=np.trapz(grbs1['PFLX_SBPL_AMPL']*grb_catalogs_copy.sbpl(outE,grbs1['PFLX_SBPL_INDX1'],grbs1['PFLX_SBPL_BRKEN'],grbs1['PFLX_SBPL_INDX2']),outE)
        #pf[i]=gbm['PFLX_SBPL_PHTFLUX'][s[i]]

pf = np.array(pf1708)
#gbmcr = np.array(gbmcr)
print(gbmcr1708)
print(mo)
print(np.trapz(grb1708['Flnc_Plaw_Phtfluxb']*AeffGBM,outE))
print(grb1708['Flnc_Plaw_Phtfluxb'])
# -

# comparing our calculated values to other values found in the catalog
print('calculated photon flux 1708 = ',pf1708)
print('photon flux found in catalog 1708 = ',grb1708['Flnc_Plaw_Phtfluxb'])
print('calculated photon count rate 1708 = ',gbmcr1708)
print('actual count rate is about 75')
print('photon fluence found in catalog 1708 = ',grb1708['Flnc_Plaw_Phtflncb'])

#using SkyCoord to convert coordinates to degrees and solve for distances.
def separation(ra1, dec1, ra2, dec2):
    """Return the angular separation in degrees between two sky positions.

    Coordinates are given in degrees; uses astropy SkyCoord.separation.
    """
    c = SkyCoord(ra=ra1*u.deg, dec=dec1*u.deg)
    d = SkyCoord(ra=ra2*u.deg, dec=dec2*u.deg)
    dist = c.separation(d)
    dist = dist.value
    return dist


# +
# this all together will give us the number of source photons
## setup GBM: the 12 NaI detector pointings in spacecraft coordinates
gbm_pointings = {'01': ('45:54:0','20:36:0'),
                 '02': ('45:6:0','45:18:0'),
                 '03': ('58:24:0','90:12:0'),
                 '04': ('314:54:0','45:12:0'),
                 '05': ('303:12:0','90:18:0'),
                 '06': ('3:24:0','89:48:0'),
                 '07': ('224:54:0','20:24:0'),
                 '08': ('224:36:0','46:12:0'),
                 '09': ('236:36:0','90:0:0'),
                 '10': ('135:12:0','45:36:0'),
                 '11': ('123:42:0','90:24:0'),
                 '12': ('183:42:0','90:18:0')}

fermi = Spacecraft(gbm_pointings, window=0.1)

# Exposure map sampled on a res x res grid over the whole sky.
res = 250
rr, dd = np.meshgrid(np.linspace(0, 360, res, endpoint=False), np.linspace(-90, 90, res))
exposure_positions = np.vstack([rr.ravel(), dd.ravel()])
gbm_exposures = np.array([[detector.exposure(position[0], position[1])
                           for position in exposure_positions.T]
                          for detector in fermi.detectors])
# -


# +
# now that GBM's pointings are set up we will throw GRBs at it and determine its exposure for each GRB.
def throw_grbs(fermi, minflux, maxflux):
    """Throw random GRBs at the spacecraft and return per-detector exposures.

    Returns
    -------
    gbmexposures : array
        Per-GRB detector exposures sorted in descending order.
    secondhighest : array
        Second-highest exposure for each GRB (used for the significance cut).
    randgbmexposures : array
        Unsorted (detector, GRB) exposure matrix.
    """
    nsims = int(np.round(len(grb1708)))
    ra, dec = random_sky(nsims)
    ra = np.array(ra) - 180
    dec = np.array(dec)
    #sigma=0.65,mean=1.5
    #change the sigma and mean in order to create a log fit for simulated GBM. Automate this fit.
    #flux=np.random.lognormal(size=nsims,sigma=0.55,mean=0.6)*(np.log10(maxflux)-np.log10(minflux))+np.log10(minflux)

    # GBM exposures for each random GRB: rows are detectors, columns are GRBs.
    randgbmexposures = np.array([[detector.exposure(ra[i], dec[i]) for i in range(nsims)]
                                 for detector in fermi.detectors])
    print("randgbmexposures=", randgbmexposures)

    # Order exposures in descending order per GRB.
    # FIX: the original recomputed the same full sort once per column inside a
    # redundant loop; a single call produces the identical result.
    newrandgbm = -np.sort(-randgbmexposures.T)
    gbmexposures = np.transpose(newrandgbm)
    print("gbmexposures=", gbmexposures)

    # Select the second highest value.
    # We will use this to ensure the second highest exposure detector has a sig >4.5
    secondhighest = gbmexposures[1, :]
    print("Second highest =", secondhighest)

    return gbmexposures, secondhighest, randgbmexposures
# -

# +
# define the peak flux interval (seconds; PFLX_SPECTRUM_* are catalog times)
interval = grb1708['PFLX_SPECTRUM_STOP'] - grb1708['PFLX_SPECTRUM_START']
# BUG FIX: a stale line `interval = msinterval/1000` referenced the undefined
# name `msinterval` (NameError) and clobbered the value computed above; the
# catalog interval is already in seconds, so the ms conversion was removed.
#interval = trig['Trigger_Timescale'][s]
print(interval)
#triginterval = trig['End_Time'][s]-trig['Time'][s]
#print(triginterval)
# -

# +
flux = pf
minflux = min(flux)
maxflux = max(flux)

gbmexposures, secondhighest, randgbmexposures = throw_grbs(fermi, minflux, maxflux)
# -

# +
interval = .256*2
# NOTE(review): the original assigned `secondhightest=1.` here — a misspelling
# of `secondhighest` that was never read. It has been removed; if the intent
# was to force the exposure to 1.0, restore it as `secondhighest = 1.`.
source = gbmcr1708*secondhighest*interval
print(source)
print('countrate=', gbmcr1708)

sourcepf = grb1708['Pflx_Plaw_Phtfluxb']*secondhighest*interval
print(sourcepf)
# -

# +
countrate = np.trapz(grb1708['Pflx_Plaw_Phtfluxb']*AeffGBM, outE)*secondhighest*interval
print(countrate)
# -

# Assuming a background count rate. units: cts/s
bckgrd = 300

# scale the background count rate to the exposure-weighted interval
scaledbckgrd = bckgrd*secondhighest*interval
print(scaledbckgrd)

# Signal-to-noise: S / sqrt(S + B).
# FIX: np.sqrt instead of math.sqrt — math.sqrt only accepts scalars and
# fails on array-valued source/countrate.
sig = source / (np.sqrt(source + scaledbckgrd))
print(sig)

sig = countrate / (np.sqrt(countrate + scaledbckgrd))
print(sig)
ajoens/GRBtestcode.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Core implementation of Linked List


def _standardize_start_stop(start, stop, size):
    """Convenience function for standardizing start, stop indexes for
    slicing functionality. It pushes start and stop into the range [0, size].

    1. Allows for negative indexing, -i -> size-i
    2. Replaces default None from slice() with expected start/stop values.

    Raises:
        TypeError: if start or stop is neither None nor an integer.
    """
    try:
        if start is None:
            start = 0
        elif start < 0:
            start += size
        if stop is None:
            # Note: This means for slicing, None gives the exclusive end of
            # slice. Therefore, obj[i:] -> [obj[i], ..., obj[-1]] correctly.
            stop = size
        elif stop < 0:
            stop += size
    except TypeError:
        raise TypeError("Start and stop index must be integer.")
    return start, stop


class _BaseLinkedList:
    """Implementation of a forward, singly-linked list.

    Attributes:
        _head : nested node structure, or [] when the list is empty
        _size : number of items currently stored

    Methods:
        _load_list(*items)
        _iternodes(start=None, stop=None)
        _get_node(idx)

    Notes:
        Items are packed in via nodes expressed as node=[<item>, <next_node>],
        so it's a deeply nested structure, i.e.

        >>> LinkedList(1, 2, 3)._head == [1, [2, [3, None]]]
        True

        The size of the linked list is carried as a member variable:

        >>> LinkedList(1, 2, 3)._size
        3
    """

    def __init__(self, *items):
        self._load_list(*items)

    def _load_list(self, *items):
        """Loads items into nested [a_0, [a_1, ..., [a_n-1, None]]] structure."""
        self._size = len(items)
        if not items:
            self._head = []
        else:
            self._head = [items[0], None]
            i = 1
            node = self._head
            while i < self._size:
                new_node = [items[i], None]
                node[1] = new_node
                node = new_node
                i += 1

    def _iternodes(self, start=None, stop=None):
        """Iterates through the nodes, yielding one node at a time.

        Parameters:
            * start : [None, int] Start with node at index=start.
            * stop : [None, int] Stop yielding nodes at index=stop
              (i.e. exclusive end).
        """
        start, stop = _standardize_start_stop(start, stop, self._size)
        if start == 0 and stop == self._size:
            # Fast path: walk the whole list without index bookkeeping.
            node = self._head
            while node:
                yield node
                node = node[1]
        else:
            node = self._head
            i = 0
            while node:
                if start <= i < stop:
                    yield node
                elif i == stop:
                    break
                node = node[1]
                i += 1

    def _get_node(self, idx):
        """Returns the node at index idx (negative indexes count from the end).

        Raises:
            IndexError: if idx is out of bounds.
        """
        if idx < 0:
            idx += self._size
        node = self._head
        i = 0
        while node:
            if i == idx:
                return node
            node = node[1]
            i += 1
        # If nothing returned, it means index is out of bounds.
        raise IndexError("Index out of bounds.")


# ### Mock-ups for tests:

# +
x = _BaseLinkedList(1, 2, 3)
x._get_node(0)
# -

for node in x._iternodes():
    print(node)

# +
try:
    x._get_node(-4)
    raise Exception("Failure: IndexError not called!")
except IndexError as e:
    print(f"Succeeded, IndexError('{e}') raised on index -4.")

try:
    x._get_node(4)
    raise Exception("Failure: IndexError not called!")
except IndexError as e:
    print(f"Succeeded, IndexError('{e}') raised on index 4.")
# -

# ---
# # Linked List in Python


class LinkedList(_BaseLinkedList):
    """This provides a linked list container for data."""

    def __init__(self, *items):
        self._load_list(*items)

    def __len__(self):
        return self._size

    def __iter__(self):
        for (item, _) in self._iternodes():
            yield item

    def __contains__(self, val):
        for (item, _) in self._iternodes():
            if item == val:
                return True
        return False

    def __getitem__(self, key):
        """Integer indexing or step-less slicing; slices return a plain list."""
        if isinstance(key, int):
            item, _ = self._get_node(key)
            return item
        elif isinstance(key, slice):
            if key.step is not None:
                raise ValueError("Slicing with steps not implemented for LinkedList.")
            start, stop = _standardize_start_stop(key.start, key.stop, self._size)
            items = []
            for (item, _) in self._iternodes(start, stop):
                items.append(item)
            return items
        else:
            raise TypeError("Index must be integer or slice.")

    def __repr__(self):
        str_of_items = ", ".join((str(item) for item in self))
        return f"LinkedList({str_of_items})"

    def append(self, new_item):
        """Appends new_item at the end of the list (O(n) walk to the tail)."""
        if not self._head:
            # Bug fix: appending to an empty list used to raise IndexError
            # because _get_node(-1) has no last node to return.
            self._head = [new_item, None]
        else:
            node = self._get_node(-1)
            node[1] = [new_item, None]
        self._size += 1

    def prepend(self, new_item):
        """Prepends new_item before the head (O(1))."""
        # Use None (not the empty sentinel []) as the terminator when the list
        # was empty, so the node layout matches what _load_list builds.
        self._head = [new_item, self._head if self._head else None]
        self._size += 1

    def insert(self, idx, new_item):
        """Inserts new_item right AFTER the node at index idx.

        Note this differs from list.insert, which inserts before idx.
        Raises IndexError on an empty list or an out-of-bounds idx.
        """
        node = self._get_node(idx)
        next_node = node[1]
        node[1] = [new_item, next_node]
        self._size += 1


xlist = LinkedList(1, 2, 4)
xlist

xlist.prepend(0)
xlist

xlist.append(5)
xlist

xlist.insert(idx=2, new_item=3)
xlist

for item in xlist:
    print(item)

print(f"First item: {xlist[0]}, last item: {xlist[-1]}")

print(f" xlist[1:3] = {xlist[1:3]}\n xlist[:2] = {xlist[:2]}\n xlist[2:] = {xlist[2:]}\n xlist[:] = {xlist[:]}")

1 in xlist

# ---
# # DoublyLinkedList Implementation


class _BaseDoublyLinkedList:
    """Implementation of a doubly-linked list.

    Attributes:
        _head, _tail : nested node structures, or [] when the list is empty
        _size : number of items currently stored

    Methods:
        _load_list(*items)
        _iternodes(start=None, stop=None)
        _reversed_iternodes(start=None, stop=None)
        _get_node(idx)

    Notes:
        Nodes are expressed as node=[<item>, <next_node>, <prev_node>], so
        the structure can be walked from either end.
    """

    def __init__(self, *items):
        self._load_list(*items)

    def _load_list(self, *items):
        """Loads items into nested nodes, wiring both next and prev links."""
        self._size = len(items)
        if not items:
            self._head = []
            self._tail = []
        else:
            self._head = [items[0], None, None]
            i = 1
            node = self._head
            while i < self._size:
                new_node = [items[i], None, node]
                node[1] = new_node
                node = new_node
                i += 1
            self._tail = node

    def _iternodes(self, start=None, stop=None):
        """Iterates forward through the nodes in [start, stop).

        Parameters:
            * start : [None, int] Start with node at index=start.
            * stop : [None, int] Stop yielding nodes at index=stop
              (i.e. exclusive end).
        """
        start, stop = _standardize_start_stop(start, stop, self._size)
        # Just loop through the full list if the full range is requested.
        if start == 0 and stop == self._size:
            node = self._head
            while node:
                yield node
                node = node[1]
        # Otherwise, narrow yields down to the requested range.
        else:
            node = self._head
            i = 0
            while node:
                if start <= i < stop:
                    yield node
                elif i == stop:
                    break
                node = node[1]
                i += 1

    def _reversed_iternodes(self, start=None, stop=None):
        """Iterates backward (tail to head) through the nodes in [start, stop)."""
        start, stop = _standardize_start_stop(start, stop, self._size)
        # Just loop through the full list if the full range is requested.
        if start == 0 and stop == self._size:
            node = self._tail
            while node:
                yield node
                node = node[2]
        # Otherwise, narrow yields down to the requested range.
        else:
            node = self._tail
            i = self._size - 1
            while node:
                if start <= i < stop:
                    yield node
                elif i < start:
                    # Bug fix: the old guard (i == start) could never fire
                    # because i == start is handled by the yield branch above,
                    # so the walk needlessly continued all the way to the head.
                    break
                node = node[2]
                i -= 1

    def _get_node(self, idx):
        """Returns the node at index idx; raises IndexError if out of bounds."""
        if idx < 0:
            idx += self._size
        node = self._head
        i = 0
        while node:
            if i == idx:
                return node
            node = node[1]
            i += 1
        # If nothing returned, it means index is out of bounds.
        raise IndexError("Index out of bounds.")


class DoublyLinkedList(_BaseDoublyLinkedList):
    """This provides a doubly-linked list container for data."""

    def __init__(self, *items):
        self._load_list(*items)

    def __len__(self):
        return self._size

    def __iter__(self):
        for (item, _, _) in self._iternodes():
            yield item

    def __reversed__(self):
        for (item, _, _) in self._reversed_iternodes():
            yield item

    def __contains__(self, val):
        for (item, _, _) in self._iternodes():
            if item == val:
                return True
        return False

    def __getitem__(self, key):
        """Integer indexing or step-less slicing; slices return a plain list."""
        if isinstance(key, int):
            item, _, _ = self._get_node(key)
            return item
        elif isinstance(key, slice):
            if key.step is not None:
                # Bug fix: the message used to say "LinkedList" (copy-paste).
                raise ValueError("Slicing with steps not implemented for DoublyLinkedList.")
            start, stop = _standardize_start_stop(key.start, key.stop, self._size)
            items = []
            for (item, _, _) in self._iternodes(start, stop):
                items.append(item)
            return items
        else:
            raise TypeError("Index must be integer or slice.")

    def __repr__(self):
        str_of_items = ", ".join((str(item) for item in self))
        return f"DoublyLinkedList({str_of_items})"

    def append(self, new_item):
        """Appends new_item at the tail (O(1))."""
        if not self._head:
            # Bug fix: appending to an empty list used to crash on
            # self._tail[1] because _tail was the empty sentinel [].
            node = [new_item, None, None]
            self._head = node
            self._tail = node
        else:
            node = [new_item, None, self._tail]
            self._tail[1] = node
            self._tail = node
        self._size += 1

    def prepend(self, new_item):
        """Prepends new_item before the head (O(1))."""
        if not self._head:
            # Bug fix: same empty-list crash as append, on self._head[2].
            node = [new_item, None, None]
            self._head = node
            self._tail = node
        else:
            node = [new_item, self._head, None]
            self._head[2] = node
            self._head = node
        self._size += 1

    def insert(self, idx, new_item):
        """Inserts new_item right BEFORE the node at index idx.

        Note the asymmetry with LinkedList.insert, which inserts after idx;
        both behaviors are kept as-is for backward compatibility.
        """
        node = self._get_node(idx)
        prev_node = node[2]
        new_node = [new_item, node, prev_node]
        if prev_node is None:
            # Bug fix: inserting at index 0 used to crash on prev_node[1]
            # because the head node has no predecessor.
            self._head = new_node
        else:
            prev_node[1] = new_node
        node[2] = new_node
        self._size += 1


# ### Mock-up for tests:

# +
xlist = DoublyLinkedList(1, 2, 3, 4)
for item in xlist:
    print(item)
print()
for item in reversed(xlist):
    print(item)
# -

xlist[2]

2 in xlist

xlist.prepend(0)
xlist

xlist.append(5)
xlist
data-structs/linked_list.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modeling: # # <NAME> # # --- # # As we see in the last step, Random Forest Regressor with the Hyperparameters that the GrideSearch pick, is the model with best R2 score and least mean absolute error. # With the R2 of 0.63 and mae of 40 million dollar for the box-office, we can already assume that this model won't be able to get a perfect prediction of the box-office. # Here I try to make the same model, train i ton train set, then use the test set to see the performance of the model for the diffrent class of film. #importing the necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import sklearn from sklearn.model_selection import train_test_split, cross_validate, GridSearchCV,cross_val_score from sklearn.linear_model import LinearRegression,Lasso,ElasticNet from sklearn.ensemble import RandomForestRegressor from sklearn.dummy import DummyRegressor from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import SelectKBest, f_regression from sklearn.pipeline import make_pipeline from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error,explained_variance_score # Read the process csv file from EDA movie_score = pd.read_csv('/Users/Amin/Documents/GitHub/Movie_boxoffice_reviews/data/processed/pre_process_movie.csv') # + #Save the class of the movies as movie_class then drop it from the data movie_class = movie_score['class'] movie_score.drop(columns='class',inplace=True) # - #split the data to train/test set. 70% train and 30% test. 
# Hold out a test set (test_size=0.2 -> 80/20 split) with a fixed seed for
# reproducibility. Target is All_time_gross; everything else is a feature.
X_train, X_test, y_train, y_test = train_test_split(movie_score.drop(columns='All_time_gross'), movie_score.All_time_gross, test_size=0.2, random_state=47)

# +
# Check the train and test data shapes.
X_train.shape,y_train.shape
# -

X_test.shape,y_test.shape

# Random Forest Regressor with the same hyperparameters found by the
# pre-process GridSearch.
rf_model = RandomForestRegressor(max_depth=60,n_estimators=50, min_samples_leaf= 2,min_samples_split= 6,random_state=47)
rf_model.fit(X_train,y_train)

# Cross-validated MAE on the training set (negated per sklearn's
# "higher is better" scoring convention).
cv_results = cross_validate(rf_model, X_train, y_train, scoring='neg_mean_absolute_error', cv=5, n_jobs=-1)
np.mean(cv_results['test_score'])

# Now let's get the prediction for the test set and see the model performance.

# Get the prediction for the test set.
y_pred = rf_model.predict(X_test)

mean_absolute_error(y_test,y_pred)

explained_variance_score(y_test,y_pred)

# The mean absolute error for the test set is much higher.
#
# Let's create some metrics for each prediction and analyze the prediction
# for each class.

# +
# Make a result df and create some metrics for the predictions.
# pd.DataFrame(data, index): column 0 holds the actual box-office values and
# the index holds the predictions, so after reset_index() the 'index' column
# contains the predictions.
result = pd.DataFrame(y_test.values,y_pred).reset_index()
result = pd.concat([result,movie_class],axis=1).dropna()
result
# -

# Bug fix: the original mapping was swapped — it labeled the predictions
# (the 'index' column) as 'box-office' and the actual values (column 0) as
# 'prediction'.
result = result.rename(columns={'index':'prediction',0:'box-office'})

result['absolute_error'] = np.abs(result['prediction'] - result['box-office'])

# Bug fix: this previously read result['diff'], a column that is never
# created (KeyError at runtime); the error column built above is
# 'absolute_error'. (The misspelled 'acuracy_of_perdiction' column name is
# kept because later cells reference it.)
result['acuracy_of_perdiction'] = 100 - ((result['absolute_error']/result['box-office']*100))

result.head(10)

result.acuracy_of_perdiction.describe()

# Inspect the rows where the "accuracy" went negative (error larger than the
# actual box-office).
result.acuracy_of_perdiction[result.acuracy_of_perdiction<0]

# As we can see, sometimes the error of the prediction is more than the
# box-office itself (especially for the Blockbusters), and that will cause
# the accuracy to be a negative percentage.
# We can set that to zero and assume in those cases the accuracy percentage
# is zero.
# Cap the accuracy at zero: predictions that miss by more than the actual
# box-office would otherwise show a negative percentage.
result.loc[result.acuracy_of_perdiction<0,'acuracy_of_perdiction'] = 0

result.head(10)

# Per-class accuracy summaries.
result_indie = result[result['class'] == 'Indie']
result_indie.acuracy_of_perdiction.describe()

result_average = result[result['class'] == 'Average']
result_average.acuracy_of_perdiction.describe()

result_blockbuster = result[result['class'] == 'Blockbuster']
result_blockbuster.acuracy_of_perdiction.describe()

# Box plot of prediction accuracy per film class.
sns.set(rc={'figure.figsize':(8,10)})
_ = sns.boxplot(y='acuracy_of_perdiction',data=result,x='class')

# Box plot of absolute error per film class.
sns.set(rc={'figure.figsize':(8,10)})
_ = sns.boxplot(y='absolute_error',data=result,x='class')

# We can see the accuracy for the Blockbusters is the worst among the
# classes, mainly because the box-office for those films is a very high
# number: even if the prediction is in close range of the real value, the
# difference is still a high number, which drags the accuracy percentage
# down.
# Besides that, we see what we expect from the R2 score: accuracy around 20
# to 60 percent with the median around 30, depending on the class.
# While the model won't give us a perfect prediction, we can assume it will
# classify the film class with high accuracy.
# We can examine that with some real vote data that are more recent and are
# not in our original dataset.
#
# The absolute error, on the other hand, is around 10 to 50 millions in most
# cases, which is a good number for the Blockbuster and Average classes,
# although there are some outliers in the Indie and Average classes with
# very high error.
#
# Let's start with a big movie in 2021: __Spider-Man: No Way Home.__

# One-row feature frame with the same columns the model was trained on.
spider = pd.DataFrame({
    'tom_cri_vote':[93],
    'tom_cri_num':[385],
    'tom_aud_vote':[98],
    'tom_aud_num':[25000],
    'met_cri_vote':[71],
    'met_cri_num':[59],
    'met_aud_vote':[89],
    'met_aud_num':[2013],
    'imdb_vote':[87],
    'imdb_num':[451025],
    'imdb_us_vote':[87],
    'imdb_us_num':[34371],
    'imdb_nus_vote':[84],
    'imdb_nus_num':[87342],
    'imdb_fem_vote':[88],
    'imdb_fem_num':[26676],
    'imdb_mal_vote':[86],
    'imdb_mal_num':[162657]
})

spider_box_predict = rf_model.predict(spider.iloc[0].array.reshape(1, -1))
spider_box_predict

# The model predicts that this film's box-office will be 850 million dollars
# based on the vote data that we provided.
# The film ended up with 1.7 billion dollars in box-office. There is a very
# large error in the model prediction (around 900 million dollars), but the
# model successfully predicted that this film was going to be a big
# Blockbuster with very high box-office gross.
#
# Let's check another promising film in 2021: __Dune__. Although it is not a
# Super Hero or a Marvel movie, with a very well-known cast, a huge
# advertising campaign and a Christmas-time release, the producers hoped for
# a good box-office gross, a number close to a billion.

dune = pd.DataFrame({
    'tom_cri_vote':[83],
    'tom_cri_num':[456],
    'tom_aud_vote':[90],
    'tom_aud_num':[5000],
    'met_cri_vote':[74],
    'met_cri_num':[67],
    'met_aud_vote':[80],
    'met_aud_num':[1371],
    'imdb_vote':[81],
    'imdb_num':[478505],
    'imdb_us_vote':[81],
    'imdb_us_num':[44798],
    'imdb_nus_vote':[80],
    'imdb_nus_num':[142364],
    'imdb_fem_vote':[81],
    'imdb_fem_num':[39777],
    'imdb_mal_vote':[81],
    'imdb_mal_num':[224114]
})

dune_box_predict = rf_model.predict(dune.iloc[0].array.reshape(1, -1))
dune_box_predict

# The model predicts a 650 million dollar box-office for Dune; in reality it
# gained 400 millions. This is an even closer prediction and a successful
# classification of the movie as a Blockbuster. It seems that if the
# box-office is less than a billion we get a closer prediction.
#
# Now let's check another promising film. It is not a Marvel Super Hero or a
# Sci-fi movie, but it has a very famous director, <NAME> and <NAME> as a
# cast, and an interesting story: __House of Gucci__

house_of_gucci = pd.DataFrame({
    'tom_cri_vote':[63],
    'tom_cri_num':[335],
    'tom_aud_vote':[83],
    'tom_aud_num':[2500],
    'met_cri_vote':[59],
    'met_cri_num':[57],
    'met_aud_vote':[80],
    'met_aud_num':[857],
    'imdb_vote':[68],
    'imdb_num':[64497],
    'imdb_us_vote':[67],
    'imdb_us_num':[6084],
    'imdb_nus_vote':[67],
    'imdb_nus_num':[20533],
    'imdb_fem_vote':[69],
    'imdb_fem_num':[8867],
    'imdb_mal_vote':[67],
    'imdb_mal_num':[27604]
})

house_of_gucci_box_predict = rf_model.predict(house_of_gucci.iloc[0].array.reshape(1, -1))
house_of_gucci_box_predict

# The model predicts a 270 million dollar box-office for the film, which is
# less than we would expect from this cast and director.
# In reality it gained only 150 million dollars; people didn't like it even
# with the big cast and the famous <NAME> as director.
#
# The model classified it successfully as an Average movie according to the
# vote numbers we used, and predicted a close box-office for the film.
#
# Now let's try an independent film with a low budget that critics loved —
# a story about princess Diana, which is still a hot topic, especially in
# 2021: __Spencer__

spencer = pd.DataFrame({
    'tom_cri_vote':[83],
    'tom_cri_num':[321],
    'tom_aud_vote':[52],
    'tom_aud_num':[500],
    'met_cri_vote':[76],
    'met_cri_num':[53],
    'met_aud_vote':[67],
    'met_aud_num':[260],
    'imdb_vote':[68],
    'imdb_num':[36549],
    'imdb_us_vote':[69],
    'imdb_us_num':[4371],
    'imdb_nus_vote':[67],
    'imdb_nus_num':[12148],
    'imdb_fem_vote':[65],
    'imdb_fem_num':[5724],
    'imdb_mal_vote':[68],
    'imdb_mal_num':[15806]
})

spencer_box_predict = rf_model.predict(spencer.iloc[0].array.reshape(1, -1))
spencer_box_predict

# This film got high votes from the critics and was nominated in the Oscar
# season, but it ended up with 20 million dollars at the box-office.
#
# The model predicts 44 millions — a very close number — and classifies it
# correctly as an Indie film.
#
# Let's try a more independent film, again loved by critics at the Cannes
# film festival, but this one doesn't even have a hot story or an
# interesting cast.

bergman_island = pd.DataFrame({
    'tom_cri_vote':[86],
    'tom_cri_num':[118],
    'tom_aud_vote':[52],
    'tom_aud_num':[50],
    'met_cri_vote':[81],
    'met_cri_num':[26],
    'met_aud_vote':[66],
    'met_aud_num':[20],
    'imdb_vote':[68],
    'imdb_num':[3878],
    'imdb_us_vote':[68],
    'imdb_us_num':[556],
    'imdb_nus_vote':[67],
    'imdb_nus_num':[1727],
    'imdb_fem_vote':[66],
    'imdb_fem_num':[475],
    'imdb_mal_vote':[68],
    'imdb_mal_num':[2268]
})

bergman_island_box_predict = rf_model.predict(bergman_island.iloc[0].array.reshape(1, -1))
bergman_island_box_predict

# The model predicts only 2 million dollars for this film based on the vote
# data, and it actually ended up gaining only 700 thousand dollars in real
# life.
# Even if we created movie classes with smaller margins (compared to the
# original three classes that I made), this model would be able to recognize
# the movie class with high accuracy and predict a close number for the
# box-office.

first_cow = pd.DataFrame({
    'tom_cri_vote':[96],
    'tom_cri_num':[210],
    'tom_aud_vote':[63],
    'tom_aud_num':[500],
    'met_cri_vote':[89],
    'met_cri_num':[44],
    'met_aud_vote':[70],
    'met_aud_num':[114],
    'imdb_vote':[71],
    'imdb_num':[16957],
    'imdb_us_vote':[71],
    'imdb_us_num':[2907],
    'imdb_nus_vote':[71],
    'imdb_nus_num':[7091],
    'imdb_fem_vote':[70],
    'imdb_fem_num':[1440],
    'imdb_mal_vote':[72],
    'imdb_mal_num':[10606]
})

first_cow_box_predict = rf_model.predict(first_cow.iloc[0].array.reshape(1, -1))
first_cow_box_predict
notebooks/Untitled3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# # Cross validation

# + [markdown]
# Customer data for the Taishin analysis environment project
# (台新分析開發環境案客戶資料):
# https://drive.google.com/drive/u/2/folders/1AAXWPcB9xiDe_qX87ce5XL-utkASWxRe
#
# Open the shared folder above and right-click "Add to My Drive" before
# running this notebook.

# +
# Authorize access to your Google Drive.
from google.colab import drive
drive.mount('/content/drive')

# +
# Confirm the data folder really is mounted.
# !ls drive/My\ Drive/台新分析開發環境案客戶資料

# +
# Let's get started.
import pandas as pd
import os.path
import numpy as np

# +
# Load the two source tables; sr_1.csv is Big5-encoded (cp950), so decode
# with errors replaced rather than raised.
DATA_DIR='drive/My Drive/台新分析開發環境案客戶資料'
sr1_fn = os.path.join(DATA_DIR, 'Copy of sr_1.csv')
with open(sr1_fn, encoding='cp950', errors='replace') as sr1_f:
    sr1_df = pd.read_csv(sr1_f)

profile_fn = os.path.join(DATA_DIR, 'Copy of profile.csv')
profile_df = pd.read_csv(profile_fn)
profile_df.set_index('CUST_NO', inplace=True)

# +
sr1_df.head()

# +
# Total spend per customer and merchant category code (mcc); unstack turns
# mcc into columns, and missing combinations become 0.
df = sr1_df[['CUST_NO', 'mcc', 'amt']].groupby(['CUST_NO', 'mcc']).sum().unstack().fillna(0)
df.head()

# +
# Ratio of the spends: clip negative amounts to 0, drop customers with no
# spend at all, then normalize each row to sum to 1.
df[df<0]=0
df = df[df.sum(axis=1)>0]
df = df.div(df.sum(axis=1), axis=0)
df.head()

# +
df.sum(axis=0).sort_values().tail()

# +
import matplotlib.pyplot as plt

# Binary target: is the customer male ('M')?
y = profile_df.c_gender[df.index] == 'M'
X = df.values.astype('float')

# Plot the sample points: spend ratios for two ('amt', mcc) columns of the
# unstacked frame, colored by the gender label.
plt.scatter(df['amt', 5311][:5000], df['amt', 5411][:5000], c=y[:5000], cmap=plt.cm.rainbow, zorder=10, s=1);

# +
X.shape

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test =train_test_split(X, y, test_size=0.3, random_state=42)

# +
X_train.shape

# +
X_test.shape

# +
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()
clf.fit(np.array(X_train), y_train )

# +
# Train vs. test accuracy of the single hold-out split.
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))

# +
# Same model scored with 5-fold cross validation instead of one split.
from sklearn.model_selection import cross_val_score

clf = LogisticRegression()
scores = cross_val_score(clf, X, y, cv=5)
scores

# +
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

# +
# Customization: 10 folds, macro-averaged F1 scoring.
clf = LogisticRegression()
scores = cross_val_score(clf, X, y, cv=10, scoring='f1_macro', n_jobs=1)
print(scores)
print("F1: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

# +
# Pipeline: standardize inside each CV fold so test-fold statistics do not
# leak into the scaler.
from sklearn import preprocessing
from sklearn.pipeline import make_pipeline

clf = make_pipeline(preprocessing.StandardScaler(), LogisticRegression())
scores = cross_val_score(clf, X, y, cv=5, scoring='f1_macro', n_jobs=1)
print(scores)
print("F1: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
cross_validation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Good practices, programming style and conventions # Python is a very versatile programming language, and it has a very flexible syntax. Therefore, it is also very easy to write *ugly code* in python. This chapter reviews a number of guidelines and conventions widely accepted by the community. # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Good-practices,-programming-style-and-conventions" data-toc-modified-id="Good-practices,-programming-style-and-conventions-11"><span class="toc-item-num">11&nbsp;&nbsp;</span>Good practices, programming style and conventions</a></span><ul class="toc-item"><li><span><a href="#Use-PEP-8" data-toc-modified-id="Use-PEP-8-11.1"><span class="toc-item-num">11.1&nbsp;&nbsp;</span>Use PEP 8</a></span></li><li><span><a href="#Give-meaningful-names-to-variables-and-functions" data-toc-modified-id="Give-meaningful-names-to-variables-and-functions-11.2"><span class="toc-item-num">11.2&nbsp;&nbsp;</span>Give meaningful names to variables and functions</a></span></li><li><span><a href="#Prefer-writing-many-small-functions-instead-of-monolithic-code-blocks" data-toc-modified-id="Prefer-writing-many-small-functions-instead-of-monolithic-code-blocks-11.3"><span class="toc-item-num">11.3&nbsp;&nbsp;</span>Prefer writing many small functions instead of monolithic code blocks</a></span></li><li><span><a href="#Document-your-code" data-toc-modified-id="Document-your-code-11.4"><span class="toc-item-num">11.4&nbsp;&nbsp;</span>Document your code</a></span></li><li><span><a href="#Refactor-your-code" data-toc-modified-id="Refactor-your-code-11.5"><span class="toc-item-num">11.5&nbsp;&nbsp;</span>Refactor your 
code</a></span></li><li><span><a href="#The-Zen-of-Python" data-toc-modified-id="The-Zen-of-Python-11.6"><span class="toc-item-num">11.6&nbsp;&nbsp;</span>The Zen of Python</a></span></li><li><span><a href="#What's-next?" data-toc-modified-id="What's-next?-11.7"><span class="toc-item-num">11.7&nbsp;&nbsp;</span>What's next?</a></span></li></ul></li></ul></div> # - # ## Use PEP 8 # [PEP 8](https://www.python.org/dev/peps/pep-0008/) is a style guide for python code. It introduces new rules where the syntax leaves them open. As an example: # Not PEP8 compliant a=2*3 # PEP8 compliant a = 2*3 # PEP8 gives **guidelines**, not strict rules. It is your choice to comply with them or not. As a matter of fact however, many open source projects have adopted PEP8 and require to use it if you want to contribute. If you want to use it too, a good choice is to turn on PEP8 checking in your favorite IDE (in Spyder: ``Tools -> Preferences -> Editor -> Code Introspection/Analysis -> `` Tick ``Real-time code style analysis`` for PEP8 style analysis). # ## Give meaningful names to variables and functions # Code space is less important than your time. Give meaningful name to your variables and functions, even if it makes them quite long! For example, prefer: # ```python # def fahrenheit_to_celsius(temp): # """Converts degrees Fahrenheits to degrees Celsius.""" # ``` # to the shorter but less explicit: # ```python # def f2c(tf): # """Converts degrees Fahrenheits to degrees Celsius.""" # ``` # ## Prefer writing many small functions instead of monolithic code blocks # [Separation of concerns](https://en.wikipedia.org/wiki/Separation_of_concerns) is an important design principle for separating a computer program into distinct sections, such that each section addresses a separate concern. This increases the code readability and facilitates unit testing. 
Note that this is useful even if you don't plan to share your code. At the very least there is one person reading your code: yourself! And it's nice to be nice to yourself ;-)
# ## The Zen of Python import this # ## What's next? # Back to the [table of contents](00-Introduction.ipynb#ctoc), or [jump to this week's assignment](12-Assignment-03.ipynb).
notebooks/11-Good-practices.ipynb
# -*- coding: utf-8 -*- # + [markdown] slideshow={"slide_type": "slide"} # <center> # # <h1> Logical Foundations </h1> # <h2> CS3100 Fall 2019 </h2> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## Review # # ### Previously # # * Prolog basics # # ### This lecture # # * Logical foundations of prolog # + First-order logic # * Syntax, Semantics and properties # + Definite Clause programs # * Syntax, semantics, connection to prolog, SLD resolution # + [markdown] slideshow={"slide_type": "slide"} # ## First-order logic # # Terms and functions: # # \\[ # \begin{array}{rcl} # \text{term} & := & \text{constant} ~\mid~ \text{variable} ~\mid~ \text{functions} \\ # \text{functions} & := & f(t1,t2,...,tn) \mid g(t1,t2,....,tn) \\ # & & \quad \text{where f and g are function symbols.} \\ # & & \quad \text{where t1,t2... are terms.} # \end{array} # \\] # # $ # \newcommand{\s}[1]{\text{s}(#1)} # \newcommand{\mul}[2]{\text{mul}(#1,#2)} # \newcommand{\square}[1]{\text{square}(#1)} # % # \newcommand{\even}[1]{\text{even}(#1)} # \newcommand{\odd}[1]{\text{odd}(#1)} # \newcommand{\prime}[1]{\text{prime}(#1)} # \newcommand{\composite}[1]{\text{composite}(#1)} # \newcommand{\divides}[2]{\text{divides}(#1,#2)} # \newcommand{\gt}[2]{\text{gt}(#1,#2)} # \newcommand{\le}[2]{\text{le}(#1,#2)} # $ # + [markdown] slideshow={"slide_type": "slide"} # ## Natural numbers # # Consider the terms for encoding natural numbers $\mathbb{N}$. # # * **Constant:** Let $z$ be $0$. # * **Functions**: Given the natural numbers $x$ and $y$, let the function # + $\s{x}$ represent the successor of $x$ # + $\mul{x}{y}$ represent the product of $x$ and $y$. # + $\square{x}$ represent the square of $x$. 
# + [markdown] slideshow={"slide_type": "slide"} # ## First-order logic # # \\[ # \begin{array}{rcl} # t \in \text{term} & := & \text{constant} ~\mid~ \text{variable} ~\mid~ \text{functions} \\ \\ # f,g \in \text{formulas} & := & p(t_1,\ldots,t_n) \quad \text{where } p \text{ is the predicate symbol} \\ # & \mid & \neg f \mid f ~\wedge~ g \mid f ~\vee~ g \mid f \rightarrow g \mid f \leftrightarrow g \\ # & \mid & \forall X.f \mid \exists X.f \quad \text{where } X \text{ is a variable} # \end{array} # \\] # + [markdown] slideshow={"slide_type": "slide"} # ## Predicates on natural numbers # # * $\even{x}$ - the natural number $x$ is even. # * $\odd{x}$ - the natural number $x$ is odd. # * $\prime{x}$ - the natural number $x$ is prime. # * $\divides{x}{y}$ - the natural number $x$ divides $y$. # * $\le{x}{y}$ - the natural number $x$ is less than or equal to $y$ # * $\gt{x}{y}$ - the natural number $x$ is greater than $y$. # + [markdown] slideshow={"slide_type": "slide"} # ## Precedence # # From strongest to weakest # # 1. $\neg$ # 2. $\vee$ # 3. $\wedge$ # 4. $\rightarrow, \leftrightarrow$ # 5. $\forall, \exists$ # + [markdown] slideshow={"slide_type": "slide"} # ## Precedence # # Hence, # # \\[ # ((\neg b) ~\wedge~ c) \rightarrow a) # \\] # # can be simplified to # # \\[ # \neg b ~\wedge~ c \rightarrow a # \\] # + [markdown] slideshow={"slide_type": "slide"} # ## Some statements on natural numbers # # * Every natural number is even or odd, but not both. # * A natural number is even if and only if it is divisible by two. # * If some natural number, $x$, is even, then so is $x^2$. # + [markdown] slideshow={"slide_type": "slide"} # ## Some statements on natural numbers # # * Every natural number is even or odd, but not both. # + $\forall x. (\even{x}~\vee~ \odd{x}) \wedge \neg(\even{x} ~\wedge~ \odd{x})$ # * A natural number is even if and only if it is divisible by two. # + $\forall x. 
\even{x} \leftrightarrow \divides{2}{x}$ # * If some natural number, $x$, is even, then so is $x^2$. # + $\forall x. \even{x} \rightarrow \even{\square{x}}$ # + [markdown] slideshow={"slide_type": "slide"} # ## Some statements on natural numbers # # * A natural number $x$ is even if and only if $x+1$ is odd. # * Any prime number that is greater than 2 is odd. # * For any three natural numbers $x$, $y$, and $z$, if $x$ divides $y$ and $y$ divides $z$, then $x$ divides $z$. # + [markdown] slideshow={"slide_type": "slide"} # ## Some statements on natural numbers # # * A natural number $x$ is even if and only if $x+1$ is odd. # + $\forall x. \even{x} \leftrightarrow \odd{\s{x}}$ # * Any prime number that is greater than 2 is odd. # + $\forall x. \prime{x} ~\wedge~ \gt{x}{\s{\s{z}}} \rightarrow \odd{x}$ # * For any three natural numbers 𝑥, 𝑦, and 𝑧, if 𝑥 divides 𝑦 and 𝑦 divides 𝑧, then 𝑥 divides 𝑧. # + $\forall x,y,z. \divides{x}{y} ~\wedge~ \divides{y}{z} \rightarrow \divides{x}{z}$ # + [markdown] slideshow={"slide_type": "slide"} # ## Some statements on natural numbers # # * There exists an odd composite number (recall, composite number is greater than 1 and not prime). # * Every natural number greater than one has a prime divisor. # + [markdown] slideshow={"slide_type": "slide"} # ## Some statements on natural numbers. # # * There exists an odd composite (not prime) number. # + $\exists x. \odd{x} ~\wedge~ \composite{x}$ # * Every natural number greater than one has a prime divisor. # + $\forall x. \gt{x}{\s{z}} \rightarrow (\exists p. \prime{p} ~\wedge~ \divides{p}{x})$ # + [markdown] slideshow={"slide_type": "slide"} # ## Logical Equivalences # # \\[ # \begin{array}{rcl} # \neg \neg f & \equiv & f \\ # f \rightarrow g & \equiv & \neg f \vee g \\ # f \leftrightarrow g & \equiv & (f \rightarrow g) \wedge (g \rightarrow f)\\ # \neg (f \vee g) & \equiv & \neg f \wedge \neg g \\ # \neg (f \wedge g) & \equiv & \neg f \vee \neg g \\ # \neg \forall x. 
f(x) & \equiv & \exists x. \neg f(x) \\ # \neg \exists x. f(x) & \equiv & \forall x. \neg f(x) \\ # \end{array} # \\] # + [markdown] slideshow={"slide_type": "slide"} # ## Logical Equivalences # # \\[ # \begin{array}{rcl} # \forall x.(f(x) \wedge g(x)) & \equiv & (\forall x.f(x)) \wedge (\forall x.g(x)) \\ # \forall x.(f(x) \vee g(x)) & \not \equiv & (\forall x.f(x)) \vee (\forall x.g(x)) \\ # \end{array} # \\] # # Pick $f$ as $even$ and $g$ as $odd$. # # \\[ # \begin{array}{rcl} # \exists x.(f(x) \vee g(x)) & \equiv & (\exists x.f(x)) \vee (\exists x.g(x)) \\ # \exists x.(f(x) \wedge g(x)) & \not \equiv & (\exists x.f(x)) \wedge (\exists x.g(x)) \\ # \end{array} # \\] # # Pick $f$ as $even$ and $g$ as $odd$. # + [markdown] slideshow={"slide_type": "slide"} # ## Inference rules # # \\[ # \begin{array}{cccc} # \displaystyle{\frac{f \quad f \rightarrow g}{g}} & (\rightarrow E) & # \qquad \displaystyle{\frac{\forall x. f(x)}{f(t)}} & (\forall E) \\ \\ # \displaystyle{\frac{f(t)}{\exists x. f(x)}} & (\exists I) & # \qquad \displaystyle{\frac{f \quad g}{f \wedge g}} & (\wedge I) # \end{array} # \\] # + [markdown] slideshow={"slide_type": "slide"} # ## Interpretation # # * What we have seen so far is a syntactic study of first-order logic. # + Semantics = meaning of first-order logic formulas. # * Given an alphabet $A$ from which terms are drawn from and a domain $\mathcal{D}$, an **interpretation** maps: # + each constant $c \in A$ to an element in $\mathcal{D}$ # + each $n$-ary function $f \in A$ to a function $\mathcal{D}^n \rightarrow \mathcal{D}$ # + each $n$-ary preducate $p \in A$ to a relation $D_1 \times \ldots \times D_n$ # + [markdown] slideshow={"slide_type": "slide"} # ## Interpretation # # For our running example, choose the domain of natural numbers $\mathbb{N}$ with # # * The constant $z$ maps to $0$. 
# * The function $\s{x}$ maps to the function $\s{x} = x+1$ # * The predicate $\text{le}$ maps to the relation $\leq$ # + [markdown] slideshow={"slide_type": "slide"} # ## Models # # * A **model** for a set of first-order logic formulas is equivalent to the assignment to truth variables in predicate logic. # * A interpretation $M$ for a set of first-order logic formulas $P$ is a model for $P$ iff every formula of $P$ is true in $M$. # * If $M$ is a model for $f$, we write $M \models f$, which is read as "models" or "satisfies". # # + [markdown] slideshow={"slide_type": "slide"} # ## Models # # Take $f = \forall y.\le{z}{y}$. The following are models for $f$ # # * Domain $\mathbb{N}$, $z$ maps to $0$, $\s{x}$ maps to $\s{x} = x + 1$ and le maps to $\leq$. # * Domain $\mathbb{N}$, $z$ maps to $0$, $\s{x}$ maps to $\s{x} = x + 2$ and le maps to $\leq$. # * Domain $\mathbb{N}$, $z$ maps to $0$, $\s{x}$ maps to $\s{x} = x$ and le maps to $\leq$. # # whereas the following aren't: # # * The integer domain $\mathbb{Z}$, $\ldots$ # * Domain $\mathbb{N}$, $z$ maps to $0$, $\s{x}$ maps to $\s{x} = x + 1$ and le maps to $\geq$ # + [markdown] slideshow={"slide_type": "slide"} # ## Quiz # # Which of these interpretations are models of $f= \forall y.le(z,y)$? # # 1. Domain $\mathbb{N} \setminus \{0\}$, $z$ maps to 1, $s(x)$ maps to $s(x) = x+1$ and le maps to $\leq$. # 2. Domain $\mathbb{N} \setminus \{0\}$, $z$ maps to 1, $s(x)$ maps to $s(x) = x*2$ and le maps to $\leq$. # 3. Domain $\mathbb{N}$, $z$ maps to 0, $s(x)$ maps to $s(x) = x + 1$ and le maps to $<$. # 4. Domain is the domain of sets, $z$ maps to $\emptyset$, $s(x)$ maps to $s(x) = \{x\}$ and $le(x,y) = x \subseteq y \vee \exists e \in y.le(x,e)$. # + [markdown] slideshow={"slide_type": "slide"} # ## Quiz # # Which of these interpretations are models of $f= \forall y.le(z,y)$? # # 1. Domain $\mathbb{N} \setminus \{0\}$, $z$ maps to 1, $s(x)$ maps to $s(x) = x+1$ and le maps to $\leq$. **yes** # 2. 
Domain $\mathbb{N} \setminus \{0\}$, $z$ maps to 1, $s(x)$ maps to $s(x) = x*2$ and le maps to $\leq$. **yes** # 3. Domain $\mathbb{N}$, $z$ maps to 0, $s(x)$ maps to $s(x) = x + 1$ and le maps to $<$. **no** # 4. Domain is the domain of sets, $z$ maps to $\emptyset$, $s(x)$ maps to $s(x) = \{x\}$ and $le(x,y) = x \subseteq y \vee \exists e \in y.le(x,e)$. **yes** # + [markdown] slideshow={"slide_type": "slide"} # ## Models # # * A set of forumulas $P$ is said to be **satisfiable** if there is a model $M$ for $P$. # * Some formulas do not have models. Easiest one is $f \wedge \neg f$ # + Such (set of) formulas are said to be **unsatisfiable**. # + [markdown] slideshow={"slide_type": "slide"} # ## Logical consequence & validity # # Given a set of formulas $P$, a formula $f$ is said to be a logical consequence of $P$ iff for every model $M$ of $P$, $M \models f$. # # How can you prove this? # # * Show that $\neg f$ is false in every model $M$ of $P$. # + Equivalent to, $P \cup {\neg f}$ is **unsatisfiable**. # # A formula $f$ is said to be **valid**, if it is true in every model (written as $\models f$). # # **Theorem:** It is undecidable whether a given first-order logic formula $f$ is **valid**. # + [markdown] slideshow={"slide_type": "slide"} # ## Restricting the language # # * Clearly, the full first-order logic is not a practical model for computation as it is undecidable. # + How can we do better? # * Restrict the language such that the language is **semi-decidable**. # * A language $L$ is said to be **decidable** if there exists a turing machine that # + accepts every string in L and # + rejects every string not in L # * A language $L$ is said to be **semi-decidable** if there exists a turing machine that # + accepts every string in L and # + for every string not in L, rejects it or loops forever. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Definite logic programs # # * Definite clauses are such a restriction on first-order logic that is semi-decidable. # * Prolog is basically programming with definite clauses. # * In order to define definite clauses formally, we need some auxiliary definitions. # + [markdown] slideshow={"slide_type": "slide"} # ## Definite clauses # # * An **atomic forumla** is a formula without connectives. # + $\even{x}$ and $\prime{x}$ # + but not $\neg \even{x}$, $\even{x} \vee \prime{y}$ # * A **clause** is a first-order logic formula of the form $\forall(L_1 \vee \ldots \vee L_n)$, where every $L_i$ is an atomic formula (a postive literal) or the negation of an atomic formula (a negative literal). # * A **definite clause** is a clause with exactly one positive literal. # + $\forall(A_0 \vee \neg A_1 \ldots \vee \neg A_n)$ # + Usually written down as, $A_0 \leftarrow A_1 \wedge \ldots \wedge A_n$, for $n \geq 0$. # + or more simply, $A_0 \leftarrow A_1, \ldots, A_n$, for $n \geq 0$. # * A **definite program** is a finite set of definite clauses. # + [markdown] slideshow={"slide_type": "slide"} # ## Definite Clauses and Prolog # # * Prolog facts are definite clauses with no negative literals. # + The prolog fact `even(z)` is equivalent to # + the definite clause $\forall z. \even{z} \leftarrow \top$, where $\top$ stands for true. # * Prolog rules are definite clauses. # + The prolog rule `ancestor(X,Y) :- parent(X,Z), ancestor(Z,Y)` is equivalent to # + the definite clause $\forall x,y,z. \text{ancestor}(x,y) \leftarrow \text{parent}(x,z) ~\wedge~ \text{ancestor}(z,y)$ # + equivalent to, $\forall x,y. \text{ancestor}(x,y) \leftarrow \exists z. \text{parent}(x,z) ~\wedge~ \text{ancestor}(z,y)$ # + [markdown] slideshow={"slide_type": "slide"} # ## Consistency of Definite Clause Programs # # * Every definite clause program has a model! # * Proof # + there is no way to encode negative information in definite clause programs. 
# + Hence, there is no way to construct an inconsistent system (such as $f \wedge \neg f$). # + Therefore, every definite clause program has a model. # + [markdown] slideshow={"slide_type": "slide"} # ## Models for Logic Programs # # * Every definite clause program has a model # + How do we compute this model? # + Why? In order to provide a semantics for logic program. # + [markdown] slideshow={"slide_type": "fragment"} # <center> # <h1> More Definitions! :-( </h1> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## Herbrand Universe # # Given a logic program $P$, the Herbrand universe of the logic program $U(P)$ is the set of all ground terms that can be formed from the constants and function symbols in $P$. # # For our encoding of natural numbers, with the constant $z$ and the function $s(x)$, the Herbrand universe is $\{z,s(z),s(s(z)),\ldots\}$. # # * If there are no function symbols, the Herbrand universe is finite. # * If there are no constants, add an arbitrary constant to form the Herbrand base. # + [markdown] slideshow={"slide_type": "slide"} # ## Herbrand Base # # * The Herbrand base, denoted by $B(P)$ is the set of all ground goals that can be formed from the predicates in $P$ and the terms of the Herbrand universe. # * For our encoding of natural numbers, let $\even{x}$ be the only predicate. # + Then, $B(P) = \{\even{z},\even{s(z)},\ldots\}$. # * Herbrand base is infinite if Herbrand universe is. # + [markdown] slideshow={"slide_type": "slide"} # ## Herbrand Interpretation # # * Interpretation of a logic program is the subset of the Herbrand base. # * An interpretation assigns true or false to elements of the Herbrand base. # + A goal is true if it belongs to the interpretation. 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Herbrand model
#
# A model $M$ of a logic program is an interpretation such that for all ground instantiations of the form $A \leftarrow B_1, B_2, \ldots, B_n$, if $B_1$ to $B_n$ belong to $M$, then $A$ belongs to $M$.
#
# Let the logic program be
#
# ```prolog
# even(z).
# even(s(s(X))) :- even(X).
# ```
#
# A Herbrand model of this program includes $\{\even{z},\even{s(s(z))},\ldots\}$.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Herbrand Model
#
# * But the Herbrand model may also include elements from $S = \{\even{s(z)}, \even{s(s(s(z)))},\ldots\}$.
#   + In particular, the Herbrand base of a definite program P is always a Herbrand model of the program.
# * There are an infinite number of Herbrand models if the Herbrand base is infinite.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Least Herbrand Model
#
# * Hence, we define a Least Herbrand model, which is the intersection of all Herbrand models.
#   + The Least Herbrand Model does not include elements from $S$.
# * The Least Herbrand Model **precisely** defines the declarative meaning of the logic program.
#   + Every logic program has a least Herbrand model.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Quiz
#
# Given a language $S$ with constants `robb`, `rickard` and `ned`, predicates `father/2` and `ancestor/2`, and facts `father(rickard,ned)` and `father(ned,robb)`, and rules `ancestor(X,Y) :- father(X,Y)` and `ancestor(X,Y) :- father(X,Z), ancestor(Z,Y)` which of these statements are true?
#
# 1. Herbrand Universe $U(S)$ is infinite.
# 2. Herbrand Base $B(S)$ is finite.
# 3. `father(ancestor(robb))` $\in B(S)$.
# 4. `father(ned,ned)` $\in M$, where $M$ is a Herbrand model of the program.
# 5. `father(ned,ned)` $\in M$, where $M$ is the least Herbrand model of the program.
# + [markdown] slideshow={"slide_type": "slide"} # ## Quiz # # Given a language $S$ with constants `robb`, `rickard` and `ned`, predicates `father/2` and `ancestor/2`, and facts `father(rickard,ned)` and `father(ned,robb)`, and rules `ancestor(X,Y) :- father(X,Y)` and `ancestor(X,Y) :- father(X,Z), ancestor(Z,Y)` which of these statements are true? # # 1. Herbrand Universe $U(S)$ is infinite. **false** # 2. Herbrand Base $B(S)$ is finite. **true** # 3. `father(ancestor(robb))` $\in B(S)$. **false** # 4. `father(ned,ned)` $\in M$, where $M$ is a Herbrand model of the program. **true** # 5. `father(ned,ned)` $\in M$, where $M$ is the least Herbrand model of the program. **false** # + [markdown] slideshow={"slide_type": "slide"} # ## Answering Prolog Queries # # * Least Herbrand Model is only used to discuss semantics # + Not used for computation by Prolog. # * How does prolog compute the answers to queries? # + [markdown] slideshow={"slide_type": "slide"} # ## Prolog Queries # # * Let us assume that the prolog program $P$ is family tree of House Stark encoded in the previous lecture. # * We would like to answer "is Rickard the ancestor of Robb?" # + $q = \text{ancestor}(rickard,robb)$ # * We construct a logical statement # + $\neg \text{ancestor}(rickard,robb)$ # + which is the **negation** of the original question. # + [markdown] slideshow={"slide_type": "slide"} # ## Prolog Queries # # * The system attempts to show that $\neg \text{ancestor}(rickard,robb)$ is false in every model of $P$. # + equivalent to showing $P \cup \{\neg \text{ancestor}(rickard,robb)\}$ is unsatisfiable. # * Then, we can conclude that for every model $M$ of $P$, $M \models q$. # + that is, "Rickard is the ancestor of Robb". # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution # # * The whole point of restricting the first-order logic language to definite clauses is to have a better decision procedue. 
# * There is a **semi-decidable** decision procedure for definite clauses called **SLD resolution**. # + SLD = Selective Linear Resolution with Definite Clauses. # + given an unsatisfiable set of formulae it is guaranteed to derive false # + however given a satisfiable set, it may never terminate. # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution example # # ```prolog # father(rickard,ned). # father(rickard,brandon). # father(rickard,lyanna). # father(ned,robb). # father(ned,sansa). # father(ned,arya). # parent(X,Y) :- father(X,Y). # ancestor(X,Y) :- parent(X,Y). # ancestor(X,Y) :- parent(X,Z), ancestor(Z,Y). # ?- ancestor(rickard, robb). # ``` # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution example # # * The logical version goal is $\neg \text{ancestor(rickard,robb)}$. # * The system attemps to disprove this by **finding a counter-example**. # + How can I derive `ancestor(rickard,robb)`? # * I can see a rule `ancestor(X,Y) :- parent(X,Y)` which allows me to derive `ancestor(X,Y)`. # + the logical equivalent is, $\forall x,y.(ancestor(x,y) \leftarrow parent(x,y))$. # * **Deduce**: # + Apply $(\forall E)$ rule for $x$ and $y$ and pick $x = rickard$ and $y = robb$. # + Apply $(\rightarrow E)$ rule on the result to get a new goal $parent(rickard,robb)$. # * The original goal to derive `ancestor(rickard,robb)` has been replaced by the goal to derive `parent(rickard,robb)`. # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution example # # * How can you derive `parent(rickard,robb)`? # * Observe the rule `parent(X,Y) :- father(X,Y)` # + logical equivalent is $\forall x,y. parent(x,y) \leftarrow father(x,y)$. # * **Deduce**: Apply rules $(\forall E)$ and $(\rightarrow E)$. # * New goal: `father(rickard,robb)`. # * No fact matches this goal! # + **Backtrack!** # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution example # # * How can I derive `ancestor(rickard, robb)`? 
# * Observe the rule `ancestor(X,Y) :- parent(X,Z), ancestor(Z,Y)` # + logical equivalent is $\forall x,y. ancestor(x,y) \leftarrow \exists z. parent(x,z) \wedge ancestor(z,y)$ # * **Deduce**: Apply rules $(\forall E), (\rightarrow E), (\exists I), (\wedge I)$ in that order. # * We get two new goals, `parent(rickard,Z)` and `ancestor(Z,robb)` where `Z` is the same variable introduced by $(\exists I)$. # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution example # # * The goal `parent(rickard,Z)` in turn leads to the goal `father(rickard,Z)`. # + The first rule `father(rickard,ned)` unifies with this goal with `Z = ned`. # + Hence, the first goal is proved. # * The other goal is now specialised to `ancestor(ned,robb)`. # * The second goal can now be proved as `ancestor(ned,robb)` $\leftarrow$ `parent(ned,robb)` $\leftarrow$ `father(ned,robb)`. # + We have a fact `father(ned,robb)`. Hence, proved. # + [markdown] slideshow={"slide_type": "slide"} # ## SLD Resolution example # # * By deriving `q = ancestor(rickard,robb)` from the given program $P$, we have shown that $P \cup \{\neg q\}$ is unsatisfiable. # * Hence, `ancestor(rickard,robb)` is a logical consequence of the given program $P$. # + [markdown] slideshow={"slide_type": "slide"} # ## Computation is deduction # # * When a prolog program computes the result of the query, it is performing logical deduction through SLD resolution. # * In our example, # + We picked the clauses in the order they appear in the program # + Did a depth-first search for proof # + Given the conjunction of goals $g1 \wedge g2$, chose to prove $g1$ first. # * SWI-Prolog implementation has the same behaviour # + Other prolog implementation may choose different strategies BFS instead of DFS, pick last conjunct in a conjunction of goals, etc. # + [markdown] slideshow={"slide_type": "slide"} # ## Tracing in SWI-Prolog # # ``` # father(rickard,ned). # father(rickard,brandon). # father(rickard,lyanna). # father(ned,robb). 
# father(ned,sansa). # father(ned,arya). # parent(X,Y) :- father(X,Y). # ancestor(X,Y) :- parent(X,Y). # ancestor(X,Y) :- parent(X,Z), ancestor(Z,Y). # ?- ancestor(rickard, robb). # ``` # + [markdown] slideshow={"slide_type": "slide"} # <center> # # <h1> Fin. </h1> # </center>
lectures/lec18/lec18.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import psycopg2  # was used below but never imported (NameError at connect time)

# Read database credentials from a local two-line file:
# row 0 = user name, row 1 = password.
try:
    login = pd.read_csv(r'login.txt', header=None)
    user = login[0][0]
    pw = login[0][1]
    print('User information is ready!')
except (FileNotFoundError, KeyError, IndexError) as err:
    print('Login information is not avaliable!!!')
    # Fail fast: nothing below can work without credentials. The original
    # bare `except` swallowed the error and the script then crashed on the
    # undefined `user`/`pw` names anyway.
    raise RuntimeError('could not load credentials from login.txt') from err

# Confirm the user name only -- never echo the password to stdout.
print(user)

# Database information
host = '##.###.###.##'
db_name = 'db_name'
table_name = 'table_name'

# connect to database with psycopg2
conn = psycopg2.connect(host=host, database=db_name, user=user, password=pw)
cur = conn.cursor()

# table_name is a local constant, so str.format is safe here; never build SQL
# this way from user-supplied input.
sql = """
select *
from {table_name}
""".format(table_name=table_name)

dp = pd.read_sql(sql, conn)

# connect to the same database through Spark JDBC
# NOTE(review): `spark` (a SparkSession) is assumed to already exist in the
# notebook session -- confirm it is created before this cell runs.
url = 'jdbc:postgresql://' + host + ':5432/' + db_name + '?user=' + user + '&password=' + pw
properties = {'driver': 'org.postgresql.Driver', 'password': pw, 'user': user}
ds = spark.read.jdbc(url=url, table=table_name, properties=properties)
doc/code/CodeForTutor/login.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (tunnel)
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/wesleybeckner/python_foundations/blob/main/notebooks/solutions/SOLN_P3_Random_TicTacToe_Agents.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="vhe1yX4AMckE"
# # Python Foundations, Project Part 3: Building Agents (Random AI Agent)
#
# **Instructor**: <NAME>
#
# **Contact**: <EMAIL>
# <br>
#
# ---
#
# <br>
#
# In part III of our tic-tac-toe and AI journey, we're going to create NPCs for our game!
#
# We will need to pull in our OOP work from the previous project notebook. Be sure to copy your code and run those cells below before we get started on section 3
#
# <br>
#
# ---
#
# <br>
#
# <a name='top'></a>

# + [markdown] id="mNtJitcRW51Y"
# <a name='x.0'></a>
#
# ## 3.0 Preparing Environment and Importing Data
#
# [back to top](#top)

# + [markdown] id="chdcBoBL8SNm"
# <a name='x.0.1'></a>
#
# ### 3.0.1 Import Packages
#
# [back to top](#top)

# + id="eJ7-FDq3JhhI"
class TicTacToe:
    """A 3x3 tic-tac-toe board with win and stalemate detection.

    Attributes
    ----------
    winner : str
        '' while the game is in progress; set to 'X', 'O', 'Stalemate',
        or 'F' (player quit) as the game resolves.
    start_player : str
        label of the player who moves first (unused by this class itself).
    board : dict
        maps the positions 1-9 (left-to-right, top-to-bottom) to
        'X', 'O', or ' '.
    win_patterns : list
        the eight position triples that constitute a win
        (three rows, three columns, two diagonals).
    """

    def __init__(self, winner='', start_player=''):
        self.winner = winner
        self.start_player = start_player
        # positions are numbered 1-9, reading left-to-right, top-to-bottom
        self.board = {1: ' ',
                      2: ' ',
                      3: ' ',
                      4: ' ',
                      5: ' ',
                      6: ' ',
                      7: ' ',
                      8: ' ',
                      9: ' ',}
        # rows, then columns, then the two diagonals
        self.win_patterns = [[1,2,3], [4,5,6], [7,8,9],
                             [1,4,7], [2,5,8], [3,6,9],
                             [1,5,9], [7,5,3]]

    def visualize_board(self):
        """
        Visualizes the board during gameplay.

        Prints the current contents of ``self.board`` as a 3x3 grid
        (the stale ``board_values`` parameter doc was removed: this
        method takes no arguments).

        Returns
        -------
        None
        """
        print(
            "|{}|{}|{}|\n|{}|{}|{}|\n|{}|{}|{}|\n".format(*self.board.values())
        )

    def check_winning(self):
        """
        Checks if the game has a winner.

        Scans every pattern in ``self.win_patterns`` against
        ``self.board``. Side effect: sets ``self.winner`` to 'X' or 'O'
        when a completed line is found.

        Returns
        -------
        win_statement : str
            defaults to an empty string if no winner. Otherwise
            "'X' Won!" or "'O' Won!"
        """
        for pattern in self.win_patterns:
            values = [self.board[i] for i in pattern]
            if values == ['X', 'X', 'X']:
                self.winner = 'X'
                return "'X' Won!"
            elif values == ['O', 'O', 'O']:
                self.winner = 'O'
                return "'O' Won!"
        return ''

    # here the definition of check_stalemate is given
    def check_stalemate(self):
        """Declare a stalemate when no blank squares remain.

        Side effect: sets ``self.winner`` to 'Stalemate' when the board is
        full. NOTE(review): play_game calls this right after check_winning,
        so a win on the final move briefly becomes 'Stalemate'; play_game
        then re-runs check_winning, which restores the true winner before
        the result is printed -- confirm this ordering is intentional.

        Returns
        -------
        str or None
            "It's a stalemate!" when the board is full, otherwise None.
        """
        if ' ' not in self.board.values():
            self.winner = 'Stalemate'
            return "It's a stalemate!"

def play_game():
    """Run an interactive two-player (human vs human) game at the console.

    'X' always moves first. Typing 'q' or 'quit' at the move prompt
    abandons the game (winner is set to 'F').
    """
    print("'X' will go first!")
    tic_tac_toe = TicTacToe()
    while True:
        for player in (['X', 'O']):
            tic_tac_toe.visualize_board()
            move = input("{}, what's your move?".format(player))
            # allow the player to forfeit from the input line
            if move in ['q', 'quit']:
                tic_tac_toe.winner = 'F'
                print('quiting the game')
                break
            move = int(move)
            # re-prompt until the chosen square is free
            if tic_tac_toe.board[move] != ' ':
                while True:
                    move = input("{}, that position is already taken! "\
                                 "What's your move?".format(player))
                    move = int(move)
                    if tic_tac_toe.board[move] != ' ':
                        continue
                    else:
                        break
            tic_tac_toe.board[move] = player
            # update winner state; see check_stalemate for the ordering note
            tic_tac_toe.check_winning()
            tic_tac_toe.check_stalemate()
            if tic_tac_toe.winner == '':
                continue
            else:
                # re-running check_winning restores 'X'/'O' if check_stalemate
                # overwrote a final-move win, and yields the message to print
                print(tic_tac_toe.check_winning())
                tic_tac_toe.visualize_board()
                break
        if tic_tac_toe.winner != '':
            break

# + [markdown] id="J3dlcfPtfccI"
# ## 3.1 Creating an Agent
#
# we'll start by creating an agent that just selects a random playing number

# + [markdown] id="hOp1PvYlf4oo"
# ### Q1 Use a Random Number Generator
#
# import the random library. Use it to randomly generate numbers 1-9 (inclusive? yep, inclusive. We're going to use this to select the keys in our board dictionary! 📚)

# + id="bdqc_josNU_L" colab={"base_uri": "https://localhost:8080/"} outputId="b43fd4ba-2dbe-4657-c210-0ddc59fd0717"
# cell for Q4
# generates a random number 1-9
import random
random.randint(1,9)

# + [markdown] id="H2Z6O-iygtRl"
# ### Q2 Play a Valid Move
#
# Nice, now that we are generating random numbers 1-9.
We need to check if a random number that's been drawn is a valid move. We're going to do this with... yeah that's right you guessed it, a `while loop` # + id="cyQruaqghODb" while True: # generate a random number 1-9 and set it equal to the variable name <move> move = random.randint(1,9) if board[move] != ' ': # if the move is invalid then... continue else: # otherwise the move is valid and we can exit the loop! break # + [markdown] id="ThoZp0mHgg7g" # Wow and that's it, we just need to wrap it in our `play_game` function. Before we do that, we need to handle the ability of our engine to perform 1 and 2 player games. In order to do this, I'm going to introduce a new object to organize how the game is setup. I'm going to call it `player_meta` # # ``` # player_meta = {'first': {'label': 'X', # 'type': 'human'}, # 'second': {'label': 'O', # 'type': 'human'}} # ``` # # # # + [markdown] id="ufftBKwp_YWY" # Note that in the above `player_meta` I have `'type'` specificed to `'human'`, the other player `'type'` will be `'ai'`! Keep this in mind when answering Q3 below. # + [markdown] id="MixEXLUNlBGy" # ### Q3 Game Setup and Random AI # # I will skeleton out where the `input` questions need to be sent to the user, it is your task to determine how the `player_meta` dictionary should subsequently be updated! # + id="1Ep9Bi5dgkCa" def play_game(): tic_tac_toe = TicTacToe() ############################################################################## ################################# GAME SETUP ################################# ############################################################################## players = int(input("How many Players? (type 0, 1, or 2)")) player_meta = {'first': {'label': 'X', 'type': 'human'}, 'second': {'label': 'O', 'type': 'human'}} if players == 1: first = input("who will go first? 
(X, (AI), or O (Player))") if first == 'O': ### UPDATE PLAYER_META HERE ### player_meta['first']['label'] = 'O' player_meta['second']['label'] = 'X' player_meta['second']['type'] = 'ai' else: ### UPDATE PLAYER_META HERE ### player_meta['first']['label'] = 'X' player_meta['second']['label'] = 'O' player_meta['first']['type'] = 'ai' elif players == 0: # insert wargames quote first = random.choice(['X', 'O']) if first == 'O': ### UPDATE PLAYER_META HERE ### player_meta['first']['label'] = 'O' player_meta['second']['label'] = 'X' player_meta['second']['type'] = 'ai' player_meta['first']['type'] = 'ai' else: ### UPDATE PLAYER_META HERE ### player_meta['second']['type'] = 'ai' player_meta['first']['type'] = 'ai' ############################################################################## ################################# GAME PLAY ################################## ############################################################################## while True: # in the following line, instead of alternating between 'X' and 'O', we now # alternate between the first and second player, which has the associated # metadata of label (X or O) and type (ai or human). for player in ['first', 'second']: tic_tac_toe.visualize_board() # we set the player_label and player_type according to which player is # playing in this round player_label = player_meta[player]['label'] player_type = player_meta[player]['type'] if player_type == 'human': move = input("{}, what's your move?".format(player_label)) # we're going to allow the user to quit the game from the input line if move in ['q', 'quit']: tic_tac_toe.winner = 'F' print('quiting the game') break move = int(move) if tic_tac_toe.board[move] != ' ': while True: move = input("{}, that position is already taken! 
"\ "What's your move?".format(player)) move = int(move) if tic_tac_toe.board[move] != ' ': continue else: break ########################################################################## ################### YOUR RANDOM AI AGENT CODE GOES HERE ################## ########################################################################## else: while True: # generate a random number 1-9 and set it equal to the variable name <move> move = random.randint(1,9) if tic_tac_toe.board[move] != ' ': # if the move is invalid then... continue else: # otherwise the move is valid and we can exit the loop! break tic_tac_toe.board[move] = player_label # the winner varaible will now be check within the board object tic_tac_toe.check_winning() tic_tac_toe.check_stalemate() if tic_tac_toe.winner == '': # clear_output() continue elif tic_tac_toe.winner == 'Stalemate': print(tic_tac_toe.check_stalemate()) tic_tac_toe.visualize_board() break else: print(tic_tac_toe.check_winning()) tic_tac_toe.visualize_board() break if tic_tac_toe.winner != '': return tic_tac_toe # + id="gPwRLHXt1SQA" colab={"base_uri": "https://localhost:8080/"} outputId="1ff2a0bd-66ea-4963-96b0-4850543f1a08" play_game() # + [markdown] id="uWyxdhmXImDj" # ## 3.2 OOP and Inheritance # # We'll want to reconfigure our code a bit to allow for AI to play AI. We'll use this feature to record a bunch of games and generate some data for analysis and, eventually, machine learning. # # We also want to run our game system without having any input from the user for our data generation. This is a good point to reconfigure our code. Notice how long and bulky `play_game` is getting. # # # + [markdown] id="sHqy-bzGq7_o" # ### Q4 Inheriting from TicTacToe # # To better organize our code, we're going to create a new class called `GameEngine` and we're going to inherit all the properties of `TicTacToe`. Do we remember how to do this? let's try it. 
The simplest way is the following: # # ``` # class GameEngine(TicTacToe): # def __init__(self): # super().__init__() # ``` # # Nice. Pretty clean, right? The only new thing here I want us to introduce is the parameter `setup` setup will determine whether we have the user enter fields to setup the `player_meta` dictionary, or whether we automatically set it up to allow ai vs ai and not require input from the user (which we will need if we are to run thousands of games automatically for data generation!!!) # # ``` # class GameEngine(TicTacToe): # def __init__(self, setup='auto'): # super().__init__() # self.setup = setup # ``` # # notice the new parameter flag! We're going to use it in this next step. Be sure to: # # 1. access all methods/attributes via the `self` # 2. use code you already wrote above for setting up and for the random AI agent # + id="LeYsz5yJ1Tbv" class GameEngine(TicTacToe): def __init__(self, setup='auto'): super().__init__() self.setup = setup self.player_meta = {'first': {'label': 'X', 'type': 'human'}, 'second': {'label': 'O', 'type': 'human'}} def setup_game(self): if self.setup == 'user': ########################################################################## ## YOUR GAME SETUP CODE FROM ABOVE GOES HERE, NOTE THE NEW IF STATEMENT ## ########################################################################## players = int(input("How many Players? (type 0, 1, or 2)")) if players == 1: first = input("who will go first? 
(X, (AI), or O (Player))") if first == 'O': ### UPDATE PLAYER_META HERE ### self.start_player = 'O' self.player_meta['first']['label'] = 'O' self.player_meta['second']['label'] = 'X' self.player_meta['second']['type'] = 'ai' else: ### UPDATE PLAYER_META HERE ### self.start_player = 'X' self.player_meta['first']['label'] = 'X' self.player_meta['second']['label'] = 'O' self.player_meta['first']['type'] = 'ai' elif players == 0: # insert wargames quote first = random.choice(['X', 'O']) if first == 'O': ### UPDATE PLAYER_META HERE ### self.start_player = 'O' self.player_meta['first']['label'] = 'O' self.player_meta['second']['label'] = 'X' self.player_meta['second']['type'] = 'ai' self.player_meta['first']['type'] = 'ai' else: ### UPDATE PLAYER_META HERE ### self.start_player = 'X' self.player_meta['second']['type'] = 'ai' self.player_meta['first']['type'] = 'ai' elif self.setup == 'auto': ########################################################################## ## THE NEW AUTOSETUP FEATURE THAT WILL ALLOW THE GAME TO RANDOMLY SETUP ## ########################################################################## first = random.choice(['X', 'O']) if first == 'O': self.start_player = 'O' self.player_meta['first']['label'] = 'O' self.player_meta['second']['label'] = 'X' self.player_meta['second']['type'] = 'ai' self.player_meta['first']['type'] = 'ai' else: self.start_player = 'X' self.player_meta['second']['type'] = 'ai' self.player_meta['first']['type'] = 'ai' def play_game(self): while True: for player in ['first', 'second']: self.visualize_board() player_label = self.player_meta[player]['label'] player_type = self.player_meta[player]['type'] if player_type == 'human': move = input("{}, what's your move?".format(player_label)) # we're going to allow the user to quit the game from the input line if move in ['q', 'quit']: self.winner = 'F' print('quiting the game') break move = int(move) if self.board[move] != ' ': while True: move = input("{}, that position is already 
taken! "\ "What's your move?".format(player)) move = int(move) if self.board[move] != ' ': continue else: break ######################################################################## ################### YOUR RANDOM AI AGENT CODE GOES HERE ################ ######################################################################## else: while True: # generate a random number 1-9 and set it equal to the variable name <move> move = random.randint(1,9) if self.board[move] != ' ': # if the move is invalid then... continue else: # otherwise the move is valid and we can exit the loop! break self.board[move] = player_label # the winner varaible will now be check within the board object self.check_winning() self.check_stalemate() if self.winner == '': # clear_output() continue elif self.winner == 'Stalemate': print(self.check_stalemate()) self.visualize_board() break else: print(self.check_winning()) self.visualize_board() break if self.winner != '': return self # + [markdown] id="dDT80KJ0rHRY" # And now we can practice using our Engine: # + colab={"base_uri": "https://localhost:8080/"} id="wErDqzzOLGVg" outputId="99a60ed4-7292-4870-a7e3-9c7d0b2df17d" game = GameEngine() game.setup_game() board = game.play_game() # + [markdown] id="U9JupO5eSnLm" # ### Q5 Test the Engine # # Check that we can still use the `GameEngine` to play a human v human or human v AI game of tic-tac-toe: # + colab={"base_uri": "https://localhost:8080/"} id="5GGKHscfLv9u" outputId="3f5651e2-a54b-4e4f-844d-eda8fcd7fdb8" game = GameEngine(setup='user') game.setup_game() # + colab={"base_uri": "https://localhost:8080/"} id="QaWWXWUUSidv" outputId="10f34aad-b0fe-4647-a767-03b8e28922bf" game.play_game() # + [markdown] id="JAZWckhxTH8V" # ## 3.3 Simulating Data # # We will now want to run our game thousands of times to collect data for building our AI agents. 
The following code should run without any input from the user: # # ``` # game = GameEngine() # game.setup_game() # board = game.play_game() # ``` # + colab={"base_uri": "https://localhost:8080/"} id="CN6RceppSjTS" outputId="159d6ebe-783c-4ab3-ca9e-5752ef06e676" game = GameEngine() game.setup_game() board = game.play_game() # + [markdown] id="fxTIvVfZrgkj" # ### Q6 Record 1000 Games # # Write a for loop that creates 1000 games and saves the game data in the following dictionary format, replacing `<THE GAME NUMBER>` with whatever index you are using in the for loop: # # ``` # data = {} # data['game {}'.format(<THE GAME NUMBER>)] = {'board': board.board, # 'winner': board.winner, # 'starting player': board.start_player} # ``` # + id="dm0XhHClTqp7" # Cell for Q9 data = {} for i in range(1000): game = GameEngine() game.setup_game() board = game.play_game() data['game {}'.format(i)] = {'board': board.board, 'winner': board.winner, 'starting player': board.start_player} # + id="HSfS4t3RUPw9" import json with open('data.txt', 'w') as outfile: json.dump(data, outfile)
notebooks/solutions/SOLN_P3_Random_TicTacToe_Agents.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Imports # %reload_ext autoreload # %autoreload 2 # %matplotlib inline # %pwd from fastai import * from fastai.text import * from fastai.callbacks import * get_ipython().config.get('IPKernelApp', {})['parent_appname'] = "" path = Path('/home/hendrix/data/jhlm/') df = pd.read_csv(path/'alllessons2.csv') path.ls() # # load data_lm = load_data(path) learn = (language_model_learner(data_lm,AWD_LSTM) .load(Path('/home/hendrix/data/jhlm/models/jhlm_0'))) learn.export(fname='jhlm.pkl') learn = load_learner(path,'jhlm.pkl') # + [markdown] heading_collapsed=true # ## predict # + hidden=true print(learn.predict('this is really cool',30,temperature=.3)) # + hidden=true print(learn.predict('i think its really cool',30,temperature=.3)) # + hidden=true print(learn.predict('learning rate finder', 30, temperature=.7)) # + hidden=true print(learn.predict('learning rate finder', 50, temperature=.7)) # + hidden=true print(learn.predict('learning rate finder', 50, temperature=.7)) # + hidden=true print(learn.predict('learning rate finder', 50, temperature=.7)) # + hidden=true print(learn.predict('i think when', 100, temperature=.4)) # + hidden=true print(learn.predict('gan', 100, temperature=.4)) # - # ## beam search print(learn.beam_search('my best tips are', 30, beam_sz=200,temperature=.3)) print(learn.beam_search('i think its really cool', 30, beam_sz=200,temperature=.3)) print(learn.beam_search('i think its really cool that', 30, beam_sz=200,temperature=.3)) print(learn.beam_search('its a really good idea to', 30, beam_sz=200,temperature=.3)) print(learn.beam_search('camvid', 150, beam_sz=200,temperature=.3)) print(learn.beam_search('learning rate finder', 150, beam_sz=200,temperature=.3)) print(learn.beam_search('gan', 100, temperature=.4)) 
print(learn.beam_search('to use a gan', 100, temperature=.4)) print(learn.beam_search('You should', 100, temperature=.4)) print(learn.beam_search('Its a good idea to', 100, temperature=.4)) print(learn.beam_search('in training', 100, temperature=.4)) print(learn.beam_search('in training', 100, temperature=.9)) print(learn.beam_search('in training', 100, temperature=.99)) print(learn.beam_search('in training', 100, temperature=.99)) print(learn.beam_search('u net', 100, temperature=.99)) print(learn.beam_search('network', 150, temperature=.99)) print(learn.beam_search('network', 200, temperature=.99)) print(learn.beam_search('for this architecture', 100, temperature=.99)) print(learn.beam_search('for this architecture', 100, temperature=.99)) print(learn.beam_search('this architecture', 100, temperature=.99)) print(learn.beam_search('this architecture', 100, temperature=.99)) # # End
nbs/ubuntu/predicting-awd.ipynb
# --- # title: "Recall" # author: "<NAME>" # date: 2017-12-20T11:53:49-07:00 # description: "How to evaluate a Python machine learning using recall." # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a alt="Recall" href="https://machinelearningflashcards.com"> # <img src="/images/machine_learning_flashcards/Recall_print.png" class="flashcard center-block"> # </a> # ## Preliminaries # Load libraries from sklearn.model_selection import cross_val_score from sklearn.linear_model import LogisticRegression from sklearn.datasets import make_classification # ## Generate Features And Target Data # Generate features matrix and target vector X, y = make_classification(n_samples = 10000, n_features = 3, n_informative = 3, n_redundant = 0, n_classes = 2, random_state = 1) # ## Create Logistic Regression # Create logistic regression logit = LogisticRegression() # ## Cross-Validate Model Using Recall # Cross-validate model using precision cross_val_score(logit, X, y, scoring="recall")
docs/machine_learning/model_evaluation/recall.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## savetxt (save ndarray to text file) # ## loadtxt (load data from text file to ndarray) import numpy as np print(f'numpy version = {np.__version__}') celsius=np.arange(0, 101) fah=celsius * 1.8 + 32 fah celsius t=np.stack((celsius, fah), axis=1) t np.savetxt('temperature.csv', t) np.savetxt('temperature2.csv', t, delimiter=',', fmt='%.1f') np.savetxt('temperature3.csv', t, delimiter=',', fmt='%.1f', header='celsius,fahrenheit') np.savetxt('temperature4.csv', t, delimiter=',', fmt='%.1f', header='celsius,fahrenheit', comments='') # ## loadtxt s4=np.loadtxt('temperature4.csv', delimiter=',', skiprows=1) s4 s3=np.loadtxt('temperature3.csv', delimiter=',') s3 s2=np.loadtxt('temperature2.csv', delimiter=',') s2 s=np.loadtxt('temperature.csv') s x=np.loadtxt('temperature.csv', usecols=(0, 1)) x
numpy_savetxt_loadtxt.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Q1. Behaviour of the Complex Plane import numpy as np import matplotlib.pyplot as plt import cmath def mandel(c, i): z = c for l in range(i): z = z**2 + c mset = abs(z) < 50 return mset # + xl = np.linspace(-2, 2, 500) yl = np.linspace(-2, 2, 500) c = xl[:,np.newaxis]+yl[np.newaxis,:]*1j mset = mandel(c, 50) # - plt.figure(figsize = (6, 6)) plt.pcolormesh(xl, yl, mset.T, cmap='viridis') plt.title("Mandelbrot Set") plt.xlabel("Real") plt.ylabel("Imaginary") plt.show() def mandel(xlim, ylim, max_iter = 50, epsilon = 50): xl = np.linspace(-2, 2, xlim) yl = np.linspace(-2, 2, ylim) x, y = np.meshgrid(xl, yl, sparse=True) mset = x + y*1j ycount=0 for i in mset: xcount=0 for j in i: c = j z = c m = 0 while abs(z) < epsilon and m < max_iter: z = z**2 + c m+=1 mset[ycount][xcount] = m xcount+=1 ycount+=1 return xl, yl, abs(mset) x, y, mset = mandel(500, 500, max_iter=100, epsilon=10) # + plt.figure(figsize=(8, 6)) plt.pcolormesh(x, y, mset, cmap='viridis') plt.colorbar() #plt.xlim([-0.75, -0.25]) #plt.ylim([0.25, 0.75]) plt.show() # - # # Q2. 
Epidemics # + import scipy.integrate as itg t = np.arange(0, 4000) N = 1000 g = 0.0001 #how quickly do people recover b = 0.01 #how infectious is the disease a = 0.00005 #how deadly is the disease I0 = 1 S0 = 999 D0 = 0 R0 = N - S0 - I0 - D0 def prime(iv, t, N, g, b): S, I, R, D = iv dSdt = -b*S*I/N dIdt = b*S*I/N - g*I - a*I dRdt = g*I dDdt = a*I return dSdt, dIdt, dRdt, dDdt y0 = S0, I0, R0, D0 S, I, R, D = itg.odeint(prime, y0, t, args=(N, g, b)).T plt.plot(S, label='Susceptible') plt.plot(I, label='Infected') plt.plot(R, label='Recovered') plt.plot(D, label='Dead') plt.xlabel("Days Since Patient Zero") plt.ylabel("Population") plt.legend() #plt.title("Infectious, Slow Recovery, Somewhat Deadly") #plt.savefig("withD.png") plt.show() # -
assignment2/assignment2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Searching for motifs # ## Using MEME to find motifs # [MEME](https://meme-suite.org/meme/) must be installed to use this functionality. `find_motifs` searches for motifs upstream of all genes in an iModulon. The `gene_table` must contain the columns `accession` and `operon` for this function to work (see `notebooks/gene_annotation.ipynb`). # # `find_motifs` supports many of the command-line options for MEME: # # * `outdir`: Directory for output files # * `palindrome`: If True, limit search to palindromic motifs (default: False) # * `nmotifs`: Number of motifs to search for (default: 5) # * `upstream`: Number of basepairs upstream from first gene in operon to include in motif search (default: 500) # * `downstream`: Number of basepairs upstream from first gene in operon to include in motif search (default: 100) # * `verbose`: Show steps in verbose output (default: True) # * `force`: Force execution of MEME even if output already exists (default: False) # * `evt`: E-value threshold (default: 0.001) # * `cores` Number of cores to use (default: 8) # * `minw`: Minimum motif width in basepairs (default: 6) # * `maxw`: Maximum motif width in basepairs (default: 40) # * `minsites`: Minimum number of sites required for a motif. Default is the number of operons divided by 3. from pymodulon.motif import * from pymodulon.io import load_json_model ica_data = load_json_model('../putidaPRECISE321.json') pputida_fasta = "../data/sequence_files/genome.fasta" for i in ica_data.imodulon_table.index: motifs = find_motifs(ica_data, i, pputida_fasta, outdir='./motif_search/tmp_maxw_30', maxw=30) # This `MotifInfo` object is automatically stored as a dictionary in the IcaData object. It will persist after saving and re-loading the IcaData object. 
ica_data.motif_info # ## Using TOMTOM to compare motifs against external databases # Once you have a motif from MEME, you can use [TOMTOM](https://meme-suite.org/meme/tools/tomtom) to compare your motif against external databases. The `compare_motifs` function makes this process simple. # # The `MotifInfo` object generated in the `find_motifs` function contains the MEME file location, which is the primary input for `compare_motifs`. for i in ica_data.motif_info: compare_motifs(ica_data.motif_info[i], verbose=False) # # Generating a table summarizing results of motif search # + motif_table=pd.DataFrame(index=list) for i in motif_table.index: a = ica_data.motif_info[i].sites if i != 'PvdS': motif_table.loc[i,'Oplists'] = a.loc['MEME-1'][-a.loc['MEME-1','pvalue'].isna()].index else: motif_table.loc[i,'Oplists'] = a.loc['MEME-4'][-a.loc['MEME-4','pvalue'].isna()].index # - for i in motif_table.index: motif_table.loc[i,'Op_num']=len(motif_table.loc[i,'Oplists']) motif_table.loc[i,'Op_total_num']=len(ica_data.view_imodulon(i).operon.unique()) gene_num=0 for j in motif_table.loc[i,'Oplists']: gene_num = gene_num+len(ica_data.view_imodulon(i).operon[ica_data.view_imodulon(i).operon == j]) motif_table.loc[i,'motif_gene_num'] = gene_num motif_table.loc[i,'gene_total_num']=len(ica_data.view_imodulon(i)) if i != 'PvdS': motif_table.loc[i,'consensus'] = ica_data.motif_info[i].motifs.loc['MEME-1','consensus'] motif_table.loc[i,'width'] = ica_data.motif_info[i].motifs.loc['MEME-1','width'] else: motif_table.loc[i,'consensus'] = ica_data.motif_info[i].motifs.loc['MEME-4','consensus'] motif_table.loc[i,'width'] = ica_data.motif_info[i].motifs.loc['MEME-4','width'] motif_table.drop(columns=['Oplists']).to_csv('./motif_search/motif_table.csv') from pymodulon.io import * save_to_json(ica_data,'../putidaPRECISE321.json')
notebooks/7_searching_for_motifs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href='http://www.holoviews.org'><img src="../../assets/hv+bk.png" alt="HV+BK logos" width="40%;" align="left"/></a> # <div style="float:right;"><h2>Exercise 3: Networks and GeoViews</h2></div> # + import numpy as np import pandas as pd import holoviews as hv import geoviews as gv import networkx as nx from bokeh.sampledata.airport_routes import airports, routes from holoviews.operation.datashader import datashade, directly_connect_edges, bundle_graph hv.extension('bokeh') # - # Note that if you see an error about ``airports.csv`` not existing when running the above line, make sure to run the *Downloading sample data* section of the [Setup notebook](../00_Setup.ipynb). # # # ### Example 1 # # In this exercise we will plot the locations of all US airports on a map. Before we get started let's inspect the ``airports`` dataframe using the ``head`` method. # Now declare a ``gv.Points`` element from the ``airports`` dataframe plotting the 'Longitude' and 'Latitude', assign the object to a variable, and display it. Be sure to think carefully about what kind of dimensions (key or value) those columns represent. # # <b><a href="#hint1" data-toggle="collapse">Hint</a></b> # # <div id="hint1" class="collapse"> # ``Points``, unlike ``Scatter``, has two key dimensions (``kdims``). It represents a two-dimensional space (in this case the position of each airport on a map). # </div> # <b><a href="#solution1" data-toggle="collapse">Solution</a></b> # # <div id="solution1" class="collapse"> # <br> # <code>points = gv.Points(airports, ['Longitude', 'Latitude']) # points</code> # </div> # # You should now be able to see that the dataset includes various US airbases in Europe, in addition to those actually located in US states. 
Let's focus only on airports in US states by using the ``select`` method to select airports between -180 and 0 in Longitude and above 0 degrees in Latitude, and assign that to your variable. # # <b><a href="#hint2" data-toggle="collapse">Hint</a></b> # # <div id="hint2" class="collapse"> # The select method allows selecting coordinates, multiple values and ranges of values, e.g. to select rows where the 'value' column has values between 0 and 100 use ``dataset.select(column=(0, 100))``. ``None`` can be used for any range start or end that you do not need to enforce. # </div> # <b><a href="#solution2" data-toggle="collapse">Solution</a></b> # # <div id="solution2" class="collapse"> # <br> # <code>points = points.select(Longitude=(-180, 0), Latitude=(0, None)) # points</code> # </div> # # Finally, overlay the points on a map tile source using the ``gv.WMTS`` element and the tile source URL provided below. Then adjust the width and height of the ``Points`` and enable the 'hover' tool. # # <b><a href="#hint3" data-toggle="collapse">Hint</a></b> # # <div id="hint3" class="collapse"> # Tools can be enabled on bokeh plots by supplying a list of tools, e.g. ``tools=['box_select', 'hover']``. # </div> url = 'https://maps.wikimedia.org/osm-intl/{Z}/{X}/{Y}.png' # <b><a href="#solution3" data-toggle="collapse">Solution</a></b> # # <div id="solution3" class="collapse"> # <br> # <code>%%opts Points [width=500 height=400 tools=['hover']] (size=2 color='black' fill_alpha=0) # gv.WMTS(url) * points</code> # </div> # # ### Example 2 # In this exercise we will plot a network graph of all the airport connections. As always, first inspect the relevant datasets. 
We've already seen the structure of the ``airports`` dataset, so let's look at the ``routes`` dataframe as well: # Let's make a NetworkX graph to work with this data about airport routes: g = nx.from_pandas_edgelist(routes, 'SourceID', 'DestinationID') # ``g`` is an abstract graph object, but we can make a visualizable ``hv.Graph`` object from it using the ``hv.Graph.from_networkx`` classmethod. ``Graph.from_networkx`` accepts the NetworkX graph as the first argument and a layout function such as ``nx.spring_layout`` as the second argument. Once it displays, reduce the ``node_size`` so you can make out the different nodes. # # <b><a href="#hint4" data-toggle="collapse">Hint</a></b> # # <div id="hint4" class="collapse"> # ``Graph`` style options are split into ``node_`` and ``edge_`` options. # </div> # <b><a href="#solution4" data-toggle="collapse">Solution</a></b> # # <div id="solution4" class="collapse"> # <br> # <code>%%opts Graph (node_size=4) # hv.Graph.from_networkx(g, nx.spring_layout)</code> # </div> # # If you hover over the nodes you will notice that while it includes an index the hover information is otherwise not very useful. We will now add additional node information by supplying a Dataset indexed by the AirportID. # # First, declare a ``hv.Dataset`` for the ``airports`` with the 'AirportID' as a key dimension and the 'Name' and 'TZ' (or timezone) as value dimensions. Then supply the Dataset as the third argument to the ``Graph.from_networkx`` function and check that the hover now shows more useful information. # # Finally, customize the plot by adjusting ``width`` and ``height``, adjusting the ``node_size`` and ``edge_line_width`` and add a ``color_index`` and a ``cmap`` of 'tab20'. # # <b><a href="#hint5" data-toggle="collapse">Hint</a></b> # # <div id="hint5" class="collapse"> # Ensure the ``Dataset`` with additional node info defines 'AirportID' as the sole key dimension. 
# </div> # <b><a href="#solution5" data-toggle="collapse">Solution</a></b> # # <div id="solution5" class="collapse"> # <br> # <code>%%opts Graph [width=600 height=600 color_index='TZ'] (node_size=4 cmap='tab20' edge_line_width=1) # ds = hv.Dataset(airports, 'AirportID', ['Name', 'TZ']) # hv.Graph.from_networkx(g, nx.spring_layout, ds)</code> # </div> # ### Example 3 # # In this exercise we will combine what we learned about geographic data with graph support to visualize the flight connections on a map. Since there are many more connections than we can easily view, we will count the number of connections between airports and select the fifty busiest. # + # Count the number of connections from each airport counts = routes.groupby('SourceID')[['Stops']].count().reset_index().rename(columns={'Stops': 'Connections'}) airports_df = pd.merge(airports, counts, left_on='AirportID', right_on='SourceID', how='left') # Select only airports located in US states & convert from Web Mercator to Latitudes/Longitudes airport_points = gv.Points(airports_df, ['Longitude', 'Latitude']).select(Longitude=(-180, 0), Latitude=(0, None)) projected_points = gv.operation.project_points(airport_points) busiest_ids = list(routes.groupby('SourceID').count().sort_values('Stops').iloc[-50:].index.values) tiles = gv.WMTS('https://maps.wikimedia.org/osm-intl/{Z}/{X}/{Y}@2x.png') # - # We now have the ``AirportIDs`` for the 50 busiest airports defined on the ``busiest_ids`` variable, a tile source defined on the ``tiles`` variable and below we have already defined a number of options and a ``Nodes`` element. # # Define an hv.Graph from the ``routes`` and ``nodes``, declaring 'SourceID' and 'DestinationID' as the key dimensions. Then use the ``graph.select`` method to select by ``AirportID`` using the ``busiest_ids``. Then overlay the selected Graph on top of the ``tiles``. Also try switching the ``selection_mode`` on the ``select`` method from 'edges' to 'nodes' and observe the difference. 
# # <b><a href="#hint6" data-toggle="collapse">Hints</a></b> # # <div id="hint6" class="collapse"> # When constructing a ``Graph`` supply the edges and nodes as tuple, e.g. ``hv.Graph((edges, nodes))``. # # The ``selection_mode`` is a special keyword of the Graph.select method. # </div> # + # %%opts Graph [width=800 height=800] (edge_selection_line_color='black' edge_hover_line_color='red') # %%opts Graph (node_size=8 edge_line_width=1 edge_line_alpha=0 edge_nonselection_line_alpha=0) nodes = hv.Nodes(projected_points, ['Longitude', 'Latitude', 'AirportID'], ['Name', 'City', 'Connections']) # - # <b><a href="#solution6" data-toggle="collapse">Solution</a></b> # # <div id="solution6" class="collapse"> # <br> # <code>%%opts Graph [width=800 height=800] (edge_selection_line_color='black' edge_hover_line_color='red') # # %%opts Graph (node_size=8 edge_line_width=1 edge_line_alpha=0 edge_nonselection_line_alpha=0) # nodes = hv.Nodes(projected_points, ['Longitude', 'Latitude', 'AirportID'], ['Name', 'City', 'Connections']) # # # Declare nodes, graph and tiles # graph = hv.Graph((routes, nodes), ['SourceID', 'DestinationID']) # # # Select 50 busiest airports # busiest_airports = graph.select(AirportID=busiest_ids, selection_mode='edges') # # tiles * busiest_airports</code> # </div>
examples/tutorial/exercises/Exercise-3-networks-and-geoviews.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: .venv # language: python # name: .venv # --- from pong_env import PongSoloEnv from stable_baselines3 import PPO env = PongSoloEnv() model = PPO("MlpPolicy", env, verbose=1, tensorboard_log="./ppo_tensorboard/") model.learn(total_timesteps=500_000) model.save("ppo") # + model = PPO.load("ppo") obs = env.reset() for i in range(1000): action, _states = model.predict(obs, deterministic=True) obs, reward, done, info = env.step(action) env.render() if done: obs = env.reset() env.close() # -
train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Run the codes in this sequence: A, B, C, D, E, F, G, H, I, J, K.** # # (A) Import libraries # + from threading import * import time from time import sleep from datetime import datetime from datetime import timedelta import rpy2.robjects as robjects from pymeasure.instruments import list_resources import ftd2xx # Thorlabs MFF101 import ftd2xx.defines as constants from ftd2xx import listDevices, getDeviceInfoDetail from newportxps import NewportXPS #motion controller from newportxps.XPS_C8_drivers import XPS, XPSException from collections import OrderedDict import yaqc import os import platform import numpy as np import csv from flipper import * #import flipper mirror codes from spectra import * #import spectrometer codes from pymeasure.instruments.lighthousephotonics import Sprout #import laser codes laser_power = Sprout('COM4') laser_power.adapter.connection.baud_rate = 19200 laser_power.adapter.connection.read_termination = '\r' laser_power.adapter.connection.write_termination = '\r' laser_power.power=0.01 # - # # (B) Activate motion controller # + xpsd_remoteip = '192.168.254.254' # xpsd_hostip ='192.168.0.254' hostname = 'XPS-1b81' xps = NewportXPS(xpsd_remoteip) print(xps.status_report()) xps.kill_group('XYZ') xps.initialize_allgroups() xps.home_allgroups() #pos_all() to get position of all axes def pos_all(): for sname, _ in xps.stages.items(): print('{}: {}'.format(sname, xps.get_stage_position(sname))) xps.kill_group('XYZ') xps.initialize_allgroups() xps.home_allgroups() # - # ## (C) Switch on the spectrometer capture_photo("start",2,1,0) # the values 2,1,0 don't matter, they are later used for indexing tasks # # (D) Prepare initial dataset # + power=[] time=[] pressure=[] gas=[] print("Enter number of initial samples: ") n=int(input()) 
for x in range(n):
    # One random (power, time, pressure) triple per line; each triple is
    # repeated 9 times because every line is measured at 3 spots x 3 repeats
    # (see section (H) below).
    powr=np.random.randint(150,1000)
    tm=np.random.randint(1001,5000)
    pr=np.random.randint(100,1000)
    for i in range(9):
        power.append(powr)
        time.append(tm)
        pressure.append(pr)
p=str(date.today())
# path=r'C:\Users\UWAdmin\line stuff'
# os.chdir(path)
# os.mkdir('Campaign ' + p)
# os.chdir('Campaign ' + p)
# Seed the CSVs: dataset.csv / dataset-pre.csv hold one row per repeat,
# data.csv holds the de-duplicated design (one row per line); the 'ratio'
# column stays empty until the Raman analysis fills it in.
row=['power','time','pressure','ratio']
with open('dataset.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(row)
    writer.writerows(zip(power,time,pressure))
with open('dataset-pre.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(row)
    writer.writerows(zip(power,time,pressure))
# os.chdir(path)
df2=pd.read_csv('dataset.csv')
df2=df2.drop_duplicates()
df2.head()
df2.to_csv('data.csv',index=False)
# -

# # (E) Defining threads
#
# -Thread for Mirror
#
# -Thread for switching the mirror on and off

# +
class Aa(Thread):
    """Runs the motion-controller trajectory while thread Bb drives the mirror."""
    def run(self):
        # NOTE(review): `time` here resolves to the stdlib module (re-imported
        # in section (K)), not the `time` list filled above -- confirm before
        # renaming either.
        if d['ramptime'][0]<0.5:
            time.sleep(.5-d['ramptime'][0])
        print("motion controller started")
        xps.run_trajectory('foreward',)
        time.sleep(total_time)
        laser_power.power=0.01
        print("finisherd and current position is:\n")
        pos_all()
        # FIX: the original ended with a bare `break` outside any loop, which
        # is a SyntaxError at class-definition time; removed (falling off the
        # end of run() is the intended behaviour).

class Bb(Thread):
    """Switches the ablation mirror on for the linear segment, then off."""
    def run(self):
        for i in range(2):
            if i==0:
                if d['ramptime'][0]<0.5:
                    print("mirror on")
                    start_time = time.monotonic()
                    mirror('on')
                    end_time = time.monotonic()
                    print(timedelta(seconds=end_time - start_time))
                    time.sleep(d['ramptime'][1]) #time for linear line
                else:
                    # Wait out the ramp-up phase before exposing the sample.
                    time.sleep(mirror_sleep)
                    print("mirror on")
                    start_time = time.monotonic()
                    mirror('on')
                    end_time = time.monotonic()
                    print(timedelta(seconds=end_time - start_time))
                    time.sleep(d['ramptime'][1]) #time for linear line
            else:
                mirror('off')
                print("mirror off")
# -

# # (F) Define coordinate details
#
# **Takes input for**
#
# -number of lines
#
# -starting position of x
#
# -starting position of y
#
# -vertical gap/step size

# +
df=pd.read_csv("dataset.csv")
df.head()
print("Enter number of lines, starting point for x axis, y axis and step size for y axis:")
lines,startx,starty,step_y= [x for x in input().split()]
print(lines,startx,starty,step_y)
# y coordinate of every line: starty, starty+step, starty+2*step, ...
move_y=[0 for i in range(int(lines))]
for i in range(int(lines)):
    # print(i)
    if i==0:
        move_y[i]=float(starty)
    if i>0 :
        move_y[i]=float(move_y[i-1])+float(step_y)
        # print(move_y[i-1]+float(step_y))
print("Lines will be made at following y axis:",move_y,"\n")
# -

# # (K) Begin the experiment

# +
df2=pd.read_csv('data.csv')
print("Pateerning is to be started\n")
import time
time.sleep(5)
cc=0
steps=0
for i in range(8):
    print("Line number: ",i)
    line=i
    #move the axes to their start position
    xps.move_stage('XYZ.Pos2',move_y[i])
    pos_all()
    print("\n\n")
    # Re-read the design each iteration: from line 4 on, the R optimiser
    # appends new proposed rows to data.csv.
    df2=pd.read_csv('data.csv')
    time_of_file=df2['time'][i]
    #scantime=time_of_file/1000
    #define the trajectory
    xps.define_line_trajectories(start=float(startx),
                                 stop=float(startx)+2,
                                 step=0.01,
                                 scantime=(time_of_file/1000),
                                 axis='Pos1')
    print("\nCurrent time is:",time_of_file)
    pressure_of_file=df2['pressure'][i]
    # Set chamber pressure (three attempts), then settle.
    for j in range(3):
        gopr(pressure_of_file)
    close_all()
    time.sleep(5)
    print("\n Pressure is now : ",pressure_of_file,"\n")
    # k+=3
    xps.download_trajectory('foreward.trj')
    import pandas as pd
    d = pd.read_csv('foreward.trj',header=None)
    # scantime 2 (see segment 1 or row1)
    d = d.loc[:, (d != 0).any(axis=0)]
    d.columns = ['ramptime','rampdist','rampvel']
    total_time=np.sum(d['ramptime'])
    mirror_sleep=d['ramptime'][0]-.5
    #power will be set to assigned value and make sure mirror is off and power is given enough time to reach it's value
    power_of_file=df2["power"][i]
    ij=3
    ##################
    print("pre-pattern checking\n")
    # Three probe spots spaced 0.4 mm along the line.
    check=[(float(startx)+.4),float(startx)+.8,(float(startx)+1.2)]
    mm=0
    for kk in range(3):
        xps.move_stage('XYZ.Pos1',check[kk])
        print(pos_all())
        for iii in range(3):
            mirror("on")
            #writing G/D of 3 spots in 3 lines inside dataset.csv
            # capture_photo("on",kk,line)
            print("hey")
            GD=capture_photo("first",kk,line,iii)
            mirror("off")
            time.sleep(5)
    ####################
    #experiment starts after pre-checking
    print("power is now:",power_of_file)
    # Back off by the ramp distance so the stage is at speed at the line start.
    xps.move_stage('XYZ.Pos1',(float(startx)-d['rampdist'][0]))
    pos_all()
    time.sleep(15)
    print("\n\n")
    mirror("off")
    a=power_of_file
    # Calibration: requested power -> laser driver setting.  NOTE(review):
    # constants 2.7979 / 0.4639 come from an external calibration -- confirm.
    laser_power.power=((a-2.7979)/0.4639)/1000
    time.sleep(15)
    ##Draw Lines
    time.sleep(5)
    import sys
    # NOTE(review): `a` is rebound from the power value to the thread object
    # here -- intentional in the original, kept as-is.
    a=Aa()
    b=Bb()
    # stop_threads = False
    a.start()
    b.start()
    a.join()
    b.join()
    time.sleep(10)
    print("\n Job done\n")
    ##raman spectra analysis
    laser_power.power=0.01
    time.sleep(15)
    xps.move_stage('XYZ.Pos1',6.5)
    xps.move_stage('XYZ.Pos2',9.5)
    xps.move_stage('XYZ.Pos3',9.3)
    ########################################
    #post patterning raman analysis
    check=[(float(startx)+.4),float(startx)+.8,(float(startx)+1.2)]
    mm=0
    for kk in range(3):
        xps.move_stage('XYZ.Pos1',check[kk])
        print(pos_all())
        for iii in range(3):
            mirror("on")
            #writing G/D of 3 spots in 3 lines inside dataset.csv
            # capture_photo("on",kk,line)
            print("hey")
            GD=capture_photo("on",kk,line,iii)
            mirror("off")
            time.sleep(5)
    if i<=3:
        take_median(steps,line)
        steps+=9
    if i>3:
        mv=take_median(steps,line)
        steps+=9
        # Feed the measured G/D ratio back into the optimiser state.
        if mv is None:
            rsum((0))
        else:
            mv=float(mv)
            rsum((mv))
        write_more()
        repeats()
    if i>=3:
        print("AI started thinking")
        # (Re-)register the R-side Bayesian-optimisation routine.
        # FIX: the original R source contained stray '\' line-continuations
        # (e.g. in the cat/paste and write.table calls); R has no backslash
        # continuation, so robjects.r() failed to parse the string.  The
        # backslashes are removed -- lines now break after commas, which R
        # continues naturally.
        robjects.r('''
        sourin<-function(s){
        if(s==1){
        require(XML)
        suppressWarnings({suppressMessages({
        library(mlrMBO)
        library(ggplot2)
        library(doMC)
        })})
        ps = makeParamSet(
        makeIntegerParam("power", lower = 151, upper =1500),
        makeIntegerParam("time", lower = 2000, upper = 3000),
        makeIntegerParam("pressure", lower = 100, upper = 130)
        )
        ctrl = makeMBOControl(y.name = "ratio")
        ctrl = setMBOControlInfill(ctrl, opt = "focussearch",opt.focussearch.maxit = 10, opt.focussearch.points = 10000, crit =makeMBOInfillCritCB(cb.lambda=1))
        #dataset-2 should be replaced and later this line as well
        data=read.csv("data.csv")
        data<-na.omit(data)
        suppressMessages({opt.state = initSMBO(par.set = ps,design = data, control = ctrl, minimize = FALSE, noisy = TRUE)})
        print("Proposed parameters:")
        prop <- suppressWarnings({proposePoints(opt.state)})
        print(prop$prop.points)
        print(prop$crit.components)
        saveRDS(prop,'./prop.rds')
        saveRDS(opt.state,'./opt.state.rds')
        dummy_ratio=0
        cat("Expected value (upper bound):\n")
        cat(paste(prop$crit.components$mean, " (",
        prop$crit.components$mean + prop$crit.components$se, ")\n", sep = ""))
        x<-c(prop$prop.points,dummy_ratio,prop$crit.components$mean,
        prop$crit.components$mean + prop$crit.components$se)
        x2<-c(prop$prop.points)
        #data_plot=read.csv("plot_data.csv")
        #dataset-2 should be replaced and later this line as well
        write.table(x2, file = "data.csv", sep = ",",
        append = TRUE, quote = FALSE,col.names = FALSE, row.names = FALSE)
        #write.table(x, file = "plot_data.csv", sep = ",", append = TRUE, quote = FALSE,col.names = FALSE, row.names = FALSE)
        } else{
        prop=readRDS('./prop.rds')
        opt.state=readRDS('./opt.state.rds')
        library(mlrMBO)
        print(prop$crit.components)
        print(prop$prop.points)
        print("Proposed parameters:")
        updateSMBO(opt.state, x = prop$prop.points, y = s)
        prop <- suppressWarnings({proposePoints(opt.state)})
        print(prop$prop.points)
        print(prop$crit.components)
        saveRDS(prop,'./prop.rds')
        saveRDS(opt.state,'./opt.state.rds')
        dummy_ratio=0
        cat("Expected value (upper bound):\n")
        cat(paste(prop$crit.components$mean, " (", prop$crit.components$mean + prop$crit.components$se, ")\n", sep = ""))
        x<-c(prop$prop.points,dummy_ratio,prop$crit.components$mean,prop$crit.components$mean + prop$crit.components$se)
        x2<-c(prop$prop.points)
        #data_plot=read.csv("plot_data.csv")
        #dataset-2 should be replaced and later this line as well
        write.table(x2, file = "data.csv", sep = ",", append = TRUE, quote = FALSE,col.names = FALSE, row.names = FALSE)
        #write.table(x, file = "plot_data.csv", sep = ",", append = TRUE, quote = FALSE,col.names = FALSE, row.names = FALSE)
        }}
        ''')
    if i==3:
        ## AI will start working once the initial design has 4 data points (lines 0-3)
        rsum=robjects.r['sourin']
        rsum((1))
        write_more()
        repeats()
# -

# # (G) Parameters predicted by BO is written on data.csv.
# This is further written on dataset.csv and dataset-pre.csv using write_more() function

def write_more():
    """Copy the newest BO-proposed row of data.csv onto the end of
    dataset.csv and mirror the updated frame to dataset-pre.csv.

    The new row carries power/time/pressure only; 'ratio' stays NaN until
    the Raman measurement fills it in.
    """
    d=pd.read_csv('data.csv')
    ln=d.shape[0]
    vpower=d['power'][ln-1]
    vtime=d['time'][ln-1]
    vpressure=d['pressure'][ln-1]
    d1=pd.read_csv('dataset.csv')
    ln=d1.shape[0]
    d1.loc[ln,"power"]=vpower
    d1.loc[ln,"time"]=vtime
    d1.loc[ln,"pressure"]=vpressure
    d1.to_csv('dataset.csv',index=False)
    d1.to_csv('dataset-pre.csv',index=False)

# # (H) Each line has 3 spots to check and each spot 3 times. so in the csv 9 rows of data for each line
#
# **This is done using repeats() function**

def _insert_row_copies(filename, rowvals, position, count=8):
    """Insert `count` copies of `rowvals` into the CSV `filename` at list
    index `position` (counted including the header row).

    Helper extracted from repeats(): the original duplicated this
    read-insert-rewrite loop verbatim for both files.
    """
    for _ in range(count):
        with open(filename, "r") as infile:
            reader = list(csv.reader(infile))
        # Inserting past the end simply appends, matching the original.
        reader.insert(position, rowvals)
        with open(filename, "w", newline='') as outfile:
            writer = csv.writer(outfile)
            writer.writerows(reader)

def repeats():
    """Duplicate the last design row 8 more times in dataset.csv and
    dataset-pre.csv, so each line contributes 9 measurement rows."""
    df2=pd.read_csv('dataset.csv')
    ln=len(df2['power'])
    m=ln
    print(ln)
    counter=m
    # The inserted rows omit the 'ratio' field on purpose (3 of 4 columns),
    # exactly as the original wrote them.
    toAdd = [df2['power'][m-1],df2['time'][m-1],df2['pressure'][m-1]]
    _insert_row_copies("dataset.csv", toAdd, counter+1)
    _insert_row_copies("dataset-pre.csv", toAdd, counter+1)

# # (I) For the 9 readings of a single line, median is taken. it's done by take_median() function

def take_median(i,idx):
    """Median of the 9 ratio readings in dataset.csv rows [i, i+9).

    NaNs are dropped before taking the median (the original sorted first,
    which has no effect on the median and was removed).  The result is
    written into data.csv at row `idx` and also returned.
    """
    print(os.getcwd())
    df=pd.read_csv('dataset.csv')
    # Nine consecutive readings for this line (3 spots x 3 repeats).
    vals=[s for s in df['ratio'][i:i+9] if str(s)!='nan']
    result=np.median(vals)
    df2=pd.read_csv('data.csv')
    df2.loc[idx,"ratio"]=result
    df2.to_csv('data.csv',index=False)
    return result

# # (J) To make sure that the material gets laser beam of max. intensity , focusing of z axis needs to be properly adjusted. this is done by adjust()

def adjust():
    """Scan a fixed set of z positions, record a photo-intensity reading at
    each, and park the z stage at the position of maximum intensity.

    Returns the list of recorded intensities.
    """
    line=1
    start = timeit.default_timer()
    zz=[8.7,8.8,8.9,9,9.1,9.2,9.3,9.4]
    intensity=[]
    for i in range(len(zz)):
        xps.move_stage('XYZ.Pos3',zz[i])
        time.sleep(5)
        # NOTE(review): capture_photo is assumed to return a scalar
        # intensity here -- confirm against its definition.
        p=capture_photo('adjust',i,line)
        intensity.append(p)
        mirror('off')
    stop = timeit.default_timer()
    #print('Time: ', stop - start)
    print(intensity)
    ix=intensity.index(np.max(intensity))
    xps.move_stage('XYZ.Pos3',zz[ix])
    print("adjusted z position is: ",zz[ix])
    return intensity
line codes/patterning lines.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from typing import List

# Draw grid lines behind the plotted data in all figures.
plt.rcParams['axes.axisbelow'] = True
# -

def read_data(ilp_paths: List[Path], fpt_paths: List[Path]) -> pd.DataFrame:
    """Load ILP and FPT benchmark pickles and merge them into one frame.

    ILP runs are labelled Basic/Single/Sparse from their constraint flags;
    FPT runs are labelled "<selector> <lower_bound> <search_strategy>".
    Times are converted from nanoseconds to seconds, and runs exceeding
    100 s are uniformly marked unsolved.
    """
    ilp_df = pd.concat(map(pd.read_pickle, ilp_paths))
    fpt_df = pd.concat(map(pd.read_pickle, fpt_paths))
    ilp_df["name"] = "Basic"
    ilp_df.loc[ilp_df["single_constraints"], "name"] = "Single"
    ilp_df.loc[ilp_df["sparse_constraints"], "name"] = "Sparse"
    fpt_df["name"] = fpt_df.apply(lambda row: f"{row['selector']} {row['lower_bound']} {row['search_strategy']}", axis=1)
    # FPT logs per-search-step lists; ILP has no per-step data, hence NaN.
    fpt_df["total_calls"] = fpt_df["calls"].apply(sum)
    ilp_df["total_calls"] = np.nan
    # "last_*" = value of the final search step (times: ns -> s).
    fpt_df["last_time"] = fpt_df["time"].str[-1].astype(float) / 10**9
    fpt_df["last_k"] = fpt_df["k"].str[-1].astype(float)
    fpt_df["last_calls"] = fpt_df["calls"].str[-1].astype(float)
    ilp_df["last_time"] = np.nan
    ilp_df["last_k"] = np.nan
    ilp_df["last_calls"] = np.nan
    # Keep only the columns both sources share before concatenating.
    headers = list(set(ilp_df.columns) & set(fpt_df.columns))
    df = pd.concat([ilp_df[headers], fpt_df[headers]])
    df["total_time"] = df["total_time"] / 10**9
    # Negative times encode failed measurements.
    df.loc[df["total_time"] < 0, "total_time"] = np.nan
    df["solution_size"] = df.solutions.apply(lambda x: len(x[0]["edits"]) if len(x) > 0 else np.nan)
    # Enforce the 100 s timeout uniformly across solvers.
    df.loc[df["total_time"] > 100, "solved"] = False
    return df

def plot_solved_by_time_curve(df, output_path: Path, *, names : List[str] = None, labels : List[str] = None, min_number_of_solutions: int = None, y: str = "time"):
    """Plot, per algorithm `name`, the number of solved instances as a
    function of the metric `y` (cactus/survival plot).

    NOTE(review): `output_path` is currently unused -- the figure is shown
    but never saved; presumably a plt.savefig call was removed.  Also, the
    default y="time" is not a key of the label dict below, so calling
    without an explicit `y` raises KeyError -- confirm intended default.
    """
    if min_number_of_solutions is None:
        min_number_of_solutions = 0
    if names is None:
        names = list(df["name"].unique())
    if labels is None:
        labels = names
    y_label = dict(total_time="Total Time [s]", total_calls="Total Calls",
                   last_time="Time of last search step [s]",
                   last_calls="Number of calls of last search step",
                   solution_cost="Solution cost", solution_size="Solution size")[y]
    d = dict()
    for name in names:
        g = df.loc[df["name"] == name]
        # Keep instances whose (first) solution has at least
        # min_number_of_solutions edits; unsolved rows pass through.
        g = g.loc[g["solutions"].apply(lambda x: len(x[0]["edits"]) >= min_number_of_solutions if len(x) != 0 else True)]
        solved = g["solved"]
        t = pd.Series(g[y])  # .astype(float)
        # Unsolved runs are blanked so they never appear on the curve.
        t[~solved] = np.nan  # t.max() * 1.5
        d[name] = t.values
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_xscale("log")
    ax.grid(True)
    for name, label in zip(names, labels):
        ax.plot(np.sort(d[name]), range(len(d[name])), label=label)
    # Reference lines at zero and at the total instance count.
    for y_max in (0, len(list(d.values())[0])):
        ax.axhline(y=y_max, c="darkgrey")
    ax.set_ylim((-50, None))
    if "time" in y:
        ax.set_xlim((10**-3, 10**2))
    ax.set_ylabel("Number of solved instances")
    ax.set_xlabel(y_label)
    ax.legend(loc="upper left")
    # fig.legend(loc="upper left", bbox_to_anchor=(0.9, 0.9))
    plt.show()

# +
ilp_paths = list((Path.cwd() / "../experiments/C4P4/").glob("ilp*/*.solutions.df.gzip"))
fpt_paths = list((Path.cwd() / "../experiments/C4P4/").glob("fpt*/*.solutions.df.gzip"))

df = read_data(ilp_paths, fpt_paths)
subset_df = df[df["dataset"] == "bio-C4P4-subset"]
bio_df = df[df["dataset"] == "bio"]
# -

# One set of comparison plots per metric.
for y in ["solution_cost", "solution_size", "total_time", "total_calls", "last_time", "last_calls"]:
    plot_solved_by_time_curve(bio_df, Path(f"solved-curve-ilp-vs-fpt-bio-{y}.pdf"),
                              names=["Sparse", "MostAdjacentSubgraphs SortedGreedy Fixed"],
                              labels=["ILP Sparse", "FPT, known $k^*$"],
                              min_number_of_solutions=10, y=y)
    plot_solved_by_time_curve(bio_df, Path(f"solved-curve-ilp-vs-fpt-{y}.pdf"),
                              names=["Basic", "Single", "Sparse",
                                     "MostAdjacentSubgraphs SortedGreedy Exponential",
                                     "MostAdjacentSubgraphs SortedGreedy Fixed"],
                              labels=["ILP", "ILP Single", "ILP Sparse",
                                      "FPT, estimated exponential growth", "FPT, known $k^*$"],
                              min_number_of_solutions=10, y=y)
    plot_solved_by_time_curve(bio_df, Path(f"solved-curve-search-strategies-{y}.pdf"),
                              names=["MostAdjacentSubgraphs SortedGreedy Exponential",
                                     "MostAdjacentSubgraphs SortedGreedy PrunedDelta",
                                     "MostAdjacentSubgraphs SortedGreedy IncrementByMinCost",
                                     "MostAdjacentSubgraphs SortedGreedy IncrementByMultiplier",
                                     "MostAdjacentSubgraphs SortedGreedy Fixed"],
                              labels=["Exponential growth estimation", "Prune preventention",
                                      "Increment by minimum cost", "Increment by 1", "Known $k^*$"],
                              min_number_of_solutions=10, y=y)
    plot_solved_by_time_curve(subset_df, Path(f"solved-curve-lower-bounds-exponential-{y}.pdf"),
                              names=["MostAdjacentSubgraphs Greedy Exponential",
                                     "MostAdjacentSubgraphs LocalSearch Exponential",
                                     "MostAdjacentSubgraphs SortedGreedy Exponential",
                                     "MostAdjacentSubgraphs Trivial Exponential"],
                              labels=["Simple packing", "Local search", "Greedy lower bound", "No lower bound"],
                              min_number_of_solutions=10, y=y)
    plot_solved_by_time_curve(subset_df, Path(f"solved-curve-selectors-exponential-{y}.pdf"),
                              names=["MostAdjacentSubgraphs SortedGreedy Exponential",
                                     "FirstFound SortedGreedy Exponential",
                                     "MostMarkedPairs SortedGreedy Exponential"],
                              labels=["Most adjacent subgraphs", "First subgraph found",
                                      "Most marked vertex pairs"],
                              min_number_of_solutions=10, y=y)

# +
# Histograms of instance size / solution size / cost for solved instances.
for x in ["n", "solution_size", "solution_cost"]:
    fig, ax = plt.subplots(figsize=(8, 3))
    #ax.set_yscale("log")
    #ax.set_xlim((0, 400))
    for name in ["Sparse", "MostAdjacentSubgraphs SortedGreedy Fixed"]:
        a = bio_df[(bio_df["name"] == name) & (bio_df["solution_size"] >= 10)]
        ax.hist(a.loc[a["solved"], x], alpha=1, bins=50)
        #ax.scatter(a["n"], a["total_time"], s=10, alpha=0.25)
    plt.show()
# -

# Pairwise ILP-vs-FPT running-time scatter plots.
for a_name, a_label, b_name, b_label in [
        ("Basic", "ILP", "MostAdjacentSubgraphs SortedGreedy Exponential", "FPT"),
        ("Sparse", "ILP Sparse", "MostAdjacentSubgraphs SortedGreedy Exponential", "FPT")]:
    fig, ax = plt.subplots(figsize=(4.2, 4))
    ax.set_aspect("equal")
    ax.set_xlim((10**-5, 10**3))
    ax.set_ylim((10**-5, 10**3))
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.grid(True)
    ax.set_xlabel(f"{a_label} Total Time [s]")
    ax.set_ylabel(f"{b_label} Total Time [s]")
    a = subset_df[subset_df["name"] == a_name].copy().set_index("instance")
    b = subset_df[subset_df["name"] == b_name].copy().set_index("instance")
    b = b.loc[a.index,:]
    # Unsolved / timed-out runs are pinned just outside the 10^2 border.
    a.loc[(~a["solved"]) | (a["total_time"] > 10**2), "total_time"] = 10**2.5
    b.loc[(~b["solved"]) | (b["total_time"] > 10**2), "total_time"] = 10**2.5
    ax.scatter(a["total_time"], b["total_time"], s=15, c="C0", alpha=0.5)
    # Diagonal y = x reference line.
    x = np.logspace(-5, 3, 10)
    ax.plot(x, x, "k")
    fig.tight_layout()
    plt.savefig(f"solved-scatter-{a_name.replace(' ', '-')}-vs-{b_name.replace(' ', '-')}.pdf")
    plt.show()

subset_df.loc[(subset_df["name"] == "Sparse") & (subset_df["total_time"] <= 100), "solved"].sum()

df["name"].unique()

# +
# last_k vs. (last_k / solution cost) per search strategy.
names = ["MostAdjacentSubgraphs SortedGreedy Exponential",
         "MostAdjacentSubgraphs SortedGreedy PrunedDelta",
         "MostAdjacentSubgraphs SortedGreedy IncrementByMinCost",
         "MostAdjacentSubgraphs SortedGreedy IncrementByMultiplier",
         "MostAdjacentSubgraphs SortedGreedy Fixed"]

fig, ax = plt.subplots()
#ax.set_yscale("log")
ax.grid(True)
#ax.set_ylim((10**-4, 10**5))
for name in names:
    a = df[(df["name"] == name) & df["solved"]]
    ax.scatter(a["last_k"], a["last_k"] / a["solution_cost"], label=name)
ax.legend()
plt.show()
notebooks/solved-by-x-changed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()

from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from skimage.transform import pyramid_gaussian
from sklearn.model_selection import train_test_split
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
# import matplotlib.pyplot as plt
from inspect import signature

# print(glob.glob("../ILIYAN Master Thesis/Dataset/*"))

# %matplotlib inline
# %precision 2
# -

df = pd.read_csv('dataset/features_norm.csv', index_col=0)
df_t1 = pd.read_csv('dataset/fs/df_top_1.csv', index_col=0)
df_t1t2 = pd.read_csv('dataset/fs/df_top_1_and_2.csv', index_col=0)
df_rf = pd.read_csv('dataset/fs/df_embeded_rf_feature.csv', index_col=0)

def draw_confusionmatrix(y_test, y_hat):
    """Render the confusion matrix as a heatmap and print the overall
    accuracy together with the per-class classification report."""
    plt.figure(figsize=(10, 7))
    matrix = confusion_matrix(y_test, y_hat)
    sns.heatmap(matrix, annot=True, fmt="d")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    acc = accuracy_score(y_test, y_hat)
    print(f"Sum Axis-1 as Classification accuracy: {acc}")
    print('\n')
    print(classification_report(y_test, y_hat))
    print('\n')

def gen_train_and_test(df, test_size=0.20, random_state=42):
    """Split a frame carrying a 'Class' label column into train/test
    feature matrices and label vectors."""
    features = df.loc[:, df.columns != 'Class']
    labels = df.Class
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=test_size, random_state=random_state)
    return X_train, X_test, y_train, y_test

def split_train_and_test(X, y, test_size=0.20, random_state=42):
    """Split pre-separated features `X` and labels `y` into train/test sets."""
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
    return X_train, X_test, y_train, y_test

def start_model(df, model):
    """Fit `model` on a fresh split of `df` and report its test performance."""
    X_train, X_test, y_train, y_test = gen_train_and_test(df)
    model.fit(X_train, y_train)
    draw_confusionmatrix(y_test, model.predict(X_test))

def start_grid_cv(df, cv=5, n_jobs=-1):
    """Exhaustively grid-search SVC hyper-parameters on `df`, print the
    cross-validated accuracy/F1 of the fitted search plus the winning
    parameter set, and return that parameter set."""
    param_grid = {
        'kernel': ('linear', 'rbf'),
        'C': (0.001, 0.01, 0.1, 0.25, 0.5, 0.75, 1, 10),
        'gamma': (0.001, 0.01, 0.1, 1, 2, 3, 'auto'),
        'decision_function_shape': ('ovo', 'ovr'),
        'shrinking': (True, False),
    }
    X_train, X_test, y_train, y_test = gen_train_and_test(df)
    clf = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, cv=cv, n_jobs=n_jobs)
    clf.fit(X_train, y_train)
    print("avg accuracy:" + str(np.average(cross_val_score(clf, X_train, y_train, scoring='accuracy'))))
    print("avg f1:" + str(np.average(cross_val_score(clf, X_train, y_train, scoring='f1'))))
    best_params = clf.best_params_
    print(best_params)
    return best_params

def _svc_from(params):
    """Instantiate an SVC from a start_grid_cv() parameter dict."""
    return svm.SVC(kernel=params['kernel'],
                   C=params['C'],
                   decision_function_shape=params['decision_function_shape'],
                   gamma=params['gamma'],
                   shrinking=params['shrinking'])

# ### Using "df"

params = start_grid_cv(df)
svm_gscv = _svc_from(params)
start_model(df, model=svm_gscv)

params = start_grid_cv(df, cv=10)
svm_gscv = _svc_from(params)
start_model(df, model=svm_gscv)

# ### Using "df_rf"

params = start_grid_cv(df_rf)
svm_gscv = _svc_from(params)
start_model(df_rf, model=svm_gscv)

params = start_grid_cv(df_rf, cv=10)
svm_gscv = _svc_from(params)
start_model(df_rf, model=svm_gscv)

# ### Using "df_t1"

params = start_grid_cv(df_t1)
svm_gscv = _svc_from(params)
start_model(df_t1, model=svm_gscv)

params = start_grid_cv(df_t1, cv=10)
svm_gscv = _svc_from(params)
start_model(df_t1, model=svm_gscv)

# ### Using "df_t1t2"

params = start_grid_cv(df_t1t2)
svm_gscv = _svc_from(params)
start_model(df_t1t2, model=svm_gscv)

params = start_grid_cv(df_t1t2, cv=10)
svm_gscv = _svc_from(params)
start_model(df_t1t2, model=svm_gscv)
SVM Optimization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.7 64-bit (''biomass'': conda)'
#     language: python
#     name: python3
# ---

# ## Exploratory Notebook for GEDI Level 4A Data
#
# Eventually this notebook should be incorporated into the src.data module.
#
# Code based on this [NASA tutorial](https://github.com/ornldaac/gedi_tutorials/blob/main/1_gedi_l4a_search_download.ipynb).

# Autoreload source modules
# To immediately pick up code changes in src/
# %load_ext autoreload
# %autoreload 2

### Imports
import datetime as dt
import geopandas as gpd
import os
import pandas as pd
import requests
import concurrent
from tqdm.autonotebook import tqdm
import h5py
import numpy as np
# FIX: Polygon/MultiPolygon/box are used below but were never imported,
# raising NameError on first use.  shapely ships as a geopandas dependency.
from shapely.geometry import Polygon, MultiPolygon, box

# +
### Constants
doi = '10.3334/ORNLDAAC/1907'  # GEDI L4A DOI
cmrurl='https://cmr.earthdata.nasa.gov/search/' # CMR API base url
dt_format = '%Y-%m-%dT%H:%M:%SZ' # CMR date time formatting string

# Spatial boundary
# (lon_min, lat_min, lon_max, lat_max)
bound = (-73.9872354804, -33.7683777809, -34.7299934555, 5.24448639569) # Brazil

# Temporal boundary
# start_date = dt.datetime(2019, 4, 18)
# end_date = dt.datetime(2020, 9, 2)
# Data for the full GEDI date range in Brazil is 2.3 TB
start_date = dt.datetime(2019, 7, 1)
end_date = dt.datetime(2019, 7, 31) # Around 150 GB of data
# -

# +
# Get NASA EarthData's unique identifier for this dataset
doisearch = cmrurl + 'collections.json?doi=' + doi
concept_id = requests.get(doisearch).json()['feed']['entry'][0]['id']

# Use appropriate formatting for the space/time bounds
temporal_str = start_date.strftime(dt_format) + ',' + end_date.strftime(dt_format)
bound_str = ','.join(map(str, bound))
# -

# +
### Fetch a list of the URLs for the granules of interest
page_num = 1
page_size = 2000 # CMR page size limit

granule_arr = []

while True:
    cmr_params = {
        "collection_concept_id": concept_id,
        "page_size": page_size,
        "page_num": page_num,
        "temporal": temporal_str,
        "bounding_box[]": bound_str
    }
    granulesearch = cmrurl + 'granules.json'
    response = requests.get(granulesearch, params=cmr_params)
    granules = response.json()['feed']['entry']
    if granules:
        for g in granules:
            granule_url = ''
            granule_poly = ''
            # Read the file size in Mb
            granule_size = float(g['granule_size'])
            # Read the bounding polygons in this granule
            if 'polygons' in g:
                polygons= g['polygons']
                multipolygons = []
                for poly in polygons:
                    # CMR returns "lat lon lat lon ..."; pair them up and
                    # flip to (lon, lat) for shapely.
                    i = iter(poly[0].split (" "))
                    lat_lon = list(map(" ".join,zip(i,i)))
                    multipolygons.append(Polygon([[float(p.split(" ")[1]), float(p.split(" ")[0])] for p in lat_lon]))
                granule_poly = MultiPolygon(multipolygons)
            # Get URL to HDF5 files
            for links in g['links']:
                if 'title' in links and links['title'].startswith('Download') and links['title'].endswith('.h5'):
                    granule_url = links['href']
            granule_arr.append([granule_url, granule_size, granule_poly])
        page_num += 1
    else:
        break

# Add the spatial bounding box to the dataframe so we can plot it
b = list(bound)
granule_arr.append(['bound', 0, box(b[0], b[1], b[2], b[3])])

# Create a pandas dataframe
l4adf = pd.DataFrame(granule_arr, columns=["granule_url", "granule_size", "granule_poly"])

# Drop granules with empty geometry
l4adf = l4adf[l4adf['granule_poly'] != '']

# Print metrics about the granules we will attempt to download
print ("Total granules found: ", len(l4adf.index)-1)
print ("Total file size (MB): ", l4adf['granule_size'].sum())
# -

# +
### Plot the granules using geopandas
gdf = gpd.GeoDataFrame(l4adf, geometry=l4adf.granule_poly)
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
base = world.plot(color='white', edgecolor='black', figsize = (7, 7))
# last row contains the bounding box (Red)
ax= gdf[-1:].plot(ax=base, color='white', edgecolor='red', alpha=0.5)
# all but the last row contains granule bounding geometry (Green)
ax= gdf[:-1].plot(ax=base, color='green', edgecolor='green', alpha=0.7)
minx, miny, maxx, maxy = gdf[-1:].geometry.total_bounds
ax.set_xlim(minx-1, maxx+1)
ax.set_ylim(miny-1, maxy+1)
# -

# drop duplicate URLs if any and save to CSV
l4a_granules = l4adf[:-1].drop_duplicates(subset=['granule_url'])
l4a_granules.to_csv('granules.txt', columns = ['granule_url'], index=False, header = False)

links = l4a_granules.granule_url
for i in range(4):
    print(links[i])

# +
# Simple approach to download all specified files
# TODO: Parallelize: note that session is not thread-safe
s = requests.Session()

# Login to get session cookies in s.cookies
# FIX: the committed "<PASSWORD>" redaction placeholder was invalid syntax.
# TODO: define USER, PASSWORD and URL before running (see the dotenv cell
# below for loading credentials from a .env file).
login_data = {"login": USER, "password": PASSWORD}
s.post(URL, data=login_data)

links = l4a_granules.granule_url[6:36]
for link in tqdm(links):
    local_filename = link.split('/')[-1]
    with s.get(link, stream=True) as r:
        size_bytes = int(r.headers.get('content-length', 0))
        progress_bar = tqdm(total=size_bytes, unit='iB', unit_scale=True)
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                progress_bar.update(len(chunk))
                f.write(chunk)
        progress_bar.close()
# -

# +
from dotenv import dotenv_values
import os

URL = "https://urs.earthdata.nasa.gov/cookies"

config = dotenv_values("/home/users/ah2174/gedi-biomass-mapping/.env")
print(config)

s = requests.Session()
# Login to get session cookies in s.cookies
login_data = {"login": config["EARTHDATA_USER"], "password": config["EARTHDATA_PASSWORD"]}
s.post(URL, data=login_data)
s.cookies
# -

# +
TEST_FILE = "/home/users/ah2174/GEDI04_A_2019182013830_O03109_T00088_02_001_01.h5"
TEST_FILE_LP = "GEDI02_A_2020101002520_O07513_03_T04382_02_003_01_V002"

from src.data.gedi_granule import GediGranule

granule = GediGranule(TEST_FILE)
beam = granule.beam(4)
beam.main_data.algorithm_run_flag
# -

# Per-granule download times (minutes?) recorded by hand; mean computed below.
times = [14.9 , 36.6 , 37.5 , 41 , 13.2 , 18 , 3.78 , 9.46 , 39.7 , 40.6 ,
         38.1 , 39.1 , 40.1 , 38.2 , 38.9 , 22.4 , 22.7 , 28.3 , 38.1 , 35.8 ,
         39.8 , 38.4 , 27.9 , 37.4 , 37.1 , 26 , 42.4 , 38.5 , 30.3 , 32.2]
print(len(times))
sum(times)/len(times)

# +
# How many of the shots have algorithm run flag set to true
from collections import defaultdict

# NOTE(review): DEST_FOLDER is not defined anywhere in this notebook --
# confirm where it is meant to be set.
files = os.listdir(DEST_FOLDER)

# lon_min, lat_min, lon_max, lat_max
bound = (-73.9872354804, -33.7683777809, -34.7299934555, 5.24448639569)

shots = 0
usable = 0
d = defaultdict(int)
for file_name in tqdm(files):
    # FIX: os.listdir returns bare names; join with the folder so this also
    # works when the notebook's working directory is not DEST_FOLDER.
    f = h5py.File(os.path.join(DEST_FOLDER, file_name),'r')
    for k in f.keys():
        if k.startswith('BEAM'):
            beam = f.get(k)
            lat = beam.get('lat_lowestmode')[:]
            lon = beam.get('lon_lowestmode')[:]
            alg_run = beam.get('algorithm_run_flag')[:]
            lats_in_range = np.logical_and(lat > bound[1], lat < bound[3])
            lons_in_range = np.logical_and(lon > bound[0], lon < bound[2])
            idx_in_range = np.where(np.logical_and(lats_in_range, lons_in_range))
            if(np.sum(np.logical_and(lats_in_range, lons_in_range)) != 0):
                d[file_name] += 1
            shots += idx_in_range[0].shape[0]
            usable += np.sum(alg_run[idx_in_range])
    f.close()

# Sanity check: every file should have at least one shot in range
print(len(d.keys()))
print(shots)
print(usable)
print(usable/shots)
notebooks/exploratory/1-ah2174-GEDI-4a.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os

def parse_prototxt(content):
    """Parse flattened Caffe prototxt lines into input dims and layer dicts.

    `content` is the prototxt text with tabs and "{\\n"/"}\\n" removed and
    split on newlines, so each layer header collapses onto its first field,
    e.g. 'layer bottom: "data"'.  Returns (inp, layers): `inp` collects the
    input_dim values; `layers` holds one dict per Convolution layer with
    string-sliced field values (the original's slicing quirks, including the
    leading ' "' kept on 'bottom', are preserved).

    NOTE(review): Pooling layers are parsed but never appended to `layers`
    -- this matches the original code; confirm whether that was intended.
    """
    inp = []
    layers = []
    i = 0
    while i < len(content):
        x = content[i]
        if x[:6] == 'input_':
            inp.append(int(x[11:]))
        if x[:5] == 'layer':
            dic = {}
            dic['bottom'] = x[13:-1]
            i += 1
            x = content[i]
            dic['top'] = x[6:-1]
            i += 1
            x = content[i]
            dic['name'] = x[7:-1]
            i += 1
            x = content[i]
            dic['type'] = x[7:-1]
            if dic['type'] == 'Convolution':
                i += 1
                x = content[i]
                dic['num_output'] = x[-2:]
                i += 1
                x = content[i]
                dic['kernel_size'] = x[-1:]
                i += 1
                x = content[i]
                dic['stride'] = x[-1:]
                layers.append(dic)
            elif dic['type'] == 'Pooling':
                i += 1
                x = content[i]
                dic['kernel_size'] = x[-1:]
                i += 1
                x = content[i]
                dic['stride'] = x[-1:]
                i += 1
                x = content[i]
                dic['pool'] = x[-3:]
        i += 1
    return inp, layers

PROTOTXT_PATH = "prototxt/ResNet-50-deploy.prototxt"

if os.path.exists(PROTOTXT_PATH):
    # FIX: the original iterated the open handle to print it and then called
    # f.read(), which returned '' because the iterator had already exhausted
    # the file; rewind with seek(0) before reading (and close via `with`).
    with open(PROTOTXT_PATH, "r") as f:
        for x in f:
            print(x)
        f.seek(0)
        content = f.read()

    content = content.replace("\t", "").replace("{\n", "").replace("}\n", "").split('\n')
    content

    inp, layers = parse_prototxt(content)

    inp

    layers
else:
    print(f"{PROTOTXT_PATH} not found; run this notebook from the repo root.")
deep-residual-networks/prototxt-modifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Example 4.7: CRR Model with parameter uncertainty

# First, include the necessary functions.
# The linear programming solver requires the gurobi package!
source(file="functions.r")

# First, we fix the model parameters.
# Nr. of time steps $n$, different probabilities for the computations and $S_{t_0}$.
n<-10 # Nr.of time steps
N<-100 # Nr. of different P's
S_0<-100

# Next, we choose the up-factor $u$, the down-factor $d$ and the interval of possible probabilities $[0.4,0.6]$
u<-rep(1.1,N)
d<-rep(1/u,N)
p<-seq(0.4,0.6,length=N)

# We assign the possible values that are fixed in the model
# and corresponding admissible probabilities
# x: attainable stock prices at the mid time t_{n/2}; y: at maturity t_n.
x<-matrix(0,N,n/2+1)
y<-matrix(0,N,n+1)
# probabilities
prob_set<-array(0,dim=c(N,(n/2)+1,n+1))
for(i in 1:N){
    # Assigning Values
    for(l in 0:(n/2)){
        x[i,l+1]<-S_0*u[i]^l*d[i]^(n/2-l)
    }
    for(l in 0:(n)){
        y[i,l+1]<-S_0*(u[i]^l)*d[i]^(n-l)
    }
    for(j in 1:((n/2)+1)){
        for(k in 0:(n/2)){
            # Assigning Probabilities
            # Joint binomial weight of j-1 up-moves before t_{n/2} and
            # k up-moves afterwards under success probability p[i].
            prob_set[i,j,k+j]<-dbinom(j-1,n/2,p[i])*dbinom(k,n/2,p[i])
        }
    }
}

# We check if statistical arbitrage is available in the market
check_statarb(x[10,],y[10,],prob_set[10,,],S_0)

# We define our payoff function and compute associated bound
# Function Definition
# Asian-style call on the average of the mid-time and terminal price.
func1<-function(x,y){max(0.5*(x+y)-S_0,0)}
# Robust Bounds
limit <- Inf
lower_bound_nosa<-super_rep(x,y,prob_set,func=func1,stat_arb=F,lower=T,S_0,limit)
lower_bound<-super_rep_robust(x,y,prob_set,func=func1,stat_arb=T,lower=T,S_0,limit)
upper_bound<-super_rep_robust(x,y,prob_set,func=func1,stat_arb=T,lower=F,S_0,limit)
upper_bound_nosa<-super_rep(x,y,prob_set,func=func1,stat_arb=F,lower=F,S_0,limit)

# Computation of the Bounds
# NOTE(review): this cell repeats the four computations above verbatim --
# presumably a leftover duplicate; confirm before removing either cell.
limit<-Inf
# Robust Bounds
lower_bound_nosa<-super_rep(x,y,prob_set,func=func1,stat_arb=F,lower=T,S_0,limit)
lower_bound<-super_rep_robust(x,y,prob_set,func=func1,stat_arb=T,lower=T,S_0,limit)
upper_bound<-super_rep_robust(x,y,prob_set,func=func1,stat_arb=T,lower=F,S_0,limit)
upper_bound_nosa<-super_rep(x,y,prob_set,func=func1,stat_arb=F,lower=F,S_0,limit)

# The price interval that excludes robust statistical arbitrage
print(lower_bound$d)
print(upper_bound$d)

# The price interval that excludes robust arbitrage
print(lower_bound_nosa$d)
print(upper_bound_nosa$d)

# Next, we perform the simulations
# First we assume the traded price of $c$ is 5, then we use the strategy attaining the lower bound to make a profit

# +
Delta_0_lower<-lower_bound$Delta_0
# Hedge ratio at t_{n/2} interpolated as a function of the mid-time price.
Delta_1_lower<-splinefun(lower_bound$x,lower_bound$Delta_1,method="natural")

## Statistical Arbitrage
nr_gains<-0
nr_losses<-0
d_lower<-lower_bound$d
gain<-0
Nr_sim<-1000000
best<-0
worst<-0
for(i in 1:Nr_sim){
    # Each path draws its success probability uniformly from [0.4, 0.6].
    random_p<-rbinom(n,1,runif(1,0.4,0.6))
    S<-S_0*cumprod(u[1]*(random_p==1)+d[1]*(random_p==0))
    # P&L: buy the claim at 5, delta-hedge with the lower-bound strategy.
    f<-(func1(S[n/2],S[n])-Delta_0_lower*(S[n/2]-S_0)-Delta_1_lower(S[n/2])*(S[n]-S[n/2])-5)
    gain<-gain+f
    if(f<0){nr_losses<-nr_losses+1}
    if(f>0){nr_gains<-nr_gains+1}
    if(f>best){best<-f}
    if(f<worst){worst<-f}
}
print("Average Profit")
gain/Nr_sim
print("Best Gain")
best
print("Worst Loss")
worst
print("% Loss Scenarios")
nr_losses/Nr_sim
print("% Gain Scenarios")
nr_gains/Nr_sim
# -

# Next, we assume the price of $c$ is $ 9$

## Statistical Arbitrage
# Same lower-bound strategy, now with a purchase price of 9.
nr_gains<-0
nr_losses<-0
d_lower<-lower_bound$d
gain<-0
Nr_sim<-1000000
best<-0
worst<-0
for(i in 1:Nr_sim){
    random_p<-rbinom(n,1,runif(1,0.4,0.6))
    S<-S_0*cumprod(u[1]*(random_p==1)+d[1]*(random_p==0))
    f<-(func1(S[n/2],S[n])-Delta_0_lower*(S[n/2]-S_0)-Delta_1_lower(S[n/2])*(S[n]-S[n/2])-9)
    gain<-gain+f
    if(f<0){nr_losses<-nr_losses+1}
    if(f>0){nr_gains<-nr_gains+1}
    if(f>best){best<-f}
    if(f<worst){worst<-f}
}
print("Average Profit")
gain/Nr_sim
print("Best Gain")
best
print("Worst Loss")
worst
print("% Loss Scenarios")
nr_losses/Nr_sim
print("% Gain Scenarios")
nr_gains/Nr_sim

# Next, we assume the price of $c$ is $10$, then we use the strategy attaining the upper bound

# +
Delta_0_upper<-upper_bound$Delta_0
Delta_1_upper<-splinefun(upper_bound$x,upper_bound$Delta_1,method="natural")

nr_gains<-0
nr_losses<-0
d_upper<-upper_bound$d
gain<-0
Nr_sim<-1000000
best<-0
worst<-0
for(i in 1:Nr_sim){
    random_p<-rbinom(n,1,runif(1,0.4,0.6))
    S<-S_0*cumprod(u[1]*(random_p==1)+d[1]*(random_p==0))
    # P&L: sell the claim at 10 and hedge with the upper-bound strategy
    # (signs are flipped relative to the long position above).
    f<-(-func1(S[n/2],S[n])+Delta_0_upper*(S[n/2]-S_0)+Delta_1_upper(S[n/2])*(S[n]-S[n/2])+10)
    gain<-gain+f
    if(f<0){nr_losses<-nr_losses+1}
    if(f>0){nr_gains<-nr_gains+1}
    if(f>best){best<-f}
    if(f<worst){worst<-f}
}
print("Average Profit")
gain/Nr_sim
print("Best Gain")
best
print("Worst Loss")
worst
print("% Loss Scenarios")
nr_losses/Nr_sim
print("% Gain Scenarios")
nr_gains/Nr_sim
# -

# Next, we assume the price of $c$ is $20$

# Same short position, now with a sale price of 20.
nr_gains<-0
nr_losses<-0
d_upper<-upper_bound$d
gain<-0
Nr_sim<-1000000
best<-0
worst<-0
for(i in 1:Nr_sim){
    random_p<-rbinom(n,1,runif(1,0.4,0.6))
    S<-S_0*cumprod(u[1]*(random_p==1)+d[1]*(random_p==0))
    f<-(-func1(S[n/2],S[n])+Delta_0_upper*(S[n/2]-S_0)+Delta_1_upper(S[n/2])*(S[n]-S[n/2])+20)
    gain<-gain+f
    if(f<0){nr_losses<-nr_losses+1}
    if(f>0){nr_gains<-nr_gains+1}
    if(f>best){best<-f}
    if(f<worst){worst<-f}
}
print("Average Profit")
gain/Nr_sim
print("Best Gain")
best
print("Worst Loss")
worst
print("% Loss Scenarios")
nr_losses/Nr_sim
print("% Gain Scenarios")
nr_gains/Nr_sim
Example 4.7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Getting Confusion Chart # ### Scope # The scope of this notebook is to provide instructions on how to get the Confusion Matrix Chart using the Python API. The Code below will work **only for Multiclass Classification Projects**. For Binary Classification Projects, use the <code>get_roc_curve</code> command. # # ### Background # # The confusion matrix for a multiclass classification project will be a 𝑁×𝑁 matrix, with the left axis showing the true class of an observation and the top axis showing the class assigned to the observation by the model. Each element 𝑖,𝑗 of the matrix would be the number of items with true class 𝑖 that were classified as being in class 𝑗. # # Some important definitions: # # **F1**: The F1 score for each class. # # **Precision**: The precision statistic for each class. # # **Recall**: The recall statistic for each class. # # **Actual Count**: The number of records for each class that actually are that class. # # **Predicted Count**: The number of times each class was predicted. # # ### Requirements # # - Python version 3.7.3 # - DataRobot API version 2.19.0. # Small adjustments might be needed depending on the Python version and DataRobot API version you are using. # # Full documentation of the Python package can be found here: https://datarobot-public-api-client.readthedocs-hosted.com/en/ # # It is assumed you already have a DataRobot <code>Project</code> object and a DataRobot <code>Model</code> object. # #### Import Libraries import datarobot as dr # #### Requesting Confusion Chart Data cc = model.get_confusion_chart(source='validation') cc.raw_data
Model Evaluation/Python/Getting Confusion Chart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import glob import pandas as pd import numpy as np # # Execution of Emulations print("Todo") # # Preparation of Results # # In order to get to the results, basically two options exist. The first option is to generate own results by executing the emulations as described above. The second way is to download the original paper results via FTP. # # In the case that the results were reproduced, place the results in the `results` folder of the repository. # # In order to download the results, execute command below: # # __Attention__: About 40GB of disk space are required. # !../results/download.sh # # Analysis of results # # The first step at result analysis is to parse the Wireshark packets and write statistics to handy CSV files. This is done by executing a python script. # # __Attention:__ The Python script uses all available CPU cores and could need up to an our to finish calculation. # !mkdir -p $ANALYSIS_HOME/results/averages # !python3 parsePCAP.py -i $ANALYSIS_HOME/results/final-results -o $ANALYSIS_HOME/results/averages # # Plotting of charts # # Once the averages are calculated and available, only the charts need to be plotted. Therefore, use the following command: # !python3 plotPcapResults.py -i $ANALYSIS_HOME/results/averages -o ./ -s 2x1,3x1,2x2 # The resulting charts can be found in the `result-analysis` folder. # # Calculation of overhead # # To quatify the percentage of overhead, the following script can be used. 
# +
# Aggregate per-scenario traffic totals from the averaged CSV files and
# compute the share of sync payload in the total traffic volume.
# Scenario parameters are encoded in the file name, underscore-separated:
# field 1 = number of clients, field 3 = size, field 4 = approach
# (assumed from the indexing below -- TODO confirm against the generator).
csv_files = glob.glob("../results/averages/*.csv")
averages = []
for csv_file in csv_files:
    scenario = csv_file.split("/")[-1].split(".")[0]
    fields = scenario.split("_")
    df = pd.read_csv(csv_file, delimiter="\t")
    incoming = df[df["in/out"] == "in"]
    # Column-wise byte totals over the incoming traffic only.
    # NOTE: renamed from `sum`, which shadowed the Python builtin.
    totals = np.sum(incoming)
    averages.append([
        fields[4],                      # approach
        fields[3],                      # size
        int(fields[1]),                 # number of clients
        totals["bytesInterests"],
        totals["bytesData"],
        totals["bytesSyncPayload"]])
averages = pd.DataFrame(
    averages,
    columns=["approach", "size", "#clients",
             "bytesInterests", "bytesData", "bytesSyncPayload"])
averages["bytesTotal"] = averages["bytesInterests"] + averages["bytesData"]
averages["percentPayload"] = averages["bytesSyncPayload"] / averages["bytesTotal"]
averages.sort_values(by=["approach", "size", "#clients"])
# -

# # Plotting player movements
#
# One chart in the paper visualized the player movement from the Fortnite Traces [1]. To print this chart, use the following commands.
#
# The resulting figures can be found in the `result-analysis`-folder.
#
# [1] <NAME>, <NAME>, <NAME>, & <NAME>. (2018). A Network Traffic and Player Movement Model to Improve Networking for Competitive Online Games. Proceedings of the 15th Annual Workshop on Network and Systems Support for Games. DOI: 10.1109/NetGames.2018.8463390
#

# !git clone https://github.com/phylib/FortniteTraces FortniteTraces
# !python3 plotPlayerMovement.py -i FortniteTraces/GeneratedTraces/g0/ -o ./ -z 1x1,2x1,3x1,2x2
result-analysis/Reproduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/intel-analytics/analytics-zoo/blob/master/docs/docs/colab-notebook/orca/quickstart/pytorch_lenet_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="I1fvwI1q13mC" # ![image.png](data:image/png;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAUDBAQEAwUEBAQFBQUGBwwIBwcHBw8LCwkMEQ8SEhEPERETFhwXExQaFRERGCEYGh0dHx8fExciJCIeJBweHx7/2wBDAQUFBQcGBw4ICA4eFBEUHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh7/wAARCABNAI0DASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKT<KEY>) # --- # + [markdown] id="pO3ksbTR18JZ" # ##### Copyright 2018 Analytics Zoo Authors. # + id="_HZJ3OR71u23" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # + [markdown] id="tNTssjdw2Bpi" # ## **Environment Preparation** # + [markdown] id="fOosIv3t2Fhp" # **Install Java 8** # # Run the cell on the **Google Colab** to install jdk 1.8. 
# # **Note:** if you run this notebook on your computer, root permission is required when running the cell to install Java 8. (You may ignore this cell if Java 8 has already been set up in your computer). # + id="IBs-RL5p2Ia2" # Install jdk8 # !apt-get install openjdk-8-jdk-headless -qq > /dev/null import os # Set environment variable JAVA_HOME. os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" # !update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java # !java -version # + [markdown] id="wS20JSbY2LCJ" # **Install Analytics Zoo** # # # + [markdown] id="5C1GYEhGIlWu" # [Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) is needed to prepare the Python environment for running this example. # # **Note**: The following code cell is specific for setting up conda environment on Colab; for general conda installation, please refer to the [install guide](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) for more details. # + id="wouustbSJS2r" # Install Miniconda # !wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh # !chmod +x Miniconda3-4.5.4-Linux-x86_64.sh # !./Miniconda3-4.5.4-Linux-x86_64.sh -b -f -p /usr/local # Update Conda # !conda install --channel defaults conda python=3.6 --yes # !conda update --channel defaults --all --yes # Append to the sys.path import sys _ = (sys.path .append("/usr/local/lib/python3.6/site-packages")) os.environ['PYTHONHOME']="/usr/local" # + [markdown] id="1LOrK0lQHhrh" # You can install the latest pre-release version using `pip install --pre analytics-zoo`. # + id="A8lgWGhG2Oij" # Install latest pre-release version of Analytics Zoo # Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies. 
# !pip install --pre analytics-zoo # + id="3EFkPl4I2RJG" # Install python dependencies # !pip install torch==1.7.1 torchvision==0.8.2 # !pip install six cloudpickle # !pip install jep==3.9.0 # + [markdown] id="ATJ4YPAS2TQm" # ## **Distributed PyTorch using Orca APIs** # # In this guide we will describe how to scale out PyTorch (v1.5+) programs using Orca in 4 simple steps. # + id="LXeY_v2S24bN" # import necesary libraries and modules from __future__ import print_function import os import argparse from zoo.orca import init_orca_context, stop_orca_context from zoo.orca import OrcaContext # + [markdown] id="dn6xcsq43FRQ" # ### **Step 1: Init Orca Context** # + id="mXMGhKPk3GYN" # recommended to set it to True when running Analytics Zoo in Jupyter notebook. OrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook). cluster_mode = "local" if cluster_mode == "local": init_orca_context(cores=1, memory="2g") # run in local mode elif cluster_mode == "k8s": init_orca_context(cluster_mode="k8s", num_nodes=2, cores=4) # run on K8s cluster elif cluster_mode == "yarn": init_orca_context( cluster_mode="yarn-client", cores=4, num_nodes=2, memory="2g", driver_memory="10g", driver_cores=1, conf={"spark.rpc.message.maxSize": "1024", "spark.task.maxFailures": "1", "spark.driver.extraJavaOptions": "-Dbigdl.failure.retryTimes=1"}) # run on Hadoop YARN cluster # + [markdown] id="hbu_llz48oNL" # This is the only place where you need to specify local or distributed mode. View [Orca Context](https://analytics-zoo.readthedocs.io/en/latest/doc/Orca/Overview/orca-context.html) for more details. # # **Note**: You should export HADOOP_CONF_DIR=/path/to/hadoop/conf/dir when you run on Hadoop YARN cluster. # + [markdown] id="RBzXvR3LNxBP" # ### **Step 2: Define the Model** # You may define your model, loss and optimizer in the same way as in any standard (single node) PyTorch program. 
# + id="bZASG0afNWxd" import torch import torch.nn as nn import torch.nn.functional as F class LeNet(nn.Module): def __init__(self): super(LeNet, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) self.conv2 = nn.Conv2d(20, 50, 5, 1) self.fc1 = nn.Linear(4*4*50, 500) self.fc2 = nn.Linear(500, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 4*4*50) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) model = LeNet() model.train() criterion = nn.NLLLoss() lr = 0.001 adam = torch.optim.Adam(model.parameters(), lr) # + [markdown] id="o-SeParPNtK8" # ### **Step 3: Define Train Dataset** # # You can define the dataset using standard [Pytorch DataLoader](https://pytorch.org/docs/stable/data.html). Orca also supports a data creator function or [Orca SparkXShards](./data). # + id="tQMxn902OBoR" import torch from torchvision import datasets, transforms torch.manual_seed(0) dir='./dataset' batch_size=320 test_batch_size=320 train_loader = torch.utils.data.DataLoader( datasets.MNIST(dir, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size= batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.MNIST(dir, train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=test_batch_size, shuffle=False) # + [markdown] id="vLSjmlkROzPr" # ### **Step 4: Fit with Orca Estimator** # + [markdown] id="49Rq_fo2O34O" # First, Create an Estimator. # + id="a-AK6mFKO4o5" from zoo.orca.learn.pytorch import Estimator from zoo.orca.learn.metrics import Accuracy est = Estimator.from_torch(model=model, optimizer=adam, loss=criterion, metrics=[Accuracy()]) # + [markdown] id="6PEmVuGnPFBk" # Next, fit and evaluate using the Estimator. 
# + id="WeCoGJrGPH5M" from zoo.orca.learn.trigger import EveryEpoch est.fit(data=train_loader, epochs=1, validation_data=test_loader, checkpoint_trigger=EveryEpoch()) # + [markdown] id="tFQCQwH0UBF1" # Finally, evaluate using the Estimator. # + id="Z3cUxgYwUCUl" result = est.evaluate(data=test_loader) for r in result: print(str(r)) # + [markdown] id="YASrq-VzXdpJ" # The accuracy of this model has reached 98%. # + id="uSSReIykRPIB" # stop orca context when program finishes stop_orca_context()
docs/docs/colab-notebook/orca/quickstart/pytorch_lenet_mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.models import Sequential
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os

# +
# Download and extract the filtered cats-vs-dogs dataset.
path_zip = tf.keras.utils.get_file(
    'cats_and_dogs.zip',
    origin="https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip",
    extract=True)
# FIX: the directory name was misspelled 'cats_and_dogs_filltered', so every
# os.path.join() below pointed at a non-existent folder.
path = os.path.join(os.path.dirname(path_zip), 'cats_and_dogs_filtered')
# -

train_dir = os.path.join(path, 'train')
validation_dir = os.path.join(path, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

# Training hyper-parameters.
batch_size = 128
epochs = 15
IMG_HEIGHT = 100
IMG_WIDTH = 100

train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)

train_set = train_image_generator.flow_from_directory(
    'cats_and_dogs_filtered/train',
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    batch_size=batch_size,
    class_mode='binary')

test_set = validation_image_generator.flow_from_directory(
    'cats_and_dogs_filtered/validation',
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    batch_size=batch_size,
    class_mode='binary')

sample_images_tr, _ = next(train_set)


def plotImages(images_arr):
    """Show the first five images of ``images_arr`` side by side."""
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()


plotImages(sample_images_tr[:5])

# +
# Baseline CNN: three conv/pool stages followed by a dense classifier head.
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid')
])
# -

model.summary()

# FIX: the keyword argument was misspelled 'optimaizer', so Adam was never
# actually configured as the optimizer.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=["accuracy"])

# FIX: validation_data previously pointed at the *training* generator, so the
# reported validation metrics were meaningless; validate on the held-out set.
# (fit_generator is deprecated in recent TF releases in favour of model.fit.)
history = model.fit_generator(train_set,
                              steps_per_epoch=2000 // batch_size,
                              epochs=epochs,
                              validation_data=test_set,
                              validation_steps=1000 // batch_size)

# +
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epoch_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epoch_range, acc, label='Training_Accuracy')
plt.plot(epoch_range, val_acc, label='validation_Accuracy')
plt.legend(loc='best')
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epoch_range, loss, label='Training_loss')
plt.plot(epoch_range, val_loss, label='Validation_Loss')
plt.legend(loc='best')
plt.title("Training and Validation Loss")
plt.show()
# -

# Data augmentation: horizontal flips.
img_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
train_data_gen = img_gen.flow_from_directory(batch_size=batch_size,
                                             directory='cats_and_dogs_filtered/train',
                                             shuffle=True,
                                             target_size=(IMG_HEIGHT, IMG_WIDTH))
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)

# +
# Data augmentation: random rotations up to 45 degrees.
img_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)
train_data_gen = img_gen.flow_from_directory(batch_size=batch_size,
                                             directory='cats_and_dogs_filtered/train',
                                             shuffle=True,
                                             target_size=(IMG_HEIGHT, IMG_WIDTH))
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
# -

# +
# Data augmentation: random zoom.
img_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5)
train_image_generation = img_gen.flow_from_directory(shuffle=True,
                                                     directory='cats_and_dogs_filtered/train',
                                                     target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                     batch_size=batch_size)
# -

augmented_images = [train_image_generation[0][0][0] for i in range(5)]
plotImages(augmented_images)

# All augmentations combined, for training the regularised second model.
img_gen = ImageDataGenerator(rescale=1./255,
                             zoom_range=0.5,
                             rotation_range=45,
                             horizontal_flip=True)

# +
train_image_generator = img_gen.flow_from_directory(shuffle=True,
                                                    batch_size=batch_size,
                                                    directory='cats_and_dogs_filtered/train',
                                                    target_size=(100, 100),
                                                    class_mode='binary')
aug = [train_image_generator[0][0][0] for i in range(5)]
plotImages(aug)
# -

# +
img_gen = ImageDataGenerator(rescale=1./255)
validation_data_gen = img_gen.flow_from_directory(shuffle=True,
                                                 batch_size=batch_size,
                                                 target_size=(100, 100),
                                                 directory='cats_and_dogs_filtered/validation',
                                                 class_mode='binary')
# -

# Second model: adds Dropout layers to curb the overfitting seen above.
model2 = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(100, 100, 3)),
    MaxPooling2D(),
    Dropout(0.2),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid')
])

model2.compile(optimizer='adam',
               loss='binary_crossentropy',
               metrics=['accuracy'])

model2.summary()

history = model2.fit_generator(train_image_generator,
                               epochs=epochs,
                               steps_per_epoch=2000 // batch_size,
                               validation_data=validation_data_gen,
                               validation_steps=1000 // batch_size)

# +
accuracy = history.history["accuracy"]
loss = history.history["loss"]
val_acc = history.history["val_accuracy"]
val_loss = history.history["val_loss"]

epoch_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epoch_range, accuracy, label='Training_accuracy')
plt.plot(epoch_range, val_acc, label='validation_accuracy')
plt.legend(loc='best')
plt.title("Training_accuracy and validation_accuracy")
plt.subplot(1, 2, 2)
plt.plot(epoch_range, loss, label='Training_loss')
plt.plot(epoch_range, val_loss, label='validation_loss')
plt.legend(loc='best')
plt.title("Training and Validation Loss")
plt.show()
# -
overfit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welter issue #9 # ## Generate synthetic, noised-up two-temperature model spectra, then naively fit a single temperature model to it. # ### Part 4- spot check the fake data # # <NAME> # Friday, January 8, 2015 # # Does it look okay? import warnings warnings.filterwarnings("ignore") import numpy as np from astropy.io import fits import matplotlib.pyplot as plt % matplotlib inline % config InlineBackend.figure_format = 'retina' import seaborn as sns sns.set_context('notebook') # # Look at plots. import os import json # + os.chdir('/Users/gully/GitHub/welter/notebooks/') os.chdir('../sf/m086/') with open('mixture_model.json') as f: mmix = json.load(f) with open('model_A_spec.json') as f: ma = json.load(f) with open('model_B_spec.json') as f: mb = json.load(f) plt.plot(mmix['wl'], mmix['mmix']) plt.plot(mmix['wl'], mmix['mmix_noised'], alpha=0.5) plt.plot(ma['wl'], np.array(ma['model'])*0.70, alpha=0.5) plt.plot(ma['wl'], np.array(mb['model'])*0.30/2.39894, alpha=0.5) #plt.ylim(0, 1.0); # - # The mixture model indeed looks like the sum of its parts. # + os.chdir('/Users/gully/GitHub/welter/notebooks/') os.chdir('../sf/eo013/') with open('mixture_model.json') as f: mmix = json.load(f) with open('model_A_spec.json') as f: ma = json.load(f) with open('model_B_spec.json') as f: mb = json.load(f) plt.plot(mmix['wl'], mmix['mmix']) plt.plot(mmix['wl'], mmix['mmix_noised'], alpha=0.5) plt.plot(ma['wl'], np.array(ma['model'])*0.83, alpha=0.5) plt.ylim(0, 1.0); # - # Cool! Mose of the structure is described by the warm component. The underlying cool spots are only barely perceptible in the mixture model, even for this extreme absorption band in the cool spectrum. 
My guess is that when fitting a single temperature model to this mixture model, most of this coarse structure will be absorbed into the calibration parameters, and most fine structure will be in the noise. But I think the temperature will get thrown off a little bit. # + os.chdir('/Users/gully/GitHub/welter/notebooks/') os.chdir('../sf/eo027/') with open('mixture_model.json') as f: mmix = json.load(f) with open('model_A_spec.json') as f: ma = json.load(f) with open('model_B_spec.json') as f: mb = json.load(f) plt.plot(mmix['wl'], mmix['mmix'], 'k') plt.plot(mmix['wl'], mmix['mmix_noised'], alpha=0.5) plt.plot(ma['wl'], np.array(ma['model'])*0.83, 'r',alpha=0.5) plt.plot(ma['wl'], np.array(mb['model'])*0.83*0.3/0.7/2.39894, 'g',alpha=0.5) # - #
notebooks/welter_issue009-04_spot_check_fake_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
from copy import deepcopy
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
from sklearn import cluster

# +
# Load the 2-D point cloud and show it before clustering.
data = pd.read_csv('data.csv', header=None)
print('Number of samples: ', len(data))
data.plot.scatter(0, 1, figsize=(15, 10))
plt.show()
data = data.values

# +
# Standardise the coordinates (zero mean, unit variance over all entries).
mean = data.mean()
std = data.std()
data = (data - mean) / std


# +
class DBSCAN:
    """Density-based spatial clustering (DBSCAN) over a point cloud.

    A point is a *core* point when at least ``min_samples`` points
    (itself included) lie within a radius ``eps`` of it; clusters are
    grown outward from core points, and unreachable points are noise.
    """

    def __init__(self, min_samples, eps):
        """Store the neighborhood parameters.

        :param min_samples: Minimum samples within eps radius to be
            considered a core point
        :param eps: Radius of the neighborhood around each point
        """
        self.min_samples = min_samples
        self.eps = eps
        # Per-point state, filled in by fit_predict:
        #   labels: 0 = not yet processed, -1 = noise, >= 1 = cluster id
        #   core_points: 1 where the point was registered as a core point
        self.labels = None
        self.core_points = None

    def fit_predict(self, x, *args, **kwargs):
        """Cluster ``x`` and return ``(labels, core_points)``.

        Scans the points in order; every unprocessed point that turns out
        to be a core point seeds a new cluster, which is then fully
        expanded before the scan continues. Non-core points are marked as
        noise (-1) but may later be absorbed into a cluster as border
        points. Note the order of the data matters for border points.

        :param x: N-dimensional numpy array of points
        :return: Tuple of (labels array, core-point indicator array)
        """
        n_points = len(x)
        self.labels = np.zeros((n_points,))
        self.core_points = np.zeros((n_points,))
        next_cluster = 1  # cluster ids are 1, 2, 3, ...
        for idx in range(n_points):
            if self.labels[idx] != 0:
                continue  # already clustered or already flagged as noise
            if len(self.__nearest_neighbors(x, x[idx])) >= self.min_samples:
                self.__expand(x, idx, next_cluster)
                next_cluster += 1
            else:
                self.labels[idx] = -1  # provisional noise / outlier
        return self.labels, self.core_points

    def __nearest_neighbors(self, data, point):
        """Indices of every point within ``eps`` of ``point``, closest first.

        :param data: All points
        :param point: The query point
        :return: 1-D array of neighbor indices sorted by distance
        """
        dists = euclidean_distances(data, point.reshape(1, -1))
        by_distance = np.argsort(dists, axis=0).flatten()
        # Sorted by distance, the in-radius points form a prefix, so the
        # count of in-radius points is exactly the prefix length.
        within_eps = int(np.count_nonzero(dists <= self.eps))
        return by_distance[:within_eps]

    def __expand(self, data, seed_idx, cluster_id):
        """Grow cluster ``cluster_id`` outward from core point ``seed_idx``.

        Breadth-first expansion: the seed's neighborhood is the initial
        frontier; each unseen frontier point joins the cluster, and if it
        is itself a core point its neighborhood is appended to the
        frontier. Points previously flagged as noise become border points
        of this cluster.

        :param data: Whole data set being clustered
        :param seed_idx: Index of the starting core point
        :param cluster_id: Label of the cluster being grown
        :return: None
        """
        self.labels[seed_idx] = cluster_id
        frontier = list(self.__nearest_neighbors(data, data[seed_idx]))
        while frontier:
            candidate = frontier.pop(0)  # FIFO preserves the visit order
            if self.labels[candidate] == -1:
                # Previously marked noise: absorbed as a border point.
                self.labels[candidate] = cluster_id
            elif self.labels[candidate] == 0:
                self.labels[candidate] = cluster_id
                reachable = self.__nearest_neighbors(data, data[candidate])
                if len(reachable) >= self.min_samples:
                    # Core point: its whole neighborhood joins the frontier.
                    frontier.extend(reachable)
                    self.core_points[candidate] = 1


# +
# Sweep a small grid of (eps, min_samples) and plot each clustering.
parameters = {'eps': [0.25, 0.3, 0.35, 0.4], 'min_samples': [3, 4, 5, 6, 7, 10]}
plt.figure(figsize=(20, 9))
plot_no = 1
for radius in parameters['eps']:
    for min_count in parameters['min_samples']:
        clusterer = DBSCAN(min_samples=min_count, eps=radius)
        grid_labels, _ = clusterer.fit_predict(data)
        plt.subplot(len(parameters['eps']), len(parameters['min_samples']), plot_no)
        plot_no += 1
        plt.scatter(data[:, 0], data[:, 1], c=grid_labels, s=50,
                    cmap='viridis', label=np.unique(grid_labels))
plt.legend()
plt.show()

# +
# Final clustering with the chosen parameters.
plt.figure(figsize=(9, 6))
final_model = DBSCAN(min_samples=5, eps=0.3)
final_labels, final_core = final_model.fit_predict(data)
plt.scatter(data[:, 0], data[:, 1], c=final_labels, s=50,
            cmap='viridis', label=np.unique(final_labels))
plt.legend()
plt.show()
HW02/notebooks/dbscan.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A Newton-Raphson Root Finding Implementation

import numpy as np


def function_for_root(x):
    """Quadratic test function f(x) = 1.01 x^2 - 3.04 x + 2.07."""
    a = 1.01
    b = -3.04
    c = 2.07
    return a*x**2 + b*x + c


def derivative_for_root(x):
    """Analytic derivative of function_for_root: f'(x) = 2.02 x - 3.04."""
    a = 1.01
    b = -3.04
    return 2*a*x + b


# ## Define the primary work function

def newton_raphson_root_finding(f, dfdx, x_start, tol):
    """Find a root of ``f`` by Newton-Raphson iteration.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    dfdx : callable
        Derivative of ``f``.
    x_start : float
        Initial guess for the root.
    tol : float
        Iteration stops once ``|f(x)| < tol``.

    Returns
    -------
    float
        The estimated root.

    Raises
    ------
    StopIteration
        If ``imax`` iterations pass without convergence.
    """
    imax = 10000  # safety cap on the number of iterations
    i = 0
    x_old = x_start
    while True:
        # Newton step: follow the tangent line down to its x-intercept.
        x_new = x_old - f(x_old)/dfdx(x_old)
        # print out the iteration
        print(x_new, x_old, f(x_old), dfdx(x_old))
        # Converged once the function value is within tolerance.
        if np.fabs(f(x_new)) < tol:
            return x_new
        x_old = x_new
        i += 1
        if i >= imax:
            # BUG FIX: this branch called printf(), which is undefined in
            # Python and raised a NameError instead of reporting the failure.
            print("Max iterations reached.")
            raise StopIteration('Stopping iterations after ', i)


# ## Perform the search

# +
x_start = 0.5
tolerance = 1.0e-6

# print the initial guess
print(x_start, function_for_root(x_start))

x_root = newton_raphson_root_finding(function_for_root, derivative_for_root, x_start, tolerance)
y_root = function_for_root(x_root)

s = "Root found with y(%f) = %f" % (x_root, y_root)
print(s)
Newton-Raphson.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Nova Tarefa - Implantação # # Preencha aqui com detalhes sobre a tarefa.<br> # ### **Em caso de dúvidas, consulte os [tutoriais da PlatIAgro](https://platiagro.github.io/tutorials/).** # ## Declaração de Classe para Predições em Tempo Real # # A tarefa de implantação cria um serviço REST para predições em tempo-real.<br> # Para isso você deve criar uma classe `Model` que implementa o método `predict`. # + # %%writefile Model.py import joblib class Model: def __init__(self): pass def predict(self, X, feature_names, meta=None): return X # - # ## Teste do serviço REST # # Crie um arquivo `contract.json` com os seguintes atributos: # # - `features` : A lista de features em uma requisição. # - `targets` : A lista de valores retornados pelo método `predict`. # # Cada `feature` pode conter as seguintes informações: # # - `name` : nome da feature # - `ftype` : tipo da feature : **continuous** ou **categorical** # - `dtype` : tipo de dado : **FLOAT** ou **INT** : *obrigatório para ftype continuous* # - `range` : intervalo de valores numéricos : *obrigatório para ftype continuous* # - `values` : lista de valores categóricos : *obrigatório para ftype categorical* # # Em seguida, utilize a função `test_deployment` do [SDK da PlatIAgro](https://platiagro.github.io/sdk/) para simular predição em tempo-real.<br> # %%writefile contract.json { "features": [ { "name": "some_feature_name", "dtype": "FLOAT", "ftype": "continuous", "range": [0.0, 100.0] }, { "name": "another_feature_name", "ftype": "categorical", "values": ["category1", "category2"] } ], "targets": [] } # + from platiagro.deployment import test_deployment test_deployment("contract.json") # -
tests/resources/mocked_deployment_task.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={}
# # Project Euler: Problem 8

# + [markdown] nbgrader={}
# https://projecteuler.net/problem=8
#
# The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
#
# (see the number below)
#
# Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
#
# Use NumPy for this computation

# + nbgrader={}
import numpy as np

# + nbgrader={}
d1000 = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
# -

# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# BUG FIX: the original filter `if g[i] == 1 or 2 or ...` is always True
# (the `or` chain evaluates the bare integers as truthy), so the newline
# characters in d1000 survived and were mapped to digit 0.0, corrupting the
# window products. Keep only the actual digit characters instead.
x = np.array([int(ch) for ch in d1000 if ch.isdigit()], dtype=float)

# BUG FIX: the original loop `range(len(x) - 13)` skipped the final 13-digit
# window; a window of size w starting at i needs i to run up to len(x) - w.
window = 13
products_13digit = [x[i:i + window].prod() for i in range(len(x) - window + 1)]
y = np.array(products_13digit).max()
print(y)
"""the printed number y is the answer"""
# -

# + deletable=false nbgrader={"checksum": "21332c13eb4f8f18191bd8d47414b7fc", "grade": true, "grade_id": "projecteuler8", "points": 10}
assert True  # leave this for grading
# -
assignments/assignment03/ProjectEuler8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Analysis of EM-shower clustering / network-training experiments logged to
# comet.ml: threshold grid-search plot, validation ROC-AUC curves, and a 3D
# rendering of a clustered shower graph.

from comet_ml import API
import comet_ml
import io
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from collections import defaultdict
import time

# SECURITY: hard-coded REST API credential committed with the notebook --
# revoke this key and load it from an environment variable instead.
comet_api = API(rest_api_key='6O55PoJt4tkp9LyupIE86eikH')
comet_api.get()


# +
def get_parameter_by_key(exp, key):
    """Return the current value of logged parameter `key`, or None if absent.

    (The original defined this function twice, byte-for-byte identical; the
    duplicate has been dropped.)
    """
    parameters = exp.get_parameters_summary()
    for parameter in parameters:
        if parameter['name'] == key:
            return parameter['valueCurrent']
    return None


def new_to_old_metric(exp, key):
    """Return every logged value of metric `key` as a list of floats."""
    metric = exp.get_metrics(key)
    vals = [float(m['metricValue']) for m in metric]
    return vals


def stack_lists(data, n=1000):
    """Clip/pad each 1-D array in `data` to length `n`, stack as columns.

    Longer arrays are truncated; shorter ones padded by repeating their last
    value.  BUGFIX: the original handled only the `>` and `<` cases and
    silently dropped any array whose length was exactly `n`.
    """
    new_data = []
    for d in data:
        if len(d) > n:
            new_data.append(d[:n])
        elif len(d) < n:
            new_data.append(
                np.concatenate([d, d[-1].repeat(n - len(d))])
            )
        else:
            new_data.append(d)
    return np.vstack(new_data).T


def preprocess_gp(vals):
    # Best-so-far (running minimum) curve.
    return np.minimum.accumulate(vals)


def add_zero_point(vals, point):
    # Prepend an initial point to a 1-D array.
    return np.array([point] + vals.tolist())
# -

experiments = comet_api.get(workspace='schattengenie', project_name='em-showers-network-clustering-new')

exp = experiments[0]

# +
# Collect the "Good showers" metric, grouped by threshold and min_samples_core.
data = defaultdict(lambda: defaultdict(list))
for exp in experiments:
    good_showers = new_to_old_metric(exp, "Good showers")
    if len(good_showers):
        # NOTE(review): the threshold is recovered from the experiment's CLI
        # command (5th token from the end) -- fragile; verify against the runner.
        threshold = float(exp.get_command()[-5])
        min_samples_core = int(get_parameter_by_key(exp, "min_samples_core"))
        data[threshold][min_samples_core].append(good_showers[0])

# +
# Per min_samples_core: thresholds on x, mean/std of quality on y.
threshold_data = defaultdict(list)
quality_data = defaultdict(list)
quality_std_data = defaultdict(list)
for threshold in data:
    for min_samples_core in data[threshold]:
        q = data[threshold][min_samples_core]
        threshold_data[min_samples_core].append(threshold)
        quality_data[min_samples_core].append(np.mean(q))
        quality_std_data[min_samples_core].append(np.std(q))

# +
sns.set(context='paper', style="whitegrid", font_scale=3, font='serif')
colors = [
    'skyblue',
    'orange',
    'steelblue',
    'gold',
    '#f58231',
]
# %matplotlib inline
linewidth = 3
# -

def plot_mean_std(idx, mean, std, label="", i=0):
    """Plot `mean` vs `idx` with a +/- `std` band in the i-th palette color."""
    idx = np.array(idx)
    mean = np.array(mean)
    std = np.array(std)
    upper = mean + std
    lower = mean - std
    plt.plot(idx, mean, c=colors[i], linewidth=linewidth, label=label, zorder=5)
    plt.plot(idx, upper, 'b--', c=colors[i], zorder=5)
    plt.plot(idx, lower, 'b--', c=colors[i], zorder=5)
    plt.fill_between(idx, lower, upper, alpha=0.2, color=colors[i], zorder=5)

plt.figure(figsize=(12, 8), dpi=100)
plt.title("")
for i, min_samples_core in enumerate(threshold_data):
    plot_mean_std(threshold_data[min_samples_core],
                  quality_data[min_samples_core],
                  quality_std_data[min_samples_core],
                  label="min_samples_core={}".format(min_samples_core), i=i)
plt.ylabel("Good showers")
plt.xlabel("Threshold")
# plt.ylim
plt.legend(loc='upper right', fontsize='xx-large')
plt.savefig("threshold_grid_search_plot.pdf", bbox_inches='tight')
plt.show()

# +
num_epochs = 1500
experiments = comet_api.get(workspace='schattengenie', project_name='em-showers-network-training')
# Partition runs by network depth (parameters are logged as strings).
experiments_balanced = [exp for exp in experiments if get_parameter_by_key(exp, "num_layers_edge_conv") == '5']
experiments_emulsion = [exp for exp in experiments if get_parameter_by_key(exp, "num_layers_edge_conv") == '0']
experiments_edge = [exp for exp in experiments if get_parameter_by_key(exp, "num_layers_edge_conv") == '8']

# +
balanced_roc_auc = []
for exp in experiments_balanced:
    roc_auc_test = np.array(new_to_old_metric(exp, 'roc_auc_test'))
    balanced_roc_auc.append(roc_auc_test)
balanced_roc_auc = stack_lists(balanced_roc_auc, num_epochs)

edge_roc_auc = []
for exp in experiments_edge:
    roc_auc_test = np.array(new_to_old_metric(exp, 'roc_auc_test'))
    edge_roc_auc.append(roc_auc_test)
edge_roc_auc = stack_lists(edge_roc_auc, num_epochs)

emulsion_roc_auc = []
for exp in experiments_emulsion:
    roc_auc_test = np.array(new_to_old_metric(exp, 'roc_auc_test'))
    emulsion_roc_auc.append(roc_auc_test)
emulsion_roc_auc = stack_lists(emulsion_roc_auc, num_epochs)
# -

def plot_roc_auc_test(data, label="", i=0):
    """Plot epoch-wise mean ROC-AUC with a standard-error band."""
    idx = np.arange(len(data))
    mean = data.mean(1)
    std = data.std(1) / np.sqrt(data.shape[1])  # standard error of the mean
    upper = mean + std
    lower = mean - std
    plt.plot(idx, mean, c=colors[i], linewidth=linewidth, label=label, zorder=5)
    plt.plot(idx, upper, 'b--', c=colors[i], zorder=5)
    plt.plot(idx, lower, 'b--', c=colors[i], zorder=5)
    plt.fill_between(idx, lower, upper, alpha=0.2, color=colors[i], zorder=5)

# +
plt.figure(figsize=(12, 8), dpi=100)
plt.title("")
plot_roc_auc_test(edge_roc_auc, i=1, label="Pure edge")
plot_roc_auc_test(emulsion_roc_auc, i=0, label="Pure emulsion")
plot_roc_auc_test(balanced_roc_auc, i=2, label="Mix")
plt.ylabel("Validation ROC-AUC")
plt.xlabel("Epochs")
# plt.ylim
plt.legend(loc='upper right', fontsize='xx-large')
plt.savefig("validation_roc_auc.pdf", bbox_inches='tight')
plt.show()
# -

# -------

# +
import torch
import torch.nn as nn
from opera_tools import plot_graphx, DISTANCE, scattering_estimation_loss
import matplotlib.pyplot as plt
import networkx as nx
# -

import matplotlib.colors as mcolors

datafile = 'clusters.pt'
clusters = torch.load(datafile)

len(clusters['clusters'])

clusters['graphx'].nodes(data=True)[0]['signal']

clusters['graphx'].graph['showers_data'][0]['numtracks']

# Build a long list of distinct colors, one per shower.
# BUGFIX: the original iterated `matplotlib.colors.cnames` although only
# `matplotlib.colors as mcolors` was imported (NameError), and read
# `colors._colors_full_map` off the palette *list* defined above
# (AttributeError).  Both now go through `mcolors`.
Colors = list(mcolors.cnames)
colors_list = list(mcolors._colors_full_map.values())
len(Colors)
clrs = Colors + colors_list[100:152]
len(clrs)

def plot_graphx(graphx: nx.DiGraph, azim=-84, elev=10):
    """Render the base tracks of a clustered shower graph in 3D.

    NOTE: shadows the `plot_graphx` imported from opera_tools above.
    Assumes `graphx.graph['showers_data']` holds at least 200 showers whose
    track counts partition the nodes in order -- TODO confirm with producer.
    """
    from mpl_toolkits.mplot3d import Axes3D
    from mpl_toolkits.mplot3d.art3d import Line3DCollection
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set(context='paper', style="whitegrid", font_scale=2, font='serif')
    x0, y0, z0 = [], [], []
    sx, sy = [], []
    Tracks = []
    for _, node in graphx.nodes(data=True):
        x0.append(node['features']['SX'])
        y0.append(node['features']['SY'])
        z0.append(node['features']['SZ'])
        sx.append(node['features']['TX'])
        sy.append(node['features']['TY'])
    for i in range(200):
        Tracks.append(int(graphx.graph['showers_data'][i]['numtracks']))
    x0, y0, z0 = np.array(x0), np.array(y0), np.array(z0)
    sx, sy = np.array(sx), np.array(sy)
    # Extend each base track by dZ along its slopes (dZ is a module constant).
    x1 = x0 + dZ * sx
    y1 = y0 + dZ * sy
    z1 = z0 + dZ
    start_points = np.array([z0, y0, x0]).T.reshape(-1, 3)
    end_points = np.array([z1, y1, x1]).T.reshape(-1, 3)
    # First shower in blue, last one in gold, the rest colored from `clrs`.
    lc_0 = Line3DCollection(list(zip(start_points[:Tracks[0]], end_points[:Tracks[0]])),
                            colors=['blue'], alpha=0.9, lw=2)
    lc_last = Line3DCollection(list(zip(start_points[len(start_points) - Tracks[199]:],
                                        end_points[len(start_points) - Tracks[199]:])),
                               colors=['gold'], alpha=0.9, lw=2)
    fig = plt.figure(figsize=(15, 15))
    ax = fig.gca(projection='3d')
    ax.view_init(azim=azim, elev=elev)
    ax.add_collection3d(lc_0)
    ax.add_collection3d(lc_last)
    start = 0
    for i in range(198):
        start += Tracks[i]
        end = start + Tracks[i + 1]
        lc = Line3DCollection(list(zip(start_points[start:end], end_points[start:end])),
                              colors=clrs[i], alpha=0.9, lw=2)
        ax.add_collection3d(lc)
    ax.set_xlabel("z")
    ax.set_ylabel("y")
    ax.set_zlabel("x")
    ax.set_xlim(z0.min(), z1.max())
    ax.set_ylim(y0.min(), y1.max())
    ax.set_zlim(x0.min(), x1.max())
    # BUGFIX: save *before* show -- plt.show() releases the figure, so the
    # original's subsequent savefig wrote an empty canvas.
    plt.savefig("graph_colored.pdf", bbox_inches='tight')
    plt.show()

# +
# Plate z-positions: 58 values spaced 1293 apart (identical to the literal
# table in the original, written as an arithmetic progression).
BT_Z_unique = np.arange(58) * 1293.
BRICK_X_MIN = 0.
BRICK_X_MAX = 103000.  # 10.3 cm
BRICK_Y_MIN = 0.
BRICK_Y_MAX = 128000.  # 12.8 cm
SAFE_M = 3000.
dZ = 0.00205  # 0.0205 cm emulsion
DISTANCE = 1293.
kwargs = {'bins': 100, 'alpha': 0.8, 'normed': True}
# -

from matplotlib import rcParams
rcParams['axes.labelpad'] = 20.0

plot_graphx((clusters['graphx']));
results_analysis_new.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Case-study statistics for medication-recommendation models (GAMENet et al.):
# loads preprocessed EHR records, prints the most frequent codes, and computes
# DDI (drug-drug interaction) rates for hand-picked example patients.

# +
from collections import Counter, defaultdict
import os
import dill


def _load(path):
    # Helper: unpickle one artifact from `path`.
    # BUGFIX: files are opened via `with` so handles are closed (the original
    # passed bare `open(...)` calls to dill.load and leaked them).
    with open(path, 'rb') as f:
        return dill.load(f)


data_path = '../data/records_final.pkl'
voc_path = '../data/voc_final.pkl'
ehr_adj_path = '../data/ehr_adj_final.pkl'
ddi_adj_path = '../data/ddi_A_final.pkl'

ehr_adj = _load(ehr_adj_path)
ddi_adj = _load(ddi_adj_path)
data = _load(data_path)
voc = _load(voc_path)

diag_voc, pro_voc, med_voc = voc['diag_voc'], voc['pro_voc'], voc['med_voc']

# 2/3 train, then the remainder split evenly into test and eval.
split_point = int(len(data) * 2 / 3)
data_train = data[:split_point]
eval_len = int(len(data[split_point:]) / 2)
data_test = data[split_point:split_point + eval_len]
data_eval = data[split_point + eval_len:]


# +
def most_common_medical(data, voc, choice=0, top=10):
    """Print the `top` most frequent codes at admission position `choice`.

    Each admission is a triple of code lists; presumably
    0 = diagnoses, 1 = procedures, 2 = medications -- confirm with the
    records_final.pkl producer.  Frequencies are printed as fractions of all
    code occurrences.
    """
    c = Counter()
    cnt = 0
    for patient in data:
        for adm in patient:
            for item in adm[choice]:
                c[item] += 1
                cnt += 1
    for letter, count in c.most_common(top):
        print('%s: %.4f' % (letter, count / cnt))


def case_study_statics(data, adm, adm_ja, ):
    # y_gt: multi-hot
    # NOTE(review): left unfinished in the original; kept as a stub.
    c = Counter()
# -

most_common_medical(data, diag_voc, choice=2, top=10)

most_common_medical(data, med_voc, choice=2, top=10)

# # low score w.r.t Jaccard (Medication)

# +
# model_name = 'Retain_small'
model_name = 'NEW_GAMENet_small_neg'

case_study = _load(os.path.join('saved', model_name, 'case_study.pkl'))
# sort by key so the ordering below is deterministic
sort_case_study = [case_study[k] for k in sorted(case_study.keys())]
# -

sort_case_study[-3]

# +
# EHR: DDI rate inside the ground-truth medication sets of one example patient.
ddi_A = _load('../data/ddi_A.pkl')
records = [[[[825, 7, 25],
             [65],
             [4, 6, 0, 14, 19, 1, 26, 3, 28, 39, 2, 12, 15, 87, 32]],
            [[825, 43, 24, 25],
             [602],
             [4, 6, 12, 3, 0, 14, 19, 2, 32, 39, 28, 1, 33, 15]]]]

# test ddi in ground truth set
all_cnt = 0
dd_cnt = 0
for patient in records:
    for adm in patient:
        med_code_set = adm[2]
        # every unordered pair of prescribed medications
        for i, med_i in enumerate(med_code_set):
            for j, med_j in enumerate(med_code_set):
                if j <= i:
                    continue
                all_cnt += 1
                if ddi_A[med_i, med_j] == 1 or ddi_A[med_j, med_i] == 1:
                    dd_cnt += 1
                    print('i,j', med_i, med_j)
print(dd_cnt / all_cnt)

# +
# Same DDI rate, plus the set of interacting pairs, for a predicted med list.
ddi_A = _load('../data/ddi_A_final.pkl')
# records = [[[0, 1, 2, 3, 4, 6, 12, 14, 19, 32, 39],[0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 28, 32, 39, 87]]]
records = [[[0, 1, 2, 3, 4, 6, 12, 14, 15, 16, 19, 26, 32, 34, 39, 48, 59, 73],
            [0, 1, 2, 3, 4, 5, 6, 9, 11, 12, 14, 15, 19, 22, 26, 28, 34, 42, ]]
           ]

all_cnt = 0
dd_cnt = 0
ddi_set = defaultdict(set)
for patient in records:
    for adm in patient:
        med_code_set = adm
        for i, med_i in enumerate(med_code_set):
            for j, med_j in enumerate(med_code_set):
                if j <= i:
                    continue
                all_cnt += 1
                if ddi_A[med_i, med_j] == 1 or ddi_A[med_j, med_i] == 1:
                    dd_cnt += 1
                    ddi_set[med_i].add(med_j)
print(dd_cnt / all_cnt)
print(ddi_set)
# -

# Result notes from the original analysis, kept verbatim:
# v1 'diag' '1912', '2724', '2749' [Malignant neoplasm of brain, Hyperlipidmia, Gout]
# v2 'diag' '1912', '3485', '2720', '2749' [Malignant neoplasm of brain, Cerebral Edema, Hypercholesterolemia, Gout]
# records = [[[[825, 7, 25],
# [65],
# [0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 26, 28, 32, 39, 87]],
# [[825, 43, 24, 25],
# [602],
# [0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 28, 32, 33, 39]]]]
# N02B, A01A, A02B, A06A, B05C, A12C, C07A, C02D, N02A, B01A, C10A, J01D, N03A, A04A, H04A
# 0.0663
# {0: {26}, 1: {32, 26}, 12: {26, 28}, 15: {32, 19}, 19: {26}, 32: {33}}
#
# GAMENet 0.0342
# [[0, 1, 2, 3, 4, 6, 12, 14, 19, 32, 39],
# [0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 28, 32, 39, 87]]
# {1: {32}, 12: {28}, 15: {32, 19}}
#
# RETAIN 0.1240
# [0, 1, 2, 3, 4, 5, 6, 8, 11, 12, 15, 19, 25, 26, 32]
# true [0,1,2,3,4,6,12,15,19,32]
# {0: {26}, 1: {32, 26}, 2: {25}, 3: {25}, 12: {25, 26}, 15: {32, 19}, 19: {25, 26}, 25: {32, 26}}
#
# Nearest 0.0761
# [0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 26, 28, 32, 39, 87]
# [0,1,2,3,4,6,12,14,15,19,28,32,39]
# {0: {26}, 1: {32, 26}, 12: {26, 28}, 15: {32, 19}, 19: {26}}
#
# Linear
# [0, 1, 3] 0
#
# DMNC 0.0915
# true[[0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 26, 32] + 6,
# [0, 1, 2, 3, 4, 6, 12, 14, 15, 19, 32, 39]] + 7
# [[0, 1, 2, 3, 4, 6, 12, 14, 15, 16, 19, 26, 32, 34, 39, 48, 59, 73],
# [0, 1, 2, 3, 4, 5, 6, 9, 11, 12, 14, 15, 19, 22, 26, 28, 34, 42, ]]
# {0: {26}, 1: {32, 26, 42}, 2: {48}, 12: {34, 73, 22, 26, 28}, 15: {32, 48, 34, 19}, 19: {26, 34}, 26: {34, 59}, 34: {73}})
#
# Leap 0.0468
# [[0, 2, 3, 6, 12, 19, 26, 39], [0, 1, 2, 3, 4, 6, 7, 19, 87]]
# {0: {26}, 12: {26}, 19: {26}})

import numpy as np

sorted(np.array([14, 2, 8, 6, 5, 21, 3, 4, 7, 24, 28, 16, 17, 11, 36, 44, 13, 51, 70, 30]) - 2)

# Frequency of diagnosis / medication codes among the worst-scoring 20% cases.
test_len = int(len(sort_case_study) * 0.2)
top = 10
worst_diag = Counter()
worst_med = Counter()
cnt = 0
for item in sort_case_study[:test_len]:
    patient = item['patient']
    for adm in patient:
        cnt += 1
        for diag in adm[0]:
            worst_diag[diag] += 1
        for med in adm[2]:
            # BUGFIX: the original incremented worst_med[diag] here, so the
            # medication counter only ever counted the last diagnosis code.
            worst_med[med] += 1
print('adm cnt', cnt)
print('----')
for letter, count in worst_diag.most_common(top):
    print('%s: %.4f' % (letter, count))
print('----')
for letter, count in worst_med.most_common(top):
    print('%s: %.4f' % (letter, count))

type(sort_case_study)
code/deplicated/case_study_analystic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Micro-benchmark of the sparse update ``res = mat1 + (mat2 - mat1) * c``
# across scipy CSR (CPU), cupy CSR (GPU) and epipy's rigid_csr_matrix.

# +
import numpy as np
import cupy as cp
from numba import jit, prange
import timeit
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from cupyx.scipy.sparse import coo_matrix as cp_coo_matrix
import sys
sys.path.append('../..')
from epipy.sparse import rigid_csr_matrix


# +
def ss_base(mat1, mat2, c):
    # scipy CSR baseline (result intentionally discarded: only timing matters)
    res = mat1 + (mat2 - mat1) * c


def ss_cp(mat1, mat2, c):
    # same expression on cupy sparse matrices
    res = mat1 + (mat2 - mat1) * c


# (A deprecated, commented-out hand-written numba CSR kernel was removed here;
# see version control history if it is ever needed again.)


def ss_nb(mat, values, row, col, c):
    # epipy rigid CSR in-place transform
    res_mat = mat.transform(values, row, col, c)
# -

N = [int(8 * (2 ** 0.5) ** i) for i in range(28)]  # matrix sizes, geometric
methods = ["ss_base", "ss_cp", "ss_nb"]
K = len(methods)
funcs = [ss_base, ss_cp, ss_nb]
masks = [1, 1, 1]    # which methods to include in the plot
groups = [0, 0, 0]   # subplot index per method
T = [[] for i in range(K)]
deg = 40             # ~nonzeros per row
obtain = 1000        # number of entries in the sparse update

for n in N:
    print("Start with size:", n)
    row, col = np.random.randint(0, n, (2, n * deg))
    data1, data2 = np.random.rand(2, n * deg)
    c = 0.75
    choose = np.random.randint(0, n * deg, (obtain))
    row2 = row[choose]
    col2 = col[choose]
    data2 = data2[choose]
    mat1 = csr_matrix((data1, (row, col)), shape=(n, n))
    cmat1 = cp_coo_matrix((cp.array(data1), (cp.array(row), cp.array(col))), shape=(n, n)).tocsr()
    mat2 = csr_matrix((data2, (row2, col2)), shape=(n, n))
    cmat2 = cp_coo_matrix((cp.array(data2), (cp.array(row2), cp.array(col2))), shape=(n, n)).tocsr()
    rmat = rigid_csr_matrix((data1, (row, col)), shape=(n, n))
    args = [(mat1, mat2, c), (cmat1, cmat2, c), (rmat, data2, row2, col2, c)]
    for i in range(K):
        # BUGFIX: the original's ``%timeit -q -n 1 -r 3 -o`` magic survived
        # jupytext conversion only as a comment, leaving ``t`` undefined
        # (NameError on ``t.average``).  Reproduce "-n 1 -r 3" with the
        # timeit module: 3 repeats of 1 call each, averaged, in ms.
        runs = timeit.repeat(lambda: funcs[i](*args[i]), number=1, repeat=3)
        T[i].append(np.mean(runs) * 1000)

n_groups = max(groups) + 1
fig, axs = plt.subplots(n_groups)
plt.rcParams["figure.figsize"] = (10, 15)
if n_groups == 1:
    for i in range(K):
        if masks[i]:
            axs.plot(N[1:], T[i][1:], label=methods[i])  # drop warm-up size
    axs.set_xlabel("Size")
    axs.set_ylabel("Time(ms)")
    axs.legend()
    axs.set_xscale('log')
else:
    for g in range(n_groups):
        for i in range(K):
            if masks[i] and groups[i] == g:
                axs[g].plot(N[1:], T[i][1:], label=methods[i])
        axs[g].set_xlabel("Size")
        axs[g].set_ylabel("Time(ms)")
        axs[g].legend()
        axs[g].set_xscale('log')
plt.show()

print(T)
test/benchmark/sparse_sparse.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# Python 2 notebook: compares TAD calls (size distribution, count, genome
# coverage) from six callers -- DomainCaller (DI), Arrowhead, rGMAP, OnTAD,
# TADtree, TopDom -- on GM12878 10 kb Hi-C data fetched over HTTP, and draws
# the Sup. Fig. 3 panels.

import numpy as np
import matplotlib.pyplot as plt
import math
import pandas
from StringIO import StringIO
from collections import Counter
from matplotlib.patches import Rectangle
import seaborn as sns
import urllib2
import re


def coverage(domain,ll):
    # Mark every bin covered by any [start, end] interval row of `domain`
    # with 1 in the indicator list `ll`; returns the mutated list.
    domain=domain.astype(int)
    for o in range(0,np.shape(domain)[0]):
        for p in xrange(domain[o,0],(domain[o,1]+1)):
            ll[p]=1
    return ll


def readTADtree (turl):
    # Fetch the TADtree output directory listing at `turl`, pick the file
    # N<k>.txt with the largest k, and return (intervals, unique boundaries),
    # shifted by -1 (0-based).
    TADtreeUrlr = urllib2.urlopen(urllib2.Request(turl)).read()
    TADtreeUrlstringr=re.findall("N\d+",TADtreeUrlr)
    flist = [int(x.split('N')[1]) for x in TADtreeUrlstringr]
    TADtreer = pandas.read_table(urllib2.urlopen(turl+'/N'+str(max(flist))+'.txt'),sep='\t',header=0)
    TADtreeTADr = TADtreer[['start','end']].values-1
    TADtreeTADrb = np.unique(TADtreeTADr.flatten())
    return (TADtreeTADr, TADtreeTADrb)


# +
# hg19 chromosome lengths (chr1..chr22), in bp.
chrs_length = [249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,59128983,63025520,48129895,51304566]

# Per-caller accumulators: TAD sizes (in 10 kb bins) ...
cDIall = np.empty((0,0))
cArrowall = np.empty((0,0))
crGMAPball = np.empty((0,0))
cOnTAD_rawball = np.empty((0,0))
cTADtreeTADball = np.empty((0,0))
cTopDomball = np.empty((0,0))

# ... and number of bins covered by at least one TAD.
covDIall = 0
covArrowall = 0
covrGMAPball = 0
covOnTAD_rawball = 0
covTADtreeTADball = 0
covTopDomball = 0
# -

####We excluded Chr1 as TADtree cannot be finished on that chr. We also excluded Chr9 as rGMAP failed to call TADs on that chr####
# NOTE(review): the note above mentions excluding only chr1 and chr9, but the
# loop below starts at chr4 (skipping chr1-3) -- confirm which is intended.
res = 10000  # bin size: 10 kb
nbins = 0
for chrnum in range(4,23):
    if chrnum in [9]:
        continue
    else:
        # DomainCaller (DI) calls; columns 1-2 are start/end in bp -> bins.
        DItad = pandas.read_table(urllib2.urlopen('http://bx.psu.edu/~lua137/OnTAD/output/DI_TAD/hg19/GM12878/10kb/GM12878_10kb_chr'+str(chrnum)+'.add.DI.out.7col.final'),sep='\t',header=None)
        DI=DItad.loc[:,1:2].values/res
        DIb=np.unique(DI.flatten())
        DIb = DIb[~np.isnan(DIb)]
        #print len(DIb),len(DI)
        # Arrowhead (juicer) calls.
        Arrowhead = pandas.read_table(urllib2.urlopen('http://bx.psu.edu/~lua137/OnTAD/output/juicer/Arrowhead.Gm12878.10kb.KR.chr'+str(chrnum)),sep='\t',header=None)
        Arrow = Arrowhead.loc[:,1:2].values/res
        Arrowb=np.unique(Arrow.flatten())
        #print len(Arrowb),len(Arrow)
        #TADtreeUrl = urllib2.urlopen(urllib2.Request('http://bx.psu.edu/~lua137/OnTAD/output/TADtree/final_alg/10kb/Gm12878/chr'+str(chrnum))).read()
        #TADtreeUrlstring=re.findall("N\d+",TADtreeUrl)
        #flist = [int(x.split('N')[1]) for x in TADtreeUrlstring]
        # TADtree calls (already 0-based bins, see readTADtree).
        TADtreeTAD, TADtreeTADb = readTADtree('http://bx.psu.edu/~lua137/OnTAD/output/TADtree/final_alg/10kb_M2/Gm12878/chr'+str(chrnum))
        #print len(TADtreeTADb),len(TADtreeTAD)
        # rGMAP calls.
        rG = pandas.read_table(urllib2.urlopen('http://bx.psu.edu/~lua137/OnTAD/output/rGMAP/GM12878_combined_10000_chr'+str(chrnum)+'.rGMAPTAD'),sep='\t',header=None)
        rGMAP=rG.loc[:,0:1].values/res
        rGMAPb=np.unique(rGMAP.flatten())
        # OnTAD calls; keep rows with column 2 > 0, shift to 0-based bins.
        OnTAD_raw = pandas.read_table(urllib2.urlopen('http://bx.psu.edu/~lua137/OnTAD/output/OnTAD/Gm12878/10kb/OnTAD_KRnorm_pen0.1_max200_hsz5_chr'+str(chrnum)+'.tad'),sep='\t',header=None)
        OnTAD_rawa = OnTAD_raw.loc[(OnTAD_raw[2]>0),:].values[:,0:2]-1
        OnTAD_rawb = np.unique(OnTAD_rawa.flatten())
        # TopDom calls; keep 'domain' rows, columns 1 and 3 are the bounds.
        TopDom = pandas.read_table(urllib2.urlopen('http://bx.psu.edu/~lua137/OnTAD/output/TopDom/Gm12878/10kb/chr'+str(chrnum)+'.domain'),sep=' ',header=None)
        TopDoma = TopDom.loc[TopDom[5]=='domain',[1,3]].values-1
        TopDomb = np.unique(TopDoma.flatten())
        # Accumulate TAD sizes (end - start, in bins).
        cDIall = np.append(cDIall,abs(DI[:,1]-DI[:,0]))
        cArrowall = np.append(cArrowall,abs(Arrow[:,1]-Arrow[:,0]))
        crGMAPball = np.append(crGMAPball,abs(rGMAP[:,1]-rGMAP[:,0]))
        cOnTAD_rawball = np.append(cOnTAD_rawball,abs(OnTAD_rawa[:,1]-OnTAD_rawa[:,0]))
        cTADtreeTADball = np.append(cTADtreeTADball,abs(TADtreeTAD[:,1]-TADtreeTAD[:,0]))
        cTopDomball = np.append(cTopDomball,abs(TopDoma[:,1]-TopDoma[:,0]))
        # Accumulate covered-bin counts.  `chrs_length[...]/res` relies on
        # Python 2 integer division for the per-chromosome bin count.
        covDIall += sum(coverage(DI,[0]*(chrs_length[chrnum-1]/res+1)))
        covArrowall += sum(coverage(Arrow,[0]*(chrs_length[chrnum-1]/res+1)))
        covrGMAPball += sum(coverage(rGMAP,[0]*(chrs_length[chrnum-1]/res+1)))
        covOnTAD_rawball += sum(coverage(OnTAD_rawa,[0]*(chrs_length[chrnum-1]/res+1)))
        covTADtreeTADball += sum(coverage(TADtreeTAD,[0]*(chrs_length[chrnum-1]/res+1)))
        covTopDomball += sum(coverage(TopDoma, [0]*(chrs_length[chrnum-1]/res+1)))
        nbins = nbins + chrs_length[chrnum-1]/res+1
        print '####Done with chr'+str(chrnum)+'####'

len(cArrowall)

# Panel 1: boxplot of TAD sizes per caller (y ticks relabeled bins -> Mb).
plt.figure(2,figsize=(5,8))
d = {'OnTAD':cOnTAD_rawball, 'Arrowhead':cArrowall, 'rGMAP': crGMAPball,'TADtree':cTADtreeTADball, 'DomainCaller':cDIall, 'TopDom':cTopDomball}
sizetable = pandas.DataFrame.from_dict(d, orient='index').transpose()
sns.boxplot(data=sizetable, width=0.4, palette="colorblind", order=["OnTAD", 'Arrowhead', 'rGMAP', 'TADtree', 'DomainCaller', 'TopDom'], showfliers=False)
plt.ylabel('TAD size', {'color': 'k', 'fontsize': 20})
plt.yticks((0, 50, 100, 150, 200, 250), ('0', '0.5Mb', '1.0Mb', '1.5Mb', '2.0Mb', '2.5Mb'), color='k',size=15)
plt.xticks(rotation=45, size=15)
plt.savefig("/Users/linan/Dropbox/TADsize.png", dpi=300, transparent=True, bbox_inches='tight')
plt.show()

# +
# Panel 2: bar plot of the number of TADs per caller.
plotlist = np.array([len(cDIall),len(cOnTAD_rawball),len(cArrowall),len(crGMAPball),len(cTADtreeTADball), len(cTopDomball)])
plt.figure(6)
fig,ax = plt.subplots(1)
ax.bar(np.arange(6), plotlist,0.35)
plt.xticks(np.arange(6), ('DomainCaller','OnTAD','Arrowhead','rGMAP','TADtree', 'TopDom'),size=15)
# Left Y-axis labels
plt.ylabel('Number of TADs', {'color': 'k', 'fontsize': 20})
plt.yticks(color='k',size=15)
plt.xticks(rotation=45)
plt.savefig("/Users/linan/Dropbox/numofTAD.png", dpi=300, transparent=True, bbox_inches='tight')
plt.show()

# +
# Panel 3: bar plot of the fraction of bins covered by at least one TAD.
print np.array([covDIall,covArrowall,covrGMAPball,covOnTAD_rawball,covTADtreeTADball,covTopDomball])/float(nbins)
plt.figure(6)
fig,ax = plt.subplots(1)
ax.bar(np.arange(6),np.array([covDIall,covOnTAD_rawball,covArrowall,covrGMAPball,covTADtreeTADball, covTopDomball])/float(nbins),0.35)
plt.xticks(np.arange(6), ('DomainCaller','OnTAD','Arrowhead','rGMAP','TADtree', 'TopDom'),size=15)
# Left Y-axis labels
plt.ylabel('% covered by TAD', {'color': 'k', 'fontsize': 20})
plt.yticks((0, 0.2, 0.4, 0.6, 0.8), ('0', '20%', '40%', '60%', '80%'), color='k',size=15)
plt.xticks(rotation=45)
plt.savefig("/Users/linan/Dropbox/TADcoverage.png", dpi=300, transparent=True, bbox_inches='tight')
plt.show()
# -
Sup_Fig3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Zh0ooGCE7Peg" # # Imports # + id="_VEuwSD-t8v0" import time import nltk import string import re import gensim.downloader import pandas as pd import matplotlib.pyplot as plt import numpy as np from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from gensim.models import Word2Vec from sklearn import random_projection from sklearn.metrics.pairwise import cosine_similarity from sklearn.svm import SVC,LinearSVC from sklearn.linear_model import LogisticRegression from sklearn.model_selection import KFold, ShuffleSplit from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, ConfusionMatrixDisplay from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import preprocessing from sklearn.decomposition import PCA # !pip3 install fuzzywuzzy from fuzzywuzzy import fuzz import xgboost from collections import Counter from tqdm.notebook import tqdm try: from bayes_opt import BayesianOptimization except : # !pip3 install bayesian-optimization from bayes_opt import BayesianOptimization nltk.download('stopwords') nltk.download('wordnet') nltk.download('averaged_perceptron_tagger') # + [markdown] id="YcGT4rAfzJE-" # # Loading and Preprocessing # + [markdown] id="wiklkDUZ0cYv" # Define the preprocessing function(s). # + id="wdHa1pm4zFjO" stop_words = set(stopwords.words('english')) lemmatizer = WordNetLemmatizer() def benchmark_clean_text(text): # Lower words try: cleaned = text.lower() except AttributeError: return "this is an empty question".split() cleaned = text # Remove stopwords. 
Needed to be done before the apostrophes are removed cleaned = ' '.join([w for w in cleaned.split(' ') if not w in stop_words]) # Remove punctuation cleaned = cleaned.translate(str.maketrans('', '', string.punctuation +'–—‘’“”…')) # Replace all the remaining numbers that did not match any of the above categories number_pattern = re.compile(r'(\d{1,3},)?(\d{3},)*\d+(\.\d+)?') cleaned = number_pattern.sub(r' ', cleaned) # lemmatization cleaned = " ".join([lemmatizer.lemmatize(word) for word in cleaned.split()]) cleaned = cleaned.split() #note the added .split() here. Returns a list of list of words if cleaned == [] or cleaned == ['nan'] or cleaned == ['null'] or cleaned == ['a']: cleaned = "this is an empty question".split() return cleaned def quora_clean_text(text): text = str(text) text = text.lower() # Clean the text text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text) text = re.sub(r"what's", "what is ", text) text = re.sub(r"\'s", " ", text) text = re.sub(r"\'ve", " have ", text) text = re.sub(r"can't", "cannot ", text) text = re.sub(r"n't", " not ", text) text = re.sub(r"i'm", "i am ", text) text = re.sub(r"\'re", " are ", text) text = re.sub(r"\'d", " would ", text) text = re.sub(r"\'ll", " will ", text) text = re.sub(r",", " ", text) text = re.sub(r"\.", " ", text) text = re.sub(r"!", " ! 
", text) text = re.sub(r"\/", " ", text) text = re.sub(r"\^", " ^ ", text) text = re.sub(r"\+", " + ", text) text = re.sub(r"\-", " - ", text) text = re.sub(r"\=", " = ", text) text = re.sub(r"'", " ", text) text = re.sub(r"(\d+)(k)", r"\g<1>000", text) text = re.sub(r":", " : ", text) text = re.sub(r" e g ", " eg ", text) text = re.sub(r" b g ", " bg ", text) text = re.sub(r" u s ", " american ", text) text = re.sub(r"\0s", "0", text) text = re.sub(r" 9 11 ", "911", text) text = re.sub(r"e - mail", "email", text) text = re.sub(r"j k", "jk", text) text = re.sub(r"\s{2,}", " ", text) text = text.split() # Check for empty texts if text == [] or text == ['nan'] or text == ['null'] or text == ['a']: text = "this is an empty question".split() return text def preprocess(text): return benchmark_clean_text(text) # + [markdown] id="ztqLcwic0fl0" # Load, prepocess and store the data in a proper form. # + id="Zj2dNh3VtLJA" train = pd.read_csv("train.csv")#.dropna() test = pd.read_csv("test_without_labels.csv")#.dropna() # We need the test NaN values for ordering when submitting #train_df = pd.read_csv("../storage/datasets/q2b/train.csv") #test_df = pd.read_csv("../storage/datasets/q2b/test_without_labels.csv") # + id="JB0jeQ_wvfd7" # Create the preprocessed text in memory train_q1 = [preprocess(t) for t in train['Question1']] train_q2 = [preprocess(t) for t in train['Question2']] train_labels = np.array(train['IsDuplicate']) test_q1 = [preprocess(t) for t in test['Question1']] test_q2 = [preprocess(t) for t in test['Question2']] # + id="MhCdUoZYvfd8" # Create the preprocessed train texts and save it in disk (care, we save a string insted of a list of words) cleaned_train_df = train_df.copy(deep=True) cleaned_train_q1 = cleaned_train_df.Question1.apply(lambda text: " ".join(preprocess(text))) cleaned_train_df = cleaned_train_df.assign(Q1=cleaned_train_q1) cleaned_train_q2 = cleaned_train_df.Question2.apply(lambda text: " ".join(preprocess(text))) cleaned_train_df = 
cleaned_train_df.assign(Q2=cleaned_train_q2) cleaned_train_df.drop(columns=['Question1', 'Question2'], inplace=True) #cleaned_train_df.to_csv('../storage/datasets/q2b/preprocessed/train_benchmark_clean.csv', sep=',') # + id="_tcJ_Uojvfd9" # Create the preprocessed test texts and save it in disk (care, we save a string insted of a list of words) cleaned_test_df = test_df.copy(deep=True) cleaned_test_q1 = cleaned_test_df.Question1.apply(lambda text: " ".join(preprocess(text))) cleaned_test_df = cleaned_test_df.assign(Q1=cleaned_test_q1) cleaned_test_q2 = cleaned_test_df.Question2.apply(lambda text: " ".join(preprocess(text))) cleaned_test_df = cleaned_test_df.assign(Q2=cleaned_test_q2) cleaned_test_df.drop(columns=['Question1', 'Question2'], inplace=True) cleaned_test_df.to_csv('../storage/datasets/q2b/preprocessed/test_benchmark_clean.csv', sep=',') # + [markdown] id="vmXsr5F-6ESu" # # Feature Engineering # + id="xZjg7CcavfeB" # Reading the preprocessed train and test sets clean_train_df = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv') clean_test_df = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv') # + [markdown] id="m5MNP5bBvfeE" # ### Feature Class # # For easier organisation and creation / storing of features we create a class which offers some basic functionality. # + id="SyyGYgzVvfeE" class TextFeature: def __init__(self, train_arr, test_arr, name): self.train_arr = train_arr self.test_arr = test_arr self.name = name # + [markdown] id="ZPpgWtUX6ODm" # ## Word2Vec # + [markdown] id="8fykgF960jD8" # # Create the word2vec model. 
# + id="Gnrgsb-vtv8y" # This is a 1.6GB download for the first time quora_w2v = gensim.downloader.load('word2vec-google-news-300') google_news_emb_dim = 300 # + id="mSy6_FB4vfeH" # This part creates the vocabulary that can be both used in the feature extraction part and as an embedding layer in a NN vocabulary = dict() inverse_vocabulary = ['<unk>'] stop_words = set(stopwords.words('english')) def text_to_vec(df, w2v, vocabulary, inverse_vocabulary): numb_represantations = [] for index, row in df.iterrows(): questions = [] for question in ['Q1', 'Q2']: q2n = [] # print(row.loc[question]) for word in row.loc[question].split(): # print(row.loc[question].split()) # Stopwords have not yet been removed since they might be included in the pretrained word2vec if word in stop_words and word not in w2v.vocab: continue if word not in vocabulary: vocabulary[word] = len(inverse_vocabulary) q2n.append(len(inverse_vocabulary)) inverse_vocabulary.append(word) else: q2n.append(vocabulary[word]) questions.append(q2n) numb_represantations.append(questions) return numb_represantations, vocabulary, inverse_vocabulary numb_represantation_train, vocabulary, inverse_vocabulary = text_to_vec(clean_train_df, quora_w2v, vocabulary, inverse_vocabulary) numb_represantation_test, vocabulary, inverse_vocabulary = text_to_vec(clean_test_df, quora_w2v, vocabulary, inverse_vocabulary) # + [markdown] id="eMzjYLsdvfeI" # **Care:** In the method below we create and save the **aggregated** (mean) word embeddings for each sentence. This means that we create a numpy array of dimensions: `numb_of_texts, 2, embedding_dim`. # # Saving the embeddings individually is not possible due to memory constraints. 
# + id="ct-uK8vzvfeJ"
# From integer representation to embedding representation
def int_to_embed(w2v, dims, numb_repr, inverse_vocabulary):
    """Return the list of word vectors for the id list `numb_repr`.

    Words unknown to `w2v` get a random N(0, 1) vector of length `dims`
    (non-deterministic across runs — no seed is set).
    """
    embeddings = []
    for numb in numb_repr:
        word = inverse_vocabulary[numb]
        if word in w2v.vocab:
            embeddings.append(w2v.word_vec(word))
        else:
            embeddings.append(np.random.normal(0, 1, dims))
    return embeddings

def create_avg_embedding_matrix(numb_represantation_of_texts, w2v, dims, inverse_vocabulary):
    """For every [q1_ids, q2_ids] pair, average the word vectors of each question.

    Returns a nested list of shape (numb_of_texts, 2, dims).
    """
    ret_embedding_mat = []
    for questions in numb_represantation_of_texts:
        both_question_embeddings = []
        for q in questions:
            q_embeddings = np.mean(np.array(int_to_embed(w2v, dims, q, inverse_vocabulary)), axis=0)
            both_question_embeddings.append(q_embeddings)
        ret_embedding_mat.append(both_question_embeddings)
    return ret_embedding_mat

train_embedding_mat = create_avg_embedding_matrix(numb_represantation_train, quora_w2v, google_news_emb_dim, inverse_vocabulary)
test_embedding_mat = create_avg_embedding_matrix(numb_represantation_test, quora_w2v, google_news_emb_dim, inverse_vocabulary)

# + id="IcW0qHcCvfeK"
# Transforming to numpy arrays and saving in disk
train_embedding_arr = np.array(train_embedding_mat)
np.save('../storage/datasets/q2b/word_embeddings/train_embedding_avg.npy', train_embedding_arr)
test_embedding_arr = np.array(test_embedding_mat)
np.save('../storage/datasets/q2b/word_embeddings/test_embedding_avg.npy', test_embedding_arr)

# + [markdown] id="Pp8RitQfvfeN"
# ## Cosine similarity of averaged word embeddings

# + id="OXMoL1LHvfeO"
# We first load the averaged embeddings from disk
train_embeddings = np.load('../storage/datasets/q2b/word_embeddings/train_embedding_avg.npy')
test_embeddings = np.load('../storage/datasets/q2b/word_embeddings/test_embedding_avg.npy')

# + id="ekJut3u3vfeP"
# We then calculate the cosine similarities of the two questions of each row
def matrix_cosine(x, y):
    """Row-wise cosine similarity between two (n, dims) matrices."""
    return np.einsum('ij,ij->i', x, y) / (
        np.linalg.norm(x, axis=1) * np.linalg.norm(y, axis=1)
    )

train_cosine_similarities = matrix_cosine(train_embeddings[:, 0, :], train_embeddings[:, 1, :])
test_cosine_similarities = matrix_cosine(test_embeddings[:, 0, :], test_embeddings[:, 1, :])

# + id="ivkYqLIuvfeR"
# We store them in memory in a `TextFeature` object
avg_embeddings_cos_similarities = TextFeature(train_cosine_similarities, test_cosine_similarities, "AvgEmbeddingsCosine")

# + [markdown] id="SVXTWneuvfeT"
# ## BoW cosine similarity

# + [markdown] id="ZudXzEaZvfeU"
# We first create a bag of words represantation using the tf-idf vectorizer on all the questions. Then we calculate their cosine similarity.

# + id="aUryQca23ZP4"
df_train_cleaned = pd.read_csv('../storage/datasets/q2b/preprocessed/train_benchmark_clean.csv')
df_test_cleaned = pd.read_csv('../storage/datasets/q2b/preprocessed/test_benchmark_clean.csv')

# + id="-IeU9X_bvfeV"
concatenated_train_questions = pd.concat([df_train_cleaned.Q1, df_train_cleaned.Q2])

tfidf_vectorizer = TfidfVectorizer()
# Fit on all the questions in the train set
tfidf_vectorizer.fit(concatenated_train_questions)

# Transform the train questions separately
q1_train_tfidf = tfidf_vectorizer.transform(df_train_cleaned.Q1)
q2_train_tfidf = tfidf_vectorizer.transform(df_train_cleaned.Q2)

# Transform the test questions separately
q1_test_tfidf = tfidf_vectorizer.transform(df_test_cleaned.Q1)
q2_test_tfidf = tfidf_vectorizer.transform(df_test_cleaned.Q2)

# + id="3vVb4TDuvfeW" outputId="7945fe86-3571-4de1-e64c-f3e46b1a11ab"
# Calculate the cosine similarities
from scipy.spatial import distance  # NOTE(review): imported but unused below

def cosine_of_vectors(t1, t2):
    """Cosine similarity of two 1-row scipy sparse vectors.

    Yields NaN when either vector is all-zero (handled by nan_to_num below).
    """
    t1 = t1.toarray()[0]
    t2 = t2.toarray()[0]
    return (t1 @ t2) / (np.linalg.norm(t1) * np.linalg.norm(t2))

train_bow_similarities = np.array([cosine_of_vectors(t1, t2) for t1, t2 in zip(q1_train_tfidf, q2_train_tfidf)])
test_bow_similarities = np.array([cosine_of_vectors(t1, t2) for t1, t2 in zip(q1_test_tfidf, q2_test_tfidf)])

# + id="Wdo4HcRFvfeX"
# We store them in memory in a `TextFeature` object
# (nan_to_num maps the NaNs produced by all-zero tf-idf rows to 0)
tfidf_cos_sim = TextFeature(np.nan_to_num(train_bow_similarities), np.nan_to_num(test_bow_similarities), "TfIdfCosSimilarity")

# + [markdown] id="uJcbaDJ-vfeX"
# ## Edit distance (Levenshtein)

# + id="xL74iS-AvfeX"
df_train_cleaned = pd.read_csv('../storage/datasets/q2b/preprocessed/train_benchmark_clean.csv')
df_test_cleaned = pd.read_csv('../storage/datasets/q2b/preprocessed/test_benchmark_clean.csv')

# + id="SKA9t1JuvfeZ"
# Four fuzzywuzzy edit-distance variants between the two questions of each row
train_edit_ratio = np.array([fuzz.ratio(row.Q1, row.Q2) for ind, row in df_train_cleaned.iterrows()])
test_edit_ratio = np.array([fuzz.ratio(row.Q1, row.Q2) for ind, row in df_test_cleaned.iterrows()])

train_edit_partial_ratio = np.array([fuzz.partial_ratio(row.Q1, row.Q2) for ind, row in df_train_cleaned.iterrows()])
test_edit_partial_ratio = np.array([fuzz.partial_ratio(row.Q1, row.Q2) for ind, row in df_test_cleaned.iterrows()])

train_edit_token_sort_ratio = np.array([fuzz.token_sort_ratio(row.Q1, row.Q2) for ind, row in df_train_cleaned.iterrows()])
test_edit_token_sort_ratio = np.array([fuzz.token_sort_ratio(row.Q1, row.Q2) for ind, row in df_test_cleaned.iterrows()])

train_edit_token_set_ratio = np.array([fuzz.token_set_ratio(row.Q1, row.Q2) for ind, row in df_train_cleaned.iterrows()])
test_edit_token_set_ratio = np.array([fuzz.token_set_ratio(row.Q1, row.Q2) for ind, row in df_test_cleaned.iterrows()])

# + id="b4Q2LoGavfeZ"
# We store them in memory in `TextFeature` objects
edit_ratio = TextFeature(train_edit_ratio, test_edit_ratio, "edit_ratio")
edit_partial_ratio = TextFeature(train_edit_partial_ratio, test_edit_partial_ratio, "edit_partial_ratio")
edit_token_sort_ratio = TextFeature(train_edit_token_sort_ratio, test_edit_token_sort_ratio, "edit_token_sort_ratio")
edit_token_set_ratio = TextFeature(train_edit_token_set_ratio, test_edit_token_set_ratio, "edit_token_set_ratio")

# + [markdown] id="1aR4uV2zvfeZ"
# ## Text Length

# + id="PND0jcE0vfea"
df_train_cleaned = pd.read_csv('../storage/datasets/q2b/preprocessed/train_benchmark_clean.csv')
df_test_cleaned = pd.read_csv('../storage/datasets/q2b/preprocessed/test_benchmark_clean.csv')
df_train = pd.read_csv('../storage/datasets/q2b/train.csv')
df_test = pd.read_csv('../storage/datasets/q2b/test_without_labels.csv')
df_train_quora = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test_quora = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')

# + id="B5iZowlSvfea"
# Whitespace-token counts per question, computed on three text versions
# (benchmark-clean, raw original, quora-clean).
train_lengths_q1_clean = np.array([len(row.Q1.split()) for ind, row in df_train_cleaned.iterrows()])
train_lengths_q2_clean = np.array([len(row.Q2.split()) for ind, row in df_train_cleaned.iterrows()])
test_lengths_q1_clean = np.array([len(row.Q1.split()) for ind, row in df_test_cleaned.iterrows()])
test_lengths_q2_clean = np.array([len(row.Q2.split()) for ind, row in df_test_cleaned.iterrows()])

# On the raw text a missing question is parsed by pandas as float NaN, hence the isinstance guard.
train_lengths_q1_original = np.array([len(row.Question1.split()) for ind, row in df_train.iterrows()])
train_lengths_q2_original = np.array([len(row.Question2.split()) if not isinstance(row.Question2, float) else 0 for ind, row in df_train.iterrows()])
test_lengths_q1_original = np.array([len(row.Question1.split()) if not isinstance(row.Question1, float) else 0 for ind, row in df_test.iterrows()])
test_lengths_q2_original = np.array([len(row.Question2.split()) if not isinstance(row.Question2, float) else 0 for ind, row in df_test.iterrows()])

train_lengths_q1_quora = np.array([len(row.Q1.split()) for ind, row in df_train_quora.iterrows()])
train_lengths_q2_quora = np.array([len(row.Q2.split()) for ind, row in df_train_quora.iterrows()])
test_lengths_q1_quora = np.array([len(row.Q1.split()) for ind, row in df_test_quora.iterrows()])
test_lengths_q2_quora = np.array([len(row.Q2.split()) for ind, row in df_test_quora.iterrows()])

# + id="qOuCZJhmvfeb"
# Store all of the above into feature objects
length_q1_clean = TextFeature(train_lengths_q1_clean, test_lengths_q1_clean, "length_q1_clean")
length_q2_clean = TextFeature(train_lengths_q2_clean, test_lengths_q2_clean, "length_q2_clean")
length_q1_original = TextFeature(train_lengths_q1_original, test_lengths_q1_original, "length_q1_original")
length_q2_original = TextFeature(train_lengths_q2_original, test_lengths_q2_original, "length_q2_original")
length_q1_quora = TextFeature(train_lengths_q1_quora, test_lengths_q1_quora, "length_q1_quora")
length_q2_quora = TextFeature(train_lengths_q2_quora, test_lengths_q2_quora, "length_q2_quora")

# + [markdown] id="uXPlNR38Tvk2"
# ## Names

# + id="myN2d5tuTzHH"
clean_train_df = pd.read_csv('train_quora_clean.csv')
clean_test_df = pd.read_csv('test_quora_clean.csv')

# + id="Q_hSYk_gUMSZ"
# Average the word2vec vectors of capitalised words ("names"), skipping each
# question's first token (sentence-initial capitalisation is not a name cue).
# NOTE(review): train_q1/train_q2/test_q1/test_q2 are not defined in this chunk —
# presumably tokenised question lists built elsewhere; confirm before running.
vecs = [[w if w[0].isupper() else "" for w in q[1:]] for q in train_q1]
train_q1_names = []
train_q1_num_of_names = []
for q in vecs:
    aver = np.zeros(300)
    words = 0
    for w in q:
        if w=="":
            continue
        try:
            aver += np.array(quora_w2v.word_vec(w))
            words+=1
        except:
            pass
    aver /= max(words,1)
    train_q1_names.append(aver)
    train_q1_num_of_names.append(words)

vecs = [[w if w[0].isupper() else "" for w in q[1:]] for q in train_q2]
train_q2_names = []
train_q2_num_of_names = []
for q in vecs:
    aver = np.zeros(300)
    words = 0
    for w in q:
        if w=="":
            continue
        try:
            aver += np.array(quora_w2v.word_vec(w))
            words+=1
        except:
            pass
    aver /= max(words,1)
    train_q2_names.append(aver)
    train_q2_num_of_names.append(words)

# NOTE(review): this first cosine pass is overwritten by the zero-guarded loop in
# the next cell. Also, the cosine_of_vectors defined earlier calls .toarray() on
# its arguments (sparse vectors), while these name vectors are dense numpy arrays —
# verify which cosine helper is actually in scope when this cell runs.
train_names_cosine = []
for i in range(len(train_q1_names)):
    names1 = train_q1_names[i]
    names2 = train_q2_names[i]
    train_names_cosine.append(cosine_of_vectors(names1,names2))

vecs = [[w if w[0].isupper() else "" for w in q[1:]] for q in test_q1]
test_q1_names = []
test_q1_num_of_names = []
for q in vecs:
    aver = np.zeros(300)
    words = 0
    for w in q:
        if w=="":
            continue
        try:
            aver += np.array(quora_w2v.word_vec(w))
            words+=1
        except:
            pass
    aver /= max(words,1)
    test_q1_names.append(aver)
    test_q1_num_of_names.append(words)

vecs = [[w if w[0].isupper() else "" for w in q[1:]] for q in test_q2]
test_q2_names = []
test_q2_num_of_names = []
for q in vecs:
    aver = np.zeros(300)
    words = 0
    for w in q:
        if w=="":
            continue
        try:
            aver += np.array(quora_w2v.word_vec(w))
            words+=1
        except:
            pass
    aver /= max(words,1)
    test_q2_names.append(aver)
    test_q2_num_of_names.append(words)

# + id="KMe3Yg129LfJ"
def all_zeros(a):
    """True iff every element of `a` is 0 (i.e. no name vector was accumulated)."""
    for i in a:
        if i!=0:
            return False
    return True

# Recompute the name cosines with a guard: pairs with an all-zero vector get 0
train_names_cosine = []
for i in range(len(train_q1_names)):
    names1 = train_q1_names[i]
    names2 = train_q2_names[i]
    if all_zeros(names1) or all_zeros(names2):
        cosine=0
    else:
        cosine = cosine_of_vectors(names1,names2)
    train_names_cosine.append(cosine)

test_names_cosine = []
for i in range(len(test_q1_names)):
    names1 = test_q1_names[i]
    names2 = test_q2_names[i]
    if all_zeros(names1) or all_zeros(names2):
        cosine=0
    else:
        cosine = cosine_of_vectors(names1,names2)
    test_names_cosine.append(cosine)

# + id="R6TmbmEk-WW5"
# Debug peek at the first 100 name-cosine values and name counts
for i in range(100):
    c = train_names_cosine[i]
    print(c, train_q1_num_of_names[i],train_q2_num_of_names[i])

# + id="I7lbxCAmV_41"
q1_num_names = TextFeature(train_q1_num_of_names,test_q1_num_of_names, "q1_num_names")
q2_num_names = TextFeature(train_q2_num_of_names,test_q2_num_of_names, "q2_num_names")
names_cosine = TextFeature(train_names_cosine,test_names_cosine, "names_cosine")

# + [markdown] id="wr7eAS4KeciJ"
# ## Number of Stopwords

# + id="QSeyj4TceciJ"
df_train = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')

# + id="6-XRr93NeciJ"
stop_words = set(stopwords.words('english'))

def stopwords_in_str(txt):
    """Count how many whitespace tokens of `txt` are NLTK English stopwords."""
    tokens = txt.split()
    return sum([1 if word in stop_words else 0 for word in tokens])

train_stopwords_q1 = np.array([stopwords_in_str(q) for q in df_train.Q1])
train_stopwords_q2 = np.array([stopwords_in_str(q) for q in df_train.Q2])
test_stopwords_q1 = np.array([stopwords_in_str(q) for q in df_test.Q1])
test_stopwords_q2 = np.array([stopwords_in_str(q) for q in df_test.Q2])

train_stopwords_diff = np.abs(train_stopwords_q1 - train_stopwords_q2)
test_stopwords_diff = np.abs(test_stopwords_q1 - test_stopwords_q2)

# + id="Wi94VM70eciK"
stopwords_q1 = TextFeature(train_stopwords_q1, test_stopwords_q1, "stopwords_q1")
stopwords_q2 = TextFeature(train_stopwords_q2, test_stopwords_q2, "stopwords_q2")
stopwords_diff = TextFeature(train_stopwords_diff, test_stopwords_diff, "stopwords_diff")

# + [markdown] id="AyW3MLExeciK"
# ## Number of Punctuation symbols

# + id="crUENY7OeciL"
df_train = pd.read_csv('../storage/datasets/q2b/train.csv')
df_test = pd.read_csv('../storage/datasets/q2b/test_without_labels.csv')

# + id="Y4da7UjHeciM"
# string.punctuation plus a few unicode dashes/quotes/ellipsis seen in the data
punctuations = set(string.punctuation + '–—‘’“”…')

def numb_of_punct(txt):
    """Count punctuation characters in `txt`; NaN (float) inputs count as 0."""
    try:
        return sum([1 if char in punctuations else 0 for char in txt])
    except TypeError:
        return 0

train_puncts_q1 = np.array([numb_of_punct(q) for q in df_train.Question1])
train_puncts_q2 = np.array([numb_of_punct(q) for q in df_train.Question2])
test_puncts_q1 = np.array([numb_of_punct(q) for q in df_test.Question1])
test_puncts_q2 = np.array([numb_of_punct(q) for q in df_test.Question2])

train_puncts_diff = np.abs(train_puncts_q1 - train_puncts_q2)
test_puncts_diff = np.abs(test_puncts_q1 - test_puncts_q2)

# + id="0L-dstz4eciM"
puncts_q1 = TextFeature(train_puncts_q1, test_puncts_q1, "punctuations_q1")
puncts_q2 = TextFeature(train_puncts_q2, test_puncts_q2, "punctuations_q2")
puncts_diff = TextFeature(train_puncts_diff, test_puncts_diff, "punctuations_diff")

# + [markdown] id="k8QP6tbfeciM"
# ## Number of words not in Google News embeddings

# + id="zmDpb_hxeciM"
df_train = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')
quora_w2v = gensim.downloader.load('word2vec-google-news-300')

# + id="cjNmeL2KeciN"
def numb_of_words_not_in_embeddings(txt):
    """Count whitespace tokens of `txt` missing from the GoogleNews word2vec vocabulary."""
    tokens = txt.split()
    return sum([1 if word not in quora_w2v.vocab else 0 for word in tokens])

train_not_in_w2v_q1 = np.array([numb_of_words_not_in_embeddings(q) for q in df_train.Q1])
train_not_in_w2v_q2 = np.array([numb_of_words_not_in_embeddings(q) for q in df_train.Q2])
test_not_in_w2v_q1 = np.array([numb_of_words_not_in_embeddings(q) for q in df_test.Q1])
test_not_in_w2v_q2 = np.array([numb_of_words_not_in_embeddings(q) for q in df_test.Q2])

train_not_in_w2v_diff = np.abs(train_not_in_w2v_q1 - train_not_in_w2v_q2)
test_not_in_w2v_diff = np.abs(test_not_in_w2v_q1 - test_not_in_w2v_q2)

# + id="Z-ShUPc0eciN"
not_in_w2v_q1 = TextFeature(train_not_in_w2v_q1, test_not_in_w2v_q1, "not_in_w2v_q1")
not_in_w2v_q2 = TextFeature(train_not_in_w2v_q2, test_not_in_w2v_q2, "not_in_w2v_q2")
not_in_w2v_diff = TextFeature(train_not_in_w2v_diff, test_not_in_w2v_diff, "not_in_w2v_diff")

# + [markdown] id="n9rp443seciN"
# ## Edit distance of words not in Google News embeddings

# + id="jUrwuuDpeciN"
df_train = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')
quora_w2v = gensim.downloader.load('word2vec-google-news-300')

# + id="jXh_L0PIeciO"
def dist_of_words_not_in_w2v(q1, q2, distance):
    """Apply `distance` to only the words of q1/q2 that are NOT in the GoogleNews vocabulary.

    `distance` is a fuzzywuzzy scorer such as fuzz.token_sort_ratio.
    """
    # Get which words are not in google news w2v, and remove any extra whitespace
    not_words_q1 = re.sub(' +', ' ', " ".join([word if word not in quora_w2v.vocab else "" for word in q1.split()]))
    not_words_q2 = re.sub(' +', ' ', " ".join([word if word not in quora_w2v.vocab else "" for word in q2.split()]))
    # BUGFIX: previously returned distance(q1, q2), which made the filtering above
    # dead code and duplicated the plain edit-distance feature. Compare the
    # filtered out-of-vocabulary strings, as the section title intends.
    return distance(not_words_q1, not_words_q2)

train_not_in_w2v_ratio_dist = np.array([dist_of_words_not_in_w2v(row.Q1, row.Q2, fuzz.token_sort_ratio) for ind, row in df_train.iterrows()])
test_not_in_w2v_ratio_dist = np.array([dist_of_words_not_in_w2v(row.Q1, row.Q2, fuzz.token_sort_ratio) for ind, row in df_test.iterrows()])

# + id="nW8I74tjeciP"
not_in_w2v_ratio = TextFeature(train_not_in_w2v_ratio_dist, test_not_in_w2v_ratio_dist, "not_in_w2v_ratio")

# + [markdown] id="mWsjyNAAeciP"
# ## Number of digits

# + id="jJ_Td5yXeciP"
df_train = pd.read_csv('../storage/datasets/q2b/train.csv')
df_test = pd.read_csv('../storage/datasets/q2b/test_without_labels.csv')

# + id="CHyLUKBPeciP"
# BUGFIX: this was set(range(10)) — a set of ints. Iterating a string yields
# 1-character strings, which never equal an int, so every digit count was 0.
digits = set('0123456789')

def numb_of_digits(txt):
    """Count digit characters in `txt`; NaN (float) inputs count as 0."""
    try:
        return sum([1 if char in digits else 0 for char in txt])
    except TypeError:
        return 0

train_digits_q1 = np.array([numb_of_digits(q) for q in df_train.Question1])
train_digits_q2 = np.array([numb_of_digits(q) for q in df_train.Question2])
test_digits_q1 = np.array([numb_of_digits(q) for q in df_test.Question1])
test_digits_q2 = np.array([numb_of_digits(q) for q in df_test.Question2])

train_digits_diff = np.abs(train_digits_q1 - train_digits_q2)
test_digits_diff = np.abs(test_digits_q1 - test_digits_q2)

# + id="EIewynMPeciQ"
digits_q1 = TextFeature(train_digits_q1, test_digits_q1, "digits_q1")
digits_q2 = TextFeature(train_digits_q2, test_digits_q2, "digits_q2")
digits_diff = TextFeature(train_digits_diff, test_digits_diff, "digits_diff")

# + [markdown] id="UGlxBMdyeciR"
# ## Number of nouns

# + id="HxTtGAcVeciR"
df_train = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')

# + id="pv6DuS3FeciR"
def numb_of_nouns(txt):
    """Count tokens POS-tagged 'NN' (singular common noun) by NLTK."""
    tags = nltk.pos_tag(txt.split())
    return sum([1 if tag=='NN' else 0 for word, tag in tags])

train_nouns_q1 = np.array([numb_of_nouns(q) for q in df_train.Q1])
train_nouns_q2 = np.array([numb_of_nouns(q) for q in df_train.Q2])
test_nouns_q1 = np.array([numb_of_nouns(q) for q in df_test.Q1])
test_nouns_q2 = np.array([numb_of_nouns(q) for q in df_test.Q2])

train_nouns_diff = np.abs(train_nouns_q1 - train_nouns_q2)
test_nouns_diff = np.abs(test_nouns_q1 - test_nouns_q2)

# + id="XMJ99ugceciS"
nouns_q1 = TextFeature(train_nouns_q1, test_nouns_q1, "nouns_q1")
nouns_q2 = TextFeature(train_nouns_q2, test_nouns_q2, "nouns_q2")
nouns_diff = TextFeature(train_nouns_diff, test_nouns_diff, "nouns_diff")

# + [markdown] id="K90TK0AqeciS"
# ## Edit distance of nouns

# + id="0vIbi47WeciS"
df_train = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')

# + id="tx8fV5hPeciT"
def dist_of_nouns(q1, q2, distance):
    """Apply `distance` to only the 'NN'-tagged tokens of q1 and q2."""
    tags_q1 = nltk.pos_tag(q1.split())
    tags_q2 = nltk.pos_tag(q2.split())
    nouns_q1 = re.sub(' +', ' ', " ".join([word if tag=='NN' else "" for word, tag in tags_q1]))
    nouns_q2 = re.sub(' +', ' ', " ".join([word if tag=='NN' else "" for word, tag in tags_q2]))
    # BUGFIX: previously returned distance(q1, q2); the extracted noun strings were
    # never used, duplicating the plain edit-distance feature. Compare the nouns.
    return distance(nouns_q1, nouns_q2)

train_nouns_ratio_dist = np.array([dist_of_nouns(row.Q1, row.Q2, fuzz.token_sort_ratio) for ind, row in df_train.iterrows()])
test_nouns_ratio_dist = np.array([dist_of_nouns(row.Q1, row.Q2, fuzz.token_sort_ratio) for ind, row in df_test.iterrows()])

# + id="OjL_UkmveciU"
nouns_ratio = TextFeature(train_nouns_ratio_dist, test_nouns_ratio_dist, "nouns_ratio")

# + [markdown] id="Dy4FhQ7IeciU"
# ## Edit distance of question ending

# + id="srG_Vxh_eciU"
df_train = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
df_test = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')

# + id="an41IiGSeciU"
def distance_of_last_part(q1, q2, divider, distance):
    """Apply `distance` to the last (1 - 1/divider) fraction of each question's tokens."""
    splitted_q1 = q1.split()
    q1_divided = " ".join(splitted_q1[(len(splitted_q1) // divider):])
    splitted_q2 = q2.split()
    q2_divided = " ".join(splitted_q2[(len(splitted_q2) // divider):])
    return distance(q1_divided, q2_divided)

train_divided_2_sort_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 2, fuzz.token_sort_ratio) for ind, row in df_train.iterrows()])
test_divided_2_sort_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 2, fuzz.token_sort_ratio) for ind, row in df_test.iterrows()])

train_divided_4_sort_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 4, fuzz.token_sort_ratio) for ind, row in df_train.iterrows()])
test_divided_4_sort_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 4, fuzz.token_sort_ratio) for ind, row in df_test.iterrows()])
train_divided_2_simple_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 2, fuzz.ratio) for ind, row in df_train.iterrows()])
test_divided_2_simple_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 2, fuzz.ratio) for ind, row in df_test.iterrows()])

train_divided_4_simple_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 4, fuzz.ratio) for ind, row in df_train.iterrows()])
test_divided_4_simple_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 4, fuzz.ratio) for ind, row in df_test.iterrows()])

train_divided_2_partial_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 2, fuzz.partial_ratio) for ind, row in df_train.iterrows()])
test_divided_2_partial_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 2, fuzz.partial_ratio) for ind, row in df_test.iterrows()])

train_divided_4_partial_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 4, fuzz.partial_ratio) for ind, row in df_train.iterrows()])
test_divided_4_partial_ratio_dist = np.array([distance_of_last_part(row.Q1, row.Q2, 4, fuzz.partial_ratio) for ind, row in df_test.iterrows()])

# + id="Q4-dllhWeciU"
# Question-ending distances as feature objects: last half (divider=2) and last
# three quarters (divider=4), under three fuzzywuzzy scorers each.
divided_2_sort_ratio_dist = TextFeature(train_divided_2_sort_ratio_dist, test_divided_2_sort_ratio_dist, "divided_2_sort_ratio_dist")
divided_4_sort_ratio_dist = TextFeature(train_divided_4_sort_ratio_dist, test_divided_4_sort_ratio_dist, "divided_4_sort_ratio_dist")
divided_2_simple_dist = TextFeature(train_divided_2_simple_ratio_dist, test_divided_2_simple_ratio_dist, "divided_2_simple_dist")
divided_4_simple_dist = TextFeature(train_divided_4_simple_ratio_dist, test_divided_4_simple_ratio_dist, "divided_4_simple_dist")
divided_2_partial_ratio_dist = TextFeature(train_divided_2_partial_ratio_dist, test_divided_2_partial_ratio_dist, "divided_2_partial_ratio_dist")
divided_4_partial_ratio_dist = TextFeature(train_divided_4_partial_ratio_dist, test_divided_4_partial_ratio_dist, "divided_4_partial_ratio_dist")

# + [markdown] id="pn6S_NVUolWj"
# ## Edit Distance of Question Beginning

# + id="8vlLpVOhooZW"
df_train = pd.read_csv('train_quora_clean.csv')
df_test = pd.read_csv('test_quora_clean.csv')

# + id="l8HM-Xfso0lR"
def distance_of_first_part(q1, q2, distance):
    """Apply `distance` to the first four tokens of each question."""
    splitted_q1 = q1.split()
    q1_divided = " ".join(splitted_q1[:4])
    splitted_q2 = q2.split()
    q2_divided = " ".join(splitted_q2[:4])
    return distance(q1_divided, q2_divided)

train_start_dist = np.array([distance_of_first_part(row.Q1, row.Q2, fuzz.ratio) for ind, row in df_train.iterrows()])
test_start_dist = np.array([distance_of_first_part(row.Q1, row.Q2, fuzz.ratio) for ind, row in df_test.iterrows()])

# + id="Hh3Xl_GZtUGF"
start_dist = TextFeature(train_start_dist, test_start_dist, "start_distances")

# + [markdown] id="tR8Meha79HJF"
# # Useful functions (CV,BO,...)

# + id="nPD44Dul7ZLQ"
def metrics_calculating(y_true, y_pred):
    """Return accuracy / macro recall / macro precision / macro f1 as a Counter (so folds can be summed)."""
    return Counter({
        'accuracy': accuracy_score(y_true, y_pred),
        'recall': recall_score(y_true, y_pred, average='macro'),
        'precision': precision_score(y_true, y_pred, average='macro'),
        'f1': f1_score(y_true, y_pred, average='macro')
    })

def pretty_print_grid_search(gs_obj):
    """Print the best params/score of a fitted GridSearchCV and display its full cv_results_."""
    print(f"Best parameters found: {gs_obj.best_params_}, with a score of: {np.round(gs_obj.best_score_, decimals=3)}")
    df_gs = pd.DataFrame.from_dict(gs_obj.cv_results_)
    display(df_gs)

def run_cross_val(model, splits_numb, X, y, lab_encoder=None):
    """K-fold cross-validate `model` on (X, y) and return (train_avg_metrics, validation_avg_metrics).

    Side effects: prints running metric totals per fold; plots a summed confusion
    matrix when `lab_encoder` is given; appends the concatenated per-fold
    validation scores to the global LOG['predictions'] when a global LOG exists
    (used by the Bayesian-optimization ensembling callback).
    NOTE(review): the bare try/except blocks below silently swallow any error,
    not just the intended AttributeError/ValueError cases — kept as-is.
    """
    train_metrics = Counter()
    validation_metrics = Counter()
    conf_mats = []
    pred_vals = None
    kf = KFold(n_splits=splits_numb, shuffle=False)
    for train_index, val_index in tqdm(list(kf.split(X)), desc="Splits"):
        ### Fit on the input model ###
        model.fit(X[train_index], y[train_index])
        ### Predict on the Train set (mainly for debugging) ###
        y_pred_train = model.predict(X[train_index])
        ### Predict on the Validation set ###
        y_pred_val = model.predict(X[val_index])
        # Prefer decision_function scores; fall back to predict_proba
        try:
            proba_preds = model.decision_function(X[val_index])
        except:
            proba_preds = model.predict_proba(X[val_index])
        # First fold initialises pred_vals (np.concatenate fails on None)
        try:
            pred_vals = np.concatenate((pred_vals,proba_preds))
        except:
            pred_vals = proba_preds
        ### Metrics Bookkeeping ###
        train_metrics += metrics_calculating(y[train_index], y_pred_train)
        validation_metrics += metrics_calculating(y[val_index], y_pred_val)
        print(train_metrics)
        print(validation_metrics)
        ### Confusion Plots Generation ###
        if lab_encoder is not None:
            y_labels_true = lab_encoder.inverse_transform(y[val_index])
            y_labels_pred = lab_encoder.inverse_transform(y_pred_val)
            conf_mats.append(confusion_matrix(y_labels_true, y_labels_pred, labels=list(lab_encoder.classes_)))
    if lab_encoder is not None:
        disp = ConfusionMatrixDisplay(confusion_matrix=np.sum(np.array(conf_mats), axis=0), display_labels=list(lab_encoder.classes_))
        fig, ax=plt.subplots(1,1,figsize=(7,7))
        disp.plot(ax=ax)
    train_avg_metrics = {k: v / splits_numb for k, v in train_metrics.items()}
    validation_avg_metrics = {k: v / splits_numb for k, v in validation_metrics.items()}
    # NOTE(review): LOG is a global assigned elsewhere in the notebook (LOG = logs);
    # when it is undefined the NameError is swallowed here on purpose.
    try:
        LOG['predictions'].append(pred_vals.copy())
    except:
        pass
    #print(accuracy_score(pred_vals,y))
    return train_avg_metrics, validation_avg_metrics

def run_grid_search(X, y, model, params, folds_numb, scoring="accuracy", verbose_res=True):
    """Run a GridSearchCV over `params`, optionally pretty-print results, and return the fitted search object."""
    gs = GridSearchCV(model, params, cv=folds_numb, scoring=scoring, n_jobs=10, verbose=3)
    gs.fit(X, y)
    if verbose_res:
        pretty_print_grid_search(gs)
    return gs

def run_bayesian_optimization(params, obj_function,log=None,iters=8,inits=2):
    """Maximize `obj_function` over the `params` bounds with BayesianOptimization.

    `log` (optional dict) accumulates 'hyperparameters' and 'scores' for every
    evaluated point (used later by the ensembler); 'predictions' is initialised
    here and filled by run_cross_val. Returns optimizer.max ({'target', 'params'}).
    """
    #log needs to be a dictionary
    if log!=None:
        if 'predictions' not in log:
            log['predictions'] = []
    optimizer = BayesianOptimization(obj_function,params,verbose=2)#,random_state=42)
    optimizer.maximize(n_iter = iters,init_points=inits)
    #higher values are recommended, but time.
    if log!=None:
        if 'hyperparameters' not in log:
            log['hyperparameters'] = []
        if 'scores' not in log:
            log['scores'] = []
        for r in optimizer.res:
            log['hyperparameters'].append(r['params'])
            log['scores'].append(r['target'])
    return optimizer.max

# + [markdown] id="Gy2dD90l6z60"
# # Model Training

# + [markdown] id="V2CCPw-EpT8s"
# These need to be defined outside the Bayesian Optimization for scope reasons. Namely, 'stupid'. Try not to change their values anywhere as they are global variables.

# + id="MYBKOJzspRkM"
metric = "accuracy"  # global: objective functions below read this
folds_numb = 5       # global: number of CV folds
#X = train_features
#y = train_labels

# + [markdown] id="ZXYybdwxvfeg"
# ## Feature Gathering

# + id="KDE9aus7vfeg"
# Before progressing we must first concatenate all of our feature objects into one DataFrame
features_added = [
    start_dist
]

# Create the DataFrames
#train_features_df = pd.read_csv('../storage/datasets/q2b/train.csv', usecols=['IsDuplicate'])  # The train features will have as their last column the labels
#test_features_df = pd.DataFrame()
train_features_df = pd.read_csv('train_features.csv')
test_features_df = pd.read_csv('test_features.csv')

# Insert each new feature as the leftmost column of both frames
for feature in features_added: #features_used
    train_features_df.insert(0, column=feature.name, value=feature.train_arr)
    test_features_df.insert(0, column=feature.name, value=feature.test_arr)

# + id="4-Br_EMqvfej"
# Save the files
train_features_df.to_csv(f'train_features.csv', index=False)
test_features_df.to_csv(f'test_features.csv', index=False)

# + colab={"base_uri": "https://localhost:8080/"} id="P5DF8GwwpUmW" outputId="b83c0602-40be-4875-87e4-9ca1bf7c8ae5"
# Read the files
train_features_df = pd.read_csv('train_features.csv')
test_features_df = pd.read_csv('test_features.csv')
print(train_features_df.shape)

# + [markdown] id="nj4j_o62695R"
# ## Linear SVM

# + id="rv45flqdoBNT"
def train_LSVC(C):
    """Bayesian-optimization objective: CV a LinearSVC with penalty `C`, return the validation `metric`."""
    #Due to scope issues, the following parameters must be set outside this function:
    #X, y, folds_numb, metric, method_name
    model = LinearSVC(C=C,dual=False,fit_intercept=True,verbose=0)
    train_avg_metrics, validation_avg_metrics = run_cross_val(model, folds_numb, X, y)
    return validation_avg_metrics[metric]

# + id="sYiyEKlDGbDY"
params = {'C': (0.1,10)}
best = run_bayesian_optimization(params,train_LSVC) #contains 'target' and 'params'
C = best['params']['C']
# Refit on the full training data with the best C found
model = LinearSVC(C=C,dual=False,fit_intercept=True,verbose=0)
model.fit(X,y)

# + [markdown] id="wAKJZLsikZwE"
# # Predictions and Results

# + id="Pf3rW5egkdu9"
# NOTE(review): train_features / train_labels / test_features are commented out or
# defined elsewhere in the notebook — confirm they are in scope before this cell.
train_preds = model.predict(train_features)
print(metrics_calculating(train_labels, train_preds))
test_preds = model.predict(test_features)
best

# + [markdown] id="e877bhGqvfet"
# ## Xgboost - With tuning

# + id="sHdDiX4Xecih"
# Feature columns kept for the final model; commented entries were tried and dropped
hall_of_fame_cols = [
    #'divided_4_partial_ratio_dist',
    #'digits_diff',
    'length_q1_quora',
    'length_q2_quora',
    'length_q1_original',
    'length_q2_original',
    'length_q1_clean',
    'length_q2_clean',
    'edit_token_sort_ratio',
    'edit_token_set_ratio',
    'edit_partial_ratio',
    'edit_ratio',
    'TfIdfCosSimilarity',
    'AvgEmbeddingsCosine',
    #'q1_num_names',
    #'q2_num_names',
    #'names_cosine',
    #'stopwords_diff',
    #'nouns_diff',
    #'start_distances'
]

# + id="YpydMClDvfet"
df_train = pd.read_csv('train_features.csv', usecols=hall_of_fame_cols + ['IsDuplicate'])
df_test = pd.read_csv('test_features.csv', usecols=hall_of_fame_cols)

# NOTE(review): the [:, :-1] / [:, -1] split assumes 'IsDuplicate' is the LAST
# column; pandas `usecols` keeps the CSV's column order, not the list's order —
# verify against the actual file layout.
X_train = np.array(df_train)[:, :-1]
y_train = np.array(df_train)[:, -1]
X_test = np.array(df_test)

metric="accuracy"

# + id="Grjx40YPecih"
# Normalize the train and test sets
normalizer = preprocessing.Normalizer().fit(X_train)
X_train = normalizer.transform(X_train)
X_test = normalizer.transform(X_test)

# + colab={"base_uri": "https://localhost:8080/"} id="BA1zYkkQkAZe" outputId="cd82f5b2-a237-4ef5-cd9e-0277f77ebe11"
# NOTE(review): fit_transform results are discarded (only the explained-variance
# ratios are printed), and PCA is refit separately on the test set — confirm this
# cell is exploratory only.
pca = PCA(n_components=9)
pca.fit_transform(X_train)
print(sum(pca.explained_variance_ratio_))
pca.fit_transform(X_test)
print(sum(pca.explained_variance_ratio_))

# + colab={"base_uri": "https://localhost:8080/"} id="zgZ9Dh65vfev"
outputId="1f14db44-10cd-4146-d946-138eeff90253"
# Single xgboost fit with hand-picked hyperparameters (GPU histogram tree method)
xgboost_clf = xgboost.XGBClassifier(max_depth=10,
                                    n_estimators=10000,
                                    objective='binary:logistic',
                                    eval_metric='logloss',
                                    learning_rate=0.01,
                                    use_label_encoder=False,
                                    tree_method='gpu_hist'
                                    )
xgboost_clf.fit(X_train, y_train)
#run_cross_val(xgboost_clf, splits_numb=5, X=X_train, y=y_train)

# + id="LtaO3pwLzGKo"
def trainXGB(learning_rate,max_depth,n_estimators):
    """Bayesian-optimization objective: CV an XGBClassifier and return the validation `metric`.

    BayesianOptimization passes floats, hence the int() casts. Reads the globals
    X_train / y_train / metric.
    """
    xgboost_clf = xgboost.XGBClassifier(learning_rate=learning_rate,max_depth=int(max_depth),n_estimators=int(n_estimators),objective='binary:logistic',tree_method='gpu_hist')
    train_avg_metrics, validation_avg_metrics = run_cross_val(xgboost_clf, splits_numb=5, X=X_train, y=y_train)
    return validation_avg_metrics[metric]

# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["57589022cc1e40c39741a352a75a32eb", "d015cc58f401412ab08b87501620fce9", "4e96df8865e74c68992aec836ef0edf1", "37c4dbf623124d6f84f3732c0bc86c14", "4938d272ce8b46869cd90d9afe107ddf", "8003d3f9f4374fbd97a37fdc06e036c1", "649f56a6a5ec45799f33f4986962cefe", "ee399c8a87714c618b462863911e89aa", "eada8eac01ba49ca9687e088e9293453", "<KEY>", "275121bee0b34ad49aa2556d63db67dd", "b40120bc602545a39575ba150768e89a", "<KEY>", "<KEY>", "7742d063b42c4a64b0e96b9d7016c482", "<KEY>", "<KEY>", "a153a902f21048e18d7ce6151316447d", "<KEY>", "<KEY>", "<KEY>", "6bf2f06420474ee68d9bc084534c1aa3", "e12f684e36004d8995acdeb93503f204", "3fba2e6e7df246228e5f11e6823fd676", "<KEY>", "<KEY>", "<KEY>", "6629d6ea60b04fff8028eb1d8b9c4818", "896197c48ab441a6911e139a80265b36", "<KEY>", "<KEY>", "835ddc609d79475b80ebd5171a234413", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "fbae48adde564e84a5d6bc9e1b7eb8ea", "<KEY>", "<KEY>", "<KEY>", "f56ebba685754ad8b915c986a907ee76", "86321cb5be70419ab64d135e909c8708", "<KEY>", "9460d602eb97496396e0dceab2871918", "823588cc30e5402c8226105f8b77e7bb", "ddf8fbea41864447a9ca26966462f286", "<KEY>", "<KEY>", "6b6a856d142c4124a57a24846fdc121a", "a7a58e0c2eb44636b51181aa67874ded",
"<KEY>", "<KEY>", "<KEY>", "fe8d2ef99e9d49d59214fe20ebf5b870", "af22ff66ee7a408f80a2999048440d0f", "66138af046ae46cd958b8a3db17ffbe6", "<KEY>", "ca5bd01cae2042a1965d03a4471b4e44", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "fa2b25c60c314c0e9d62262ead587859", "6d3881668d384f119e4a309c8fabbe49", "0609cf69fac84b3584fa14fafe2e9d87", "<KEY>", "dc76e43fedee48c88aaa3c76f9cfa443", "<KEY>", "<KEY>", "c561cd113e684dec9cc1ba1cd1094034", "<KEY>", "8eb82951b9e3480d9b472a677b0f6d5b", "<KEY>", "<KEY>", "32f260baeeca4c988222c66473c6862e", "9b92517da42a4d47b80cebe91c13fce2", "616228e69c874515925cac983df2fb9e", "<KEY>", "fecf45ff321a4352a48c590579699d81", "bede8699bbef4fdca3299027f001b5ab", "ac6f68f94e18480bb65131eccf3f3da4", "<KEY>", "9d5d45c9a997438db20558a19e9e8fb0", "<KEY>", "<KEY>", "ee0df8be810d4f1c834477f18fa2e3aa", "ba13911ea7514e3d85e1ec8240425b99", "c73c61894ad84a09bf494c9903065973", "<KEY>", "<KEY>", "344630e6c1fb4eea9ae352d8da846538", "<KEY>", "d52351a597564f1ca27f60afed8c2bc8", "<KEY>", "d852f48bdaf545949a94700d25cefab9", "ec9e0c3e9d6f4baf842672f39cfdbe9a", "<KEY>", "95e8eaba339047eea0b3f621e250a92b", "<KEY>", "<KEY>", "aad05e7b47a54ce8aceda7dc9cbef6cc", "89d95efafd244302ab8b307342d61514", "d2b3cacef86c4eccb06121a833ab036f", "<KEY>", "34444a92334743c78ca3b5494e8041dd", "<KEY>", "<KEY>", "1a8a81de42a5482f945c6e14465b7137", "<KEY>", "2cff603a49f64c358a4743d3b010d174", "d291deb2e9b846229a4c43672924ccc2", "5a0398d171eb4ed695f55026526b8531", "<KEY>", "5deb2d3577624d0abc6a6377fce37819", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "35d12eae052245eea73ac7a640648fb4", "9880e31efa434e4ab1aa95469ca97685", "<KEY>", "550a543c7ff7419e8fa8b7302a9a94ba", "<KEY>", "397e9add75a04cc8962dabb350c9ac64", "e995f101cedd4e3bbe7fa22ec4d4c0d2", "<KEY>", "<KEY>", "422fbaae9a91488badd58968d7d0d00c", "<KEY>", "<KEY>", "2e7763d227724375a6b6231ef65172bf", "8f214553937e4d059ac7c7100a430a63", "a249b05e3d7c4c8a8639ddc0c226eced", "b5d85a1ef5af413183232792232a09ed", "03a86a0e23ab405497236c34188e2e04", 
"67a7d369f5514a6fbe31138651520fa7", "<KEY>", "<KEY>", "d4676e3229dc45a99f98ddd088038326", "5b17674021024551a0b9c8c4c50fcc40", "<KEY>", "<KEY>", "f8fbcee673a4481f9f027a83b9fd0d6b", "<KEY>", "907c4caddd6e456ebee915000995da7f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "3e78077485a94949a39b790e750c735d", "<KEY>", "2a6deafff10445a9950c083d75665e24", "0812208b5eea462bb2a205dc797eda6c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9cc69e249a464643bcc0eb3916ba0840"]} id="UbFin62-vfex" outputId="e4748870-9a73-4ed4-ada3-362fed139ed6" param_dict = {#'objective':'binary:logistic', #'use_label_encoder': False, 'learning_rate': (0.0001,0.1), 'max_depth': (5,15), #'tree_method': 'gpu_hist', 'n_estimators': (100,1000)} #logs = {} LOG = logs best = run_bayesian_optimization(param_dict,obj_function=trainXGB,log=logs,iters=18,inits=2) # + [markdown] id="QZKLBye-dsmp" # ## Ensembler # + [markdown] id="XwepqSRYd4a0" # We insert a manual callback in our implementation of bayesian optimization and cross-validation. Every time it is called, it records the hyperparameters it received along with the model's validation predictions. Then, our ensembler reads all of these predictions and finds the models whose predictions, when combined, maximize the validation performance. # # The class may seem daunting, but its interface is like any sklearn model's. Feel free to omit the long cell below. # + id="-2zNJImwdvho" class ensembler: def __init__(self,log,val_labels,max_members,method="greedy"): #log is a dictionary containing: #'hyperparameters': a list of hyperparameter combinations. #'predictions': a list of model predictions on the validation set. #'scores': a list of accuracy scores on the validation set. 
if method=="greedy": self.greedyEnsembling(log,val_labels,max_members) else: self.bruteEnsembling(log,val_labels,max_members) def greedyEnsembling(self,log,val_labels,max_members): ens_indexes = [] ens_preds = [] self.models = [] #find the best model best_index = log['scores'].index(max(log['scores'])) ens_indexes.append(best_index) ens_preds.append(np.array(log['predictions'][best_index])) #iteratively add the ensemble member from the pool (with repetition) #that maximizes the validation performance. Continue until you reach max_members #or until no member is suitable. valAcc = log['scores'][best_index] for i in range(1,max_members): best_p = -1 for p in range(len(log['predictions'])): possAcc = self.checkAccuracy(ens_preds,log['predictions'][p],val_labels) if possAcc>valAcc: best_p = p valAcc = possAcc if best_p>-1: ens_preds.append(log['predictions'][best_p]) ens_indexes.append(best_p) else: break for i in ens_indexes: self.models.append(self.genericModelBuilder(logs['hyperparameters'][i])) self.valAcc = valAcc def bruteEnsembling(self,log,val_labels,num_of_members): #this is painfully slow ens_indexes = [] ens_preds = [] self.models = [] #find the best model all_indexes = range(len(log['predictions'])) maxAcc = -1 bestC = None for c in itertools.combinations(all_indexes,num_of_members): temp_preds = [] for j in range(len(log['predictions'][0])): this_res = np.zeros(4) for i in c: this_res+=log['predictions'][i][j] temp_preds.append(list(this_res).index(max(this_res))) acc = accuracy_score(temp_preds,val_labels) if acc>maxAcc: maxAcc = acc bestC = c self.valAcc = maxAcc for i in bestC: self.models.append(self.genericModelBuilder(log['hyperparameters'][i])) def elections(self, preds): res = [] for j in range(len(preds[0])): this_res = np.zeros(len(preds[0][1])) for p in preds: this_res+=p[j] res.append(list(this_res).index(max(this_res))) return res def checkAccuracy(self, preds,new_preds,labels): temp_preds = np.concatenate((preds,[new_preds])) total_preds = 
self.elections(temp_preds) return accuracy_score(labels,total_preds) def genericModelBuilder(self,params): model = None if 'C' in params: model = LinearSVC(C=params['C'],tol=params['tol'],max_iter=int(params['max_iter']),dual=True,fit_intercept=True,verbose=0) elif 'alpha' in params: model = SGDClassifier(tol=params['tol'], loss='hinge', max_iter=int(params['max_iter']), alpha=params['alpha'], penalty='elasticnet',early_stopping=True,l1_ratio=params['l1_ratio']) else: model = xgboost.XGBClassifier(learning_rate=params['learning_rate'],max_depth=int(params['max_depth']),n_estimators=int(params['n_estimators']),objective='binary:logistic',tree_method='gpu_hist') return model def fit(self,X,y): for m in self.models: m.fit(X,y) def predict(self,X): preds = [] for m in self.models: try: preds.append(m.decision_function(X)) except: preds.append(m.predict_proba(X)) return self.elections(preds) # + [markdown] id="QLstPzZcd-EH" # While the class may seem daunting, using it is as simple as can be: # + id="80e5Nv82d-cw" ensemble = ensembler(logs_2,y_train,9,method="greedy") print(ensemble.valAcc) print(len(ensemble.models)) # + id="AkhDfVWN789A" ensemble.fit(X_train, y_train) #preds = ensemble.predict(X_test) model = ensemble # + [markdown] id="ID4x7lZ6vfe1" # ## Create Output File # + id="hebHDfdpvfe2" # Set the object model and the path that the output will be created file_path = 'final_hope.csv' # Get the Ids from the test file test_ids_df = pd.read_csv('test_without_labels.csv', usecols=['Id']) y_predicted = model.predict(np.array(X_test)) y_predicted = np.array([int(x) for x in y_predicted]) results = { "Id": list(test_ids_df.Id), "Predicted": y_predicted } results_df = pd.DataFrame.from_dict(results) results_df.to_csv(file_path, index=False) # + [markdown] id="HoSjYznzCrkl" # # Goodbye # + [markdown] id="dnVGETtPCmoz" # *~ That's all, folks! ~*
DuplicateDetectionFeatureEngineering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/PolovinkinNikita/SkyNet/blob/main/Gradient_Descent.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="Rm_DzTK4q8mG" from numpy import asarray from numpy import arange from numpy.random import rand import numpy as np import matplotlib.pyplot as plt # + id="yRD5GNRivy-V" def objective(x): return x**2.0 def derivative(x): return x * 2.0 # + id="NtkLxdRTwDOe" bounds = np.array([[-15.0, 15.0]]) # + id="C6Hruchhwqre" inputs = [] outputs = [] # + id="eh05uoUHw03f" for i in range(1000): x = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0]) y = objective(x) inputs.append(x) outputs.append(y) # + id="3wGspP981KoR" solutions = [] evaluations = [] # + colab={"base_uri": "https://localhost:8080/", "height": 942} id="aJhYMHN_jIi8" outputId="5e85a812-b608-4a5a-c9cf-1f5d03ed4a8e" for i in range(1, 50): x_evaluation = objective(x) if x_evaluation >= 0.0000099 : solutions.append(x) evaluations.append(x_evaluation) gradient = derivative(x) new_x = x - 0.1 * gradient x = new_x print('>%d f(%s) = %.5f' % (i, x, x_evaluation)) else: break plt.scatter(inputs, outputs) plt.scatter(solutions, evaluations, color = 'red')
Gradient_Descent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 空的class # + class myclass(): pass aaa = myclass() type(aaa) # - # ### 常見的class架構 # + class myclass(): # 方法:初始化 def __init__(self, appetizer, main_course, dessert): self.appetizer = appetizer # 屬性1 self.main_course = main_course # 屬性2 self.dessert = dessert # 屬性3 self._age = 18 # 受保護的屬性1(無強制力,還是可以從外部直接訪問+修改) self.__people = 3 # 受保護的屬性2(有強制力,無法從外部直接修改+修改) # 方法:自訂(透過內部方法訪問「受保護的屬性2」) def check_orfer(self): print('前菜:', self.appetizer) print('主菜:', self.main_course) print('甜點:', self.dessert) print('年紀:', self._age) print('人數:', self.__people) # 方法:自訂(透過內部方法修改「受保護的屬性2」) def set_var(self, value): self.__people = value menu = myclass('沙拉', '牛排', '蛋糕') # 建立class並且賦值 print(menu.appetizer) # 呼叫屬性 menu.check_orfer() # 呼叫方法 print('='*10) print(dir(menu)) # __people被改名成_myclass__people,所以沒辦法直接修改 # - print(menu.main_course) menu.main_course = '魚排' print(menu.main_course) print('='*10) print(menu._age) menu._age = 30 print(menu._age) print('='*10) #print(menu.__people) # 無法訪問,除非使用menu._myclass__people #menu.__people = 100 # 不可修改,除非使用menu._myclass__people menu.check_orfer() menu.set_var(100) print('='*10) menu.check_orfer() # ### 屬性 class Student: count = 0 def __init__(self, name): self.name = name print(Student.count) #print(Student.name) # 因為沒有初始化/建立,所以無法呼叫 # + s1 = Student(name='B') print(s1.count) Student.count = 2 # 源頭動,大家動 print(s1.count) s2 = Student(name='C') print(s2.count) s1.count = 8 # 尾巴動,頭不動 print(Student.count) # + class Student: count = 0 # 計數器 def __init__(self, name): Student.count = Student.count + 1 self.name = name s1 = Student(name='B') s2 = Student(name='C') Student.count # -
Python/.ipynb_checkpoints/[Python] class-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: pytorch
#     language: python
#     name: pytorch
# ---

# %reload_ext autoreload
# %autoreload 2
import torch
from fastai.vision.all import *
import json

# FIX: torch.nn has no `BCE` class (the original line raised AttributeError);
# the binary-cross-entropy loss module is torch.nn.BCELoss.
bce = torch.nn.BCELoss()
p = torch.zeros(2)
p[0]=0
p
bce(p,p)

class SpecialTuple(tuple):
    """Tuple of parallel sequences: len() is the length of the first member,
    and indexing returns the key-th element of every member."""
    def __len__(self):
        return len(next(iter(self)))
    def __getitem__(self, key):
        return [a[key] for a in self]

df = pd.read_json('results.json')
df.head()
df.keys()
df[['evaluation_results.discrete_mig','path','train_results.elapsed_time']].dropna()
# glob pattern for per-checkpoint aggregated MIG evaluation files
mean_results=('MIResults/ckp_*/*/metrics/metrics/mean/mig/results/aggregate/evaluation.json')
files=glob.glob(mean_results)

# + jupyter={"outputs_hidden": true}
# Collect every checkpoint/trial evaluation into one dataframe.
results=[]
for file in files:
    # raw string avoids the invalid-escape warning for \d (pattern unchanged)
    match = re.search(r'ckp_(\d+)/(\d+)/',file)
    i,trail = match.groups()
    with open(file) as fp:
        data= json.load(fp)
    t = {'ckp':int(i),'trail':int(trail)}
    t.update(data)
    results.append(t)
df = pd.DataFrame(results)
df.head()
# -

# plot the informativeness trajectory of each trial over checkpoints
for trail, process in df.groupby('trail'):
    plt.plot(process.sort_values('ckp')['evaluation_results.informativeness_train'].to_list(),label=trail)
    # break
#plt.legend()

process['evaluation_results.discrete_mig']

# + jupyter={"outputs_hidden": true}
df[df['discrete_mig']>0.3]
# -

df[df['ckp']==10][['evaluation_results.discrete_mig','trail']]

# +
# mi: track per-factor mutual information of the most informative dims
# NOTE(review): `results` is indexed as results[9][trail] here, which only
# works if each entry holds a per-trial mapping — confirm against the files.
trail = 151
data = np.array(results[9][trail]['discrete_mi'])
infor_dim = data.argmax(0)
metrics=[]
for i in range(1,10):
    data = np.array(results[i][trail]['discrete_mi'])
    metrics.append(data[infor_dim,range(5)])
metrics = np.stack(metrics)
for i in range(5):
    plt.plot(metrics[:,i],label=i)
plt.legend()
# -

# mi percent: MIG-style gap (top-1 minus top-2 MI) normalised by factor entropy
trail = 0
entropy = np.log([3,6,40,32,32])
data = np.array(results[9][trail]['discrete_mi'])
infor_dim = (data/data.sum(1,keepdims=True)).argmax(0)
print(data.sum(0)/entropy)
print(results[9][trail]['discrete_mig'])
metrics=[]
for i in range(1,10):
    data = np.array(results[i][trail]['discrete_mi'])
    data.sort(0)
    metrics.append((data[-1,:]-data[-2,:])/entropy)
metrics = np.stack(metrics)
for i in range(5):
    plt.plot(metrics[:,i],label=i)
plt.legend()

data[-1,:]

a=np.array([[-7.008282842946302e-16,-7.008282842946302e-16,-7.008282842946302e-16,0.007486822113552842,0.20955580791467143,0.21694143686417125],[9.367506770274757e-17,9.367506770274757e-17,9.367506770274757e-17,0.007158382144239975,0.20513407651903673,0.18940479833116505],[-6.938893903907227e-17,-6.938893903907227e-17,-6.938893903907227e-17,0.008052565037922352,0.21031576006747843,0.23013573142422497],[2.5587171270657905e-16,2.5587171270657905e-16,2.5587171270657905e-16,0.008555877344540493,0.1531172316701163,0.16312299003666825],[6.938893903907227e-17,6.938893903907227e-17,6.938893903907227e-17,0.00793611424112986,0.26680478730089613,0.21450148627109905],[2.8102520310824275e-16,2.8102520310824275e-16,2.8102520310824275e-16,0.007589227323700348,0.22715044622292255,0.10261842887575752],[2.255140518769849e-16,2.255140518769849e-16,2.255140518769849e-16,0.007182329932919207,0.3541685977829361,0.19686082583140757],[-5.130444680201407e-16,-5.130444680201407e-16,-5.130444680201407e-16,0.007796386146325618,0.21203879169014356,0.18782677398443184],[-3.538835890992686e-16,-3.538835890992686e-16,-3.538835890992686e-16,0.008277019591843703,0.1848939319934417,0.32078284584650174],[4.8572257327350605e-17,4.8572257327350605e-17,4.8572257327350605e-17,0.006851254893035902,0.16321920659218114,0.13810416842452009]])
a.min()

# load FactorVAE-style run metrics logged by wandb to local files
fvae_results=[]
for file in glob.glob('wandb/*/files/metrics.log'):
    with open(file) as fp:
        data= json.load(fp)
    run_name = re.search('run-.*-(.*)/files',file).group(1)
    data['name']=run_name
    fvae_results.append(data)
fvae_df=pd.DataFrame(fvae_results)
fvae_df.head()
fvae_df[fvae_df['discrete_mig']>0.3].sort_values('discrete_mig')

# +
import wandb
api = wandb.Api()

# Project is specified by <entity/project-name>
runs = api.runs("erow/experiments")
summary_list = []
config_list = []
name_list = []
for run in runs:
    # run.summary are the output key/values like accuracy.
    # We call ._json_dict to omit large files
    summary_list.append(run.summary._json_dict)

    # run.config is the input metrics.
    # We remove special values that start with _.
    config = {k:v for k,v in run.config.items() if not k.startswith('_')}
    config_list.append(config)

    # run.name is the name of the run.
    name_list.append(run.name)
# -

name_list[:22]

import pandas as pd
# assemble name + config + summary for the first 22 runs into one table
summary_df = pd.DataFrame.from_records(summary_list[:22])
config_df = pd.DataFrame.from_records(config_list[:22])
name_df = pd.DataFrame({'name': name_list[:22]})
all_df = pd.concat([name_df, config_df,summary_df], axis=1)
df=all_df[['name','beta','method','random_seed','discrete_mig','discrete_d_mig']][1:]
df

def convert(x):
    """Format list-valued cells as "x[3],x[2]" (2 d.p.); recurse into Series;
    pass everything else through unchanged."""
    # print(x)
    if isinstance(x,list):
        return f"{x[3]:.2f},{x[2]:.2f}"
    elif isinstance(x,pd.Series):
        return x.apply(convert)
    return x

df.apply(convert,1)
notebooks/dis_analyze.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/bereml/iap/blob/master/libretas/1e_mnist_fcn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Clasificación de MNIST con un red densa # # Curso: [Introducción al Aprendizaje Profundo](http://turing.iimas.unam.mx/~ricardoml/course/iap/). Profesores: [Bere](https://turing.iimas.unam.mx/~bereml/) y [Ricardo](https://turing.iimas.unam.mx/~ricardoml/) <NAME>. # # --- # --- # # En esta libreta veremos un ejemplo de clasificación multiclase de imágenes de dígitos implementando una red densa multicapa en PyTorch. # # Emplearemos un conjunto referencia llamado [MNIST](http://yann.lecun.com/exdb/mnist/) recolectado por [Yann LeCun](http://yann.lecun.com). Está compuesto de imágenes en escala de grises de 28 × 28 píxeles que contienen dígitos entre 0 y 9 escritos a mano. El conjunto cuenta con 60,000 imágenes de entrenamiento y 10,000 de prueba. # # <img src="https://upload.wikimedia.org/wikipedia/commons/2/27/MnistExamples.png" width="600"/> # Conjunto MNIST. Imagen tomada de Wikipedia. 
# ## 1 Preparation

# library for inspecting architectures
# https://github.com/tyleryep/torchinfo
# !pip install torchinfo

# ### 1.1 Libraries

# + colab={} colab_type="code" id="Ny0L2LzogTN-"
# random functions
import random
# take n elements from a sequence
from itertools import islice as take
# plotting
import matplotlib.pyplot as plt
# multidimensional arrays
import numpy as np
# neural networks
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
# image processing
from skimage import io
# neural networks (data loading)
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
# architecture inspection
from torchinfo import summary
# progress bars
from tqdm import trange
# -

# workaround for a Colab download bug
# https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)

# ### 1.2 Helpers

# +
# data directory
DATA_DIR = '../datos'
# batch size
BATCH_SIZE = 32
# rows and columns of the image grid
ROWS, COLS = 4, 8

def display_grid(xs, titles, rows, cols, figsize=(12, 6)):
    """Displays examples in a grid.

    xs: sequence of 2-D grayscale images; titles: one label per image.
    Assumes len(xs) >= rows * cols.
    """
    fig, ax = plt.subplots(rows, cols, figsize=figsize)
    i = 0
    for r in range(rows):
        for c in range(cols):
            ax[r, c].imshow(xs[i], cmap='gray')
            ax[r, c].set_title(titles[i])
            ax[r, c].set_xticklabels([])
            ax[r, c].set_yticklabels([])
            i += 1
    fig.tight_layout()
    plt.show()

def display_batch(x, titles, rows, cols, figsize=(12, 6)):
    """Displays a batch of processed examples in a grid.

    x: float tensor of shape (M, C, H, W) in [0, 1].
    NOTE(review): `x *= 255` mutates the caller's tensor in place — confirm
    callers do not reuse the batch afterwards for training.
    """
    # denormalize [0, 1] => [0, 255]
    x *= 255
    # rotate channels (C x H x W) => (H x W x C)
    x = x.permute(0, 2, 3, 1)
    # convert to integer
    x = (x.numpy()).astype(np.uint8)
    # squeeze the channel axis
    x = x.reshape(*x.shape[:3])
    # display
    display_grid(x, titles, rows, cols, figsize)

def set_seed(seed=0):
    """Initializes pseudo-random number generators."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

# reproducibility
set_seed()
# -

# + [markdown] colab_type="text" id="MjKxreAkoZeT"
# ## 2 Data
# -

# ### 2.1 Data pipelines with PyTorch
#
# <img src="https://raw.githubusercontent.com/bereml/iap/master/fig/mnist_pipeline.png"/>
# &nbsp;
# Data pipeline for MNIST.

# ### 2.2 Exploration
#
# PyTorch provides the [`MNIST`](https://pytorch.org/vision/0.8/datasets.html#mnist) class, a ready-to-use `Dataset` implementation of the MNIST set.

# create a Dataset
ds = MNIST(
    # data directory
    root=DATA_DIR,
    # training subset
    train=True,
    # convert the image to an ndarray
    transform=np.array,
    # download the set
    download=True
)

# +
# load a few images
images, labels = [], []
for i in range(BATCH_SIZE):
    x, y = ds[i]
    images.append(x)
    labels.append(y)

# display
print(f'images[0] shape={images[0].shape} dtype={images[0].dtype}')
titles = [str(y) for y in labels]
display_grid(images, titles, ROWS, COLS)

# + [markdown] colab_type="text" id="9p_BsiITogUA"
# ### 2.3 Data loaders
#
# <img src="https://raw.githubusercontent.com/bereml/iap/master/fig/eval_trn_tst.svg" width="450"/>
# &nbsp;
# Training with a train/test split.
# -

# #### Training

# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="E1aEVpYtuadH" outputId="8df25761-3201-461a-e82b-26b5befd0302"
# transform that converts an image:
# from numpy.ndarray (H x W x C) in range [0, 255]
# to torch.FloatTensor (C x H x W) in range [0, 1]
tsfm = T.ToTensor()

# create a Dataset
trn_ds = MNIST(
    # data directory
    root=DATA_DIR,
    # training subset
    train=True,
    # transform
    transform=tsfm
)

# create a DataLoader
trn_dl = DataLoader(
    # dataset
    trn_ds,
    # batch size
    batch_size=BATCH_SIZE,
    # shuffle
    shuffle=True
)

# display a batch of images
x, y = next(iter(trn_dl))
print(f'x shape={x.shape} dtype={x.dtype}')
print(f'y shape={y.shape} dtype={y.dtype}')
display_batch(x, y.tolist(), ROWS, COLS)
# -

# #### Test

# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="QXMXXc9DPgqY" outputId="100e6a58-5552-4c5f-b08e-dc83c23c3b21"
# create a Dataset
tst_ds = MNIST(
    # data directory
    root=DATA_DIR,
    # test subset
    train=False,
    # transform
    transform=tsfm
)

# create a DataLoader
tst_dl = DataLoader(
    # subset
    tst_ds,
    # batch size
    batch_size=BATCH_SIZE,
    # shuffle
    shuffle=True
)

# display a batch of images
x, y = next(iter(tst_dl))
print(f'x shape={x.shape} dtype={x.dtype}')
print(f'y shape={y.shape} dtype={y.dtype}')
display_batch(x, y.tolist(), ROWS, COLS)
# -

# ## 3 Model
#
# <img src="https://raw.githubusercontent.com/bereml/iap/master/fig/fcn_arch.png"/>
# &nbsp;
# Architecture of the fully connected network.
# ### 3.1 Definición de la arquitectura # definición del modelo class FCN(nn.Module): # inicializador def __init__(self): # inicilización del objeto padre, obligatorio super(FCN, self).__init__() # tamaño de las capas self.I = 1 * 28 * 28 FC1, FC2 = 128, 10 # definición de capas self.cls = nn.Sequential( # fc1 # [M, 1x28x28] => [M, 128] nn.Linear(self.I, FC1), nn.Sigmoid(), # fc2 # [M, 128] => [M, 10] nn.Linear(FC1, FC2) ) # método para inferencia def forward(self, x): # aplanamos los pixeles de la imagen # [M, 1, 28, 28] => [M, 1x28x28] x = x.view(-1, self.I) # inferencia # [M, 1x28x28] x = self.cls(x) return x # ### 3.2 Instancia de la arquitectura model = FCN() print(model) # inferencia con datos sintéticos x = torch.zeros(1, 1, 28, 28) y = model(x) print(f'{x.shape} => {y.shape}') # ### 3.3 Inspección de la arquitectura summary(model, (1, 28, 28), device='cpu', verbose=0) # ## 4 Entrenamiento # # <img src="https://raw.githubusercontent.com/bereml/iap/master/fig/supervisado.svg" width="700"/> # &nbsp; # Ciclo de entrenamiento supervisado. 
# ### 4.1 Training loop

# + colab={"base_uri": "https://localhost:8080/", "height": 568} colab_type="code" id="xCqwGRD1nz1a" outputId="7dc4823c-865a-41ee-b54b-67117e5d4e95"
def train_epoch(dl, model, opt):
    """Runs one optimization pass over every batch of `dl`."""
    # for each batch
    for x, y_true in dl:
        # compute logits
        y_lgts = model(x)
        # compute the loss
        loss = F.cross_entropy(y_lgts, y_true)
        # clear the gradients
        opt.zero_grad()
        # backpropagate
        loss.backward()
        # update parameters
        opt.step()

def eval_epoch(dl, model, num_batches=None):
    """Evaluates the model on up to `num_batches` batches of `dl`.

    Returns (mean loss * 100, mean accuracy * 100).
    NOTE(review): scaling the *loss* by 100 (like the accuracy percentage)
    appears to be for display only — confirm it is intended.
    """
    # keep operations out of the computation graph
    with torch.no_grad():
        # histories
        losses, accs = [], []
        # epoch validation over num_batches
        # if num_batches==None, all batches are used
        for x, y_true in take(dl, num_batches):
            # compute logits
            y_lgts = model(x)
            # compute scores
            y_prob = F.softmax(y_lgts, 1)
            # compute classes
            y_pred = torch.argmax(y_prob, 1)
            # compute the loss
            loss = F.cross_entropy(y_lgts, y_true)
            # compute the accuracy
            acc = (y_true == y_pred).type(torch.float32).mean()
            # store history
            losses.append(loss.item())
            accs.append(acc.item())
        # average
        loss = np.mean(losses) * 100
        acc = np.mean(accs) * 100
        return loss, acc

def train(model, trn_dl, tst_dl, lr=1e-3, epochs=20,
          trn_batches=None, tst_batches=None):
    """Trains `model` with SGD, evaluating on train and test every epoch.

    Returns (loss_hist, acc_hist), each a list of [train, test] pairs.
    """
    # histories
    loss_hist, acc_hist = [], []
    # optimizer
    opt = optim.SGD(model.parameters(), lr=lr)
    # training loop
    for epoch in trange(epochs):
        # train the epoch
        train_epoch(trn_dl, model, opt)
        # evaluate the epoch on the training set
        trn_loss, trn_acc = eval_epoch(trn_dl, model, trn_batches)
        # evaluate the epoch on the test set
        tst_loss, tst_acc = eval_epoch(tst_dl, model, tst_batches)
        # store history
        loss_hist.append([trn_loss, tst_loss])
        acc_hist.append([trn_acc, tst_acc])
        # print progress
        print(f'E{epoch:02} '
              f'loss=[{trn_loss:6.2f},{tst_loss:6.2f}] '
              f'acc=[{trn_acc:5.2f},{tst_acc:5.2f}]')
    return loss_hist, acc_hist
# -

# We train a model:

# instantiate a model
model = FCN()
# train
loss_hist, acc_hist = train(model, trn_dl, tst_dl)

# ### 4.2 Loss and accuracy plots

trn_loss, tst_loss = zip(*loss_hist)
plt.plot(trn_loss, 'tab:orange', label='trn loss')
plt.plot(tst_loss, 'tab:red', label='tst loss')
plt.legend(loc='upper right')
plt.xlabel('época')
plt.ylabel('pérdida')
plt.show()

trn_acc, tst_acc = zip(*acc_hist)
plt.plot(trn_acc, 'tab:green', label='trn acc')
plt.plot(tst_acc, 'tab:blue', label='tst acc')
plt.legend(loc='lower right')
plt.xlabel('época')
plt.ylabel('exactitud')
plt.show()

# ## 5 Evaluation
#
# <img src="https://raw.githubusercontent.com/bereml/iap/master/fig/eval_trn_tst.svg" width="450"/>
# &nbsp;
# Training with a train/test split.

# ### 5.1 Final evaluation

# evaluate on the training set
trn_loss, trn_acc = eval_epoch(trn_dl, model)
# evaluate on the test set
tst_loss, tst_acc = eval_epoch(tst_dl, model)
print(f'loss=[{trn_loss:6.2f},{tst_loss:6.2f}] '
      f'acc=[{trn_acc:5.2f},{tst_acc:5.2f}]')

# ### 5.2 Inference

# display a test batch with true (V) vs predicted (P) labels
with torch.no_grad():
    x, y_true = next(iter(tst_dl))
    y_lgts = model(x)
    y_prob = F.softmax(y_lgts, 1)
    y_pred = torch.argmax(y_prob, 1)
    titles = [f'V={t} P={p}' for t, p in zip(y_true, y_pred)]
    display_batch(x, titles, ROWS, COLS)

# ## Participation
#
# Replace the hidden layer's activation function with a [`nn.ReLU`](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html), retrain, and compare your results.
libretas/1e_mnist_fcn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Benchmark notebook: compare reading cloud-optimized imagery over s3 vs http
# for three collections (HLS, Sentinel-2 L2A, Landsat-8), timing a full read
# of a stacked dask array into a throwaway store.
from satsearch import Search
import stackstac, os, requests
from netrc import netrc
from subprocess import Popen
from getpass import getpass
import rasterio
from distributed import LocalCluster,Client
import datetime
import dask.array as dask_array
import dask
from utils import DevNullStore,DiagnosticTimer,total_nthreads,total_ncores,total_workers,get_chunksize

# +
# choose the dataset and access method for this run
# data = 'hls'
# data = 'sentinel_2a'
data = 'landsat8'
s3 = True
# single band to read per dataset (landsat8 is expanded to 7 bands below)
if data == 'sentinel_2a':
    band='B05'
if data == 'hls':
    band='B01'
if data == 'landsat8':
    band='B1'
# -

if data == 'hls':
    # Setup NASA Credentials (HLS requires Earthdata login)
    urs = 'urs.earthdata.nasa.gov' # Earthdata URL to call for authentication prompts
    prompts = ['Enter NASA Earthdata Login Username \n(or create an account at urs.earthdata.nasa.gov): ',
               'Enter NASA Earthdata Login Password: ']
    try:
        netrcDir = os.path.expanduser("~/.netrc")
        netrc(netrcDir).authenticators(urs)[0]
        del netrcDir
    # Below, create a netrc file and prompt user for NASA Earthdata Login Username and Password
    except FileNotFoundError:
        homeDir = os.path.expanduser("~")
        Popen('touch {0}.netrc | chmod og-rw {0}.netrc | echo machine {1} >> {0}.netrc'.format(homeDir + os.sep, urs), shell=True)
        Popen('echo login {} >> {}.netrc'.format(getpass(prompt=prompts[0]), homeDir + os.sep), shell=True)
        Popen('echo password {} >> {}.netrc'.format(getpass(prompt=prompts[1]), homeDir + os.sep), shell=True)
        del homeDir, urs, prompts

# GDAL tuning shared by every access method (large caches, cookie jar for
# Earthdata redirects)
base_env = stackstac.DEFAULT_GDAL_ENV.updated(dict(
    GDAL_MAX_RAW_BLOCK_CACHE_SIZE='200000000',
    GDAL_SWATH_SIZE='200000000',
    VSI_CURL_CACHE_SIZE='200000000',
    GDAL_HTTP_UNSAFESSL='YES',
    GDAL_HTTP_COOKIEFILE=os.path.expanduser('~/cookies.txt'),
    GDAL_HTTP_COOKIEJAR=os.path.expanduser('~/cookies.txt'),
))

# +
if s3 and data=='hls':
    # Get NASA Temp AWS Credentials (short-lived STS credentials for the
    # LP DAAC bucket)
    s3_cred = requests.get('https://lpdaac.earthdata.nasa.gov/s3credentials').json()
    s3_cred
    env = base_env.updated(
        dict(
            AWS_REGION='us-west-2',
            AWS_NO_SIGN_REQUEST='NO',
            AWS_REQUEST_PAYER='REQUESTER',
            AWS_SECRET_ACCESS_KEY=s3_cred['secretAccessKey'],
            AWS_ACCESS_KEY_ID=s3_cred['accessKeyId'],
            AWS_SESSION_TOKEN=s3_cred['sessionToken']
        )
    )

# Default to the StackStac ~.LayeredEnv
if s3==False and data=='hls':
    env = base_env.updated(dict(AWS_NO_SIGN_REQUEST='YES'))

if s3 and (data == 'landsat8' or data == 'sentinel_2a'):
    env = base_env.updated(
        dict(
            AWS_REGION='us-west-2',
            AWS_REQUEST_PAYER='REQUESTER',
            region_name='us-west-2',
            AWS_NO_SIGN_REQUEST='YES',
        )
    )
env

# +
def get_STAC_items(url, collection, dates, bbox):
    """Query a STAC API and return the raw satsearch results object."""
    results = Search.search(url=url, collections=collection, datetime=dates, bbox=bbox)
    return(results)

def remap_s3_url(stac):
    """Rewrite every asset href in-place from its public https endpoint to the
    equivalent GDAL /vsis3/ path, so rasterio reads directly from s3.
    Returns the (mutated) feature list."""
    for i,entry in enumerate(stac):
        for asset in entry['assets'].keys():
            stac[i]['assets'][asset]['href'] = stac[i]['assets'][asset]['href'].replace('https://lpdaac.earthdata.nasa.gov/', '/vsis3/')
            stac[i]['assets'][asset]['href'] = stac[i]['assets'][asset]['href'].replace('https://sentinel-cogs.s3.us-west-2.amazonaws.com/', '/vsis3/sentinel-cogs/')
            stac[i]['assets'][asset]['href'] = stac[i]['assets'][asset]['href'].replace('https://landsat-pds.s3.us-west-2.amazonaws.com/', '/vsis3/landsat-pds/')
    return(stac)

# small AOI in northern Colorado (lon/lat)
bbox = [-104.79107047, 40.78311181, -104.67687336, 40.87008987]

if data == 'hls':
    url = 'https://cmr.earthdata.nasa.gov/stac/LPCLOUD/'
    collection = ['HLSS30.v1.5']#'C1711924822-LPCLOUD' #HLS
    dates = '2020-01-01/2021-02-10'
    stac_items = get_STAC_items(url,collection,dates,','.join(map(str, bbox))).items()
    s_col = stac_items.geojson()['features']
    if s3:
        s_col = remap_s3_url(s_col)

if data == 'sentinel_2a':
    url = 'https://earth-search.aws.element84.com/v0'
    collection = ['sentinel-s2-l2a-cogs']
    dates = '2019-01-20/2022-02-10'
    stac_items = get_STAC_items(url,collection,dates,bbox).items()
    s_col = stac_items.geojson()['features']
    if s3:
        s_col = remap_s3_url(s_col)

if data == 'landsat8':
    url = 'https://earth-search.aws.element84.com/v0'
    collection = ['landsat-8-l1-c1']
    dates = '2013-11-01/2022-02-10'
    stac_items = get_STAC_items(url,collection,dates,bbox).items()
    s_col = stac_items.geojson()['features']
    if s3:
        s_col = remap_s3_url(s_col)

print('Number of Items: ',len(s_col))
# -

# probe one asset for its profile (dtype/nodata/resolution) to drive the stack
# NOTE(review): asset key 'B7' is hard-coded here even though `data`/`band`
# may select a different collection — confirm intended.
with env.open, rasterio.open(s_col[0]['assets']['B7']['href']) as src:
    b= src.bounds
    prof = src.profile
    dtype = prof['dtype']
    nodata = prof['nodata']
    res = src.res
    print(prof)

#lab_extension = /user/<username>/proxy/8787/status
cluster = LocalCluster(threads_per_worker=1)
cl = Client(cluster)
cl

# Landsat-8 benchmark reads all 7 bands; the others read the single band
if data=='landsat8':
    band = ['B1','B2','B3','B4','B5','B6','B7']
else:
    band=[band]

# lazily stack every STAC item into one dask-backed array
da = stackstac.stack(s_col,dtype=dtype, fill_value=nodata, resolution=res[0],
                     epsg=32613, properties=None, snap_bounds=True,
                     chunksize=-1, assets=band, bounds=list(b), gdal_env=env)
da

# %pdb

# +
# Time a full read: store the dask array into a sink that discards the bytes,
# so only download + decode cost is measured.
diag_timer = DiagnosticTimer()
devnull = DevNullStore()
dat = da.data
chunksize = get_chunksize(dat)
totalsize = dat.nbytes*1e-9
if s3:
    method='s3'
else:
    method='http'
diag_kwargs = dict(nbytes=dat.nbytes, nGBytes=totalsize, chunksize=chunksize,
                   method=method, cloud_source='aws us-west-2',
                   system='aws us-west-2', format='cog')
runtime = datetime.datetime.now().strftime("%Y%m%d_%H%M")
with diag_timer.time(nthreads=total_nthreads(cl), ncores=total_ncores(cl),
                     nworkers=total_workers(cl), **diag_kwargs):
    future = dask_array.store(dat, devnull, lock=False, compute=False)
    dask.compute(future, retries=5)
df = diag_timer.dataframe()
df['throughput_MBps'] = df.nbytes/1e6/df.runtime
df
s3_v_http.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.12 64-bit (''imaging'': conda)'
#     language: python
#     name: python3
# ---

# # Creating a surface mesh from segmentations
#
# This notebook will go over the process of importing an image segmentation into Python and turning it into a set of surface meshes using `vtk`. Image and mesh processing will include steps necessary to:
# - Smooth the surface mesh surface
# - Crop the bone(s) so that the length of the shaft is proportional to the bone width
# - Resample the surface meshes to have equal spacing
#
# Notes:
# - This notebook is based off of code written in the python package pymskt [Python Musculoskeletal Tools](https://github.com/gattia/pymskt) (pymskt)
# - The data for running this notebook is in the `/data` folder of this [repository](https://github.com/gattia/blog_posts).

# # Loading in libraries

import SimpleITK as sitk
import vtk
import matplotlib.pyplot as plt
import numpy as np
import matplotlib

# print the versions of the libraries used to run the notebook
print(f'SimpleITK version: {sitk.__version__}')
print(f'vtk version: {vtk.VTK_MAJOR_VERSION}.{vtk.VTK_MINOR_VERSION}')
print(f'numpy version: {np.__version__}')
print(f'Matplotlib version: {matplotlib.__version__}')

# # Load in image(s) and view them

# set path to image/segmentation we will use in this notebook
path_image = '../data/anthonys_knee.dcm'
path_seg = '../data/anthonys_knee-label.nrrd'

# - Read in image & segmentation > Plot them

# +
image = sitk.ReadImage(path_image)
seg = sitk.ReadImage(path_seg)

# numpy views for plotting; SimpleITK arrays are indexed (slice, row, col)
image_array = sitk.GetArrayFromImage(image)
seg_array = sitk.GetArrayFromImage(seg)
# -

# ### View Single Slice(s)

# +
# Set the slice we want to view
slice_idx = 51

# Create a copy of the segmentation array for plotting - set 0s to nan so they are transparent when plotting
seg_array_plotting = np.copy(seg_array).astype(float)
seg_array_plotting[seg_array_plotting == 0] = np.nan

plt.figure(figsize=(12,12))
plt.imshow(image_array[slice_idx, :, :], cmap='bone')
plt.imshow(seg_array_plotting[slice_idx, :, :], alpha=0.5)
# -

# ### Below cells are for more interactive viewers of segmentations.
# - There are two versions, a simple version and a slightly more intricate version.
# - Either can be used as an example on how to view medical images/segmentations in a jupyter notebook
# - If you are OK with the above, feel free to skip these cells

# ### Simpler Interactive Viewer

# +
from ipywidgets import interact, interactive
import ipywidgets as widgets

plt.figure(figsize=(100, 100))

# callback: redraw the chosen slice with the segmentation overlay
def f(slice_idx):
    plt.imshow(image_array[slice_idx, :, :], cmap='bone')
    plt.imshow(seg_array_plotting[slice_idx, :, :], alpha=0.5)
    return

interact(f, slice_idx=widgets.IntSlider(min=0, max=image_array.shape[0], step=1, value=int(image_array.shape[0]/2)));
# -

# ### Less Simple Viewer - ipywidgets

# +
# callback: slice index + overlay transparency are both interactive
def f(slice_idx, seg_alpha):
    plt.figure(figsize=(10,10))
    plt.imshow(image_array[slice_idx, :, :], cmap='bone')
    plt.imshow(seg_array_plotting[slice_idx, :, :], alpha=seg_alpha)
    plt.show()
    return

interactive_plot = interactive(
    f,
    slice_idx=widgets.IntSlider(
        min=0,
        max=image_array.shape[0]-1,
        step=1,
        value=int(image_array.shape[0]/2)),
    seg_alpha=widgets.FloatSlider(min=0, max=1., step=0.1, value=0.5)
)
output = interactive_plot.children[-1]
output.layout.height = '600px'
interactive_plot
# -

# # Pre-processing of segmentation before meshing

# ### Set border of image to be zeros
# - If the segmentation extends all the way to the edges of the image, then it won't be "closed". This means that there will be a big hole in the mesh at the proximal (top) end of the femur, and at the distal (bottom) end of the tibia.
#

# +
border_size = 1 # How big do we want the border of background pixels to be.
new_seg_array = np.zeros_like(seg_array)
# copy everything except a 1-voxel shell, leaving a zero (background) border
new_seg_array[border_size:-border_size, border_size:-border_size, border_size:-border_size] = seg_array[border_size:-border_size, border_size:-border_size, border_size:-border_size]
# -

# ### Create binary / gaussian smoothed masks for each object we want to mesh
# - We will create surface meshes from the segmentation using the Marching Cubes algorithm.
# - If you create the mesh directly from the segmentation(s), then there will be a dramatic "staircase" in some of the bones along the slices of the image.
# - To mitigate the "staircasing" there are multiple approaches. Commonly the mesh itself will be


# +
def discrete_marching_cubes(vtk_image_reader,
                            n_labels=1,
                            start_label=1,
                            end_label=1,
                            compute_normals_on=True,
                            return_polydata=True
                            ):
    """
    Compute discrete marching cubes (DMC) on a label image.

    Parameters
    ----------
    vtk_image_reader : vtk reader/filter whose output port supplies the label image.
    n_labels : int
        Number of label values to extract surfaces for.
    start_label, end_label : int
        Inclusive label range passed to GenerateValues.
    compute_normals_on : bool
        If True, compute surface normals.
    return_polydata : bool
        If True return the vtkPolyData result, otherwise the filter itself.

    Returns
    -------
    vtk.vtkPolyData or vtk.vtkDiscreteMarchingCubes
    """
    dmc = vtk.vtkDiscreteMarchingCubes()
    dmc.SetInputConnection(vtk_image_reader.GetOutputPort())
    if compute_normals_on:
        dmc.ComputeNormalsOn()
    dmc.GenerateValues(n_labels, start_label, end_label)
    dmc.Update()
    # FIX: was `if ... is True / elif ... is False`, which silently returned
    # None for truthy non-bool arguments (e.g. return_polydata=1).
    if return_polydata:
        return dmc.GetOutput()
    return dmc


def continuous_marching_cubes(vtk_image_reader,
                              threshold=0.5,
                              compute_normals_on=True,
                              compute_gradients_on=True,
                              return_polydata=True):
    """
    Extract an iso-surface at `threshold` from a (smoothed) scalar image.

    Parameters
    ----------
    vtk_image_reader : vtk reader/filter whose output port supplies the image.
    threshold : float
        Iso-value at which the contour is extracted.
    compute_normals_on, compute_gradients_on : bool
        Toggle normal / gradient computation on the filter.
    return_polydata : bool
        If True return the vtkPolyData result, otherwise the filter itself.

    Returns
    -------
    vtk.vtkPolyData or vtk.vtkMarchingContourFilter
    """
    mc = vtk.vtkMarchingContourFilter()
    mc.SetInputConnection(vtk_image_reader.GetOutputPort())
    # FIX: plain if/else instead of `is True` / `is False` ladders, which
    # skipped both branches for truthy non-bool arguments.
    if compute_normals_on:
        mc.ComputeNormalsOn()
    else:
        mc.ComputeNormalsOff()
    if compute_gradients_on:
        mc.ComputeGradientsOn()
    else:
        mc.ComputeGradientsOff()
    mc.SetValue(0, threshold)
    mc.Update()
    if return_polydata:
        return mc.GetOutput()
    return mc


def create_surface_mesh_smoothed(seg_image,
                                 label_idx,
                                 image_smooth_var,
                                 loc_tmp_save='/tmp',
                                 tmp_filename='temp_smoothed_bone.nrrd',
                                 copy_image_transform=True,
                                 mc_threshold=0.5):
    """
    Create surface mesh based on a filtered binary image to try and get
    smoother surface representation.

    Parameters
    ----------
    seg_image : SimpleITK.Image
        Label image containing the bone segmentation.
    label_idx : int
        Label value of the object to mesh.
    image_smooth_var : float
        Smoothing variance passed to the image filter.
    loc_tmp_save, tmp_filename : str
        Where the intermediate smoothed image is written so vtk can read it.
    copy_image_transform : bool
        If True, copy the image's spatial transform onto the mesh.
    mc_threshold : float
        Iso-value for the marching-cubes extraction.

    Returns
    -------
    vtk.vtkPolyData
    """
    import os  # FIX: os was used below but never imported in this notebook

    # NOTE(review): msktimage, meshTransform and safely_delete_tmp_file are
    # pymskt helpers that are not imported anywhere in this notebook - this
    # cell raises NameError as-is; confirm the intended imports from pymskt.
    # Set border of segmentation to 0 so that segs are all closed.
    seg_image = msktimage.set_seg_border_to_zeros(seg_image, border_size=1)
    # smooth/filter the image to get a better surface.
    filtered_image = msktimage.smooth_image(seg_image, label_idx, image_smooth_var)
    # save filtered image to disk so can read it in using vtk nrrd reader
    sitk.WriteImage(filtered_image, os.path.join(loc_tmp_save, tmp_filename))
    smoothed_nrrd_reader = msktimage.read_nrrd(os.path.join(loc_tmp_save, tmp_filename),
                                               set_origin_zero=True)
    # create the mesh using continuous marching cubes applied to the smoothed binary image.
    smooth_mesh = continuous_marching_cubes(smoothed_nrrd_reader, threshold=mc_threshold)
    if copy_image_transform:
        # copy image transform to the mesh so that when viewed (e.g. in 3D Slicer) it is aligned with image
        smooth_mesh = meshTransform.copy_image_transform_to_mesh(smooth_mesh, seg_image)
    # Delete tmp files
    safely_delete_tmp_file(loc_tmp_save, tmp_filename)
    return smooth_mesh
create_surface_mesh/create_surface_meshes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Addition Prediction Problem
# Known from https://arxiv.org/abs/1410.4615
#
# Below is a picture of an example of what we want to achieve
# ![Many-to-many model](./seq2seq.PNG)

from IPython.display import Image
from IPython.core.display import HTML
from random import seed, randint
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, TimeDistributed, Dense, RepeatVector


def random_sum_pairs(n_examples, n_numbers, largest):
    '''Function to generate a list of random integers and the sum of those integers.'''
    X, y = [], []
    for i in range(n_examples):
        # `_` instead of reusing `i`, which shadowed the outer loop variable
        input_seq = [randint(1, largest) for _ in range(n_numbers)]
        output_seq = sum(input_seq)
        X.append(input_seq)
        y.append(output_seq)
    return X, y


# Test of above function
seed(1)
n_examples = 2
n_numbers = 2
largest = 10
X, y = random_sum_pairs(n_examples, n_numbers, largest)
X, y


def to_string(X, y, n_numbers, largest):
    '''Function to convert the output from random_sum_pairs to a string representation.

    Inputs are left-padded with spaces to a fixed width so that every
    example has the same sequence length.'''
    max_length = int(n_numbers * np.ceil(np.log10(largest+1)) + n_numbers - 1)
    X_str = []
    for i in X:
        str_i = '+'.join([str(n) for n in i])
        str_i = ''.join([' ' for _ in range(max_length-len(str_i))]) + str_i
        X_str.append(str_i)
    max_length = int(np.ceil(np.log10(n_numbers * (largest+1))))
    y_str = []
    for j in y:
        str_j = str(j)
        str_j = ''.join([' ' for _ in range(max_length-len(str_j))]) + str_j
        y_str.append(str_j)
    return X_str, y_str


# Test to_string
X, y = to_string(X, y, n_numbers, largest)
X, y


# integer encode strings
def integer_encode(X, y, vocab):
    '''Function to integer encode our string representation of the input and output'''
    char_to_int = dict((c, i) for i, c in enumerate(vocab))
    X_enc = []
    for p in X:
        integer_encoded = [char_to_int[char] for char in p]
        X_enc.append(integer_encoded)
    y_enc = []
    for p in y:
        integer_encoded = [char_to_int[char] for char in p]
        y_enc.append(integer_encoded)
    return X_enc, y_enc


# Test integer_encode
vocab = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', ' ']
X, y = integer_encode(X, y, vocab)
print(X)
print(y)


def one_hot_encode(X, y, max_int):
    """Function to one-hot encode our integer encoding"""
    X_hot, y_hot = [], []
    for i in X:
        seq = []
        for j in i:
            temp = np.zeros(max_int).astype(int)
            temp[j] = 1
            seq.append(temp)
        X_hot.append(seq)
    for i in y:
        seq = []
        for j in i:
            temp = np.zeros(max_int).astype(int)
            temp[j] = 1
            seq.append(temp)
        y_hot.append(seq)
    return X_hot, y_hot


# Test one-hot encoding
X, y = one_hot_encode(X, y, len(vocab))
X, '-'*80, y


def generate_data(n_examples, n_numbers, largest, vocab):
    '''Function for generating data for our model, using above helper-functions.'''
    # generate pairs
    X, y = random_sum_pairs(n_examples, n_numbers, largest)
    # convert to strings
    X, y = to_string(X, y, n_numbers, largest)
    # integer encode
    X, y = integer_encode(X, y, vocab)
    # one hot encode
    X, y = one_hot_encode(X, y, len(vocab))
    # return as NumPy arrays
    # FIX: was `array(X), array(y)` - bare `array` is undefined here (only
    # `numpy as np` is imported), which raised NameError.
    X, y = np.array(X), np.array(y)
    return X, y


# Inverse transform of one-hot encoding
def invert(seq, vocab):
    '''Decode a one-hot encoded sequence back to its string form.'''
    # FIX: was `enumerate(alphabet)` - `alphabet` is undefined; the lookup
    # must be built from the `vocab` parameter.
    int_to_char = dict((i, c) for i, c in enumerate(vocab))
    strings = []
    for pattern in seq:
        string = int_to_char[np.argmax(pattern)]
        strings.append(string)
    return ''.join(strings)


# number of math terms
n_terms = 3
# largest value for any single input digit
largest = 10
# scope of possible symbols for each input or output time step
vocab = [str(x) for x in range(10)] + ['+', ' ']
# size of alphabet: (12 for 0-9, + and ' ')
# FIX: was `len(alphabet)` - `alphabet` is undefined; the symbol set is `vocab`.
n_chars = len(vocab)
# length of encoded input sequence (8 for '10+10+10)
n_in_seq_length = int(n_terms * np.ceil(np.log10(largest+1)) + n_terms - 1)
# length of encoded output sequence (2 for '30')
n_out_seq_length = int(np.ceil(np.log10(n_terms * (largest+1))))

# +
# define LSTM: encoder LSTM -> repeated context -> decoder LSTM -> per-step softmax
model = Sequential()
model.add(LSTM(75, input_shape=(n_in_seq_length, n_chars)))
model.add(RepeatVector(n_out_seq_length))
model.add(LSTM(50, return_sequences=True))
model.add(TimeDistributed(Dense(n_chars, activation='softmax')))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# -

# fit model
X, y = generate_data(75000, n_terms, largest, vocab)
model.fit(X, y, epochs=1, batch_size=32, verbose=2)

# evaluate LSTM
X, y = generate_data(100, n_terms, largest, vocab)
loss, acc = model.evaluate(X, y, verbose=0)
print('Loss: %f, Accuracy: %f' % (loss, acc*100))

# predict
for _ in range(10):
    # generate an input-output pair
    X, y = generate_data(1, n_terms, largest, vocab)
    # make prediction
    yhat = model.predict(X, verbose=0)
    # decode input, expected and predicted
    in_seq = invert(X[0], vocab)
    out_seq = invert(y[0], vocab)
    predicted = invert(yhat[0], vocab)
    print('%s = %s (expect %s)' % (in_seq, predicted, out_seq))
model_notebooks/Encoder-Decoder_LSTM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import codecs import matplotlib.pyplot as plt import numpy as np import odf import math import pdfkit from scipy.stats import linregress import scipy.constants as const from itertools import chain from statistics import mean from uncertainties import ufloat, ufloat_fromstr from uncertainties.umath import * from sklearn.linear_model import LinearRegression from scipy.optimize import curve_fit, minimize_scalar from lmfit.models import LorentzianModel from IPython.display import display, Latex pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('display.width', None) # ### calibration of magnetic field # - SI units. B is converted from $[mT]$ to $[T]$. # + I = np.array([0, 0.4, 0.98, 1.67, 2.28, 3.22, 3.93, 4.54, 5.14, 5.48, 6.40, 6.92, 7.38, 7.86, 8.29, 9.16]) B = 10**(-3) * np.array([8, 30, 64, 105, 143, 200, 245, 283, 320, 342, 398, 428, 455, 489, 509,556]) ib = pd.DataFrame(list(zip(I, B)), columns=['I','B']) ib.transpose() # + X = np.array(I).reshape(-1, 1) Y = np.array(B).reshape(-1, 1) reg = LinearRegression().fit(X, Y) intercept = reg.intercept_ coef = reg.coef_ reg_value = reg.score(X, Y) def magnetic_field(I): return (coef[0, 0] * np.array(I) + intercept[0]) # - plt.figure(dpi=100) plt.plot(X, magnetic_field(X)) plt.scatter(X, Y, color='b') plt.xlabel('$I\ [A$]') plt.ylabel('$B\ [T]$') plt.grid() plt.savefig('calibration.pdf') plt.show() display(Latex("$R^2$ = {}".format(reg_value))) display(Latex("$f(x) = {}x + {}$".format(coef[0, 0], intercept[0]))) # ### calculation of Bohr magneton # - t, h, c, B are etalon spacing, Planck's constant, speed of light and array of magnetic fields respectively. # - All SI units. Ring positions are converted from $[mm]$ to $[m]$. 
# +
def radii(pos):
    """Returns consecutive radii starting with the outermost ring.
    Positions are taken from the leftmost to the rightmost rings.

    `pos` holds ring positions in metres; its length must be a multiple of 16
    (left and right crossings of each split ring order)."""
    n = len(pos)
    assert n % 16 == 0
    # pair the i-th crossing from the left with the i-th from the right;
    # half their separation is the ring radius
    r = [abs(pos[n-1-i] - pos[i]) / 2 for i in range(n // 2)]
    return np.array(r)


def wave_num_diff(pos):
    """Returns the average difference in wave numbers of the two split wavelengths."""
    t = 3 * 10**(-3)  # etalon spacing [m]
    r = radii(pos)
    r_sq = np.square(r)
    n_r = len(r)
    p_max = n_r // 2 # maximum order of rings
    # alternating sum over consecutive squared radii (splitting within an order)
    delta_sum = 0
    for i in range(n_r):
        delta_sum += (-1)**i * r_sq[i]
    avg_delta = delta_sum / p_max
    # pairwise-alternating sum (spacing between consecutive orders)
    Delta_sum = 0
    for i in range(n_r):
        Delta_sum += (-1)**(i // 2) * r_sq[i]
    avg_Delta = Delta_sum / p_max
    # local renamed from `wave_num_diff`, which shadowed the function name
    dv = (1/(2 * t)) * (avg_delta/avg_Delta)
    return dv


def bohr_magneton(I, pos):
    """Bohr magneton [J/T] from one measurement: coil current I [A] and
    ring positions pos [m]; uses the magnetic-field calibration fit above."""
    h = const.h
    c = const.c
    B = magnetic_field(I)
    wave_num = wave_num_diff(pos)
    bohr_mag = (h * c) * (wave_num / (2 * B))
    return bohr_mag


# +
# Measurement 1: micrometer readings [half-mm divisions]; /2000 converts to metres.
# reading1 = [0.5, 0.9, 2.9, 3.5, 4.78, 5.54, 8.58, 9.60, 26.6, 27.64, 30.32, 31.06, 33.26, 33.82, 35.78, 36.36]
reading1 = [0.5, 0.9, 2.9, 3.5, 5.78, 6.54, 9.58, 10.60, 26.6, 27.64, 30.32, 31.06, 33.26, 33.82, 35.78, 36.36]
pos1 = np.array(reading1) / 2000
I1 = 6.05
u1 = bohr_magneton(I1, pos1)
u1

# +
# Measurement 2: main-scale (msd) + vernier (ssd, hundredths) readings.
# msd1 = [12, 13, 15, 15, 17, 18, 21, 22, 38, 39, 42, 43, 45, 45, 47, 48]
msd2 = [12, 13, 15, 15, 17, 18, 21, 22, 39, 40, 43, 44, 46, 46, 48, 49]
ssd2 = [80, 34, 22, 94, 96, 80, 80, 88, 30, 40, 52, 46, 10, 78, 82, 40]
pos2 = (np.array(msd2) + np.array(ssd2) / 100) / 2000
I2 = 7.15
u2 = bohr_magneton(I2, pos2)
u2

# +
# reading2b = [0.06, 0.68, 2.50, 3.2, 5.38, 6.08, 8.98, 10.14, 27.04, 28.32, 30.80, 31.60, 33.74, 34.48, 36.38, 36.92]
# pos2b = np.array(reading4) / 2000
# I2b = 7.06
# u2b = bohr_magneton(I4, pos4)
# u2b

# +
# Measurement 3.
# reading3 = [0.52, 0.96, 2.60, 3.34, 5.46, 6.28, 9, 10.46, 27, 28.32, 30.74, 31.68, 32.8, 33.58, 35.4, 35.9]
reading3 = [0.52, 0.96, 2.60, 3.34, 5.46, 6.28, 9, 10.46, 27, 28.32, 30.74, 31.68, 33.8, 34.58, 36.4, 36.9]
pos3 = np.array(reading3) / 2000
I3 = 7.97
u3 = bohr_magneton(I3, pos3)
u3

# +
# Measurement 4.
# reading6 = [0.36, 0.94, 2.54, 3.34, 5.44, 6.4, 8.96, 10.4, 27.96, 29.56, 31.86, 32.78, 34.82, 35.74, 37.42, 38.08]
reading4 = [0.36, 0.94, 2.54, 3.34, 5.44, 6.4, 8.96, 10.4, 26.96, 28.56, 30.86, 31.78, 33.82, 34.74, 36.42, 37.08]
pos4 = np.array(reading4) / 2000
I4 = 9.07
u4 = bohr_magneton(I4, pos4)
u4

# +
# Measurement 5.
# msd5 = [14, 15, 16, 17, 19, 20, 23, 24, 39, 40, 41, 42, 43, 44, 46, 47]
msd5 = [14, 15, 16, 17, 19, 20, 23, 24, 41, 43, 45, 46, 48, 49, 51, 52]
ssd5 = [54, 26, 88, 72, 66, 78, 36, 80, 80, 52, 84, 96, 80, 60, 34, 8]
pos5 = (np.array(msd5) + np.array(ssd5) / 100) / 2000
I5 = 10.13
u5 = bohr_magneton(I5, pos5)
u5

# +
# # reading7 = [0.74, 0.9, 3.16, 4, 5.94, 6.98, 9.6, 11.1, 27.7, 29.24, 31.64, 32.64, 34.56, 35.38, 37.18, 37.92]
# reading7 = [0.74, 1.4, 3.16, 4, 5.94, 6.98, 9.6, 11.1, 27.7, 29.24, 31.64, 32.64, 34.56, 35.38, 37.18, 37.92]
# pos7 = np.array(reading7) / 2000
# I7 = 10
# print(bohr_magneton(I7, pos7))

# u = np.mean([u1, u2, u3, u5, u6])
# print(u)

# +
# Linear fit of half the wave-number splitting against B across all measurements.
I_arr = np.array([I1, I2, I3, I4, I5])
B_arr = np.array([magnetic_field(I) for I in I_arr])
pos_arr = [pos1, pos2, pos3, pos4, pos5]
wave_num_arr = np.array([wave_num_diff(pos) for pos in pos_arr])
reg = linregress(B_arr, wave_num_arr / 2)
print(reg[0] * (const.h * const.c), reg[4] * (const.h * const.c))

plt.figure(dpi=100)
plt.scatter(B_arr, wave_num_arr/2, color='b')
plt.plot(B_arr, [reg[0] * B + reg[1] for B in B_arr])
plt.xlabel('$B\ [T]$')
plt.ylabel('$\Delta \hat v\ [m^{-1}]$')
plt.grid()
plt.savefig('linear_fit.pdf')
plt.show()

# +
### from linear regression
# NOTE(review): the print in the previous cell scales the slope by (h*c);
# u_reg here is the raw slope/stderr [m^-1 T^-1], not yet in J/T - confirm
# which units are intended downstream.
u_reg = reg[0]
u_reg_err = reg[4]
print(u_reg, u_reg_err)

### from average
# FIX: was np.array([u1, u2, u3, u5, u6]) - u6 is never defined in the live
# code (its cell is commented out), which raised NameError. The five
# measurements computed above are u1..u5.
u_arr = np.array([u1, u2, u3, u4, u5])
u_avg = np.mean(u_arr)
u_avg_err = np.std(u_arr)
print(u_avg, u_avg_err)

### to LaTeX
# Radii in mm, magnetons scaled to 10^-24 J/T, rounded for the summary table.
r_arr = [1000 * radii(pos) for pos in [pos1, pos2, pos3, pos4, pos5]]
r_arr = np.transpose(np.round(r_arr, 2))
u_arr_df = 10**(24) * u_arr
u_arr_df = np.array(u_arr_df, float)
u_arr_df = np.round(u_arr_df, 2)
data = np.array([I_arr, np.round(B_arr, 3), r_arr[0], r_arr[1], r_arr[2], r_arr[3], r_arr[4], r_arr[5], r_arr[6], r_arr[7], np.round(wave_num_arr, 1), u_arr_df])
df = pd.DataFrame(np.transpose(data),
                  columns=["$I [A]$", "$B [T]$", "$r_{4, a}$", "$r_{4, b}$", "$r_{3, a}$", "$r_{3, b}$", "$r_{2, a}$", "$r_{2, b}$", "$r_{1, a}$", "$r_{1, b}$", "$\Delta \hat v$", "$\mu_B$"])
df

# +
# Ring positions of each measurement, centred on the midpoint between the
# two innermost rings, plotted against the coil current.
plt.figure(figsize=(4, 6), dpi=120)
plt.scatter([6]*16, pos1 - (pos1[7]+pos1[8])/2, marker='.')
plt.scatter([7]*16, pos2 - (pos2[7]+pos2[8])/2, marker='.')
# plt.scatter([7]*16, pos4 - (pos4[7]+pos4[8])/2, marker='.')
plt.scatter([8]*16, pos3 - (pos3[7]+pos3[8])/2, marker='.')
plt.scatter([9]*16, pos4 - (pos4[7]+pos4[8])/2, marker='.')
plt.scatter([10]*16, pos5 - (pos5[7]+pos5[8])/2, marker='.')
# plt.scatter([7]*16, pos7 - (pos7[7]+pos7[8])/2, marker='.')
plt.show()
Advanced Physics Lab II/Zeeman Effect/w1_g9_Zeeman_Effect.ipynb