code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Swedish Auto Insurance: predict total claim payments from the number of
# claims, comparing Linear, Lasso and Random Forest regressors.

# + id="jFWRKD47qCIs"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt  # data visualisation
import seaborn as sns  # data visualisation

# + id="B6yU2objqQ2k"
# The first rows of the raw file are preamble, hence skiprows=4.
data = pd.read_csv('/content/insurance.csv', skiprows=4)
# FIX: head is a method -- the original bare `data.head` only referenced the
# bound method and displayed nothing useful.
data.head()

# + id="W9DL_dyGwn1g"
data.describe()

# + id="awlnkIMyqkyp"
# Shorten the unwieldy target-column name.
data.rename(
    columns={'Y = total payment for all the claims in thousands of Swedish Kronor': 'Payments'},
    inplace=True,
)
data.head()

# + id="dcMr5VpSrTmj"
# The index holds the number of claims (the X column of the original data);
# materialise it as a proper column.
data['Claims'] = data.index
data.head()

# + id="RjO2FcPnrlsC"
data.reset_index(drop=True, inplace=True)
data.head()

# + id="i2QUst4Pu2UE"
data.plot()

# + id="ggu5PM4iu_ug"
plt.scatter(x=data['Payments'], y=data['Claims'])
plt.xlabel("Payments")
# FIX: label typo -- was "cClaims".
plt.ylabel("Claims")

# + id="C6nARi2UvzHF"
x = data['Payments']
y = data['Claims']

# + id="Y4exEdhKvNYh"
plt.bar(x, y)
# FIX: the axis labels were swapped relative to the data actually plotted
# (x is Payments, y is Claims).
plt.xlabel("Payments")
plt.ylabel("Claims")

# + id="X0u_laKSvpWp"
correlation = data['Payments'].corr(data['Claims'])
correlation

# + id="thAzSyrpwP2O"
sns.heatmap(data.corr(), annot=True)

# + [markdown] id="qlglkrzrynmX"
# Linear Regression

# + id="ORonP1-hxeDB"
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import linear_model

# + id="FLkN8iwjx0da"
x = data['Claims'].values
y = data['Payments'].values
x = x.reshape(-1, 1)  # sklearn expects a 2-D feature matrix
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=42)

# + id="yqHbhUpAx7KL"
reg = LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_test, y_test)  # R^2 on the hold-out set

# + id="wNqAfLhVx_XH"
y_predicts = reg.predict(x_test)
plt.scatter(x=x_train, y=y_train)
plt.plot(x_test, y_predicts, color="green")

# + [markdown] id="fOu92QARgDVo"
# Lasso Regression

# + id="X3x6fN8pzDn6"
from sklearn.linear_model import Lasso

# + id="t6uVNFZDygGA"
model = Lasso()
model.fit(x_train, y_train)
model.score(x_test, y_test)

# + id="Sxwevy1UzB-h"
y_predict = model.predict(x_test)
plt.scatter(x=x_train, y=y_train)
plt.plot(x_test, y_predict, color="red")

# + [markdown] id="NIW5SaqZf_Rv"
# Random Forest

# + id="L_hcE9IO1U3t"
from sklearn.ensemble import RandomForestRegressor
reg2 = RandomForestRegressor(n_estimators=100, n_jobs=1)
reg2.fit(x_train, y_train)
reg2.score(x_test, y_test)

# + id="H2cNA28wb3iH"
y_pred = reg2.predict(x_test)
plt.scatter(x=x_train, y=y_train)
plt.plot(x_test, y_pred, color="black")

# + id="aw5zlCnxdzJz"
Swedish Auto Insurance/Model/Swedish_Auto_Insurance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy from matplotlib import pyplot # %matplotlib inline from matplotlib import rcParams rcParams['font.family'] = 'serif' rcParams['font.size'] = 16 # + nx = 41 dx = 2./(nx-1) nt = 20 nu = 0.3 #value of viscosity sigma = .2 dt = sigma*dx**2/nu x = numpy.linspace(0,2,nx) ubound = numpy.where(x >= 0.5) lbound = numpy.where(x <= 1) u = numpy.ones(nx) u[numpy.intersect1d(lbound, ubound)] = 2 un = numpy.ones(nx) # + for n in range(nt): un = u.copy() u[1:-1] = un[1:-1] + nu*dt/dx**2*(un[2:] - 2*un[1:-1] + un[0:-2]) pyplot.plot(x,u, ls = '--', lw = '4') pyplot.ylim(0,2.5); # - from JSAnimation.IPython_display import display_animation from matplotlib import animation nt = 50 u = numpy.ones(nx) u[numpy.intersect1d(lbound,ubound)] = 2 un = numpy.ones(nx) # + fig = pyplot.figure(figsize=(8,5)) ax = pyplot.axes(xlim=(0,2), ylim=(1,2.5)) line = ax.plot([],[], ls = '--', lw =3)[0] def diffusion(i): line.set_data(x,u) un = u.copy() u[1:-1] = un[1:-1] + nu*dt/dx**2*(un[2:] - 2*un[1:-1] + un[0:-2]) animation.FuncAnimation(fig, diffusion, frames=nt, interval = 100) # -
lessons/02_spacetime/02_03_1D_Diffusion_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'imda/mesolitica-tpu.json' import os import mp from tqdm import tqdm from glob import glob import soundfile as sf files = glob('TEXT/*.TXT') len(files) with open(files[0]) as fopen: txt = fopen.read().split('\n') txt = [txt[i: i + 2] for i in range(0, len(txt), 2)] len(txt) txt[1] speaker = files[0].split('/')[1].replace('.TXT', '') channel = speaker[-1] speaker = speaker[1:-1] # + from unidecode import unidecode splitted = txt[7][0].split('\t') wav = unidecode(splitted[0]) path = f'part2/SPEAKER{speaker}/SESSION{channel}/{wav}.WAV' y, sr = sf.read(path) # - import IPython.display as ipd ipd.Audio(y, rate = sr) txt[7][1].split('\t')[1] # + import unicodedata import re import itertools vocabs = [" ", "a", "e", "n", "i", "t", "o", "u", "s", "k", "r", "l", "h", "d", "m", "g", "y", "b", "p", "w", "c", "f", "j", "v", "z", "0", "1", "x", "2", "q", "5", "3", "4", "6", "9", "8", "7"] def preprocessing_text(string): string = unicodedata.normalize('NFC', string.lower()) string = string.replace('\'', '') string = ''.join([c if c in vocabs else ' ' for c in string]) string = re.sub(r'[ ]+', ' ', string).strip() string = ( ''.join(''.join(s)[:2] for _, s in itertools.groupby(string)) ) return string # + def get_after_mandarin(word): if '<mandarin>' in word: w = word.split('>')[1].split(':')[1] return w.split('</')[0] else: return word def get_before_mandarin(word): if '</mandarin>' in word: return word.split('</')[0] else: return word def replace_paralinguistic(string, replaces = ['(ppb)', '(ppc)', '(ppl)', '(ppo)', '<UNK>', '<MANDARIN>']): for r in replaces: string = string.replace(r, ' ') string = string.split() string = [get_after_mandarin(w) for w in string] string = [get_before_mandarin(w) for w in 
string] string = [w for w in string if w[0] not in '<[(' and w[-1] not in '>])'] return ' '.join(string) # - def loop(files): files, index = files results = [] for i in tqdm(files): with open(i) as fopen: txt = fopen.read().split('\n') txt = [txt[i: i + 2] for i in range(0, len(txt), 2)] speaker = i.split('/')[1].replace('.TXT', '') channel = speaker[-1] speaker = speaker[1:-1] for k in range(len(txt)): if len(txt[k]) != 2: continue splitted = txt[k][0].split('\t') wav = unidecode(splitted[0]) path = f'part2/SPEAKER{speaker}/SESSION{channel}/{wav}.WAV' text = txt[k][1].split('\t')[1] try: if len(text) < 2: continue if text[0] == '<' and text[-1] == '>': continue text = replace_paralinguistic(text) text = preprocessing_text(text) if len(text): results.append((path, text)) except Exception as e: pass return results len(loop((files[:1],0))) singlishs = mp.multiprocessing(files, loop, cores = 6) len(singlishs) # + import six def to_example(dictionary): """Helper: build tf.Example from (string -> int/float/str list) dictionary.""" features = {} for (k, v) in six.iteritems(dictionary): if not v: raise ValueError('Empty generated field: %s' % str((k, v))) # Subtly in PY2 vs PY3, map is not scriptable in py3. As a result, # map objects will fail with TypeError, unless converted to a list. if six.PY3 and isinstance(v, map): v = list(v) if isinstance(v[0], six.integer_types) or np.issubdtype( type(v[0]), np.integer ): features[k] = tf.train.Feature( int64_list=tf.train.Int64List(value=v) ) elif isinstance(v[0], float): features[k] = tf.train.Feature( float_list=tf.train.FloatList(value=v) ) elif isinstance(v[0], six.string_types): if not six.PY2: # Convert in python 3. 
v = [bytes(x, 'utf-8') for x in v] features[k] = tf.train.Feature( bytes_list=tf.train.BytesList(value=v) ) elif isinstance(v[0], bytes): features[k] = tf.train.Feature( bytes_list=tf.train.BytesList(value=v) ) else: raise ValueError( 'Value for %s is not a recognized type; v: %s type: %s' % (k, str(v[0]), str(type(v[0]))) ) return tf.train.Example(features=tf.train.Features(feature=features)) # - from glob import glob import tensorflow as tf from tqdm import tqdm import malaya_speech from malaya_speech.utils import subword import numpy as np import mp from google.cloud import storage subwords = subword.load('transducer-singlish.subword') sr = 16000 maxlen = 18 minlen_text = 1 global_count = 0 def loop(files): client = storage.Client() bucket = client.bucket('mesolitica-tpu-general') files, index = files output_file = f'{index}-{global_count}.tfrecord' writer = tf.io.TFRecordWriter(output_file) for s in tqdm(files): try: if len(s[1]) < minlen_text: continue y, _ = malaya_speech.load(s[0]) if (len(y) / sr) > maxlen: continue t = subword.encode(subwords, s[1], add_blank=False) example = to_example({'waveforms': y.tolist(), 'targets': t, 'targets_length': [len(t)]}) writer.write(example.SerializeToString()) except Exception as e: print(e) pass writer.close() blob = bucket.blob(f'imda/part2/{output_file}') blob.upload_from_filename(output_file) os.system(f'rm {output_file}') loop((singlishs[:10], 0)) batch_size = 25000 for i in range(0, len(singlishs), batch_size): batch = singlishs[i: i + batch_size] mp.multiprocessing(batch, loop, cores = 6, returned = False) global_count += 1 # + from malaya_speech.utils import tf_featurization config = malaya_speech.config.transducer_featurizer_config featurizer = tf_featurization.STTFeaturizer(**config) # + n_mels = 80 def preprocess_inputs(example): s = featurizer.vectorize(example['waveforms']) mel_fbanks = tf.reshape(s, (-1, n_mels)) example['inputs'] = mel_fbanks return example def parse(serialized_example): data_fields = { 
'waveforms': tf.compat.v1.VarLenFeature(tf.float32), 'targets': tf.compat.v1.VarLenFeature(tf.int64), 'targets_length': tf.compat.v1.VarLenFeature(tf.int64), } features = tf.compat.v1.parse_single_example( serialized_example, features = data_fields ) for k in features.keys(): features[k] = features[k].values features = preprocess_inputs(features) keys = list(features.keys()) for k in keys: if k not in ['waveforms', 'inputs', 'targets', 'targets_length']: features.pop(k, None) return features def get_dataset(files, batch_size = 2, shuffle_size = 32, thread_count = 24): def get(): dataset = tf.data.TFRecordDataset(files) dataset = dataset.shuffle(shuffle_size) dataset = dataset.map(parse, num_parallel_calls = thread_count) dataset = dataset.repeat() return dataset return get # - files = tf.io.gfile.glob('gs://mesolitica-tpu-general/imda/part2/*.tfrecord') d = get_dataset(files)() d = d.as_numpy_iterator() next(d)
data/imda/convert-part2-tfrecord.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Evaluation of traffic light detection # based on [CarND Object Detection Lab](https://github.com/udacity/CarND-Object-Detection-Lab) # + import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from PIL import Image from PIL import ImageDraw from PIL import ImageColor import time import os import xml.etree.cElementTree as ET # %matplotlib inline plt.style.use('ggplot') # - # ## Helper functions # + # Colors (one for each class) cmap = ImageColor.colormap # print("Number of colors =", len(cmap)) COLOR_LIST = sorted([c for c in cmap.keys()]) # # Utility funcs # def filter_boxes(min_score, boxes, scores, classes): """Return boxes with a confidence >= `min_score`""" n = len(classes) idxs = [] for i in range(n): if scores[i] >= min_score: idxs.append(i) filtered_boxes = boxes[idxs, ...] filtered_scores = scores[idxs, ...] filtered_classes = classes[idxs, ...] return filtered_boxes, filtered_scores, filtered_classes def to_image_coords(boxes, height, width): """ The original box coordinate output is normalized, i.e [0, 1]. This converts it back to the original coordinate based on the image size. """ box_coords = np.zeros_like(boxes) box_coords[:, 0] = boxes[:, 0] * height box_coords[:, 1] = boxes[:, 1] * width box_coords[:, 2] = boxes[:, 2] * height box_coords[:, 3] = boxes[:, 3] * width return box_coords def draw_boxes(image, boxes, classes, thickness=4): """Draw bounding boxes on the image""" draw = ImageDraw.Draw(image) for i in range(len(boxes)): bot, left, top, right = boxes[i, ...] 
class_id = int(classes[i]) color = COLOR_LIST[class_id] draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, fill=color) def load_graph(graph_file): """Loads a frozen inference graph""" graph = tf.Graph() with graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(graph_file, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') return graph # - # Frozen inference graph files. NOTE: change the path to where you saved the models. base_dir = 'frozen_models' FASTER_RCNN_GRAPH_FILE = os.path.join(base_dir, 'real', 'faster_rcnn_resnet101_coco_2018_01_28_ck_1368_1096', 'frozen_inference_graph.pb') # + detection_graph = load_graph(FASTER_RCNN_GRAPH_FILE) category_index = {1: {'id': 1, 'name': 'green'}, 2: {'id': 2, 'name': 'red'}, 3: {'id': 3, 'name': 'yellow'}, 4: {'id': 4, 'name': 'off'}} def inference_test(detection_graph, image): image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0) # The input placeholder for the image. image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') with tf.Session(graph=detection_graph) as sess: # Actual detection. 
(boxes, scores, classes) = sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: image_np}) # Remove unnecessary dimensions boxes = np.squeeze(boxes) scores = np.squeeze(scores) classes = np.squeeze(classes) confidence_cutoff = 0.8 # Filter boxes with a confidence score less than `confidence_cutoff` boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes) return boxes, scores, classes # + test_set_path = 'data/real_data/test' image_files = [] xml_files = [] for file_name in os.listdir(test_set_path): if file_name.endswith(".jpg"): image_files.append(file_name) elif file_name.endswith(".xml"): xml_files.append(file_name) image_files = sorted(image_files) xml_files = sorted(xml_files) # + result_strings = [] truth_list = [] prediction_list = [] num_correct = 0 for i in range(len(image_files)): image = Image.open(os.path.join(test_set_path, image_files[i])) boxes, scores, classes = inference_test(detection_graph, image) width, height = image.size box_coords = to_image_coords(boxes, height, width) truth = -1 try: root = ET.ElementTree(file=os.path.join(test_set_path, xml_files[i])).getroot() truth = root.find('object').find('name').text except: truth = 'no_box' prediction = 'no_box' class_index = -1 score = -1 if len(boxes) > 0: prediction = category_index[classes[0]]['name'] class_index = classes[0] score = scores[0] num_boxes = len(boxes) #file_name, truth, prediction, class_index, score, num_boxes result = '{}, {}, {}, {}, {}, {}'.format(image_files[i], truth, prediction, class_index, score, num_boxes) result_strings.append(result) truth_list.append(truth) prediction_list.append(prediction) if (truth == prediction): num_correct += 1 print(result) # - print(num_correct)
ros/src/tl_detector/light_classification/evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Main building blocks when constructing larger code

# +
# a function is a useful device that groups together a set of
# statements so they can be run more than once
# -

def name_of_function(args1, args2, etc):
    '''
    This is where the function's document string (docstring) goes.
    when you call help() on your function, it will be printed out.
    '''
    # Do stuff here
    # Return desired effect

def greetings():
    # Prints a fixed greeting (German).
    print('Hoi. Wir gehts?')

# Bare name: this only references the function object, it does NOT call it.
greetings

greetings()

# +
# Accepting parametres (arguments)
# Let's write a function that greets people with their name.
# -

def greeting(name):
    '''Greet the given person by name.'''
    print(f'Hello {name}')

greeting('Muti')

# ## Using return
#
# So far we've only seen print() used, but if we actually want to save the
# resulting variable we need to use the **return keyword**.
#
# <code>return</code> allows a function to *return* a result that can then be
# stored as a variable, or used in whatever manner a user wants.
#
# ### Example: Addition function

def addition(num1, num2):
    '''Return the sum of num1 and num2.'''
    return num1 + num2

addition(345, 6785)

def SQRT(num1):
    # NOTE(review): despite its name this returns num1 squared (num1 ** 2),
    # not the square root; kept as-is because later cells depend on it.
    return num1 ** 2

SQRT(1000)

# +
# Can also save as a variable due to return
# -

result = SQRT(8)
result

# +
# ex of creating a func. to check if number = prime (a common interview exercise)
# -

def is_num_prime(num):
    '''
    Naive primality check by trial division over 2..num-1.

    Prints whether ``num`` is prime; returns None.
    '''
    # FIX: 0 and 1 are not prime -- a bare loop would fall straight through
    # to the "prime" branch for them.
    if num < 2:
        print(num, 'is not a prime number')
        return
    for n in range(2, num):
        if num % n == 0:
            print(num, 'is not a prime number')
            break
    else:
        # for/else idiom: runs only when the loop found no divisor.
        # FIX: the original comment here claimed "If number is not prime"
        # while printing the opposite.
        print(num, 'is a prime number')

is_num_prime(53)
Functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: sg4 # language: python # name: sg4 # --- # # Node Representation Learning with attri2vec on Citeseer # This is the python implementation of the attri2vec algorithm outlined in paper ***[Attributed Network Embedding Via Subspace Discovery](https://arxiv.org/abs/1901.04095)*** <NAME>, <NAME>, <NAME> and <NAME>, arXiv:1901.04095, [cs.SI], 2019. The implementation uses the stellargraph libraries. # # # ## attri2vec # # attri2vec learns node representations by performing a linear/non-linear mapping on node content attributes. To make the learned node representations respect structural similarity, [`DeepWalk`](https://dl.acm.org/citation.cfm?id=2623732)/[`node2vec`](https://snap.stanford.edu/node2vec) learning mechanism is used to make nodes sharing similar random walk context nodes represented closely in the subspace, which is achieved by maximizing the occurrence probability of context nodes conditioned on the representation of the target nodes. The probability is modelled by Softmax and negative sampling is used to speed up its calculation. This makes attri2vec equivalent to predict whether a node occurs in the given target node's context in random walks with the representation of the target node, by minimizing the cross-entropy loss. # # In implementation, node embeddings are learnt by solving a simple classification task: given a large set of "positive" `(target, context)` node pairs generated from random walks performed on the graph (i.e., node pairs that co-occur within a certain context window in random walks), and an equally large set of "negative" node pairs that are randomly selected from the graph according to a certain distribution, learn a binary classifier that predicts whether arbitrary node pairs are likely to co-occur in a random walk performed on the graph. 
Through learning this simple binary node-pair-classification task, the model automatically learns an inductive mapping from attributes of nodes to node embeddings in a low-dimensional vector space, which preserves structural and feature similarities of the nodes. # # To train the attri2vec model, we first construct a training set of nodes, which is composed of an equal number of positive and negative `(target, context)` pairs from the graph. The positive `(target, context)` pairs are the node pairs co-occurring on random walks over the graph whereas the negative node pairs are the sampled randomly from the global node degree distribution of the graph. In attri2vec, each node is attached with two kinds of embeddings: 1) the inductive 'input embedding', i.e, the objective embedding, obtained by perform a linear/non-linear transformation on node content features, and 2) 'output embedding', i.e., the parameter vector used to predict its occurrence as a context node, obtained by looking up a parameter table. Given a `(target, context)` pair, attri2vec outputs a predictive value to indicate whether it is positive or negative, which is obtained by performing the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node, followed by a sigmoid activation. # # The entire model is trained end-to-end by minimizing the binary cross-entropy loss function with regards to predicted node pair labels and true node pair labels, using stochastic gradient descent (SGD) updates of the model parameters, with minibatches of 'training' node pairs generated on demand and fed into the model. 
# + import networkx as nx import pandas as pd import numpy as np import os import random import stellargraph as sg from stellargraph.data import UnsupervisedSampler from stellargraph.mapper import Attri2VecLinkGenerator, Attri2VecNodeGenerator from stellargraph.layer import Attri2Vec, link_classification from tensorflow import keras from pandas.core.indexes.base import Index import matplotlib.pyplot as plt from sklearn.manifold import TSNE from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegressionCV from sklearn.metrics import accuracy_score # - # ## Dataset # The dataset is the citation network Citeseer. # # It can be downloaded by clicking [here](https://linqs-data.soe.ucsc.edu/public/lbc/citesser.tgz) # # The following is the description of the dataset from the publisher, # > The CiteSeer dataset consists of 3312 scientific publications classified into one of six classes. The citation network consists of 4732 links. Each publication in the dataset is described by a 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary. The dictionary consists of 3703 unique words. The README file in the dataset provides more details. # # Download and unzip the citeseer.tgz file to a location on your computer. # # We assume that the dataset is stored in the directory # # `~/data/citeseer/` # # where the files `citeseer.cites` and `citeseer.content` can be located. # # We are going to load the data into a networkx object. data_dir = "~/data/citeseer" # Load edges in order 'cited-paper' <- 'citing-paper'. citeseer_location = os.path.expanduser(os.path.join(data_dir, "citeseer.cites")) g_nx = nx.read_edgelist(path=citeseer_location, create_using=nx.DiGraph()).reverse() # Convert the graph to undirected graph. g_nx = g_nx.to_undirected() # Load the node attribute data. 
citeseer_data_location = os.path.expanduser(os.path.join(data_dir, "citeseer.content")) attr_names = ["w_{}".format(ii) for ii in range(3703)] node_column_names = attr_names + ["subject"] node_attr = pd.read_csv(citeseer_data_location, sep='\t', header=None, names=node_column_names) # Change the type of the indexes of node_attr to str. node_attr.index = Index(list(map(str, list(node_attr.index)))) # The original graph contains some nodes with no attributes. We remove them here. g_nx = g_nx.subgraph(list(node_attr.index)) # Select the largest connected component. For clarity we ignore isolated nodes and subgraphs. g_nx_ccs = (g_nx.subgraph(c).copy() for c in nx.connected_components(g_nx)) g_nx = max(g_nx_ccs, key=len) print("Largest subgraph statistics: {} nodes, {} edges".format( g_nx.number_of_nodes(), g_nx.number_of_edges())) # Specify node and edge types. nx.set_node_attributes(g_nx, "paper", "label") nx.set_edge_attributes(g_nx, "cites", "label") # Get the ids of the nodes in the selected largest connected component. node_ids = sorted(list(g_nx.nodes)) # Get node features. node_features = node_attr[attr_names].reindex(node_ids) # Create the Stellargraph with node features. G = sg.StellarGraph(g_nx, node_features=node_features) print(G.info()) # ## Train attri2vec on Citeseer # Specify the other optional parameter values: root nodes, the number of walks to take per node, the length of each walk. nodes = list(G.nodes()) number_of_walks = 4 length = 5 # Create the UnsupervisedSampler instance with the relevant parameters passed to it. unsupervised_samples = UnsupervisedSampler(G, nodes=nodes, length=length, number_of_walks=number_of_walks) # Set the batch size and the number of epochs. batch_size = 50 epochs = 4 # Define an attri2vec generator, which generates batches of (target, context) nodes and labels for the node pair. 
generator = Attri2VecLinkGenerator(G, batch_size) # Building the model: a 1-hidden-layer node representation ('input embedding') of the `target` node and the parameter vector ('output embedding') for predicting the existence of `context node` for each `(target context)` pair, with a link classification layer performed on the dot product of the 'input embedding' of the `target` node and the 'output embedding' of the `context` node. # # Attri2Vec part of the model, with a 128-dimenssion hidden layer, no bias term and no normalization. (Normalization can be set to 'l2'). layer_sizes = [128] attri2vec = Attri2Vec( layer_sizes=layer_sizes, generator=generator, bias=False, normalize=None ) # Build the model and expose input and output sockets of attri2vec, for node pair inputs: x_inp, x_out = attri2vec.build() # Use the link_classification function to generate the prediction, with the 'ip' edge embedding generation method and the 'sigmoid' activation, which actually performs the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node followed by a sigmoid activation. prediction = link_classification( output_dim=1, output_act="sigmoid", edge_embedding_method='ip' )(x_out) # Stack the Attri2Vec encoder and prediction layer into a Keras model, and specify the loss. # + model = keras.Model(inputs=x_inp, outputs=prediction) model.compile( optimizer=keras.optimizers.Adam(lr=1e-3), loss=keras.losses.binary_crossentropy, metrics=[keras.metrics.binary_accuracy], ) # - # Train the model. history = model.fit_generator( generator.flow(unsupervised_samples), epochs=epochs, verbose=2, use_multiprocessing=False, workers=1, shuffle=True, ) # ## Visualise Node Embeddings # Build the node based model for predicting node representations from node content attributes with the learned parameters. Below a Keras model is constructed, with x_inp[0] as input and x_out[0] as output. 
Note that this model's weights are the same as those of the corresponding node encoder in the previously trained node pair classifier. x_inp_src = x_inp[0] x_out_src = x_out[0] embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src) # Get the node embeddings by applying the learned mapping function to node content features. node_gen = Attri2VecNodeGenerator(G, batch_size).flow(node_ids) node_embeddings = embedding_model.predict_generator(node_gen, workers=1, verbose=1) # Get node subjects. node_targets = [ node_attr["subject"][node_id] for node_id in node_ids ] # Transform the embeddings to 2d space for visualisation. # + transform = TSNE # PCA trans = transform(n_components=2) node_embeddings_2d = trans.fit_transform(node_embeddings) # + # draw the embedding points, coloring them by the target label (paper subject) alpha = 0.7 label_map = { l: i for i, l in enumerate(np.unique(node_targets)) } node_colours = [ label_map[target] for target in node_targets ] plt.figure(figsize=(7,7)) plt.axes().set(aspect="equal") plt.scatter(node_embeddings_2d[:,0], node_embeddings_2d[:,1], c=node_colours, cmap="jet", alpha=alpha) plt.title('{} visualization of node embeddings'.format(transform.__name__)) plt.show() # - # ## Node Classificaion Task # The embeddings learned by `attri2vec` can be used as feature vectors in downstream tasks, such as node classification and link prediction. # # In this example, we will use the `attri2vec` node embeddings to train a classifier to predict the subject of a paper in DBLP. # X will hold the 128-dimensional input features X = node_embeddings # y holds the corresponding target values y = np.array(node_targets) # ### Data Splitting # # We split the data into train and test sets. # # We use 10% of the data for training and the remaining 90% for testing as a hold out test set. 
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.1, test_size=None) print("Array shapes:\n X_train = {}\n y_train = {}\n X_test = {}\n y_test = {}".format(X_train.shape, y_train.shape, X_test.shape, y_test.shape)) # ### Classifier Training # # We train a Logistic Regression classifier on the training data. clf = LogisticRegressionCV( Cs=10, cv=10, scoring="accuracy", verbose=False, multi_class='ovr', max_iter=300 ) clf.fit(X_train, y_train) # Predict the hold-out test set. y_pred = clf.predict(X_test) # Calculate the accuracy of the classifier on the test set. accuracy_score(y_test, y_pred)
demos/node-classification/attri2vec/attri2vec-citeseer-node-classification-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Longest Palindromic Subsequence # # # This problem consists in finding a maximum-length contiguous substring of a given string ($X$) that is also a palindrome. # # # Recurring equation: # # $LPS[i \dots j] = \left\{\begin{matrix} # 1 & \text{if } i = j \\ # LPS[i+1 \dots j-1] + 2 & X[i] = X[j] \\ # \max \left ( LPS[i+1 \dots j], LPS[i \dots j-1] \right ) & \text{if } X[i] \ne X[j] # \end{matrix}\right.$ # # # ref. https://en.wikipedia.org/wiki/Longest_palindromic_substring # + def LPS_recursive(X: str, i: int, j: int): """ :param X: string :param i, j: position in the string """ if i > j: # base condition return 0 if i == j: # if string X has only one character, it is palindrome return 1 # if last character of the string is same as the first character if X[i] == X[j]: # includes first and last characters in palindrome # and recurs for the remaining substring X[i+1, j-1] return LPS_recursive(X, i + 1, j - 1) + 2 # if last character of string is different to the first character # 1. Remove last character and recur for the remaining substring X[i, j-1] # 2. Remove first character and recur for the remaining substring X[i+1, j] # return maximum of the two values return max(LPS_recursive(X, i, j - 1), LPS_recursive(X, i + 1, j)) def LPS(X:str): return LPS_recursive(X, 0, len(X) - 1) # - # answer 5 X = "ABBDCACB" print("The length of Longest Palindromic Subsequence is ", LPS(X))
Sequences/Longest Palindromic Subsequence.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from random import randint


# # Recursive algorithm

def max_substring_length_idx_recursive(s, idx):
    """Length of the longest subsequence of s in strict alphabetical order
    that terminates at the idx-th character.

    Recurrence:
        X[idx] = 1 + max{X[j] : j = 0..idx-1 such that s[j] < s[idx]}
        X[idx] = 1 if no such j exists

    Exponential — O(2^i) — because the same indices are re-solved on every
    branch (see the complexity note below).
    """
    # The [0] seed keeps max() well-defined when no smaller predecessor exists.
    return 1 + max(
        [0]
        + [
            max_substring_length_idx_recursive(s, j)
            for j in range(idx)
            if s[j] < s[idx]
        ]
    )


# ### Complexity
# O(max_len(S, i)) = O(max_len(S, i-1) + ... + max_len(S, 1)); unrolling the
# recursion i-1 times gives O(2^(i-1) * max_len(S, 1)) = O(2^i), with
# O(max_len(S, 1)) = O(1). (The original note spelled "length" as "lenght".)


def max_substring_length_recursive(s):
    """Length of the longest alphabetical-order subsequence of s (recursive).

    Appends a sentinel character greater than 'Z' so the subsequence ending
    at the sentinel is the overall optimum, then subtracts the sentinel's
    own contribution. Returns 0 for the empty string.
    """
    return max_substring_length_idx_recursive(s + chr(ord('Z') + 1), len(s)) - 1


# # Dynamic Programming algorithm

def max_substring_length_dynamic_programming(s):
    """Length of the longest alphabetical-order subsequence of s in O(n^2).

    BUG FIX: the original raised ValueError (max() of an empty sequence) on
    the empty string, while the recursive version returns 0; both now agree.
    """
    if not s:
        return 0
    max_lengths = []  # D.P. vector: i-th element is the pair (s[i], X[i])
    for ch in s:
        best_prev = 0  # best X[j] over predecessors with s[j] < ch
        for prev_ch, x_j in max_lengths:
            if prev_ch < ch and best_prev < x_j:
                best_prev = x_j
        max_lengths.append((ch, best_prev + 1))
    # Return the maximum X[i] in the D.P. vector.
    return max(max_lengths, key=lambda pair: pair[1])[1]


# ### Complexity
# The DP fills a vector of length n, scanning up to i earlier entries at
# step i: Theta(sum_i i) = Theta(n(n-1)/2) = Theta(n^2) — polynomial, versus
# the exponential plain recursion above.


# ## Utility functions
# Build short and long test strings with a known answer.

# +
def create_long_string(size=100):
    """Return the alphabet repeated `size` times; the longest alphabetical
    subsequence is always 26."""
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return alphabet * size


def create_short_string(max_substring_length, n_trash_char=1):
    """Build a string whose longest alphabetical subsequence has exactly
    `max_substring_length` characters, separated by 'Z' "trash" padding.

    Out-of-range inputs are clamped: n_trash_char >= 0 and
    1 <= max_substring_length <= 26.
    """
    if n_trash_char < 0:
        n_trash_char = 0
    if max_substring_length <= 0:
        max_substring_length = 1
    elif max_substring_length > 26:
        max_substring_length = 26

    s = ''
    idx_char = ord('A')
    for _ in range(max_substring_length - 1):
        s += chr(idx_char)
        s += 'Z' * n_trash_char
        idx_char += 1
    s += chr(idx_char)
    return s
# -


# ## Tests
# Wrapped in a __main__ guard so importing this module runs no side effects;
# executing it as a script (or as notebook cells) behaves as before.
if __name__ == '__main__':
    # ### Short strings
    n_test = 10
    print('Tests on short string')
    for _ in range(n_test):
        max_substr_len = randint(1, 26)
        S = create_short_string(max_substr_len)
        # BUG FIX: the original printed the recursive result twice and never
        # exercised the dynamic-programming implementation.
        print('Max Substring Length:', max_substr_len,
              '- Recursive Function Output:', max_substring_length_recursive(S),
              '- DP Function Output:', max_substring_length_dynamic_programming(S))

    # ### Long strings
    print('Tests on long string')
    S = create_long_string()
    print('String S maximum substring length: 26')
    print('Dynamic Programming Function Output:',
          max_substring_length_dynamic_programming(S))
    # Using the string S the recursive algorithm does not terminate in
    # reasonable time:
    # print('Recursive Function Output:', max_substring_length_recursive(S))
homework_3_algorithm_question.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: feos-pets
#     language: python
#     name: feos-pets
# ---

# # PeTS Equation of State - Binary Mixture (Pseudo Pure Fluid)
#
# Original publication of the _perturbation theory for truncated and shifted
# Lennard-Jones fluids_ (PeTS), Mol. Phys. 116, 2083 (2018);
# https://doi.org/10.1080/00268976.2018.1447153

import numpy as np
from feos_pets.dft import *
from feos_pets.si import *
from feos_pets import *
import matplotlib.pyplot as plt
import pandas as pd

# ## Specifying PeTS Parameters
# Both pseudo-components of the binary mixture get the same parameters below,
# so the "mixture" must reproduce the pure-fluid behaviour.
epsilon_k = 500.0 * KELVIN  # LJ energy parameter epsilon/k_B
sigma = 1.0 * ANGSTROM      # LJ size parameter

# ## Definition of Reference Data
#
# Molecular simulation reference data: Vrabec et al., Mol. Phys. 104, 1509
# (2006); https://doi.org/10.1080/00268970600556774
# Critical point reference data: Heier et al., Mol. Phys. 116, 2083 (2018);
# https://doi.org/10.1080/00268976.2018.1447153

# +
# Data from Vrabec et al. (2006); reduced VLE data.
# Columns: T*, p*, rho^L*, rho^V*, Delta^LV h*
data = np.array([
    [0.64, 0.00217, 0.8176, 0.00351, 5.7118],
    [0.67, 0.00335, 0.8024, 0.00525, 5.5910],
    [0.70, 0.00479, 0.7866, 0.00727, 5.4666],
    [0.73, 0.00697, 0.7704, 0.01036, 5.325],
    [0.76, 0.00944, 0.7538, 0.01374, 5.179],
    [0.79, 0.01241, 0.7361, 0.01776, 5.022],
    [0.82, 0.01640, 0.7181, 0.0233, 4.844],
    [0.85, 0.0214, 0.6986, 0.0303, 4.639],
    [0.88, 0.0274, 0.6784, 0.0392, 4.413],
    [0.91, 0.0336, 0.6556, 0.0483, 4.172],
    [0.94, 0.0417, 0.6309, 0.0616, 3.87],
    [0.97, 0.0504, 0.6032, 0.0763, 3.56],
    [1.00, 0.0606, 0.5712, 0.0960, 3.18],
    [1.03, 0.0730, 0.530, 0.127, 2.63],
    [1.06, 0.0855, 0.463, 0.168, 1.88]
])

# Reduced interfacial data; columns: T*, l*, N, rho^L*, rho^V*, D*, gamma*
data_st = np.array([
    [0.625, 11.85, 2759, 0.8244, 0.0024, 1.5658, 0.727],
    [0.650, 11.91, 2769, 0.8120, 0.0036, 1.6616, 0.675],
    [0.675, 11.97, 2769, 0.7994, 0.0051, 1.7494, 0.633],
    [0.700, 12.04, 2776, 0.7863, 0.0072, 1.8621, 0.581],
    [0.725, 12.11, 2782, 0.7729, 0.0095, 1.9844, 0.549],
    [0.750, 12.18, 2797, 0.7599, 0.0122, 2.1065, 0.493],
    [0.775, 12.26, 2852, 0.7452, 0.0153, 2.2517, 0.455],
    [0.800, 12.34, 2872, 0.7307, 0.0203, 2.4348, 0.403],
    [0.825, 12.43, 2888, 0.7156, 0.0244, 2.5695, 0.369],
    [0.850, 12.52, 2897, 0.6993, 0.0302, 2.7943, 0.322],
    [0.875, 12.63, 2961, 0.6821, 0.0368, 3.0027, 0.275],
    [0.900, 12.75, 2958, 0.6632, 0.0443, 3.3181, 0.239],
    [0.925, 12.87, 3065, 0.6437, 0.0508, 3.6935, 0.191],
    [0.950, 13.01, 3141, 0.6222, 0.0635, 4.0550, 0.156],
    [0.975, 13.18, 3315, 0.5987, 0.0793, 4.6938, 0.122],
    [1.000, 13.39, 3538, 0.5672, 0.0891, 5.4502, 0.081],
    [1.025, 13.66, 3597, 0.5265, 0.1064, 7.1065, 0.057],
    [1.050, 14.05, 4027, 0.4827, 0.1233, 8.1196, 0.030]
])

df = pd.DataFrame(data, columns=['T*', 'p*', 'rho^L*', 'rho^V*', 'Delta^LV h*'])
df_st = pd.DataFrame(data_st, columns=['T*', 'l*', 'N', 'rho^L*', 'rho^V*', 'D*', 'gamma*'])

# Critical point data extracted from Heier et al. (2018), figure 1; unclear origin
T_c = 1.0850094876660341
p_c = 0.10073800738007378
rho_c = 0.3194085027726432

# Critical point data extracted from Vrabec et al. (2018).
# p_c_vrabec evaluates their vapor-pressure correlation at T_c_vrabec.
T_c_vrabec = 1.0779
p_c_vrabec = np.exp(3.1664 - 5.9809 / T_c_vrabec + 0.01498 / T_c_vrabec**3)
rho_c_vrabec = 0.3190

# Critical point data extracted from Heier et al. (2018), figure 1;
# critical point of the original PeTS implementation.
T_c_pets_heier = 1.0884250474383301
p_c_pets_heier = 0.10184501845018448
rho_c_pets_heier = 0.3077634011090573
# -

# ## Definition of PeTS Equation of State
# Two identical components -> pseudo pure fluid.
pets = PetsFunctional(PetsParameters.from_lists(epsilon_k=[epsilon_k/KELVIN]*2, sigma=[sigma/ANGSTROM]*2))

# +
cp = State.critical_point_pure(eos=pets)

# cp appears to be indexable per component; both components are identical,
# so component 0 is representative — NOTE(review): confirm against feos_pets API.
T_c_pets = cp[0].temperature
p_c_pets = cp[0].pressure()
rho_c_pets = cp[0].density
T_c_pets_red = cp[0].temperature / epsilon_k  # reduced critical temperature
# -

# ## Phase Diagram of Pseudo Pure Fluid (Binary Mixture of the Same Component)

# +
# PhaseEquilibrium.bubble_point_tx() not converging to two phases close to
# critical point — hence the 0.99 * T_c* cap on the temperature grid.
temps = np.linspace(0.64, 0.99*T_c_pets_red, 101)

p_sat = np.zeros(temps.shape)
rho_sat = np.zeros([temps.shape[0], 2])

# March up in temperature, reusing the previous vapor pressure (scaled by
# 1.03) as the initial guess for the next bubble-point computation. The
# statement order in this loop matters: pressure_ic must be updated last.
pressure_ic = None
for i, temperature in enumerate(np.nditer(temps)):
    pe = PhaseEquilibrium.bubble_point(
        eos=pets,
        temperature_or_pressure=temperature * epsilon_k,
        liquid_molefracs=np.array([0.5, 0.5]),
        tp_init=pressure_ic,
        tol_inner=1e-7
    )
    # Store reduced pressure and reduced vapor/liquid densities.
    p_sat[i] = pe.liquid.pressure() / (epsilon_k * KB / sigma**3)
    rho_sat[i, 0] = pe.vapor.density * (NAV * sigma**3)
    rho_sat[i, 1] = pe.liquid.density * (NAV * sigma**3)
    pressure_ic = pe.vapor.pressure() * 1.03
# -

# Spot-check single state points (bare expressions -> notebook cell output).
rho_sat[82]

p_sat[60] * (epsilon_k * KB / sigma**3) / BAR

# +
# Left: vapor-pressure curve; right: saturated densities — each compared
# against simulation data and the various literature critical points.
f, ax = plt.subplots(1,2, figsize=(14,5))

ax[0].plot(temps, p_sat, color='tab:blue', label='this PeTS implementation')
ax[0].scatter(df['T*'], df['p*'], marker='s', color='tab:orange', label='simulation data Vrabec et al. (2006)')
ax[0].scatter(T_c_vrabec, p_c_vrabec, marker='o', color='tab:orange', label='critical point Vrabec et al. (2018)')
ax[0].scatter(T_c, p_c, marker='o', color='tab:red', label='critical point Heier et al. (2018); unclear origin')
ax[0].scatter(T_c_pets_heier, p_c_pets_heier, marker='o', color='tab:green', label='critical point PeTS Heier et al. (2018)')
ax[0].scatter(T_c_pets/epsilon_k, p_c_pets/(epsilon_k * KB / sigma**3), marker='x', color='tab:red', label='critical point this PeTS implementation')
ax[0].set_title('Vapor-Liquid Coexistence - Vapor Pressure')
ax[0].set_xlabel(r'$T* = \frac{T}{\frac{\epsilon}{k_\mathrm{B}}}$')
ax[0].set_ylabel(r'$p* = \frac{p}{\frac{\epsilon}{\sigma^3}}$')
ax[0].set_xlim(0.6, 1.2)
ax[0].set_ylim(0.0, 0.11)
ax[0].legend(loc='upper left')
ax[0].grid()

ax[1].plot(temps, rho_sat[:,0], color='tab:blue', label='this PeTS implementation')
ax[1].plot(temps, rho_sat[:,1], color='tab:blue')
ax[1].scatter(df['T*'], df['rho^L*'], marker='s', color='tab:orange', label='simulation data Vrabec et al. (2006)')
ax[1].scatter(df['T*'], df['rho^V*'], marker='s', color='tab:orange')
ax[1].scatter(T_c, rho_c, marker='o', color='tab:red', label='critical point Heier et al. (2018); unclear origin')
ax[1].scatter(T_c_vrabec, rho_c_vrabec, marker='o', color='tab:orange', label='critical point Vrabec et al. (2018)')
ax[1].scatter(T_c_pets_heier, rho_c_pets_heier, marker='o', color='tab:green', label='critical point PeTS Heier et al. (2018)')
ax[1].scatter(T_c_pets/epsilon_k, rho_c_pets*NAV*sigma**3, marker='x', color='tab:red', label='critical point this PeTS implementation')
ax[1].set_title('Vapor-Liquid Coexistence - Saturated Densities')
ax[1].set_xlabel(r'$T* = \frac{T}{\frac{\epsilon}{k_\mathrm{B}}}$')
ax[1].set_ylabel(r'$\rho* = \rho \sigma^3$')
ax[1].set_xlim(0.6, 1.1)
ax[1].set_ylim(0.0, 0.9)
ax[1].legend(loc='center left')
ax[1].grid()
# -

# ## Binary Phase Diagram - Pressure-Composition of Pseudo Pure Fluid
# (Binary Mixture of the Same Component)

dia_p = PhaseDiagram.binary_vle(eos=pets, temperature_or_pressure=1*epsilon_k)

# +
# Left: p-x diagram (both components overlap for the pseudo pure fluid);
# right: x-y diagram, which must collapse onto the diagonal.
f, ax = plt.subplots(1, 2, figsize=(20,5))

ax[0].scatter(dia_p.liquid.molefracs[:, 0], dia_p.liquid.pressure/(epsilon_k * KB / sigma**3), color='tab:red', marker='s')
ax[0].scatter(dia_p.liquid.molefracs[:, 1], dia_p.liquid.pressure/(epsilon_k * KB / sigma**3), color='tab:blue', marker='x')
ax[0].set_xlim(0, 1)
ax[0].set_ylim(0, 0.15)
ax[0].set_xlabel(r'$x_1, x_2$')
ax[0].set_ylabel(r'$p* = \frac{p}{\frac{\epsilon}{\sigma^3}}$')

ax[1].plot([0, 1], [0, 1], color='black')
ax[1].scatter(dia_p.liquid.molefracs[:, 0], dia_p.vapor.molefracs[:, 0], color='tab:orange', marker='s')
ax[1].set_xlim(0, 1)
ax[1].set_ylim(0, 1)
ax[1].set_xlabel(r'$x_1$')
ax[1].set_ylabel(r'$y_1$');
# -

# # PeTS Density Functional Theory - Binary Mixture (Pseudo Pure Fluid)

# Bulk VLE at T = 0.9 epsilon/k_B used as boundary condition for the
# planar-interface DFT calculation below.
vle = PhaseEquilibrium.bubble_point(
    eos=pets,
    temperature_or_pressure=0.9*epsilon_k,
    liquid_molefracs=np.array([0.5, 0.5]),
    tp_init=2211*BAR
)

cp = State.critical_point(eos=pets, moles=np.array([0.5, 0.5])*MOL)

# Picard iteration followed by Anderson mixing for the DFT profile solver.
solver = DFTSolver().picard_iteration().anderson_mixing(tol=1e-13, max_iter=100)

# Initialize the interface profile with a tanh shape and solve.
dft = PlanarInterface.from_tanh(vle=vle, n_grid=2048, l_grid=50*sigma, critical_temperature=cp.temperature).solve(solver)

# +
# Equilibrium density profiles; both partial densities coincide for the
# pseudo pure fluid. Right panel zooms into the interface region.
f, ax = plt.subplots(1,2, figsize=(15,5))

ax[0].plot(dft.z/sigma, (dft.density*NAV*sigma**3)[0], color='tab:blue', label=r'$\rho_1$'+', this PeTS implementation')
ax[0].plot(dft.z/sigma, (dft.density*NAV*sigma**3)[1], '--', color='tab:red', label=r'$\rho_2$'+', this PeTS implementation')
ax[0].plot(dft.z/sigma, (dft.density*NAV*sigma**3)[0] + (dft.density*NAV*sigma**3)[1], color='tab:orange', label=r'$\rho_1 + \rho_2$'+', this PeTS implementation')
ax[0].set_title('Equilibrium Density Profile for ' + r'$T = 0.9 \frac{\epsilon}{k_\mathrm{B}}$')
ax[0].set_xlabel(r'$\frac{z}{\sigma}$')
ax[0].set_ylabel(r'$\rho* = \rho \sigma^3$')
ax[0].set_xlim(0, 50)
ax[0].set_ylim(0, 0.8)
ax[0].legend(loc='upper right')
ax[0].grid()

ax[1].plot(dft.z/sigma, (dft.density*NAV*sigma**3)[0], color='tab:blue', label=r'$\rho_1$'+', this PeTS implementation')
ax[1].plot(dft.z/sigma, (dft.density*NAV*sigma**3)[1], '--', color='tab:red', label=r'$\rho_2$'+', this PeTS implementation')
ax[1].plot(dft.z/sigma, (dft.density*NAV*sigma**3)[0] + (dft.density*NAV*sigma**3)[1], color='tab:orange', label=r'$\rho_1 + \rho_2$'+', this PeTS implementation')
ax[1].set_title('Equilibrium Density Profile for ' + r'$T = 0.9 \frac{\epsilon}{k_\mathrm{B}}$' + ' (Zoom)')
ax[1].set_xlabel(r'$\frac{z}{\sigma}$')
ax[1].set_ylabel(r'$\rho* = \rho \sigma^3$')
ax[1].set_xlim(20, 30)
ax[1].set_ylim(0, 0.8)
ax[1].legend(loc='upper right')
ax[1].grid()
# -
examples/Pets_dft_binary_caseI.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jmren168/PdM/blob/master/NRF06_SVM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="eY2VNnL8aHrw" colab_type="code"
# Train an RBF-kernel SVM on the scikit-learn handwritten-digit data set and
# report in-sample and out-of-sample accuracy.

from sklearn.datasets import load_digits  # digit dataset

# load dataset
digits = load_digits()

# features (scaled into [0, 1]) and target
X = digits.data / digits.data.max()
y = digits.target

from sklearn.svm import SVC  # SVM for classification
from sklearn.model_selection import train_test_split  # split train/test data

# split dataset into training and test sets (random 2:1 split)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0)

# fit data
# C: penalty for misclassified samples; kernel: feature-space transformation
clf = SVC(C=1.0, kernel='rbf')
clf.fit(X_train, y_train)

# outside (hold-out) prediction
y_pred_test = clf.predict(X_test)

# inside (training-set) prediction
y_pred_train = clf.predict(X_train)

# calculate performance.
# Accuracy as the mean of a boolean comparison — the original expressed
# equality as sum((y_test - y_pred_test) == 0), which relies on integer
# subtraction; the boolean mean is the idiomatic, type-safe equivalent.
acc_outside = (y_test == y_pred_test).mean() * 100
acc_inside = (y_train == y_pred_train).mean() * 100

print('SVM')
print("inside accuracy: {:.2f}%".format(acc_inside))
print("outside accuracy: {:.2f}%".format(acc_outside))
NRF06_SVM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# # Dog Breed Classifier using TensorFlow and Keras
#
# (Comments translated from Portuguese.) Supervised image classification:
# the model receives dog images and predicts which of 120 breed classes each
# one belongs to.
#
# ## Data
# From the Kaggle "Dog Breed Identification" competition
# (https://www.kaggle.com/c/dog-breed-identification): ~10k labelled images,
# 120 classes. A Kaggle account is required to download them.
#
# ## Model
# InceptionV3 (https://arxiv.org/abs/1512.00567) as implemented in Keras
# (https://keras.io/applications/#inceptionv3).
#
# ### Notes
# 1. Training InceptionV3 needs serious compute; this notebook was run on
#    Kaggle kernels (with GPU support), not on a local machine.
# 2. The cell outputs are therefore not stored here; see the published
#    Kaggle kernel (https://www.kaggle.com/igorslima/inception).

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt

# Fixed seed for reproducibility of the train/validation split and augmentation.
np.random.seed(0)

# Kaggle mounts the competition data here — TODO confirm when running elsewhere.
input_folder = '/kaggle/input'

# read input: training labels and the sample-submission file (used for test ids)
df_train = pd.read_csv(input_folder+'/labels.csv')
df_test = pd.read_csv(input_folder+'/sample_submission.csv')

# Bar chart of images per breed in the training set (title kept in Portuguese
# because it is a runtime string, not a comment).
df_train.breed.value_counts().plot(kind='bar', figsize=(15,15), title="Quantidade de imagens por raça no treino");

df_train.head()

df_test.head()

# + [markdown] _uuid="7d0e60963df94f6c46ed02468bfad4ac9678a9a1"
# ## One-hot encoding of the labels
# See e.g. https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f

# + _uuid="0e3fb9189bc51fdc317fab64c191b5854aa58370"
targets_series = pd.Series(df_train['breed'])
one_hot = pd.get_dummies(targets_series, sparse = True)

# one_hot_labels[i] is the one-hot row vector for training image i
# (row order matches df_train).
one_hot_labels = np.asarray(one_hot)

# Images are resized to im_size x im_size (224 is a common Inception input
# size — TODO confirm against the chosen input_shape below, which reuses it).
im_size = 224

# + [markdown] _uuid="13dcdd5e58d1bbf690ad5d03b2cd10b06fc137ba"
# ## Reading the images
# Load the images from disk into memory (lists of resized arrays).

from tqdm import tqdm  # progress bars for the loops below
import cv2  # computer-vision library (image read/resize)

x_train = []
y_train = []
x_test = []

# Read every training image, resize it, and collect its one-hot label.
# NOTE(review): cv2.imread returns None for a missing/corrupt file, which
# would make cv2.resize raise — assumes all listed files exist.
i = 0
for f, breed in tqdm(df_train.values):
    img = cv2.imread(input_folder+'/train/{}.jpg'.format(f))
    x_train.append(cv2.resize(img, (im_size, im_size)))
    label = one_hot_labels[i]
    y_train.append(label)
    i += 1

del df_train  # drop the DataFrame to reduce memory consumption

for f in tqdm(df_test['id'].values):
    img = cv2.imread(input_folder+'/test/{}.jpg'.format(f))
    x_test.append(cv2.resize(img, (im_size, im_size)))

# + [markdown] _uuid="ab1ed63653409cbb17f079312858827753939815"
# ## Splitting the dataset
# Usual split: training (fit the model), validation (tune hyper-parameters);
# Kaggle's hidden test set provides the final metric.

from sklearn.model_selection import train_test_split  # train/validation split

num_class = 120  # number of breed classes

X_train, X_valid, Y_train, Y_valid = train_test_split(x_train, y_train, shuffle=True, test_size=0.2, random_state=1)

# + [markdown] _uuid="3b89b322fe04cd8a6aae2017d1920d4e7e9d7c55"
# ## Data augmentation
# Few images per class, so augmentation (shifts, zoom, rotation, horizontal
# flip) generates additional variants and reduces overfitting — a flipped
# dog is still a dog. See https://keras.io/preprocessing/image/ and
# https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html

from keras.preprocessing.image import ImageDataGenerator  # augmentation

# Augmentation parameters; vertical flips are disabled on purpose
# (upside-down dogs are not representative).
datagen = ImageDataGenerator(width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2, rotation_range=30, vertical_flip=False, horizontal_flip=True)

train_generator = datagen.flow(np.array(X_train), np.array(Y_train), batch_size=32)
valid_generator = datagen.flow(np.array(X_valid), np.array(Y_valid), batch_size=32)

# + [markdown] _uuid="12d0be41e87bb62d28813e408509e587536de72f"
# ## Building the Inception model
# InceptionV3 with ImageNet pre-trained weights, topped with dropout and an
# L1/L2-regularized softmax classification head for the 120 classes.

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, Dropout, Flatten
from keras import regularizers
from keras.models import Model

base_model = InceptionV3(weights="imagenet",include_top=False, input_shape=(im_size, im_size, 3))

# Dropout on the backbone output to fight overfitting.
dropout = base_model.output
dropout = Dropout(0.5)(dropout)
model_with_dropout = Model(inputs=base_model.input, outputs=dropout)

# Flatten + regularized softmax head over the breed classes.
x = model_with_dropout.output
x = Flatten()(x)
predictions = Dense(num_class, activation='softmax', kernel_regularizer=regularizers.l2(0.0015), activity_regularizer=regularizers.l1(0.0015))(x)

my_model = Model(inputs=model_with_dropout.input, outputs=predictions)
my_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

# + [markdown] _uuid="01d72b62414ca1690bff9d179c57c8375375cbc6"
# ## Training the model

# Run training. NOTE(review): steps_per_epoch/validation_steps are floats
# (len/18); Keras appears to accept this, but an integer (e.g. ceil) would
# be the conventional value — confirm before changing.
my_model.fit_generator(
    train_generator,
    epochs=10,
    steps_per_epoch=len(X_train) / 18,
    validation_data=valid_generator,
    validation_steps=len(X_valid) / 18)

# Save the weights so they can be reloaded later without retraining.
my_model.save_weights('first_try.h5')

# + [markdown] _uuid="ce948f90bb140680168430850b4b910f5097f924"
# ## Making predictions

# Predict breed probabilities for the test images and write the Kaggle
# submission file (one probability column per breed, plus the image id).
preds = my_model.predict(np.array(x_test), verbose=1)

sub = pd.DataFrame(preds)
col_names = one_hot.columns.values
sub.columns = col_names
sub.insert(0, 'id', df_test['id'])
sub.head(5)

sub.to_csv("submission.csv")
# -
classificador-racas-de-cachorros/inceptionv3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PyBaMM development (env) # language: python # name: pybamm-dev # --- # # Thermal models # # There are a number of thermal submodels available in PyBaMM. In this notebook we give details of each of the models, and highlight any relevant parameters. At present PyBaMM includes a lumped thermal model, a 1D thermal model which accounts for the through-cell variation in temperature, and a 2D pouch cell model which assumed the temperature is uniform through the thickness of the pouch, but accounts for variations in temperature in the remaining two dimensions. Here we give the governing equations for each model. # # A more comprehensive review of the pouch cell models can be found in [[4]](#References), [[6]](#References). # %pip install pybamm -q # install PyBaMM if it is not installed import pybamm # ## Lumped models # # The lumped thermal model solves the following ordinary differential equation for the average temperature, given here in dimensional terms, # # $$\rho_{eff} \frac{\partial T}{\partial t} = \bar{Q} - \frac{hA}{V}(T-T_{\infty}),$$ # # where $\rho_{eff}$ is effective volumetric heat capacity, $T$ is the temperature, $t$ is time, $\bar{Q}$ is the averaged heat source term, $h$ is the heat transfer coefficient, $A$ is the surface area (available for cooling), $V$ is the cell volume, and $T_{\infty}$ is the ambient temperature. An initial temperature $T_0$ must be prescribed. # # # The effective volumetric heat capacity is computed as # # $$ \rho_{eff} = \frac{\sum_k \rho_k c_{p,k} L_k}{\sum_k L_k},$$ # # where $\rho_k$ is the density, $c_{p,k}$ is the specific heat, and $L_k$ is the thickness of each component. 
The subscript $k \in \{cn, n, s, p, cp\}$ is used to refer to the components negative current collector, negative electrode, separator, positive electrode, and positive current collector. # # The heat source term accounts for Ohmic heating $Q_{Ohm,k}$ due to resistance in the solid and electrolyte, irreverisble heating due to electrochemical reactions $Q_{rxn,k}$, and reversible heating due to entropic changes in the the electrode $Q_{rev,k}$: # # $$ Q = Q_{Ohm,k}+Q_{rxn,k}+Q_{rev,k}, $$ # # with # $$ Q_{Ohm,k} = -i_k \nabla \phi_k, \quad Q_{rxn,k} = a_k j_k \eta_k, \quad Q_{rev,k} = a_k j_k T_k \frac{\partial U}{\partial T} \bigg|_{T=T_{\infty}}.$$ # Here $i_k$ is the current, $\phi_k$ the potential, $a_k$ the surface area to volume ratio, $j_k$ the interfacial current density, $\eta_k$ the overpotential, and $U$ the open circuit potential. The averaged heat source term $\bar{Q}$ is computed by taking the volume-average of $Q$. # There are two lumped thermal options in PyBaMM: "lumped" and "x-lumped". Both models solve the same equation, but the term corresponding to heat loss is computed differently. # ### "x-lumped" option # The "x-lumped" model assumes a pouch cell geometry in order to compute the overall heat transfer coefficient $h$, surface area $A$, and volume $V$. This model allows the user to select a different heat transfer coefficient on different surfaces of the cell. PyBaMM then automatically computes the overall heat transfer coefficient (see [[1]](#ref), [[2]](#ref2) for details). 
The parameters used to set the heat transfer coefficients are: # # "Negative current collector surface heat transfer coefficient [W.m-2.K-1]" # "Positive current collector surface heat transfer coefficient [W.m-2.K-1]" # "Negative tab heat transfer coefficient [W.m-2.K-1]" # "Positive tab heat transfer coefficient [W.m-2.K-1]" # "Edge heat transfer coefficient [W.m-2.K-1]" # # and correspond to heat transfer at the large surface of the pouch on the side of the negative current collector, heat transfer at the large surface of the pouch on the side of the positive current collector, heat transfer at the negative tab, heat transfer at the positive tab, and heat transfer at the remaining surfaces. # # The "x-lumped" option can be selected as follows options = {"thermal": "x-lumped"} model = pybamm.lithium_ion.DFN(options) # ### "lumped" option # The behaviour of the "lumped" option changes depending on the "cell geometry" option. By default the "lumped" option sets the "cell geometry" option to "arbitrary". This option allows a 1D electrochemical model to be solved, coupled to a lumped thermal model that can be used to model any arbitrary geometry. The user may specify the total heat transfer coefficient $h$, surface area for cooling $A$, and cell volume $V$. The relevant parameters are: # # "Total heat transfer coefficient [W.m-2.K-1]" # "Cell cooling surface area [m2]" # "Cell volume [m3]" # # which correspond directly to the parameters $h$, $A$ and $V$ in the governing equation. # # However, if the "cell geometry" is set to "pouch" the heat transfer coefficient, cell cooling surface area and cell volume are automatically computed to correspond to a pouch. In this instance the "lumped" option is equivalent to the "x-lumped" option.
# # The lumped thermal option with an arbitrary geometry can be selected as follows # + # lumped with no geometry option defaults to an arbitrary geometry options = {"thermal": "lumped"} model = pybamm.lithium_ion.DFN(options) # OR # lumped with arbitrary geometry specified options = {"cell geometry": "arbitrary", "thermal": "lumped"} model = pybamm.lithium_ion.DFN(options) # - # The lumped thermal option with a pouch cell geometry can be selected as follows # lumped with pouch cell geometry (equivalent to choosing "x-lumped" thermal option) options = {"cell geometry": "pouch", "thermal": "lumped"} model = pybamm.lithium_ion.DFN(options) # ## 1D (through-cell) model # # The 1D model solves for $T(x,t)$, capturing variations through the thickness of the cell, but ignoring variations in the other dimensions. The temperature is found as the solution of a partial differential equation, given here in dimensional terms # # $$\rho_k c_{p,k} \frac{\partial T}{\partial t} = \lambda_k \nabla^2 T + Q(x,t)$$ # # with boundary conditions # # $$ -\lambda_{cn} \frac{\partial T}{\partial x}\bigg|_{x=0} = h_{cn}(T_{\infty} - T) \quad -\lambda_{cp} \frac{\partial T}{\partial x}\bigg|_{x=1} = h_{cp}(T-T_{\infty}),$$ # # and initial condition # # $$ T\big|_{t=0} = T_0.$$ # # Here $\lambda_k$ is the thermal conductivity of component $k$, and the heat transfer coefficients $h_{cn}$ and $h_{cp}$ correspond to heat transfer at the large surface of the pouch on the side of the negative current collector, heat transfer at the large surface of the pouch on the side of the positive current collector, respectively. The heat source term $Q$ is as described in the section on lumped models. Note: the 1D model neglects any cooling from the tabs or edges of the cell -- it assumes a pouch cell geometry and _only_ accounts for cooling from the two large surfaces of the pouch. 
# # The 1D model is termed "x-full" (since it fully accounts for variation in the x direction) and can be selected as follows options = {"thermal": "x-full"} model = pybamm.lithium_ion.DFN(options) # # Pouch cell models # # The pouch cell thermal models ignore any variation in temperature through the thickness of the cell (x direction), and solve for $T(y,z,t)$. The temperature is found as the solution of a partial differential equation, given here in dimensional terms, # # $$\rho_{eff} \frac{\partial T}{\partial t} = \lambda_{eff} \nabla_\perp^2T + \bar{Q} - \frac{(h_{cn}+h_{cp})A}{V}(T-T_{\infty}),$$ # # along with boundary conditions # # $$ -\lambda_{eff} \nabla_\perp T \cdot \boldsymbol{n} = \frac{L_{cn}h_{cn} + (L_n+L_s+L_p+L_{cp})h_{edge}}{L_{cn}+L_n+L_s+L_p+L_{cp}}(T-T_\infty),$$ # at the negative tab, # $$ -\lambda_{eff} \nabla_\perp T \cdot \boldsymbol{n} = \frac{(L_{cn}+L_n+L_s+L_p)h_{edge}+L_{cp}h_{cp}}{L_{cn}+L_n+L_s+L_p+L_{cp}}(T-T_\infty),$$ # at the positive tab, and # $$ -\lambda_{eff} \nabla_\perp T \cdot \boldsymbol{n} = h_{edge}(T-T_\infty),$$ # elsewhere. Again, an initial temperature $T_0$ must be prescribed. # # Here the heat source term is averaged in the x direction so that $\bar{Q}=\bar{Q}(y,z)$. The parameter $\lambda_{eff}$ is the effective thermal conductivity, computed as # # $$ \lambda_{eff} = \frac{\sum_k \lambda_k L_k}{\sum_k L_k}.$$ # # The heat transfer coefficients $h_{cn}$, $h_{cp}$ and $h_{egde}$ correspond to heat transfer at the large surface of the pouch on the side of the negative current collector, heat transfer at the large surface of the pouch on the side of the positive current collector, and heat transfer at the remaining, respectively. 
# # As with the "x-lumped" option, the relevant heat transfer parameters are: # "Negative current collector surface heat transfer coefficient [W.m-2.K-1]" # "Positive current collector surface heat transfer coefficient [W.m-2.K-1]" # "Negative tab heat transfer coefficient [W.m-2.K-1]" # "Positive tab heat transfer coefficient [W.m-2.K-1]" # "Edge heat transfer coefficient [W.m-2.K-1]" # # Model comparison # Here we compare the "lumped" thermal model for a pouch cell geometry and an arbitrary geometry. We first set up our models, passing the relevant options pouch_model = pybamm.lithium_ion.DFN( options={"cell geometry": "pouch", "thermal": "lumped"} ) arbitrary_model = pybamm.lithium_ion.DFN( options={"cell geometry": "arbitrary", "thermal": "lumped"} ) # We then pick our parameter set parameter_values = pybamm.ParameterValues(chemistry=pybamm.parameter_sets.Marquis2019) # and look at the various parameters related to heat transfer # + params = [ "Negative current collector surface heat transfer coefficient [W.m-2.K-1]", "Positive current collector surface heat transfer coefficient [W.m-2.K-1]", "Negative tab heat transfer coefficient [W.m-2.K-1]", "Positive tab heat transfer coefficient [W.m-2.K-1]", "Edge heat transfer coefficient [W.m-2.K-1]", "Total heat transfer coefficient [W.m-2.K-1]", ] for param in params: print(param + ": {}".format(parameter_values[param])) # - # We see that the default parameters used for the pouch cell geometry assume that the large surfaces of the pouch are insulated (no heat transfer) and that most of the cooling is via the tabs. 
# # We can also look at the parameters related to the geometry of the pouch # + L_cn = parameter_values["Negative current collector thickness [m]"] L_n = parameter_values["Negative electrode thickness [m]"] L_s = parameter_values["Separator thickness [m]"] L_p = parameter_values["Positive electrode thickness [m]"] L_cp = parameter_values["Positive current collector thickness [m]"] L_y = parameter_values["Electrode width [m]"] L_z = parameter_values["Electrode height [m]"] # total thickness L = L_cn + L_n + L_s + L_p + L_cp # compute surface area A = 2 * (L_y * L_z + L * L_y + L * L_z) print("Surface area [m2]: {}".format(A)) # compute volume V = L * L_y *L_z print("Volume [m3]: {}".format(V)) # - # and the parameters related to the surface are for cooling and cell volume for the arbitrary geometry # + params = ["Cell cooling surface area [m2]", "Cell volume [m3]"] for param in params: print(param + ": {}".format(parameter_values[param])) # - # We see that both models assume the same cell volume, and the arbitrary model assumes that cooling occurs uniformly from all surfaces of the pouch. # # Let's run simulations with both options and compare the results. For demonstration purposes we'll increase the current to amplify the thermal effects # + # update current to correspond to a C-rate of 3 (i.e. 
3 times the nominal cell capacity) parameter_values["Current function [A]"] = 3 * parameter_values["Nominal cell capacity [A.h]"] # pick solver solver = pybamm.CasadiSolver(mode="fast", atol=1e-3) # create simulations in a list sims = [ pybamm.Simulation(pouch_model, parameter_values=parameter_values, solver=solver), pybamm.Simulation(arbitrary_model, parameter_values=parameter_values, solver=solver) ] # loop over the list to solve for sim in sims: sim.solve([0, 1000]) # plot the results pybamm.dynamic_plot( sims, [ "Volume-averaged cell temperature [K]", "Volume-averaged total heating [W.m-3]", "Current [A]", "Terminal voltage [V]" ], labels=["pouch", "arbitrary"], ) # - # We see that the lumped model with an arbitrary geometry is cooled much more (it is cooled uniformly from all surfaces, compared to the pouch that is only cooled via the tabs and outside edges), which results in the temperature barely changing throughout the simulation. In comparison, the model with the pouch cell geometry is only cooled from a small portion of the overall cell surface, leading to an increase in temperature of around 20 degrees. # ## References # # The relevant papers for this notebook are: pybamm.print_citations()
examples/notebooks/models/thermal-models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Multi-layer FNN on MNIST # # This is MLP (784-X^W-10) on MNIST. SGD algorithm (lr=0.1) with 100 epoches. # # # + import os, sys import numpy as np from matplotlib.pyplot import * import locale locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') import matplotlib.cm as cm import matplotlib.pyplot as plt import matplotlib.font_manager as font_manager import seaborn as sns import itertools # %matplotlib inline # + """ Extract final stats from resman's diary file""" def extract_num(lines0): valid_loss_str = lines0[-5] valid_accuracy_str = lines0[-6] train_loss_str = lines0[-8] train_accuracy_str = lines0[-9] run_time_str = lines0[-10] valid_loss = float(valid_loss_str.split( )[-1]) valid_accuracy = float(valid_accuracy_str.split( )[-1]) train_loss = float(train_loss_str.split( )[-1]) train_accuracy = float(train_accuracy_str.split( )[-1]) run_time = float(run_time_str.split( )[-1]) return valid_loss, valid_accuracy, train_loss, train_accuracy, run_time """ Extract number of total parameters for each net config from resman's diary file""" def parse_num_params(line0): line_str = ''.join(lines0) idx = line_str.find("Total params") param_str = line_str[idx+14:idx+14+20] # 14 is the length of string "Total params: " param_num = param_str.split("\n")[0] return int(locale.atof(param_num)) # - """ Extract accuracy at each epoch for each net config from resman's diary file""" def parse_acc(lines0): line_str = ''.join(lines0) # print lines0 idx = line_str.find("epochs") print idx val_acc_str = line_str[idx+14:idx+14+20] # 14 is the length of string "Total params: " param_num = param_str.split("\n")[0] return int(run_time_str) # #### Extract results from diary file # # 1. Number of params # 2. Loss/Accuarcy for training/testing # 3. 
Runing time def read_results(results_dir): depth = [0,1,2,3,4,5,6,7,8,9,10] width = [25,50] dim = [0,50,200,500,1000] #[1500,2000]# ########## 1. filename list of diary ######################## diary_names = [] for subdir, dirs, files in os.walk(results_dir): for file in files: if file == 'diary': fname = os.path.join(subdir, file) diary_names.append(fname) # print diary_names ########## 2. Construct stats (width, depth, dim) ########## # acc_test_all : Tensor (width, depth, dim) # num_param_all: Tensor (width, depth) # acc_solved_all: Tensor (width, depth) # dim_solved_all: Tensor (width, depth) ############################################################ nw, nd, nn= len(width), len(depth), len(dim) acc_test_all = np.zeros((len(width), len(depth), len(dim))) nll_test_all = np.zeros((len(width), len(depth), len(dim))) mode = 1 # {0: test loss, 1: test acc} error_files = [] # record the error file # 2.1 construct acc_test_all and num_param_all for id_w in range(len(width)): w = width[id_w] for id_ll in range(len(depth)): ll = depth[id_ll] for id_d in range(len(dim)): d = dim[id_d] # 2.1.1 Read the results, for f in diary_names: if '_'+str(d)+'_'+str(ll)+'_'+str(w)+'/' in f: # print "%d is in" % d + f with open(f,'r') as ff: lines0 = ff.readlines() try: R = extract_num(lines0) R = np.array(R) acc_test_all[id_w,id_ll,id_d]=R[1] nll_test_all[id_w,id_ll,id_d]=R[0] except ValueError: error_files.append((w,ll,d)) R = np.zeros(len(R)) print "Error. Can not read config: depth %d, width %d and dim %d." 
% (ll, w, d) # break return acc_test_all, nll_test_all # + # collect the results from given folders depth = [0,1,2,3,4,5,6,7,8,9,10] width = [25,50] dim = [0,50,200,500,1000] #[1500,2000]# rep = 3 acc_test_all = np.zeros((len(width), len(depth), len(dim), rep)) nll_test_all = np.zeros((len(width), len(depth), len(dim), rep)) for j in range(rep): results_dir = '../results/fnn_mnist_l2_depth_run' + str(j+1) print results_dir acc_test, nll_test = read_results(results_dir) # print acc_test acc_test_all[:,:,:,j] = acc_test nll_test_all[:,:,:,j] = nll_test acc_test_all[1,6,0,1] = 0.1135 nll_test_all[1,6,0,1] = 2.30402 # + # check the results print acc_test_all.shape print nll_test_all.shape print "Baseline results" print nll_test_all[:,:,0] print "Dim %d results" % dim[-1] print nll_test_all[:,:,2,:] # - # ------------------------- # ### Testing Accuracy wrt. Width, Depth and Dim # #### Testing Accuracy of Intrinsic dim for #parameters # + acc_mean = acc_test_all.mean(axis=(0,3)) acc_std = acc_test_all.std(axis=(0,3)) nll_mean = nll_test_all.mean(axis=(0,3)) nll_std = nll_test_all.std(axis=(0,3)) print acc_mean size_M = (10,5) font = {'family' : 'normal', 'weight' : 'normal', 'size' : 16} matplotlib.rc('font', **font) nn = len(depth) stride = max( int(nn / 10), 1) stride2 = max( int(nn / 5), 1) # 2) Testing Accuracy: Acc fig = plt.figure(figsize=(6,4)) for d in range(5): if d==0: label_name = "direct" plot(depth, acc_mean[:,d], 'o-', color='k', lw=2, markevery=stride) fill_between(depth, acc_mean[:,d]+acc_std[:,d], acc_mean[:,d]-acc_std[:,d], color='k', alpha=0.3, label=label_name) else: label_name = "$d=$" + str(dim[d]) plot(depth, acc_mean[:,d], 'o-', lw=2, markevery=stride) fill_between(depth, acc_mean[:,d]+acc_std[:,d], acc_mean[:,d]-acc_std[:,d], alpha=0.3, label=label_name) plt.xlabel('Depth') plt.ylabel('Testing Accuracy') # ax.set_title('width %d, depth %d' %(width[i], depth[j])) plt.grid() legend(loc='best', fancybox=True, framealpha=0.5) plt.xlim([0,10]) 
plt.ylim([0,1.0]) fig.savefig("figs/acc_fnn_mnist_depth.pdf", bbox_inches='tight') plt.show() # 3) Testing Loss: NLL fig = plt.figure(figsize=(6,4)) for d in range(5): if d==0: label_name = "direct" plot(depth, nll_mean[:,d], 'o-', color='k', lw=2, markevery=stride) fill_between(depth, nll_mean[:,d]+nll_std[:,d], nll_mean[:,d]-nll_std[:,d], color='k', alpha=0.3, label=label_name) else: label_name = "$d=$" + str(dim[d]) plot(depth, nll_mean[:,d], 'o-', lw=2, markevery=stride) fill_between(depth, nll_mean[:,d]+nll_std[:,d], nll_mean[:,d]-nll_std[:,d], alpha=0.3, label=label_name) plt.xlabel('Depth') plt.ylabel('Testing NLL') # ax.set_title('width %d, depth %d' %(width[i], depth[j])) plt.grid() legend(loc='best', fancybox=True, framealpha=0.5) plt.xlim([0,10]) plt.ylim([0,2.50]) fig.savefig("figs/nll_fnn_mnist_depth.pdf", bbox_inches='tight') plt.show() # - # #### Intrinsic dim for #parameters # ## Performance comparison with Baseline
intrinsic_dim/plots/more/fnn_mnist_l2_depth.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 存在重复元素
#
# * Slug: contains-duplicate
#
# * Date: 2018-06-25
#
# * Category: LeetCode
#
# * Tags: 数组, 算法
#
# * Author: timking
#
# * Summary: LeetCode - 探索 - 初级算法

# > [原文链接](https://leetcode-cn.com/problems/contains-duplicate/description/)

# 给定一个整数数组,判断是否存在重复元素。
#
# 如果任何值在数组中出现至少两次,函数返回 true。如果数组中每个元素都不相同,则返回 false。
#
# **示例 1:**
#
# ```
# 输入: [1,2,3,1]
# 输出: true
# ```
#
# **示例 2:**
#
# ```
# 输入: [1,2,3,4]
# 输出: false
# ```
#
# **示例 3:**
#
# ```
# 输入: [1,1,1,3,3,4,3,2,4,2]
# 输出: true
# ```

# ## 初步解答
#
# 这道题比较简单。直接利用字典的特性(不存在重复的键)完成就好。
#
# 关于字典具体可以看之前的文章, [&laquo;高性能python-选择合适的数据结构&raquo;](/high_performance_python_section2.html#字典_1)

class Solution:
    def containsDuplicate(self, nums):
        """Return True if any value appears at least twice in nums.

        :type nums: List[int]
        :rtype: bool
        """
        # Track every value seen so far; a repeat means a duplicate exists.
        seen = set()
        for value in nums:
            if value in seen:
                return True
            seen.add(value)
        return False
content/LeetCode/contains-duplicate/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from scipy import stats from plotly.offline import init_notebook_mode, iplot import plotly.figure_factory as ff import plotly.graph_objs as go from plotly import tools from utils import discrete_plot from utils import numerical_plot warnings.simplefilter('ignore') pd.options.display.max_rows = 100 init_notebook_mode(connected=True) # %matplotlib inline # - # # Load Dataset # + train = pd.read_csv('./data/train_clean.csv') test = pd.read_csv('./data/test_clean.csv') print('Train:') print(train.info(verbose=False), '\n') print('Test:') print(test.info(verbose=False)) # - # # Data Basic Information. # + # imbalanced dataset target1 = train['target'].sum() target0 = (1 - train['target']).sum() print('Target 0:\t', target0, '\t', np.round(target0 / len(train), 4)) print('Target 1:\t', target1, '\t', np.round(target1 / len(train), 4)) print('0/1 Ratio:\t', np.round(target0 / target1, 4)) # + # visualize the target count distribution data = [go.Bar(x=['status 0'], y=[target0], name='Status 0'), go.Bar(x=['status 1'], y=[target1], name='Status 1')] margin=go.layout.Margin(l=50, r=50, b=30, t=40, pad=4) legend = dict(orientation='h', xanchor='auto', y=-0.2) layout = go.Layout(title='Loan Status Count Plot', xaxis=dict(title='Loan Status'), yaxis=dict(title='Count'), autosize=False, width=700, height=400, margin=margin, legend=legend) fig = go.Figure(data=data, layout=layout) iplot(fig) # - # # Visualization # + # define categorical and numerical features cat_features = ['term', 'home_ownership', 'verification_status', 'purpose', 'title', 'addr_state', 'initial_list_status', 'application_type', 'grade', 'sub_grade'] num_features = ['loan_amnt', 
'loan_to_inc', 'int_rate', 'installment_ratio', 'emp_length', 'annual_inc', 'dti', 'delinq_2yrs', 'inq_last_6mths', 'open_acc', 'pub_rec', 'revol_bal', 'revol_util', 'total_acc', 'collections_12_mths_ex_med', 'acc_now_delinq', 'tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim', 'acc_open_past_24mths', 'avg_cur_bal', 'bc_open_to_buy', 'bc_util', 'chargeoff_within_12_mths', 'delinq_amnt', 'mo_sin_old_il_acct', 'mo_sin_old_rev_tl_op', 'mo_sin_rcnt_rev_tl_op', 'mo_sin_rcnt_tl', 'mort_acc', 'mths_since_recent_bc', 'mths_since_recent_inq', 'num_accts_ever_120_pd', 'num_actv_bc_tl', 'num_actv_rev_tl', 'num_bc_sats', 'num_bc_tl', 'num_il_tl', 'num_op_rev_tl', 'num_rev_accts', 'num_rev_tl_bal_gt_0', 'num_sats', 'num_tl_120dpd_2m', 'num_tl_30dpd', 'num_tl_90g_dpd_24m', 'num_tl_op_past_12m', 'pct_tl_nvr_dlq', 'percent_bc_gt_75', 'pub_rec_bankruptcies', 'tax_liens', 'tot_hi_cred_lim', 'total_bal_ex_mort', 'total_bc_limit', 'total_il_high_credit_limit', 'credit_length'] features = cat_features + num_features # define numerical and categorical features print('Categorical feature:\t', len(cat_features)) print('Numerical feature:\t', len(num_features)) print('Total feature:\t\t', len(features)) # - # ### 2. 
Numerical Variables # loan_amnt feature = 'loan_amnt' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # loan_to_inc feature = 'loan_to_inc' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=True, w=1000, h=450)) # int_rate feature = 'int_rate' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # installment_ratio feature = 'installment_ratio' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # annual_inc feature = 'annual_inc' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=True, w=1000, h=450)) # dti feature = 'dti' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=True, w=1000, h=450)) # open_acc feature = 'open_acc' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # revol_bal feature = 'revol_bal' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=True, w=1000, h=450)) # revol_util feature = 'revol_util' iplot(numerical_plot(train, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # total_acc feature = 'total_acc' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # tot_coll_amt feature = 'tot_coll_amt' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # tot_cur_bal feature = 'tot_cur_bal' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # total_rev_hi_lim feature = 'total_rev_hi_lim' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # avg_cur_bal feature = 'avg_cur_bal' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # bc_open_to_buy feature = 'bc_open_to_buy' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, 
h=450)) # bc_util feature = 'bc_util' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # delinq_amnt feature = 'delinq_amnt' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # mo_sin_old_il_acct feature = 'mo_sin_old_il_acct' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # mo_sin_old_rev_tl_op feature = 'mo_sin_old_rev_tl_op' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # mo_sin_rcnt_rev_tl_op feature = 'mo_sin_rcnt_rev_tl_op' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # mo_sin_rcnt_tl feature = 'mo_sin_rcnt_tl' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # mths_since_recent_bc feature = 'mths_since_recent_bc' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # pct_tl_nvr_dlq feature = 'pct_tl_nvr_dlq' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # percent_bc_gt_75 feature = 'percent_bc_gt_75' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # tot_hi_cred_lim feature = 'tot_hi_cred_lim' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # total_bal_ex_mort feature = 'total_bal_ex_mort' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # total_bc_limit feature = 'total_bc_limit' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450)) # total_il_high_credit_limit feature = 'total_il_high_credit_limit' iplot(numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450))
deprecated/4. Data Visualization - Numerical Variable.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd household = pd.read_csv('~/Qsync/整理/Work/Uganda RCT/ISASimple_Gates_LLINE-UP_rct_RSRC_households.txt', sep='\t') household.describe() household.head(10) observation = pd.read_csv('~/Qsync/整理/Work/Uganda RCT/ISASimple_Gates_LLINE-UP_rct_RSRC_observations.txt', sep='\t') observation metadata = pd.read_csv('~/Qsync/整理/Work/Uganda RCT/ISASimple_Gates_LLINE-UP_rct_RSRC_ontologyMetadata.txt', sep='\t') metadata participants = pd.read_csv('~/Qsync/整理/Work/Uganda RCT/ISASimple_Gates_LLINE-UP_rct_RSRC_participants.txt', sep='\t') participants participants.describe() samples = pd.read_csv('~/Qsync/整理/Work/Uganda RCT/ISASimple_Gates_LLINE-UP_rct_RSRC_samples.txt', sep='\t') samples samples.describe()
examples/analyse_ug_rct.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="Ht8cT-Ou6a7R" # # SIT742: Modern Data Science # **(Week 04: Text Analysis)** # # --- # - Materials in this module include resources collected from various open-source online repositories. # - You are free to use, change and distribute this package. # - If you found any issue/bug for this document, please submit an issue at [tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues) # # Prepared by **SIT742 Teaching Team** # # --- # # ## Session 4A - The Fundamentals of Text Pre-processing # # Table of Content # # * Part 1. Accessing Various Text Resources # * Part 2. Basic Steps of Pre-Processing Text # * Part 3. Summary # * Part 4. Reading Materials # # # --- # # The majority of text data that appears in everyday sources such as books, # newspapers, magazines, emails, blogs, and tweets # is free language text. Given the amount of information stored as text on the Internet, it is not feasible for a human to manually explore such a large amount of text data to extract useful information. Therefore, we have to use automatic approaches, such as text analysis algorithms developed in the fields of text mining, natural language process (NLP) and information retrieval (IR). It is worth knowing that computers cannot directly understand text like humans. For example, humans can automatically break down sentences into units of meaning, but computers cannot. Therefore, text data must be processed before various text analysis algorithms can use it. # # Unlike the data you can retrieve from relational databases, text data always appears in an unstructured form. # By unstructured we mean that text data exists "in the wild" and has not been converted into a structured format, like a spreadsheet. 
Therefore, it has to be manipulated and converted into a proper structured and numerical format consumable by text analysis algorithms, which is referred to as text pre-processing. It is an important task and a critical step in text analysis. The characters, words and sentences identified by text pre-processing are the fundamental units passed to all the downstream text analysis algorithms, such as part-of-speech tagging, parsing, document classification and clustering, etc. # This chapter describes the basic pre-processing steps that are needed to convert unstructured text into a structured # format. # + [markdown] colab_type="text" id="et29QNJy6a7X" # ## Part 1. Accessing Various Text Resources # # What are the text corpora and lexical resources often used in text analysis? Where and how can we # access them? # Text data used for different text analysis tasks can be derived from various resources, such as # * **Existing data repositories**, most of which contains corpora that have been either pre-processed into a specific format that can be directly digested by the downstream text analysis algorithms or manually annotated. # For example, # * [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets.html?format=&task=&att=&area=&numAtt=&numIns=&type=text&sort=nameUp&view=table) contains 30 corpora that can be used in text mining tasks, such as regression, clustering, and classification. # * [Linguistic Data Consortium](https://www.ldc.upenn.edu/) contains corpora mainly used in various natural language processing tasks, such as parsing, acoustic analysis, phonological analysis and etc. One disadvantage of using LDC is that its corpora are not free. Users have to buy a license in order to use those corpora. # # * **NLTK**: A language toolkit that also includes a diverse set of corpora and lexical resources, which include, for example, # * Plain text corpora, e.g., # * The Gutenberg Corpus contains thousands of books. 
# * Tagged Corpora, e.g., # * The Brown Corpus is annotated with part-of-speech tags. Each word is now paired with its part-of-speech tag. # You can retrieve words as (word, tag) tuples, rather than just bare word strings. # * Chunked Corpora, e.g., # * The CoNLL corpora includes phrasal chunks (CoNLL 2000), named entity chunks (CoNLL 2002). # * Parsed Corpora, e.g., # * The Treebank corpora provide a syntactic parse for each sentence, like the Penn Treebank based on Wall Street Journal samples. # * Word List and Lexicons, e.g., # * [WordNet](https://wordnet.princeton.edu/): a large lexical database of English, where nouns, verbs, adjectives and adverbs are organized into interlinked synsets (i.e., sets of synonyms) # * Categorized Corpora: # * The Reuters corpus: a corpus of Reuters News stories for used in developing text analysis algorithms. # # * **Web**: The largest source for getting text data is the Web. Text can be extracted from webpages or be retrieved # via various APIs. For example, # * **Wikipedia articles**: The Wikimedia website provides links to download dumps of Wikipedia articles. Click [here](https://dumps.wikimedia.org/enwiki) to view various dumps for English Wikipedia articles. # * **Tweets** that allows people to communicate with short, 140-characters messages. It is fortunate that Twitter provides quite well documented API that we can use to retrieve tweets of our interest. # * The other text data can be scraped from the Internet, like webpages. Here is a <a href="https://www.youtube.com/watch?v=3xQTJi2tqgk">Youtube video</a> on **scraping websites with Python**. # # The set of NLTK corpora can be easily accessed with interfaces offered by NLTK. Here we show you how to install the text data that comes with NLTK and all the packages included in NLTK. 
# + colab={} colab_type="code" id="FyjDKH_y6a7b" import nltk #If you're unsure of which data/model you need, you can start out with the basic list of data + models with: #It will download a list of "popular" resources, these includes: nltk.download("popular") #It will download a list of "retuters" resources, thses includes: nltk.download("reuters") #While you downliad the nltk package, it will show the Download path,(root/nltk_data) #It will also show the 1st item in the nltk.data.path list # Specifies the file stored in the NLTK data package at *path*. NLTK will search for these files in the directories specified by ``nltk.data.path``. nltk.data.path # + colab={"base_uri": "https://localhost:8080/", "height": 1729} colab_type="code" executionInfo={"elapsed": 15346, "status": "error", "timestamp": 1552971128623, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02817931634636469244"}, "user_tz": -660} id="WyTNm7Ge6a7m" outputId="e11fda80-5c7b-458b-8ea4-8864c3a1aca4" # import nltk #A new window should open, showing the NLTK Downloader. #You can input the related character for the command. #For example, if you would like to check the current NLTK configuation details. #Just follow the prompt message, such as input 'c' to check configuration # and then input `m', and 'q' to exit. nltk.download() # + [markdown] colab_type="text" id="zC1Msn826a7w" # You also can install the NLTK software on the Mac or Windows OS, For example, if you use the Mac OS, then you run the above block's two commands and it will locally gives you a window that looks like the following screenshot. For this lab, we use the Google Colab, a Linux-like system, it will show the command line interface (CLL) not a windows as below if you run the nltk.download() function. 
# # ![NLTK](https://github.com/tulip-lab/sit742/raw/master/Jupyter/image/nltkInstallWindow.png "NLTK") # # # # This window, 'NTLK Download', shown on the Mac OS will allows you to browse the available corpora and packages included in NLTK. The Collections tab on the downloader shows how the packages are grouped into sets. You can select the line labeled "all" and click "download" to obtain all corpora and packages (<font color = "red">Warning: the size is a couple of GBs</font>). It will take a couple of minutes to download the corpora and packages, depending on how fast your Internet connection is. You can also choose to just install the copora and packages as you go. # * * * # + [markdown] colab_type="text" id="v98Jv-IM6a7z" # ## Part 2. Basic Steps of Pre-Processing Text # # The possible steps of text pre-processing are nearly the same for all text analysis tasks, though which pre-processing steps are chosen depends on the specific task. The basic steps are as follows: # * Tokenization # * Case normalization # * Removing Stop words # * Stemming and Lemmatization # * Sentence Segmentation # # We will walk you through each of these steps with some examples. First, you need to # decide <font color="red">the scope of the text to be used in the downstream text analysis tasks</font>. Should you use an entire document? # Or should you break the document down into sections, paragraphs, or sentences. Choosing # the proper scope depends on the goals of the analysis task. # For example, you might choose to use an entire document in document classification and clustering tasks # while you might choose smaller units like paragraphs or sentences in document summarization and information # retrieval tasks. The scope chosen by you will have an impact on the steps needed in the pre-processing process. # # # # # + [markdown] colab_type="text" id="jR2B1MS_K86m" # ### 2.1. Tokenization # # Text is usually represented as sequences of characters by computers. 
# However, most natural language processing (NLP) and text mining tasks # (e.g., parsing, information extraction, machine translation, document classification, information # retrieval, etc.) need to operate on tokens. # The process of breaking a stream of text into tokens is often referred to as **tokenization**. # For example, a tokenizer turns a string such as # ``` # A data wrangler is the person performing the wrangling tasks. # ``` # into a sequence of tokens such as # ``` # "A" "data" "wrangler" "is" "the" "person" "performing" "the" "wrangling" "tasks" # ``` # # There is no single right way to do tokenization. # It completely depends on the corpus and the text analysis task you are going to perform. It is important to ensure that your tokenizer produces proper token types for your downstream text analysis tools. # Although word tokenization is relatively easy compared with other NLP or text mining task, errors made in this phase will propagate into later analysis and cause problems. # In this section, we will demonstrate the process of chopping character sequences into pieces with different tokenizers. # # The major question of the tokenization phase is what counts as a token. # Different linguistic analyses might have different notions of tokens. # In different languages, a token could mean different things. # Here we are not going to dive into the linguistic aspect of what counts as a token, # as it goes beyond the scope of this unit. # We rather consider English text. # **In English, a token can be a string of alphanumeric characters separated by spaces, which # seems quite easy.** # However, things get considerably worse when we start considering words having # hyphens, apostrophes, periods and so on. In a word tokenization task, should we # remove hyphens? Should we keep periods? # According to different text analysis tasks, # tokens can be unigram words, multi-word phrases (or collocations), or # other meaningful and identifiable linguistic elements. 
# Therefore, working out word tokens is not an easy task in pre-processing natural language text. # You might be interested in watching a YouTube video on [word tokenization](https://www.youtube.com/watch?v=f9o514a-kuc). # + colab={} colab_type="code" id="i7sE3bDa6a72" raw = """The GSO finace group in U.S.A. provided Cole with about US$40,000,555.4 in funding, which accounts for 35.3% of Cole's revenue (i.e., AUD113.3m), as the ASX-listed firm battles for its survival. Mr. Johnson said GSO's recapitalisation meant "the current shares are worthless".""" # + [markdown] colab_type="text" id="r2yfyRDj6a8A" # #### 2.1.1 Standard Tokenizer # # For English, a straightforward tokenization strategy is to use white spaces as token delimiters. # The whitespace tokenizer simply splits the text on any sequence of whitespace, tab, or newline characters. # Consider the above hypothetical text. # As a starting point, let's tokenize the text above by using any whitespace characters as token delimiters. # As mentioned, these characters include whitespace (' '), tab ('\t'), newline ('\n'), return ('\r'), and so on. # You have learnt in week 2 that those characters are together represented by a built-in regular expression abbreviation '\s'. # Thus, we will use '\s' rather than writing it as something like '[ \t\n]+'. # You can read the details about the ["\s" Syntax](https://docs.python.org/3/library/re.html) # # There are multiple ways of tokenizing a string with whitespaces. # The simplest approach might be using Python's string function `split()`. # This function returns a list of tokens in the string. # Another way is to use Python's regular expression package, `re` as # ```python # import re # re.split(r"\s+", raw) # ``` # The output should be exactly the same as that given by the string function `split()`. # Here we further demonstrate the use of <font color="blue">RegexpTokenzier</font> from Natural Language Toolkit (NLTK). 
# + colab={} colab_type="code" id="yKv-cbC96a8G" from nltk.tokenize import RegexpTokenizer # + colab={} colab_type="code" id="QAWtbwiJ6a8R" #For the RegexpTokenizer function, the arguement gaps type is bool. #we will use the 'True' if this tokenizer's pattern should be used to find separators between tokens; #we will use the 'False' if this tokenizer's pattern should be used to find the tokens themselves. #The below example with gasp param is True tokenizer = RegexpTokenizer(r"\s+", gaps=True) tokens = tokenizer.tokenize(raw) print(tokens) #The below example with gasp param is False tokenizer_test = RegexpTokenizer(r"\s+", gaps=False) tokens_test = tokenizer_test.tokenize(raw) print(tokens_test) # + [markdown] colab_type="text" id="dvEgk0LU6a8e" # A <font color="blue">RegexpTokenizer</font> splits a string into tokens using a regular expression. # Refer to its online [documentation](http://www.nltk.org/api/nltk.tokenize.html#nltk.tokenize.regexp.RegexpTokenizer) # for more details. # Its constructor takes four arguments. # The compulsory argument is the pattern used to build the tokenizer. # It is in the form of a regular expression. # **In the example above, we used `\s+` to match 1 or more whitespace characters.** # If the pattern defines separators between tokens, the value of `gaps` should be # set to `True`. Otherwise, the pattern should be used to find the tokens. # NLTK also provides a whitespace tokenizer, `WhitespaceTokenizer[source]`, which is # equivalent to our tokenizer. Try # # # # + colab={} colab_type="code" id="akOPDrnC-_H_" from nltk.tokenize import WhitespaceTokenizer WhitespaceTokenizer().tokenize(raw) # + [markdown] colab_type="text" id="JVYonrOs-_xK" # It seems that word tokenization is quite simple if words in a language are all # separated by whitespace characters. 
# However, this is not the case in many languages other than English, **such # as Chinese, Japanese, Korean and Ancient Greek.** # In those languages, text is written without any whitespaces between words. # So the whitespace tokenizer is of no use at all. # To handle them, we need more advanced tokenization techniques, often referred to as # word segmentation, which is an important and challenging task in NLP. # **However, # discussing word segmentation is beyond our scope here.** # # It is not surprising that the whitespace tokenizer is **insufficient** even for English, since English does not just contains sequences of alphanumeric characters separated by white spaces. # It often contains punctuation, hyphen, apostrophe, and so on. # Sometimes **whitespace does not necessarily indicate a word break. ** # For example, non-compositional phrases (e.g., "real estate" and "shooting pain") and proper nouns (e.g., "The New York Times") have a different meaning than the sum of their parts. They cannot be split in the process of word tokenization. # They must be treated as a whole in, for instance, information retrieval. # # Back to our example, # the whitespace tokenizer still gives us word like "(i.e.,", "funding," and "worthless".". # We would like to remove parentheses, some punctuations, quotation marks and other non-alphanumeric characters. # A simple and straightforward strategy is to use all non-alphanumeric characters as token delimiters. # + colab={} colab_type="code" id="Rj9Uo8PG6a8k" tokenizer = RegexpTokenizer(r"\W+", gaps=True) tokenizer.tokenize(raw) # + [markdown] colab_type="text" id="xDJ813sU6a82" # In regular expressions, '\W' indicates any non-alphanumeric characters (equivalent to `[^a-zA-Z0-9]`) while '\w' indicates any alphanumeric characters (equivalent to `[a-zA-Z0-9]`). # The counterpart is to extract tokens that only consist of alphanumeric characters without the empty strings. 
# Try the following out yourself:
# ```python
# tokenizer = RegexpTokenizer(r"\w+")
# tokenizer.tokenize(raw)
# ```
#
# These two strategies are simple to implement, but there are cases where they may not match the desired behaviour.
# For example, the whitespace tokenizer cannot properly handle non-alphanumeric characters, while the non-alphanumeric tokenizer might over-tokenise some tokens with periods, hyphens, apostrophes, etc.
# In the rest of this section, we will discuss the main problems that you might face while tokenising free language text. You will soon find that tokenizers should often be customized to deal with different datasets.

# + colab={} colab_type="code" id="xaDUmnKoE59l"
# \w matches any alphanumeric character, so r"\w+" finds the tokens themselves.
# NOTE: this cell previously passed gaps=True, which treats \w+ as a token
# *separator* and returns the punctuation/whitespace *between* words -- the
# opposite of what the comment and the markdown above intend. With the default
# gaps=False the pattern matches the alphanumeric tokens, as described.
tokenizer = RegexpTokenizer(r"\w+")
tokenizer.tokenize(raw)

# + [markdown] colab_type="text" id="U_1ScDlL6a86"
# #### 2.1.2 Periods in Abbreviations
#
# Word tokens are not always surrounded by whitespace characters. Punctuation, such as commas, semicolons, and periods, are often used in English, as they are vital to disambiguate the meaning of sentences. However, it is problematic for computers to handle punctuation, especially periods, properly in tokenization.
# In this part we will focus on the handling of periods.
#
# Periods are usually used to mark the end of sentences. Difficulty arises when the period marks abbreviations (including acronyms). Please refer to **"Step 2: Handling Abbreviations" in [3]** for a detailed discussion on abbreviations. In the case of abbreviations, particularly acronyms, separating tokens on punctuation and other non-alphanumeric characters would put different components of the acronym into different tokens, as you have seen in our example, where "U.S.A" has been put into three tokens, "U", "S" and "A", losing the meaning of the acronym. To deal with abbreviations, one approach is to maintain a look-up list of known abbreviations during tokenization.
Another approach aims for smart tokenization. Here we will show you how to use regular expressions to cover most but not all abbreviations. # # An acronym is often formed from the initial components in multi-word phrases. Some contains periods, and some do not. Common acronyms with periods are for example, # * U.S.A # * U.N. # * U.K. # * B.B.C # # Other abbreviations with a similar pattern are, for instance, # * A.M. and P.M. # * A.D. and B.C. # * O.K. # * i.e. # * e.g. # # For abbreviations like those, it is not hard to figure out the pattern and the corresponding regular expression. Each of those abbreviations contains at least a pair of a letter (either uppercase or lowercase) and a period. The regular expression is # ```python # r"([a-zA-Z]\.)+" # ``` # To see the graphical representation of the regular expression, please click the [RegexpTokenizer](https://regexper.com/#%28%5Ba-zA-z%5D%5C.%29%2B) webpage. # + colab={} colab_type="code" id="GYOFGHKtG-uy" #If you directly use the r"([a-zA-Z])", you will find out that the output is different with your expect. tokenizer = RegexpTokenizer(r"([a-zA-Z]\.)+") tokenizer.tokenize(raw) # + colab={} colab_type="code" id="xtdc9DPK6a8-" #Then, we add the ?: in the above regular expression. tokenizer = RegexpTokenizer(r"(?:[a-zA-Z]\.)+") tokenizer.tokenize(raw) # + [markdown] colab_type="text" id="YpjaA1Jm6a9I" # Observe that # 1. We introduced <font color="red">(?: )</font> in the regular expression to avoid just selecting substrings that match the pattern. `(?:)` is a non-capturing version of regular parentheses. If the parentheses are used to specify the scope of the pattern, but not to select the matched material to be output, you have to use `(?:)`. To check out how `?:` affects the output, try to remove it and run the tokenizer again. You will get the following output # ``` # ['e.', 'A.', 'l.', 'r.'] # ``` # It just returns the last substrings that match the pattern. # 2. The code also returned 'l.' and 'r.' 
# that are part of 'survival.' and 'Mr.'
# The period in 'survival.' marks the end of a sentence.
# Indeed, it is very challenging to deal with the period at the end of each sentence, as it can also be part of an abbreviation if the abbreviation appears at the end of a sentence.
# For example, the following sentence ends with 'etc.'
# ```
# I need milk, eggs, bread, etc.
# ```
#
# Next, let’s further consider some more general abbreviations, like
# * Mr. and Mrs.
# * Dr.
# * st.
# * Wash. and Calif. (abbreviations for two states in U.S., Washington and California)
#
# In those abbreviations, the period is always preceded by two or more letters of the English alphabet. Turn this pattern into a regular expression
# ```
# r"[a-zA-Z]{2,}\."
# ```

# + colab={} colab_type="code" id="_1gelRs56a9X"
# NOTE: the character class must be [a-zA-Z]; the original [a-zA-z] spans the
# ASCII range Z..a and therefore also matches the punctuation [ \ ] ^ _ `
tokenizer = RegexpTokenizer(r"[a-zA-Z]{2,}\.")
tokenizer.tokenize(raw)

# + [markdown] colab_type="text" id="7U3-4wm26a9r"
# It is not surprising that the output contains "survival." again.
# The issue of working out which punctuation marks indicate the end of a sentence will be discussed in section 2.5.
# Let's put all the cases together.
# The regular expression can be generalised to
# ```python
# r"(?:[a-zA-Z]+\.)+"
# ```
# which matches both acronyms and abbreviations like "Dr."

# + colab={} colab_type="code" id="gZjNqYKeJiZk"
# As explained above, the group must be non-capturing (?:...); with a plain
# capturing group the tokenizer would return only the last repetition of the
# group instead of the whole match. [a-zA-Z] is also fixed here (see note above).
tokenizer = RegexpTokenizer(r"(?:[a-zA-Z]+\.)+")
tokenizer.tokenize(raw)

# + [markdown] colab_type="text" id="jhUyz8cy6a92"
# As we mentioned earlier in this chapter, the issues of tokenization are language specific.
# The language of the document to be tokenized should be known a priori.
# Take computer technology as an example.
# It has introduced new types of character sequences that a tokenizer should probably treat as a single token, including email addresses, web URLs, IP addresses, etc. One solution is to simply ignore them by using a non-alphanumeric-based tokenizer.
# However, this comes at the cost of losing the original meaning of those kinds of tokens.
# For instance, if an IP address, like "172.19.197.106", is tokenized into individual numbers, "172", "19", "197", and "106".
# It is no longer an IP address, and these numbers can be anything.
# To account for strings like
# * "172.19.197.106"
# * "www.mit.edu"
#
# you can simply update our regular expression accounting for abbreviations to
# ```python
# (\w+\.?)+
# ```
#
# Try it out on http://regexr.com/.

# + colab={} colab_type="code" id="u08JXBR9J-22"
# Tokenizer matching the digit groups of an IP address
tokenizer = RegexpTokenizer(r"\d{1,3}")
print(tokenizer.tokenize("172.19.197.106"))

# Tokenizer matching the words in a URL
tokenizer = RegexpTokenizer(r"\w{1,}")
print(tokenizer.tokenize("www.mit.edu"))

# Returns only the last component of an IP address or a URL: because
# (\w+\.?)+ uses a *capturing* group, the tokenizer returns just the last
# repetition of the group ('106', 'edu') rather than the whole match --
# the same capturing-group pitfall demonstrated above. Use the
# non-capturing form (?:\w+\.?)+ to keep the whole token.
tokenizer = RegexpTokenizer(r"(\w+\.?)+")
print(tokenizer.tokenize("172.19.197.106"))
print(tokenizer.tokenize("www.mit.edu"))

# + [markdown] colab_type="text" id="uEujTvvy6a99"
# #### 2.1.3 Currency and Percentages
#
# While analysing financial documents, such as finance reports, a financial analyst might be interested in monetary numerals mentioned in the reports. One interesting research question in both finance and computer science is whether one can use finance reports to help predict the stock market prices. In this case, it would be good for a tokenizer to keep all the monetary numerals.
#
# Currency is usually expressed in symbols and numerals (e.g., $10).
# There are many different ways of writing about different currencies.
# For example,
# * A three-letter currency abbreviation followed by figures, for example,
# ```
# AUD100, EUR500, CNY330
# ```
#
# * A letter or letters symbolising the country, followed by a currency symbol and figures, for example,
# ```
# A$100 (= AUD100), US$10 (= USD10), C$5 (= CAD5),
# ```
#
# * A currency symbol ($, £, €, ¥, etc.)
followed by figures, for examples # ``` # £100.5, €30.0 # ``` # # While the number of digits in the integer part is more than three, commas are often inserted between every three digits, like # ``` # AUD100, 000 # ``` # Let's construct a regular expression that can account for all the following monetary numerals # ``` # 1. $10,000.00 # 2. €10,000,000.00 # 3. ¥5.5555 # 4. AUD100 # 5. A$10.555 # ``` # The regular expression should looks like as follows (<a href="https://regexper.com/#(%3F%3A%5BA-Z%5D%7B1%2C3%7D)%3F%5B%5C%24£€¥%5D%3F(%3F%3A%5Cd%7B1%2C3%7D%2C)*%5Cd%7B1%2C3%7D(%3F%3A%5C.%5Cd%2B)%3F"> the graphical representation</a>): # ```python # r" (?: # [A-Z]{1,3})? # (1) # [\$£€¥]? # (2) # (?:\d{1,3},)* # (3) # \d{1,3} # (4) # (?:\.\d+)? # (5) # " # ``` # # ![The diagram for this regular expression](https://github.com/tulip-lab/sit742/raw/master/Jupyter/image/P04A01.png) # # # (1) matches the start of monetary numerals, which consists of one or up to 3 uppercase letters that indicate a country symbol or a currency abbreviation. # <br/> # (2) together with (1), matches the start of monetary numerals, which consists of either only a currency symbol or a country symbol plus a currency symbol. # <br/> # (3) accounts for the integer part that contains more than three digits. It matches all digits in the integer part except for the last three digits. # <br/> # (4) matches the last three digits in the integer part. # <br/> # (5) matches the fractional part. # # + colab={} colab_type="code" id="Drp_VOFm6a-T" #Let run the above regular expression tokenizer = RegexpTokenizer(r"(?:[A-Z]{1,3})?[\$£€¥]?(?:\d{1,3},)*\d{1,3}(?:\.\d+)?") tokenizer.tokenize(raw) # + [markdown] colab_type="text" id="uXanOxsH6a-n" # Refer back to our example text "raw", can you find any issue rather than the percentage (35.5%)? The regular expression cannot handle "AUD113.3m", where the "m" indicates million. Without 'm', the number 'AUD113.3' loses its meaning in the original context. 
# Therefore, you have seen that there might not be a regular expression that can handle all possible ways of representing currency.
#
# Now, we have constructed a regular expression for currencies, even though it is not perfect.
# Next, we move to working out the regular expression for percentages; things become quite easy.
# Percentages usually have the following forms
# * 23%
# * 23.23%
# * 23.2323%
# * 100.00%
#
# The maximum number of digits in the integer part is 3, the minimum is 1, so the regular expression is '\d{1,3}'.
# A percentage can have either one or no fractional part, which can be matched by '(\.\d+)?'.
# Adding % to the end, we have (<a href="https://regexper.com/#%5Cd%7B1%2C3%7D(%5C.%5Cd%2B)%25">the graphical representation</a>)
# ```python
# r"\d{1,3}(?:\.\d+)?%"
# ```
#
# ![The diagram for this regular expression](https://github.com/tulip-lab/sit742/raw/master/Jupyter/image/P04A02.png)

# + colab={} colab_type="code" id="I45vt11t6a-s"
tokenizer = RegexpTokenizer(r"\d{1,3}(?:\.\d+)?%")
tokenizer.tokenize(raw)

# + [markdown] colab_type="text" id="NsfJgax26a-2"
# The above code should give you the only percentage in our example text.
# Compare the regular expression matching percentages with that matching currency,
# you will find that the former is similar to the last bits of the latter, except for the percentage sign.
# Besides, there are other numerical and special expressions that
# we cannot easily handle with regular expressions. For example, these expressions include
# email addresses, time, vehicle licence numbers, phone numbers, etc.
# If you are interested in dealing with them, you could read the “Regular Expressions Cookbook” by <NAME> and <NAME>.

# + [markdown] colab_type="text" id="hngoWmJi6bA-5"
# #### 2.1.4 Hyphens and Apostrophes
#
# In English, hyphenation is used for various purposes. The hyphen can be used to form certain compound terms, including hyphenated compound nouns, verbs and adjectives. It can also be used for word division.
There are many sources of hyphens in texts. Thus, should one count a sequence of letters with a hyphen as one word to two? Unfortunately, the answer seems to be sometimes one, sometimes two. # For example, if the hyphen is used to split up vowels in words, such as "co-operate", "co-education" and "pre-process", these words should be regarded as single token. In contrast, if the hyphen is used to group a couple of words together, for example, "a state-of-the-art algorithm" and "a money-back guarantee", these hyphenated words should be separated into individual words. # Therefore, handling hyphenated words automatically is one of the most difficult tasks in pre-processing text data. # # "**The Art of Tokenization**" (Please refer the Part 4, Reading Materials) categorizes different hyphens into three types: # * **End-of-Line Hyphen**: In professionally printed material (like books, and newspapers), the hyphen is used to divide words between the end of one line and the beginning of the next in order to perform justification of text during typesetting. It seems to be easy to handle these kinds of hyphens by simply removing them and joining the parts of a word at the end of one line and the beginning of the next. # * **Lexical Hyphen**: Words with a lexical hyphen are better to be treated as a single word. They are typically included in a dictionary. For example, words contains certain prefixes, like "co-", "pre-", "multi-", etc., and other words like "so-called", "forty-two" # * **Sententially Determined Hyphenation**: This type of hyphen is often created dynamically. It includes, for example, nouns modified by an 'ed'-verb (e.g., "text-based" and "hand-made") and sequences of words used as a modifier in a noun group, as in "the 50-cent-an-hour raise". In these cases, we might want to treat those tokens joined by hyphens as individual words. 
# # The use of hyphens in many such cases is extremely inconsistent, which further increase the complexity of dealing with hyphens in tokenization. People often resort to using either some heuristic rules or treating it as a machine learning problem. However, these go beyond our scope here. It is clear that handling hyphenation is much more complicated than one can expect. You should also be clear that there is no way of handling all the cases above. # # Let's assume that we are going to treat all strings of two words separated by a hyphen as a single token, how can we extract them from texts without breaking them into pieces. In our example text, we are going to view "ASX-listed" as a single token. The pattern here is a sequence of alphanumeric character plus "-" and plus another sequence of alphanumeric character. # The corresponding regular expressions should be # ```python # r"\w+-\w" # ``` # + colab={} colab_type="code" id="I9rchg8I6a-_" tokenizer = RegexpTokenizer(r"\w+-\w+") tokenizer.tokenize(raw) # + [markdown] colab_type="text" id="FKnqjBWh6a_K" # Similar to hyphens, how to handle an apostrophe in tokenization is another complex question. The apostrophe in English is often used in two cases: # * Contractions: a shortened version of a word or multiple words. # * don't (do not) # * she'll (she will) # * you're (you are) # * he's (he is or he has) # * you'd (you would) # * Possessives: used to indicate ownership/possession with nouns. # * the cat's tail # * Einstein's theory # # Should we treat a string containing apostrophes as a single word or two words? # Perhaps, you might think we should separate English Contractions into two words, and regard possessives as a single word. # However, distinguishing contractions from possessives is not easy. # For example, should "cat's" be "cat has/is" or the possessive case of cat. # Thus some processor in NLP splits the strings in either case into two words, while others do not. 
# Here we again assume that we are going to retrieve all strings with an apostrophe as single words.
# The regular expression is quite similar to the one for handling hyphens.
# ```
# r"\w+'\w+"
# ```

# + colab={} colab_type="code" id="p8lUk62W6a_Y"
tokenizer = RegexpTokenizer(r"\w+'\w+")
tokenizer.tokenize(raw)

# + [markdown] colab_type="text" id="e5trx9fP6a_x"
# Now let's generalise the `\w+` to permit word-internal hyphens and apostrophes (<a href="https://regexper.com/#%5Cw%2B(%3F%3A%5B-'%5D%5Cw%2B)%3F">the graphical representation</a>):
# ```python
# \w+(?:[-']\w+)?
# ```
#
# You have learnt some simple approaches for handling different issues in word tokenization, which turns out to be far more difficult than you might have expected. It is clear that different NLP and text mining tasks on different text corpora need different word tokenization strategies, as you must decide what counts as a word. Besides the `RegexpTokenizer`, NLTK implements a set of other word tokenization modules. Please refer to [its official webpage](http://www.nltk.org/api/nltk.tokenize.html) for more details.
# So far we have only considered well-written text, but there are other types of natural language texts, such as the transcripts of speech corpora and some non-standard texts like tweets that provide their own additional challenges.

# + colab={} colab_type="code" id="lnhfVL6HR6Hz"
# NOTE: the pattern previously ended with a stray trailing space
# (r"\w+(?:[-']\w+)? "), which forced every token to be followed by a literal
# space -- silently dropping tokens at the end of the string or before
# punctuation. The markdown above shows the pattern without the space.
tokenizer = RegexpTokenizer(r"\w+(?:[-']\w+)?")
tokenizer.tokenize(raw)

# + [markdown] colab_type="text" id="tp9IPKDr6a_5"
# ### 2.2. Case Normalization
# After word tokenization, you may find that words can contain either upper- or lowercase letters.
# For example, you might have "data" and "Data" appearing in the same text.
# Should one treat them as two different words or as the same word?
# Most English texts are written in mixed case.
# In other words, a text can contain both upper- and lowercase letters.
# Capitalization helps readers differentiate, for example, between nouns and proper nouns.
# In many circumstances, however, an uppercase word should be treated no differently than in lower case appearing in a document, and even in a corpus. # Therefore, a common strategy is to reduce all letters in a word to lower case. # It is very simple to do so. # + colab={} colab_type="code" id="H-BhfNO36a__" tokens = [token.lower() for token in tokens] tokens # + [markdown] colab_type="text" id="R4hQJMHE6bAP" # It is often a good idea to do case normalization. For example, with case normalization, you can match "data wrangling" with "Data Wrangling" in an information retrieval task. But for other tasks, like named entity recognition, one would better to keep capitalised words (e.g., pronouns) left as capitalised. # People have tried some simple heuristics that just makes some token lowercase. # However, there is a trade-off between getting capitalization right and simply using lowercase regardless of the correct case of words. # You can read about basic formatting issues of text processing in "Corpus-Based Work" on the Part 4, Reading Materials. # + [markdown] colab_type="text" id="gWw6xdFV6bAY" # ### 2.3. Removing Stop words # [Stopwords](https://en.wikipedia.org/wiki/Stop_words) are words that are extremely common and carry little lexical content. 
# For many NLP and text mining tasks, it is useful to remove stopwords in order to save storage space
# and speed up processing, and the process of removing these words is usually called “stopping.”
# An example stopword list from NLTK is shown below:

# + colab={} colab_type="code" id="bQc14Mrg6bAx"
import nltk
# The stopword corpus is not bundled with NLTK by default; download it first so
# that stopwords.words('english') does not raise a LookupError in a fresh
# environment (e.g. a new Colab runtime).
nltk.download('stopwords')
from nltk.corpus import stopwords
stopwords_list = stopwords.words('english')
# show the stopwords in the 'english' list
stopwords_list

# + [markdown] colab_type="text" id="fdELVGtT6bBR"
# The above list contains 127 stopwords in total, which are often [function words](https://en.wikipedia.org/wiki/Function_word) in English, like articles (e.g., "a", "the", and "an"),
# pronouns (e.g., "he", "him", and "they"), particles (e.g., "well", "however" and "thus"), etc.
# It is easy to use NLTK's built-in stopword list to remove all the stopwords from a tokenised text.

# + colab={} colab_type="code" id="vOM8t8d_6bBl"
filtered_tokens = [token for token in tokens if token not in stopwords_list]
filtered_tokens

# + colab={} colab_type="code" id="8QTQwnCdTvct"
# This will show all the stopwords excluded from the filtered list
excluded_tokens = [token for token in tokens if token in stopwords_list]
excluded_tokens

# + [markdown] colab_type="text" id="tiG652Wi6bB2"
# We have removed 13 stopwords. The remaining number of tokens is 28.
# To check what stopwords have been excluded from the filtered list, you simply change `not in` to `in`.
#
# There is no single universal list of stop words used by all NLP and text mining tools.
# Different stopword lists are available online. For example, the English stopword list
# available at [Kevin Bouge's website](https://sites.google.com/site/kevinbouge/stopwords-lists)
# which contains 570 stopwords, a quite fine-grained stopword list.
# At the same website, you can also download stopword lists for 27 languages other than English.
# Please download the English stopwords list from <NAME>'s website, and save it into the folder where # you keep this IPython Notebook file. # We will try out the aforementioned stopword lists on the large # [Reuters corpus](https://github.com/teropa/nlp/tree/master/resources/corpora/reuters). # + colab={} colab_type="code" id="99wYIR5x6bB8" # !pip install wget import wget link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/stopwords_en.txt' DataSet = wget.download(link_to_data) # !ls # + colab={} colab_type="code" id="4KaWJx9R6bCw" import nltk reuters = nltk.corpus.reuters.words() stopwords_list_570 = [] with open('stopwords_en.txt') as f: stopwords_list_570 = f.read().splitlines() #It will show the retuers stopwords, you can compare it with the above 'english'stopwords. #You will find that the 'retuers'stopwords is more abundant than 'english' stopwords. stopwords_list_570 # + [markdown] colab_type="text" id="ixs2WLXN6bDn" # Remove stop words accroding to NLTK's built-in stopword list. # + colab={} colab_type="code" id="sp4I6HPH6bEB" filtered_reutuers = [w for w in reuters if w.lower() not in stopwords_list] #It will show the percentage between the filtered_retuers and the 'english' stopwords len(filtered_reutuers)*1.0/len(reuters) # + [markdown] colab_type="text" id="6QPqNXCo6bEf" # Remove stop words according to the downloaded stop word list. (Note: the following script will run a couple of minutes due to data structure used in search.) # + colab={} colab_type="code" id="CroshJn16bEk" filtered_reutuers = [w for w in reuters if w.lower() not in stopwords_list_570] #It will show the percentage between the filtered_retuers and the 'retuers' stopwords #It will show that the retuers stopwords will filte more stopwords. len(filtered_reutuers)*1.0/len(reuters) # + [markdown] colab_type="text" id="Vbc9EF4j6bEx" # Thus, with the help of these two stopword lists, we can filter about 36% and 34% of the words respectively. 
# We have significantly reduced the size of the Reuters corpus. # The question is: Have we lost lots of information due to removing stopwords? # For the large majority of NLP and text mining tasks and algorithms, stopwords usually appear to be of little value and have little impact on the final results, as the presence of stopwords in a text does not really help distinguishing it from other texts. # In contrast, text analysis tasks involving phrases are the exception because phrases lose their meaning if some of the words are removed. # For example, if the two stopwords in the phrase "a bed of roses" are removed, its original meaning in the context of IR will be lost. # # Stopwords usually refer to the most common words in a language. # The general strategy for determining whether a word is a stopword or not is to compute its total number of appearances in a corpus. # We will cover more about removing common words other than stopwords while we further explore text data in next chapter. # Here we would like to point out that failing to remove those common words could lead to skewed analysis results. # For example, while analysing emails we usually remove headers (e.g., "Subject", "To", and "From") and sometimes # a lengthy legal disclaimer that often appears in many corporate emails. # For short messages, a long disclaimer can overwhelm the actual text when performing any sort of text analysis. # For more discussion on stopping, please read [5] and watch an 8-mintue YouTube video on [Stop Words](https://www.youtube.com/watch?v=w36-U-ccajM). # + [markdown] colab_type="text" id="SKZTPma-6bEz" # ### 2.4. Stemming and Lemmatization # # Another question in text pre-processing is whether we want to keep word forms like "educate", "educated", "educating", # and "educates" separate or to collapse them. Grouping such forms together and working in terms of their base form is # usually known as stemming or lemmatization. 
# Typically the stemming process includes the identification and removal of prefixes, suffixes, and pluralisation, # and leaves you with a stem. # Lemmatization is a more advanced form of stemming that makes use of, for example, the context surrounding the words, # an existing vocabulary, morphological analysis of words and other grammatical information (e.g., part-of-speech tags) # to determine the basic or dictionary form of a word, which is known as the lemma. # See Wikipedia entries for [stemming](https://en.wikipedia.org/wiki/Stemming) # and [lemmatization](https://en.wikipedia.org/wiki/Lemmatisation). # # Stemming and lemmatization are the basic text pre-processing methods for texts in languages like English, French, # German, etc. # In English, nouns are inflected in the plural, verbs are inflected in the various tenses, and adjectives are # inflected in the comparative/superlative. # For example, # * watch &#8594; watches # * party &#8594; parties # * carry &#8594; carrying # * love &#8594; loving # * stop &#8594; stopped # * wet &#8594; wetter # * fat &#8594; fattest # * die &#8594; dying # * meet &#8594; meeting # # It is not hard to find that they all follow some inflections rules. # For instance, to get the plural forms of nouns endings with consonant 'y', one often changes the ending # 'y' to 'ie' before adding 's'. # Indeed most existing stemming algorithms make intensive use of this kind of rules. # # In morphology, the derivation process creates a new word out of an existing one often by adding either # a prefix or a suffix. It brings considerable sematic changes to the word, often word class is changed, for example, # * dark &#8594; darkness # * agree &#8594; agreement # * friend &#8594; friendship # * derivation &#8594; derivational # # The goal of stemming and lemmatization is to reduce either inflectional forms or derivational forms of # a word to a common base form. 
# Before we demonstrate the use of several state-of-the-art stemmers and lemmatizers implemented in NLTK, please read # [4] and section 3.6 in [2]. # If you are a visual learner, you could watch the YouTube video on # [Stemming](https://www.youtube.com/watch?v=2s7f8mBwnko) from Prof. <NAME>. # # NLTK provides several famous stemmers interfaces, such as # # * Porter Stemmer, which is based on # [The Porter Stemming Algorithm](http://tartarus.org/martin/PorterStemmer/) # * Lancaster Stemmer, which is based on # [The Lancaster Stemming Algorithm](https://tartarus.org/martin/PorterStemmer/), # * Snowball Stemmer, which is based on [the Snowball Stemming Algorithm](http://snowball.tartarus.org/) # # Let's try the three stemmers on the words listed above. # # + colab={} colab_type="code" id="ukMvAMG46bFG" words = ['watches', 'parties', 'carrying', 'loving', 'stopped', 'wetter', 'fattest', 'dying', 'darkness', 'agreement', 'friendship', 'derivational', 'denied', 'meeting'] # + [markdown] colab_type="text" id="BgaIIrNu6bFW" # Porter Stemming Algorithm is the one of the most common stemming algorithms. # It makes use of a series of heuristic replacement rules. # + colab={} colab_type="code" id="dgNgyugE6bFZ" from nltk.stem import PorterStemmer stemmer = PorterStemmer() ['{0} -> {1}'.format(w, stemmer.stem(w)) for w in words] # + [markdown] colab_type="text" id="5glx9qR76bFj" # The Porter Stemmer works quite well on general cases, like 'watches' &#8594; 'watch' and 'darkness' &#8594; 'dark'. # However, for some special cases, the Porter Stemmer might not work as expected, # like 'carrying' &#8594; 'carri' and 'derivational' &#8594; 'deriv'. # Note that a concept called "list comprehension" supported by Python is used here. # If you would like to know more about list comprehension, please click [here](http://www.secnetix.de/olli/Python/list_comprehensions.hawk). # # The Lancaster Stemmer is much newer than the Porter Stemmer, published in 1990. 
# + colab={} colab_type="code" id="w6tqQde_6bFo" from nltk.stem import LancasterStemmer stemmer = LancasterStemmer() ['{0} -> {1}'.format(w, stemmer.stem(w)) for w in words] # + [markdown] colab_type="text" id="Jo6VCDD46bF9" # After comparing the output from the Lancaster Stemmer and that from the Porter Stemmer, you might think that # the Lancaster Stemmer could be a bit more aggressive than the Porter Stemmer, since it gets 'agreement' &#8594; 'agr' and 'derivational' &#8594; 'der'. # At the same time, it seems that the Lancaster Stemmer can handle words like 'parties' and 'carrying' quite well. # # Now let's try the Snowball Stemmer. # The version in NLTK is available in 15 languages. # Different from the previous two stemmers, you need to specify which language the Snowball Stemmer will be applied to in its class constructor. # It works in a similar way to the Porter Stemmer. # + colab={} colab_type="code" id="FBV_FV7y6bGR" from nltk.stem import SnowballStemmer stemmer = SnowballStemmer('english') ['{0} -> {1}'.format(w, stemmer.stem(w)) for w in words] # + [markdown] colab_type="text" id="hpZc2ZM26bGa" # A stemmer usually resorts to language-specific rules. # Different stemmers implementing different rules and behave differently, # as shown above. # The use of inflection and derivation is very complex in English. # There might not exist a set of rules that can cover all the cases. # Therefore, the stemmers that you have played will always generate some out-of-vocabulary words. # # Rather than using a stemmer, you can use a lemmatizer that utilises # more information about the language to accurately identify the lemma # for each word. 
# As pointed out in "**Stemming and lemmatization**" (Please read the related Reading Materials on the Part 4), # > Stemmers use language-specific rules, but they require less knowledge than a lemmatizer, which needs a complete vocabulary and morphological analysis to correctly lemmatize words # # The WordNet lemmatizer implemented in NLTK is based on WordNet's built-in morphologic function, and returns the input word unchanged if it cannot be found in WordNet, which sounds more reasonable # than just chopping off prefixes and suffixes. In NLTK, you can use it in the following way: # + colab={} colab_type="code" id="FIGTEBI16bGf" from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() ['{0} -> {1}'.format(w, lemmatizer.lemmatize(w)) for w in words] # + [markdown] colab_type="text" id="kzbZuj8G6bG5" # It is a bit strange that the lemmatizer did nothing to nearly all the words, except for 'watches', 'parties' # However, if we specify the POS tag of each word, what will happen? # Let try a couple of words in our list. # + colab={} colab_type="code" id="gCUQdQ7k6bHE" lemmatizer.lemmatize('dying', pos='v') # + colab={} colab_type="code" id="Fkb5sN6g6bHd" lemmatizer.lemmatize('meeting', pos='v') # + colab={} colab_type="code" id="sEtrMIGt6bIK" lemmatizer.lemmatize('meeting', pos='n') # + colab={} colab_type="code" id="SFbhdbxD6bId" lemmatizer.lemmatize('wetter', pos='a') # + colab={} colab_type="code" id="_54mOzQc6bIl" lemmatizer.lemmatize('fattest', pos='a') # + [markdown] colab_type="text" id="RJl_Da4Z6bIz" # If we know the POS tags of the words, the WordNet Lemmatizer can accurately identify the corresponding lemmas. # For example, the word 'meeting' with different POS tag, the WordNet Lemmatizer gives you different lemmas. # Without giving the POS tags, it uses noun as default. # # Both stemming and lemmatization can significantly reduce the number of words in a vocabulary. 
# In other words, the downstream text analysis tools can benefit from them by saving running time # and memory space. In contrast, can stemming and lemmatization improve the performance # of those tools? It is a quite arguable question. # As pointed out in [4], stemming and lemmatization can increase recall but harm precision in information # retrieval. Researchers have also found that classifying English document tasks often do not gain # from stemming and lemmatization. # However, it might not be the case when we change our language to something rather than English, for example, German. # + [markdown] colab_type="text" id="2Pfsl5v46bI6" # ### 2.5. Sentence Segmentation # # Sentence segmentation is also known as sentence boundary disambiguation or sentence boundary detection. # The following is the Wikipedia definition of sentence boundary disambiguation: # >Sentence boundary disambiguation (SBD), also known as sentence breaking, is the problem in natural language processing of deciding where sentences begin and end. Often natural language processing tools require their input to be divided into sentences for a number of reasons. However sentence boundary identification is challenging because punctuation marks are often ambiguous. For example, a period may denote an abbreviation, decimal point, an ellipsis, or an email address - not the end of a sentence. About 47% of the periods in the Wall Street Journal corpus denote abbreviations. As well, question marks and exclamation marks may appear in embedded quotations, emoticons, computer code, and slang. # # SBD is one of the essential problems for many NLP tasks, like Parsing, Information Extraction, Machine Translation, and Document Summarizations. # The accuracy of the SBD system will directly affect the performance of these applications. # # Sentences are the basic textual unit immediately above the word and phrase. # So what is a sentence? Is something ending with one of the following punctuations ".", "!", "?"? 
# Does a period always indicate sentence boundaries? # For English texts, it is almost as easy as finding every occurrence of those punctuations. # However, some periods occur as part of abbreviations, monetary numerals and percentages, as we # have discussed in sections 1.2 and 1.3. # Although you can use a few heuristic rules to correctly # identify the majority of sentence boundaries, SBD is much more complex that we can expect, # please read section 4.2.4 of the book, 'Corpus-Based Work' refered on the Part 4 Reading Materials, and watch a Youtube video on [Sentence segmentation](https://www.youtube.com/watch?v=9LXq3oQEEIA). # discussing more advanced techniques for SBD goes beyond our scope. # Instead, we will show you some sentence segmentation tools implemented in NLTK. # Please also note that there are other tools or packages containing a sentence tokenizer, # for example, Apache OpenNLP, Stanford NLP toolkit, and so on. # # The NLTK's [Punkt Sentence Tokenizer](http://www.nltk.org/api/nltk.tokenize.html) was designed to split # text into sentences "*by using an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences.*” It contains a pre-trained sentence tokenizer for English. # Let's test it out with a couple of examples extracted from the book, called "<NAME>", on Project Gutenberg, by # <NAME>. # First construct a pre-trained English sentence tokenizer, # + colab={} colab_type="code" id="N0JP3rNx6bJD" import nltk.data sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') # + [markdown] colab_type="text" id="gbC3vCn76bJQ" # Following the intruction on the official website of Punkt Sentence Tokenizer, we tokenize two snippets extracted # from "<NAME>": # + colab={} colab_type="code" id="sPakoHAG6bJY" text1 = '''And so it turned out; Mr. <NAME> being from home, but leaving Mrs. Hussey entirely competent to attend to all his affairs. 
Upon making known our desires for a supper and a bed, <NAME>, postponing further scolding for the present, ushered us into a little room, and seating us at a table spread with the relics of a recently concluded repast, turned round to us and said—"Clam or Cod?"''' #('\n-----\n' is used to wrap the sentences after the stripped results, it is useful for #reading the processed the text) print('\n-----\n'.join(sent_detector.tokenize(text1.strip()))) # + colab={} colab_type="code" id="3Gpu_ug56bJh" text2 = '''A clam for supper? a cold clam; is THAT what you mean, <NAME>?" says I, "but that's a rather cold and clammy reception in the winter time, ain't it, Mrs. Hussey?"''' print('\n-----\n'.join(sent_detector.tokenize(text2.strip()))) # + [markdown] colab_type="text" id="9SoAsA-26bJ0" # You can also use `sent_tokenize`, an instance of Punkt Sentence Tokenizer. # This instance has already been trained on and works well for many European languages. # ```python # from nltk.tokenize import sent_tokenize # sent_tokenize(text1) # ``` # You should get similar outputs as above. # # Comparing the two results we notice that the sentence tokenizer has troubles in recognizing abbreviations. # It got "Mrs." right in the first snippet but not the second. Regarding this type of issues, please read a blog post on sentence tokenizer , just click the 'Testing out the NLTK sentence tokenizer' on the Part 4 Reading Materials. # * * * # + [markdown] colab_type="text" id="cqJkbYlm6bJ3" # ## Part 3. Sumary # # In this chapter we have covered the fundamentals of text pre-processing. # You have learnt how to access different text data, and how to carry out # the following basic text pre-processing steps: # * Tokenization # * Case normalization # * Stopping # * Stemming and lemmatization # * Sentence segmentation # # Now you should be able to perform those pre-processing tasks on a new corpus according # to requirements of different text analysis tasks. 
# We would like to point out that besides NLTK, there are other NLP tools with mixed quality, which can be used to process text data. For example, [the Stanford NLP Group](http://nlp.stanford.edu/software/) provides a list of tools for parsing, POS tagging, Named Entity Recognition (NER), word segmentation, tokenization, etc.;
# and [Mallet](http://mallet.cs.umass.edu/) is a Java-based package for statistical natural language processing.
# * * *
#
# ## Part 4. Reading Materials
#
# 1. "[Tokenization](http://nlp.stanford.edu/IR-book/html/htmledition/tokenization-1.html)" 📖 .
# 2. "[Processing Raw Text](http://www.nltk.org/book_1ed/ch03.html)", chapter 3
# of "Natural Language Processing with Python".
# 3. "[The Art of Tokenization](https://www.ibm.com/developerworks/community/blogs/nlp/entry/tokenization?lang=en)": An IBM blog on tokenization. It gives a detailed discussion about word tokenization and its challenges 📖 .
# 4. "[Stemming and lemmatization](http://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html)" 📖 .
# 5. "[Dropping common terms: stop words](http://nlp.stanford.edu/IR-book/html/htmledition/dropping-common-terms-stop-words-1.html)" 📖 .
# 6. "[Corpus-Based Work](https://www.deakin.edu.au/library)", Chapter 4 of "Foundations of Statistical Natural Language Processing" by <NAME> 📖 .
# 7. "[Testing out the NLTK sentence tokenizer](http://www.robincamille.com/2012-02-18-nltk-sentence-tokenizer/)"
# 8. "[Accessing Text Corpora and Lexical Resources](http://www.nltk.org/book/ch02.html)": Chapter 2 of "Natural Language Processing with Python" by <NAME>, <NAME> & <NAME> 📖 .
# 9. "[Corpus Readers](http://www.nltk.org/howto/corpus.html#tagged-corpora)": An NLTK tutorial on accessing the contents of a diverse set of corpora.
#
Jupyter/SIT742P04A-TextPreprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# **<NAME>, EE**
#
# **Due: 3/28/2017**
#
#
# # Two-Transmission-Link Queueing System Simulator and Output
#
# ***
#
# ## Code

# + deletable=true editable=true
# %matplotlib inline
from pylab import *
import numpy as np
from collections import deque

# + [markdown] deletable=true editable=true
# This section of code implements a queue data type for use in my simulation. It can enqueue which adds an item to the back of the queue and is FIFO for dequeueing. Doing it this way closely represents a real queue and lets me easily track objects in the queue.

# + deletable=true editable=true
class Queue:
    """A queue for use in simulating M/M/1/k.

    Attributes:
        k (int): Maximum customers allowed in the system.
        departures (ndarray): A sample of departure intervals.
        queue (deque): Holds one entry per queued packet (its arrival time).
        dropped (int): Number of items dropped because queue was full.
        served (int): Number of items served from queue.
    """

    def __init__(self, k, mu, departures):
        """Forms a queue.

        Args:
            k (int): Maximum customers allowed in the system.
            mu (float): Rate out of the queue.
            departures (int): Number of departure intervals to generate.
        """
        self.k = k
        # Generates the departure intervals
        # according to an exponential distribution (mean service time 1/mu).
        self.departures = exponential(1/mu, departures)
        self.queue = deque([], k)
        self.dropped = 0
        self.served = 0

    def empty(self):
        """Checks if the queue is empty.

        Returns:
            True if empty, False otherwise.
        """
        # Fixed: use `==` rather than `is` -- identity comparison of ints
        # only works by accident via CPython's small-integer cache.
        return len(self.queue) == 0

    def is_full(self):
        """Checks if the queue is full.

        Returns:
            True if full, False otherwise.
        """
        # Fixed: `==` instead of `is` (see empty() above).
        return len(self.queue) == self.k

    def enqueue(self, item):
        """Adds an item to end of the queue.

        Args:
            item: An item to add to the queue.
        """
        if self.is_full():
            # A full M/M/1/k system blocks the arrival instead of queueing it.
            self.dropped += 1
        else:
            self.queue.append(item)

    def dequeue(self):
        """Removes the first item from the queue.

        Returns:
            The oldest queued item, or None when the queue is empty.
        """
        if not self.empty():
            self.served += 1
            return self.queue.popleft()
        return None

    def get_size(self):
        """Get the size of the queue.

        Returns:
            An integer for the size of the queue.
        """
        return len(self.queue)

# + [markdown] deletable=true editable=true
# This next piece of code performs the actual simulation and keeps track of everything. My method generates some arrival interval according to an exponential distribution with relation to $\lambda$. I then check if the time is 0 to handle an edge case by immediately adding to the queue. To do that I need to generate a uniformly distributed random variable to compare with $\phi$ and determine which queue to use. I then add that arrival time to the queue for use later and to simulate a packet entering. With that out of the way I then go through a series of checks to make sure packets are dequeueing according to exponential service times with relation to $\mu$. If anything should be dequeued I can then check its arrival time that I stored when dequeueing with the current time to determine delay while also incrementing a counter to check the number of packets serviced. Next, I'll have to enqueue whatever arrived after all dequeues finished, if any. When doing this I go through an important check. I will increment a dropped counter if the queue I'm adding to is full which helps me determine blocking. If it successfully queues I can then increment a counter which checks how full the queue is at certain points during the runtime. This counter also decreases during dequeues, and it helps me determine the average number of packets in the queue.
#
#
# The latter portion of the code block contains equations and methods to output the expected metrics and the ones simulated in an easily readable format.
# + deletable=true editable=true
def simulation(lamb, mu, k, phi, samples):
    """Used to run a simulation of an M/M/1/k network.

    Args:
        lamb (float): The rate into the entire network.
        mu (float): The rate out of the two queues in the network.
        k (int): Maximum number of customers the two queues can handle.
        phi (float): Probability an arrival goes to the first queue.
        samples (int): Number of packets to sample.
    """
    queue1 = Queue(k, mu, samples*2)
    queue2 = Queue(k, mu, samples*2)

    # Counts arrivals to each node.
    queue1_arrivals, queue2_arrivals = 0, 0

    # Count time passed.
    time = 0

    # Indexes into each queue's pre-generated departure-interval array.
    i, j = 0, 0

    # Lists for obtaining average number of packets and time in queue.
    queue1_size, queue2_size = [], []
    queue1_time, queue2_time = [0], [0]

    # Iterate over entire sample of arrivals.
    while queue1.served < samples and queue2.served < samples:
        # Generate an interarrival time.
        arrivals = exponential(1/lamb)

        # Idle state, ignores output rates.
        # Fixed: `time == 0` instead of `time is 0` (identity comparison of a
        # number only works by accident for CPython's cached small ints).
        if time == 0:
            if random() < phi:
                queue1_arrivals += 1
                queue1.enqueue(0)
            else:
                queue2_arrivals += 1
                queue2.enqueue(0)

            # Increments time by one arrival interval.
            time += arrivals
        else:
            # Dequeues any packets that should have been processed
            # before the next arrival.
            while queue1.departures[i] <= time:
                t = queue1.dequeue()
                if t is not None:
                    queue1_time.append(queue1.departures[i] - t)

                # Sums the intervals to compare with time since arrival.
                queue1.departures[i+1] += queue1.departures[i]
                i += 1
                # Skip the first 1000 served packets (warm-up period).
                if queue1.served > 1000:
                    queue1_size.append(queue1.get_size())

            while queue2.departures[j] <= time:
                t = queue2.dequeue()
                if t is not None:
                    queue2_time.append(queue2.departures[j] - t)

                queue2.departures[j+1] += queue2.departures[j]
                j += 1
                if queue2.served > 1000:
                    queue2_size.append(queue2.get_size())

            # Splits arrivals based on phi probability.
            if random() < phi:
                queue1_arrivals += 1
                queue1.enqueue(time)
            else:
                queue2_arrivals += 1
                queue2.enqueue(time)
            if queue1.served > 1000 or queue2.served > 1000:
                queue1_size.append(queue1.get_size())
                queue2_size.append(queue2.get_size())

            # Increments time by one arrival interval.
            time += arrivals

    # Print the metrics for the queues.
    print_metrics(lamb, mu, k, phi, samples, time,
                  queue1, queue1_arrivals, queue1_size, queue1_time,
                  queue2, queue2_arrivals, queue2_size, queue2_time)


def print_metrics(lamb, mu, k, phi, samples, time,
                  queue1, queue1_arrivals, queue1_size, queue1_time,
                  queue2, queue2_arrivals, queue2_size, queue2_time):
    """Prints the metrics for the system, queue1, and queue2.

    Args:
        lamb (float): The rate into the entire network.
        mu (float): The rate out of the two queues in the network.
        k (int): Maximum number of customers the two queues can handle.
        phi (float): Probability an arrival goes to the first queue.
        samples (int): Number of packets sampled.
        time: The runtime of the system.
        queue1 (Queue): The first Queue object.
        queue1_arrivals: The number of arrivals into the system.
        queue1_size (list): A list of the number of items in queue at different times.
        queue1_time (list): A list of the delay for each packet that left the system.
        queue2 (Queue): The second Queue object.
        queue2_arrivals: The number of arrivals into the system.
        queue2_size (list): A list of the number of items in queue at different times.
        queue2_time (list): A list of the delay for each packet that left the system.
    """
    # Calculate and print results.
    # (Unused system_metrics/queue1_metrics/queue2_metrics dicts from an
    # earlier draft were removed -- they were never read.)
    # Queue 1.
    # Blocking probability.
    e_pb1 = eval_blocking(lamb*phi, mu, k)
    pb1 = queue1.dropped/queue1_arrivals
    # Average delay.
    e_et1 = eval_delay(lamb*phi, mu, k, e_pb1)
    et1 = average(queue1_time)
    # Average number of packets in system (finite-buffer M/M/1/k formula).
    rho = phi*lamb/mu
    e_n1 = (rho/(1-rho))-((k+1)*rho**(k+1)/(1-rho**(k+1)))
    n1 = average(queue1_size)
    # Throughput (Little's law: N = throughput * T).
    e_thru1 = e_n1/e_et1
    thru1 = n1/et1

    # Queue 2.
    # Blocking probability.
    e_pb2 = eval_blocking(lamb*(1-phi), mu, k)
    pb2 = queue2.dropped/queue2_arrivals
    # Average delay.
    e_et2 = eval_delay(lamb*(1-phi), mu, k, e_pb2)
    et2 = average(queue2_time)
    # Average number of packets in system.
    rho = (1-phi)*lamb/mu
    e_n2 = (rho/(1-rho))-((k+1)*rho**(k+1)/(1-rho**(k+1)))
    n2 = average(queue2_size)
    # Throughput.
    e_thru2 = e_n2/e_et2
    thru2 = n2/et2

    # Whole system.
    # Blocking probability.
    e_pb = phi*e_pb1 + (1-phi)*e_pb2
    pb = (queue1.dropped+queue2.dropped)/(queue1_arrivals + queue2_arrivals)
    # Average number of packets in system.
    e_n = e_n1 + e_n2
    n = n1 + n2
    # Average delay.
    e_et = e_n/(lamb*(1-e_pb))
    et = average(queue1_time+queue2_time)
    # Throughput.
    e_thru = e_thru1 + e_thru2
    thru = n/et

    print("\nSimulation of two M/M/1/{0} queues with phi={1}:\n".format(k,phi))

    # Whole system.
    print("\tSystem:")
    print("\t\tBlocking probability:\n\t\t\tExpected: ", e_pb)
    print("\t\t\tSimulated: ", pb)
    print("\t\tAverage delay in seconds:\n\t\t\tExpected: ", e_et)
    print("\t\t\tSimulated: ", et)
    print("\t\tAverage number of packets:\n\t\t\tExpected: ", e_n)
    print("\t\t\tSimulated: ", n)
    print("\t\tThroughput in packets/second:\n\t\t\tExpected: ", e_thru)
    print("\t\t\tSimulated: ", thru)

    # Queue 1.
    print("\n\tQueue 1:")
    print("\t\tBlocking probability:\n\t\t\tExpected: ", e_pb1)
    print("\t\t\tSimulated: ", pb1)
    print("\t\tAverage delay in seconds:\n\t\t\tExpected: ", e_et1)
    print("\t\t\tSimulated: ", et1)
    print("\t\tAverage number of packets:\n\t\t\tExpected: ", e_n1)
    print("\t\t\tSimulated: ", n1)
    print("\t\tThroughput in packets/second:\n\t\t\tExpected: ", e_thru1)
    print("\t\t\tSimulated: ", thru1)

    # Queue 2.
    print("\n\tQueue 2:")
    print("\t\tBlocking probability:\n\t\t\tExpected: ", e_pb2)
    print("\t\t\tSimulated: ", pb2)
    print("\t\tAverage delay in seconds:\n\t\t\tExpected: ", e_et2)
    print("\t\t\tSimulated: ", et2)
    print("\t\tAverage number of packets:\n\t\t\tExpected: ", e_n2)
    print("\t\t\tSimulated: ", n2)
    print("\t\tThroughput in packets/second:\n\t\t\tExpected: ", e_thru2)
    print("\t\t\tSimulated: ", thru2)

    f, (ax1, ax2) = subplots(1, 2, sharey=True)
    f.suptitle("Distribution of Packets in Queue as a Factor of Runtime")
    ax1.hist(queue1_size)
    ax1.set_title("Queue 1")
    #ax1.fill_between(range(0,len(queue1_size)), queue1_size)
    ax1.tick_params(
        axis='y',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom='off',      # ticks along the bottom edge are off
        top='off',         # ticks along the top edge are off
        labelleft='off')   # labels along the bottom edge are off
    ax1.set_ylabel("Runtime")
    ax1.set_xlabel("Packets in Queue")
    ax2.hist(queue2_size)
    ax2.set_title("Queue 2")
    #ax2.fill_between(range(0,len(queue2_size)), queue2_size)
    ax2.tick_params(
        axis='y',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom='off',      # ticks along the bottom edge are off
        top='off',         # ticks along the top edge are off
        labelleft='off')   # labels along the bottom edge are off
    ax2.set_ylabel("Runtime")
    ax2.set_xlabel("Packets in Queue")
    show()


def eval_blocking(lamb, mu, k):
    """Finds the blocking probability of a queue.

    Args:
        lamb (float): The rate into the queue.
        mu (float): The rate out of the queue.
        k (int): Maximum number of customers able to be in the queue.

    Returns:
        The analytical M/M/1/k blocking probability.
    """
    rho = lamb/mu
    return rho**k*((1-rho)/(1-rho**(k+1)))


def eval_delay(lamb, mu, k, pb):
    """Finds the average delay of a queue.

    Args:
        lamb (float): The rate into the queue.
        mu (float): The rate out of the queue.
        k (int): Maximum number of customers able to be in the queue.
        pb (float): The blocking probability for the queue.

    Returns:
        The analytical average delay (Little's law with the
        effective, non-blocked arrival rate).
    """
    rho = lamb/mu
    en = (rho/(1-rho))-((k+1)*rho**(k+1)/(1-rho**(k+1)))
    return en/(lamb*(1-pb))


# + [markdown] deletable=true editable=true
# ***
# ## Results
# ### Configuration 1:
# * $\mu1 = 5\,packets/sec$
# * $\mu2 = 5\,packets/sec$
# * $\lambda = 8\,packets/sec$
# * $buffer = 20$
# * $\phi = 0.4, 0.5, 0.6$

# + deletable=true editable=true
simulation(8, 5, 21, 0.4, 100000)

simulation(8, 5, 21, 0.5, 100000)

simulation(8, 5, 21, 0.6, 100000)

# + [markdown] deletable=true editable=true
# ### Configuration 2:
# * $\mu1 = 5\,packets/sec$
# * $\mu2 = 5\,packets/sec$
# * $\lambda = 8\,packets/sec$
# * $buffer = 5$
# * $\phi = 0.4, 0.5, 0.6$

# + deletable=true editable=true
simulation(8, 5, 6, 0.4, 100000)

simulation(8, 5, 6, 0.5, 100000)

simulation(8, 5, 6, 0.6, 100000)

# + [markdown] deletable=true editable=true
# ***
#
# ## Conclusions
#
# The value of $\phi$ has an interesting impact on the system's behavior. As can be seen from the graphs, if $\phi$ is not equal then there is a queue being underutilized with too much runtime spent with too few packets in queue. This affects throughput negatively as can be seen from the results. Buffer size also plays an important role as it changes the probability that a packet will be blocked from entering the queue. A decrease in the buffer size will also negatively impact the throughput as the queue will not be able to utilize the full capability of the service rate.
#
#
# From the results of the simulations, the system with a buffer size of 20 and a $\phi$ of 0.5 is the best configuration. This is obvious from the observed results especially from the viewpoint of throughput. A larger buffer will allow better utilization of the service time with fewer blockings happening, and a $\phi$ of 0.5 will make sure a queue is not being underutilized.
# This at first seemed contrary because, while this queue has a higher throughput, its delay was actually worse than that of the queue with a buffer size of 5. However, that is because that queue can only hold 6 packets. The queue will have less delay because at most there will only be 5 packets ahead of any arrival, reducing the delay, but the queue itself is actually unreliable because it is blocking more packets. It will block packets more often, and it won't be able to fully utilize its service time the way the size-20 buffer can.
ECE 461 Mini Project #1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Matrix Math and NumPy Refresher # ## Data dimensions # - Scalars # - Zero dimension # - Vectors # - One dimension: length # - Row vectors # - Column vectors # - Matrix # - Two dimensions: rows and columns # - Tensors # - N dimensions # ## NumPy # - Provide fast alternatives to math operations in Python. # - Work efficiently with groups of numbers. # + pycharm={"is_executing": false, "name": "#%%\n"} import numpy as np # - # ## `ndarray` # - Represent any data types: scalars, vectors, matrics, or tensors. # - Data types: signed or unsigned. For example: `uint8`, `int16`. # - Every item in the array must have the same type. # + pycharm={"is_executing": false, "name": "#%%\n"} s = np.array(5) print(s.shape) # zero dimension: () x = s + 3 print(x) # 8 # + [markdown] pycharm={"name": "#%% md\n"} # ## Vectors # + pycharm={"is_executing": false, "name": "#%%\n"} v = np.array([1, 2, 3]) print(v.shape) # one dimension: (3,) print(v[1]) # 2 print(v[1:]) # [2, 3] # + [markdown] pycharm={"name": "#%% md\n"} # ## Matrices # + pycharm={"is_executing": false, "name": "#%%\n"} m = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9], ]) print(m) # [[1 2 3] # [4 5 6] # [7 8 9]] print(m.shape) # (3, 3) # + [markdown] pycharm={"is_executing": false, "name": "#%% md\n"} # ## Tensors # + pycharm={"is_executing": false, "name": "#%%\n"} t = np.array([[[[1],[2]],[[3],[4]],[[5],[6]]],[[[7],[8]],\ [[9],[10]],[[11],[12]]],[[[13],[14]],[[15],[16]],[[17],[17]]]]) print(t.shape) # (3, 3, 2, 1) # + [markdown] pycharm={"name": "#%% md\n"} # ## Changing shapes # + pycharm={"is_executing": false, "name": "#%%\n"} # vector v = np.array([1, 2, 3, 4]) print(v.shape) # (4,) # reshape to 1 x 4 matrix x = v.reshape(1, 4) print(x) # [[1 2 3 4]] print(x.shape) # (1, 4) # look for all the items 
of v # and add a new dimension per row x = v[None, :] print(x) # [[1 2 3 4]] print(x.shape) # (1, 4) # reshape to 4 x 1 matrix y = v.reshape(4, 1) print(y) # [[1] # [2] # [3] # [4]] print(y.shape) # (4, 1) # look for all the items of v # and add a new dimension per column y = v[:, None] print(y) # [[1] # [2] # [3] # [4]] print(y.shape) # (4, 1) # + [markdown] pycharm={"name": "#%% md\n"} # ## Element-wise matrix operations # - Treat items in the matrix individually and perform the same operation on each one. # - Operations between a scalar and a matrix. # - Operations between two matrices. # - They must have the same shape. # + pycharm={"is_executing": false, "name": "#%%\n"} # scalar operations values = np.array([1, 2, 3]) values += 5 print(values) # [6 7 8] # + pycharm={"is_executing": false, "name": "#%%\n"} # matrix operations a = np.array([[1,3],[5,7]]) print(a) # ([[1, 3], # [5, 7]]) b = np.array([[2,4],[6,8]]) print(b) # ([[2, 4], # [6, 8]]) print(a + b) # ([[ 3, 7], # [11, 15]]) # + [markdown] pycharm={"name": "#%% md\n"} # ## Data representation # # - Row representation # - Each row represents one entity. # - Each column represents one feature. # # | Person | Height | Weight | Age | # | ---- | ---- | ---- | ---- | # | 1 | 175 | 75 | 24 | # | 2 | 203 | 102 | 31 | # | 3 | 217 | 94 | 39 | # # - Column representation # - Each column represents one entity. # - Each row represents one feature. # # | Feature | Person 1 | Person 2 | Person 3 | # | ---- | ---- | --- | ---- | # | Height | 175 | 203 | 217 | # | Weight | 75 | 102 | 94 | # | Age | 24 | 31 | 39 | # + [markdown] pycharm={"name": "#%% md\n"} # ## Matrix product # - Take a series of dot product between every row in the left matrix and every column in the right matrix. # - Dot product # - Algebraically, the dot product is the sum of the products of the corresponding entries of the two sequences of numbers. 
# - Geometrically, it is the product of the Euclidean magnitudes of the two vectors and the cosine of the angle between them. # - The number of columns in the left matrix must equal the number of rows in the right matrix. # - `(x, y) x (y, z) => (x, z)` # - `np.dot` is equivalent to `np.matmul` only if both matrices are 2D. Do not use `np.dot` for matrix multiplication. # - Always multiple a row matrix with a column matrix. # - Each dot production combines differrent features from each data entity. # - E.g. Combine height, weight, and age from person 1. # + pycharm={"is_executing": false, "name": "#%%\n"} a = np.array([[1,2,3,4],[5,6,7,8]]) print(a.shape) # (2, 4) b = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]) print(b.shape) # (4, 3) c = np.matmul(a, b) print(c.shape) # (2, 3) # + [markdown] pycharm={"name": "#%% md\n"} # ## Matrix transpose # # - When multiplying two row matrices: # - Transpose the second matrix (which becomes a column matrix). # - The result is a row matrix. # - Each row is the combined features (with different apporaches) about one entity. # - Transpose the first matrix (which becomes a column matrix) and swap the order. # - The result is a column matrix. # - Each column is the combined features (with different approaches) about one entity. # - The answers of the two solutions are transposes of each other. # - When multiplying two column matrices: # - The above transposing operations will not work. # - If multiplying the first with the transposed second matrix: # - Each dot production combines the same feature from different entities, rather than combining features for each entity (which is probably intended). # - E.g. combine height from person 1 with height from person 2. 
# + pycharm={"is_executing": false, "name": "#%%\n"}
# row data
# one row per entity with four features each
inputs = np.array([ \
    [-0.27, 0.45, 0.64, 0.31], \
    [1.04, 2.27, -0.8, -1.0]])

# row data
# three different ways of combining the four features
weights = np.array([ \
    [0.02, 0.001, -0.03, 0.036], \
    [0.04, -0.003, 0.025, 0.009], \
    [0.012, -0.045, 0.28, -0.067]])

# first approach
print(np.matmul(inputs, weights.T))
# row data: each row contains the three combined data for one entity
# [[-0.01299  0.00664  0.13494]
#  [ 0.01107  0.00579 -0.24667]]

# second approach
print(np.matmul(weights, inputs.T))
# column data: each column contains the three combined data for one entity
# [[-0.01299  0.01107]
#  [ 0.00664  0.00579]
#  [ 0.13494 -0.24667]]

# + [markdown] pycharm={"name": "#%% md\n"}
# - Transposed matrix share the same underlying data with the original matrix.
#   - `numpy` simply changes the way it indexes the original matrix.

# + pycharm={"is_executing": false, "name": "#%%\n"}
m = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(m.shape)  # (3, 4)

m_t = m.T
print(m_t.shape)  # (4, 3)

# modify one matrix will also modify the other
m_t[3][1] = 200
print(m_t)
# array([[  1,   5,   9],
#        [  2,   6,  10],
#        [  3,   7,  11],
#        [  4, 200,  12]])
print(m)
# array([[  1,   2,   3,   4],
#        [  5,   6,   7, 200],
#        [  9,  10,  11,  12]])
# -

# ## Quiz

# +
# Use the numpy library
import numpy as np


def prepare_inputs(inputs):
    """Normalise *inputs* onto [0, 1].

    Returns the raw row matrix, the min-shifted matrix, and the
    max-scaled matrix, in that order.
    """
    # A 2-dimensional ndarray built from the given 1-dimensional list.
    input_array = np.array([inputs])

    # Shift every element so the smallest value becomes zero.
    inputs_minus_min = input_array - np.min(input_array)

    # Scale the shifted values so the largest value becomes one.
    inputs_div_max = inputs_minus_min / np.max(inputs_minus_min)

    # Hand back all three intermediate arrays.
    return input_array, inputs_minus_min, inputs_div_max


def multiply_inputs(m1, m2):
    """Matrix-multiply m1 and m2 in whichever order their shapes allow.

    No transpose is taken; when neither ordering is compatible the
    function returns False instead of a product.
    """
    if m1.shape[1] == m2.shape[0]:
        product = np.matmul(m1, m2)
    elif m2.shape[1] == m1.shape[0]:
        # The reversed order lines up, so swap the operands.
        product = np.matmul(m2, m1)
    else:
        product = False
    return product


def find_mean(values):
    """Return the average of the values in the given Python list."""
    return np.mean(values)


input_array, inputs_minus_min, inputs_div_max = prepare_inputs([-1,2,7])
print("Input as Array: {}".format(input_array))
print("Input minus min: {}".format(inputs_minus_min))
print("Input  Array: {}".format(inputs_div_max))

print("Multiply 1:\n{}".format(multiply_inputs(np.array([[1,2,3],[4,5,6]]), np.array([[1],[2],[3],[4]]))))
print("Multiply 2:\n{}".format(multiply_inputs(np.array([[1,2,3],[4,5,6]]), np.array([[1],[2],[3]]))))
print("Multiply 3:\n{}".format(multiply_inputs(np.array([[1,2,3],[4,5,6]]), np.array([[1,2]]))))

print("Mean == {}".format(find_mean([1,3,4])))
01-intro/05-matrix-and-numpy.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Multiclass classification # %% [markdown] # The multiclass classification problem is a regression problem from an input $x \in {\cal X}$ to discrete labels $y\in {\cal Y}$, where ${\cal Y}$ is a discrete set of size $C$ bigger than two (for $C=2$ it is the more usual binary classification). # # Labels are encoded in a one-hot fashion, that is if $C=4$ and $y=2$, we note $\bar{y} = [0,1,0,0]$. # # The generative model for this problem consists of: # * $C$ latent functions $\mathbf{f} = [f_1,...,f_C]$ with an independent Gaussian Process prior # * a deterministic function that builds a discrete distribution $\pi(\mathbf{f}) = [\pi_1(f_1),...,\pi_C(f_C)]$ from the latents such that $\sum_c \pi_c(f_c) = 1$ # * a discrete likelihood $p(y|\mathbf{f}) = Discrete(y;\pi(\mathbf{f})) = \prod_c \pi_c(f_c)^{\bar{y}_c}$ # # A typical example of $\pi$ is the softmax function: # # $$ \pi_c (f_c) \propto \exp( f_c)$$ # # Another convenient one is the robust max: # $$ # \pi_c(\mathbf{f}) = \begin{cases} 1 - \epsilon, & \mbox{if } c = \arg \max_c f_c \\ # \epsilon /(C-1), & \mbox{ otherwise} \end{cases} # $$ # # # # # %% import numpy as np import tensorflow as tf import warnings warnings.filterwarnings('ignore') # ignore DeprecationWarnings from tensorflow import matplotlib.pyplot as plt # %matplotlib inline import gpflow from gpflow.utilities import print_summary, set_trainable from gpflow.ci_utils import ci_niter from multiclass_classification import plot_posterior_predictions, colors np.random.seed(0) # reproducibility # %% [markdown] # ## Sampling from the GP multiclass generative model # %% [markdown] # ### Declaring model parameters and input # %% # Number of functions and number of data points C = 3 N = 
100 # RBF kernel lengthscale lengthscale = 0.1 # Jitter jitter_eye = np.eye(N) * 1e-6 # Input X = np.random.rand(N, 1) # %% [markdown] # ### Sampling # %% # SquaredExponential kernel matrix kernel_se = gpflow.kernels.SquaredExponential(lengthscale=lengthscale) K = kernel_se(X) + jitter_eye # Latents prior sample f = np.random.multivariate_normal(mean=np.zeros(N), cov=K, size=(C)).T # Hard max observation Y = np.argmax(f, 1).reshape(-1,).astype(int) # One-hot encoding Y_hot = np.zeros((N, C), dtype=bool) Y_hot[np.arange(N), Y] = 1 data = (X, Y) # %% [markdown] # ### Plotting # %% plt.figure(figsize=(12, 6)) order = np.argsort(X.reshape(-1,)) for c in range(C): plt.plot(X[order], f[order, c], '.', color=colors[c], label=str(c)) plt.plot(X[order], Y_hot[order, c], '-', color=colors[c]) plt.legend() plt.xlabel('$X$') plt.ylabel('Latent (dots) and one-hot labels (lines)') plt.title('Sample from the joint $p(Y, \mathbf{f})$') plt.grid() plt.show() # %% [markdown] # ## Inference # # %% [markdown] # Inference here consists of computing the posterior distribution over the latent functions given the data $p(\mathbf{f}|Y, X)$. # # You can use different inference methods. Here we perform variational inference. # For a treatment of the multiclass classification problem using MCMC sampling, see [Markov Chain Monte Carlo (MCMC)](../advanced/mcmc.ipynb). 
# %% [markdown]
# ### Approximate inference: Sparse Variational Gaussian Process

# %% [markdown]
# #### Declaring the SVGP model (see [GPs for big data](../advanced/gps_for_big_data.ipynb))

# %%
# sum kernel: Matern32 + White
kernel = gpflow.kernels.Matern32() + gpflow.kernels.White(variance=0.01)

# Robustmax Multiclass Likelihood
invlink = gpflow.likelihoods.RobustMax(C)  # Robustmax inverse link function
# FIX: use C instead of the hard-coded literal 3, so the likelihood always
# agrees with the number of classes declared above
likelihood = gpflow.likelihoods.MultiClass(C, invlink=invlink)  # Multiclass likelihood
Z = X[::5].copy()  # inducing inputs (every 5th training point)

m = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood,
    inducing_variable=Z, num_latent=C, whiten=True, q_diag=True)

# Only train the variational parameters: freeze the White-kernel noise
# variance and the inducing input locations
set_trainable(m.kernel.kernels[1].variance, False)
set_trainable(m.inducing_variable, False)
print_summary(m, fmt='notebook')

# %% [markdown]
# #### Running inference

# %%
opt = gpflow.optimizers.Scipy()

@tf.function(autograph=False)
def objective_closure():
    # minimize the negative log marginal likelihood (ELBO)
    return - m.log_marginal_likelihood(data)

opt_logs = opt.minimize(objective_closure, m.trainable_variables, options=dict(maxiter=ci_niter(1000)))
print_summary(m, fmt='notebook')

# %%
plot_posterior_predictions(m, X, Y)
doc/source/notebooks/advanced/multiclass_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import torch
import torchvision
import torchvision.transforms as transforms

# ### Loading and normalizing CIFAR10

# +
# Define transform: to tensor, then shift each channel from [0, 1] to [-1, 1]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0)

testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=True, num_workers=0)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# +
# Visualize
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

def imshow(img):
    """Undo the [-1, 1] normalization and display a CHW tensor as an image."""
    img = img/2 + 0.5     # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()

# get some random training images
dataiter = iter(trainloader)
# FIX: use the builtin next(); the iterator's .next() method was removed
# in modern PyTorch (and never existed in Python 3 iterators generally)
images, labels = next(dataiter)

imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# -

# ### Define a Convolutional Neural Network

# +
import torch.nn as nn
import torch.nn.functional as F

class MyNet(nn.Module):
    """Small LeNet-style CNN for 32x32 RGB CIFAR10 images (10 classes)."""

    def __init__(self):
        super(MyNet, self).__init__()
        # 3 channels, 6 filters, kernel size 5x5
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16*5*5)  # flatten for the fully-connected layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # raw logits; CrossEntropyLoss applies softmax
        return x

mynet = MyNet()
# -

# ### Define a Loss function and optimizer

# +
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(mynet.parameters(), lr=0.001, momentum=0.9)
# -

# ### Train the network

for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        outputs = mynet(inputs)  # feed inputs into the model
        loss = criterion(outputs, labels)  # compute the loss
        loss.backward()  # backpropagation
        optimizer.step()  # update weights
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f'%(epoch+1, i+1, running_loss/2000))
            running_loss = 0.0  # FIX: reset with a float (was int 0) for consistency

print('Finished training...')

# Save the trained model
PATH = './cifar_mynet.pth'
torch.save(mynet.state_dict(), PATH)

# ### Test the network on the test data

# +
dataiter = iter(testloader)
images, labels = next(dataiter)  # FIX: builtin next() instead of removed .next()

# visualize image
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
# -

mynet = MyNet()
mynet.load_state_dict(torch.load(PATH))

outputs = mynet(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))

# This is not bad, we have 3/4 right answers.

correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = mynet(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (labels == predicted).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))

# According to the result, we have 5% better than chance

# +
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = mynet(images)
        _, predicted = torch.max(outputs, 1)
        c = (labels == predicted).squeeze()
        for i in range(4):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

for i in range(10):
    print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
# -
torch-training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from scipy import stats import numpy as np import pandas as pd x=[10,12,20,21,22,24,18,15] stats.ttest_1samp(x,15) # Ice cream demand example x=[13,8,10,10,8,9,10,11,6,8,12,11,11,12,10,12,7,10,11,8] stats.ttest_1samp(x,10) 0.7239703579964252/2 #SINCE THIS IS A ONE TAILED TEST # the above p value is sum of areas of both sides stats.t.cdf(-0.384,19) stats.t.ppf(0.05,19) # City Traffic Police from statsmodels.stats.proportion import proportions_ztest
Week 4 hypothesis testing 3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from sklearn.datasets import load_digits
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns; sns.set()

digits = load_digits()

# Show the first 64 digit images in an 8x8 grid with their labels
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
    ax.text(0, 7, str(digits.target[i]))

Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target, random_state=0)

model = RandomForestClassifier(n_estimators=1000)
model.fit(Xtrain, ytrain)
ypred = model.predict(Xtest)

# FIX: classification_report's signature is (y_true, y_pred); the arguments
# were swapped, which silently exchanges precision and recall per class.
print(classification_report(ytest, ypred))

mat = confusion_matrix(ytest, ypred)
# transpose so true labels run along x and predictions along y, matching the axis labels
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
supervised_learning/projects/digits_classification_rf_sklear_dataset/digits_classification_rf_sklear_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/the-redlord/Space-Radio-Signal-Classification_keras/blob/master/RadioClassification_keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="SSSobzxNFtAw" colab_type="text"
# # Downloading Dataset and required Libs (Colab Only)

# + id="-QTLW5kKFtAz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 318} outputId="01cc94c8-8f3f-4f86-f73e-b0abac92c4cc"
# !wget https://github.com/the-redlord/Space-Radio-Signal-Classification_keras/raw/master/dataset.rar

# + id="mqKgdaHxFtBA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="6a40dd7c-1514-4364-88da-df1e6605f8d0"
# !ls

# + id="7bvGMbPVFtBI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="f4e51c73-0a9e-4acf-eb8e-a2dcd4a26732"
# !unrar x -r ./dataset.rar

# + id="1Uf5kSykFtBQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 545} outputId="8aa6be84-fa1b-42e8-d959-c161bc4d9dcb"
# !pip install livelossplot

# + [markdown] colab_type="text" id="KeHZOb76Eoao"
# # Classify Radio Signals from Outer Space with Keras

# + [markdown] id="qZ4L7MYqFtBY" colab_type="text"
# ![](Allen_Telescope.jpg)
# [Allen Telescope Array](https://flickr.com/photos/93452909@N00/5656086917) by [brewbooks](https://www.flickr.com/people/93452909@N00) is licensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/)

# + [markdown] colab_type="text" id="fB2voc0SFB0W"
# ## Task 1: Import Libraries

# + colab_type="code" id="2pO03vSBEc6D" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="6869026f-86c3-4cac-fe1a-9a4acf5b4e5d"
from livelossplot.inputs.tf_keras import PlotLossesCallback
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import numpy as np
np.random.seed(42)  # reproducibility
import warnings;warnings.simplefilter('ignore')
# %matplotlib inline

print('Tensorflow version:', tf.__version__)
tf.config.list_physical_devices('GPU')

# + [markdown] colab_type="text" id="lYt4AvyeFJPn"
# ## Task 2: Load and Preprocess SETI Data

# + colab_type="code" id="oDubuBkZEgaE" colab={}
# each row of images.csv is one flattened 64x128 spectrogram
train_images = pd.read_csv('dataset/train/images.csv',header=None)
train_labels = pd.read_csv('dataset/train/labels.csv',header=None)

val_images = pd.read_csv('dataset/validation/images.csv',header=None)
val_labels = pd.read_csv('dataset/validation/labels.csv',header=None)

# + colab_type="code" id="VVISghQ3Egcd" colab={"base_uri": "https://localhost:8080/", "height": 193} outputId="2301a6a2-8010-4de0-d963-07d9f63133f4"
train_images.head(3)

# + colab_type="code" id="BCQy-0ZsEgfa" colab={"base_uri": "https://localhost:8080/", "height": 143} outputId="23b8b796-b49a-4f4b-c3d3-6a4c011c7cff"
train_labels.head(3)

# + colab_type="code" id="DfJSg_p8Egh4" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="f8fe5aab-1026-478d-b690-4a872b42f040"
print("Training set shape: ", train_images.shape, train_labels.shape)
print('Validation set shape:',val_images.shape, val_labels.shape)

# + colab_type="code" id="FJGhlNb2Egj7" colab={}
# reshape the data to spectograms (images): (samples, height, width, channels)
x_train = train_images.values.reshape(3200,64,128,1)
x_val = val_images.values.reshape(800,64,128,1)

y_train = train_labels.values
y_val = val_labels.values

# + [markdown] colab_type="text" id="pAC1DPXrF8oS"
# ## Task 3: Plot 2D Spectrograms

# + colab_type="code" id="PTOmtFOaEgpN" colab={"base_uri": "https://localhost:8080/", "height": 133} outputId="740096ee-41f0-420e-a019-b4e48d8e7200"
# show three randomly chosen training spectrograms
plt.figure(0, figsize=(12,12))
for i in range(1,4):
    plt.subplot(1,3,i)
    img = np.squeeze(x_train[np.random.randint(0, x_train.shape[0])])
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img,cmap='gray')

# + colab_type="code" id="M2GiQa6QGg-B" colab={"base_uri": "https://localhost:8080/", "height": 237} outputId="a1bb8272-647e-4cfa-9200-272d2155657e"
plt.imshow(np.squeeze(x_train[5]))

# + colab_type="code" id="-uVTyriQGqrR" colab={}


# + [markdown] colab_type="text" id="jqJJhvGkGqz1"
# ## Task 4: Create Training and Validation Data Generators

# + colab_type="code" id="tP6I_MnnGwW-" colab={}
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# image augmentation
datagen_train = ImageDataGenerator(horizontal_flip=True)
datagen_train.fit(x_train)

# NOTE(review): the validation generator also applies horizontal_flip
# augmentation — validation data is normally left unaugmented; confirm
# this is intentional
datagen_val = ImageDataGenerator(horizontal_flip=True)
datagen_val.fit(x_val)

# + colab_type="code" id="48xSPckOGwoV" colab={}


# + [markdown] colab_type="text" id="-UEvdu2bHKEQ"
# ## Task 5: Creating the CNN Model

# + colab_type="code" id="tqpC-6NQGwrQ" colab={}
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D

from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint

# + colab_type="code" id="h5dB9u7nHqG0" colab={}
# Initialising the CNN
model = Sequential()

# 1st Convolution
model.add(Input(shape=(64,128,1)))
model.add(Conv2D(32,(5,5),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))

# 2nd Convolution layer
model.add(Conv2D(64,(5,5),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))

# Flattening
model.add(Flatten())

# Fully connected layer
model.add(Dense(1024))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.4))

# output layer: 4 signal classes
model.add(Dense(4,activation='softmax'))

# + colab_type="code" id="sXSHxXI4HqaI" colab={}


# + [markdown] colab_type="text" id="LmXdhu-6H7Q5"
# ## Task 6: Learning Rate Scheduling and Compile the Model

# + colab_type="code" id="JNEKTceqGwuX" colab={}
# exponentially decay the learning rate every 5 optimizer steps
initial_learning_rate = 0.005
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate = initial_learning_rate,
    decay_steps = 5,
    decay_rate = 0.96,
    staircase=True
)

opt = Adam(learning_rate=lr_schedule)

# + colab_type="code" id="IZi5V0W7GwxL" colab={"base_uri": "https://localhost:8080/", "height": 769} outputId="15a834ca-c46a-458c-ea42-d7835e535d4e"
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()

# + colab_type="code" id="ApAl2xbqGwzp" colab={}


# + [markdown] colab_type="text" id="tfqcUL6DIKDR"
# ## Task 7: Training the Model

# + colab_type="code" id="1fuvoWEXIQfZ" colab={"base_uri": "https://localhost:8080/", "height": 458} outputId="9a5e0c2b-e727-46f0-a39b-a5081f84f604"
# save weights each epoch; PlotLossesCallback draws live loss/accuracy curves
checkpoint = ModelCheckpoint('model_weights.h5',monitor='val_loss', save_weights_only=True, mode='min',verbose=0)
callbacks = [PlotLossesCallback(), checkpoint]

batch_size = 32
history = model.fit(
    datagen_train.flow(x_train,y_train, batch_size=batch_size,shuffle=True),
    steps_per_epoch = len(x_train) // batch_size,
    validation_data = datagen_val.flow(x_val,y_val,batch_size=batch_size,shuffle=True),
    validation_steps = len(x_val) // batch_size,
    epochs = 12,
    callbacks=callbacks
)

# + colab_type="code" id="zyaFllz-IQiD" colab={}


# + [markdown] colab_type="text" id="Out7Wpj_Ic-g"
# ## Task 8: Model Evaluation

# + colab_type="code" id="MJTxGMVEIdlp" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="2dc87ed7-f006-4266-95c3-1d9cc19c61de"
model.evaluate(x_val,y_val)

# + colab_type="code" id="LmgNzJFYIdos" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="a6506c5f-e4f3-4f66-df93-47ad95dc4bf9"
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import seaborn as sns

# collapse one-hot rows back to class indices for sklearn metrics
y_true = np.argmax(y_val,1)
y_pred = np.argmax(model.predict(x_val),1)
print(metrics.classification_report(y_true,y_pred))

# + id="L_Pibi5Ei2qP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="8aa6f9f0-e757-4ada-848e-1e6d95e26edc"
print('Classification accuracy: %0.6f' %metrics.accuracy_score(y_true,y_pred))

# + colab_type="code" id="yxaqITweIdr5" colab={}
labels = ["squiggle", "narrowband", "noise", "narrowbanddrd"]

# + id="UMy4ox_MFtDa" colab_type="code" colab={}
RadioClassification_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import subprocess from {{cookiecutter.repo_name}}.settings import cwd PROJECT_NAME = os.path.basename(os.path.dirname(cwd)) # + def form_dvc_command(name, py_file, inps, outs=[], extra_deps=[], outs_persist=[]): res = (f"dvc run --no-exec -w {cwd} -f {PROJECT_NAME}/pipelines/{name}.dvc" f" -d {py_file}") res += "".join([" -d " + x for x in inps]) res += "".join([" -d " + x for x in extra_deps]) res += "".join([" -o " + x for x in outs]) res += "".join([" --outs-persist " + x for x in outs_persist]) res += f" python {py_file}" res += " " + " ".join(inps) res += " " + " ".join(outs_persist) res += " " + " ".join(outs) return res.strip() def execute_dvc_command(command_string): subprocess.check_output(f"cd {cwd}; {command_string}", shell=True)
{{ cookiecutter.repo_name }}/notebooks/1.0-{{cookiecutter.author_name}}-dvc-pipeline.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GabrielLourenco12/python_exercises/blob/main/Exercicio1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="eghkFguqvsEd"
#

# + [markdown] id="4-0YIDv9vuZh"
# ## Exercises 1

# + [markdown] id="0yOHUgurv-1I"
# Read two integer numbers, then compute and display the result of the following operations: addition, subtraction, multiplication and division

# + id="m9nEv6MNv3EK" outputId="6bd7f05c-9dbf-4b26-e62c-f9876ec559bc" colab={"base_uri": "https://localhost:8080/", "height": 118}
# NOTE(review): the statement asks for integers but the code reads floats —
# confirm which was intended (prompts stay in Portuguese, as displayed to the user)
a = float(input('Digite o primeiro número: '))
b = float(input('Digite o segundo número: '))

print('A soma é', a + b)
print('A subtração é', a - b)
print('A divisão é', a / b)
print('A multiplicação é', a * b)

# + [markdown] id="e3FNMiPRwsuf"
# Compute the amount of fuel (in liters) used on a trip by a car that does 12 km per liter. To obtain the result, the user provides the time spent on the trip and the average speed during it. With those, the distance travelled is obtained via DISTANCE = TIME * SPEED. Having the distance, the amount of fuel used on the trip is LITERS_USED = DISTANCE / 12. The program must display the average speed, the time spent on the trip, the distance travelled and the amount of fuel used on the trip

# + id="Yaqv0_I9vogJ" outputId="cda47a09-4692-4833-b014-3d7a6a97de7a" colab={"base_uri": "https://localhost:8080/", "height": 84}
# car fuel efficiency, in km per liter
automovel_km_litro = 12

# time is entered in minutes and converted to hours to match km/h
tempo_viagem = (float(input('Digite o tempo da viagem em minutos: ')))/60
velocidade_média = float(input('Digite a velocidade média km/h: '))

# distance = time * average speed
distancia = tempo_viagem * velocidade_média
print('A distância percorrida foi',distancia,'Km' )

# liters used = distance / (km per liter)
litros_usados = distancia/automovel_km_litro
print('A quantidade de combustível usada foi',round(litros_usados,1),'L' )
Exercicio1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 import os from moviepy.editor import VideoFileClip from IPython.display import HTML import math # %matplotlib inline def grayscale(img): """Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray')""" return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # image = cv2.imread('image.jpg') gray_image = grayscale(image) plt.imshow(gray_image, cmap='gray') plt.show() # hue saturation value img_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) plt.imshow(img_hsv) plt.show() def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold) lower_yellow = np.array([20, 100, 100], dtype = "uint8") upper_yellow = np.array([30, 255, 255], dtype="uint8") mask_yellow = cv2.inRange(img_hsv, lower_yellow, upper_yellow) mask_white = cv2.inRange(gray_image, 200, 255) mask_yw = cv2.bitwise_or(mask_white, mask_yellow) mask_yw_image = cv2.bitwise_and(gray_image, mask_yw)
Lane Detetction/.ipynb_checkpoints/Lane Detection-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from ctapipe.core import Container, Field, Map import numpy as np # + class SubContainer(Container): junk = Field("nothing","Some junk") class EventContainer(Container): event_id = Field(-1,"event id number") tels_with_data = Field([], "list of telescopes with data") sub = Field(SubContainer(), "stuff") # a sub-container in the hierarchy # for dicts of sub-containers, use Map instead # of a dict() as the default value to support serialization tel = Field(Map(), "telescopes") # - ev = EventContainer() # default values automatically filled in print(ev.event_id) print(ev.tels_with_data) print(ev.tel) print(ev) # values can be set as normal for a class: ev.event_id = 100 ev.event_id ev.as_dict() # by default only shows the bare items, not sub-containers (See later) ev.as_dict(recursive=True) # Now, let's define a sub-container that we can add per telescope: # + class TelContainer(Container): tel_id = Field(-1, "telescope ID number") image = Field(np.zeros(10), "camera pixel data") # - # and we can add a few of these to the parent container inside the tel dict: ev.tel[10] = TelContainer() ev.tel[5] = TelContainer() ev.tel[42] = TelContainer() ev.tel # ### converion to dictionaries ev.as_dict() ev.as_dict(recursive=True, flatten=False) # for serialization to a table, we can even flatten the output into a single set of columns ev.as_dict(recursive=True, flatten=True) # ### setting and clearing values ev.tel[5].image[:] = 9 print(ev) ev.reset() ev.as_dict(recursive=True, flatten=True) from ctapipe.io.containers import MCEventContainer ev = MCEventContainer() ev
examples/notebooks/Container_examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/deep1185/ga-learner-dsmp-repo/blob/master/fashion_mnist_deepmandal1185_17thnov.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="dc6k9Mn3a-FN" outputId="c9cd3f37-76bf-48ba-f941-d3d0234ba3dc"
# !pip install keras-tuner

# + id="qX6ScirBbI7G"
import tensorflow as tf
from tensorflow import keras
import numpy as np

# + colab={"base_uri": "https://localhost:8080/"} id="-7xgtDtWbIu3" outputId="81a64d0f-d15d-4688-d9d3-a988cfcef229"
print(tf.__version__)

# + id="v4-ykRGEbIh5"
fashion_mnist=keras.datasets.fashion_mnist

# + id="gd19O2wNbIQe"
(train_images,train_labels),(test_images,test_labels)=fashion_mnist.load_data()

# + id="Z0RFymAvbH9b"
# scale pixel values from [0, 255] to [0, 1]
train_images=train_images/255.0
test_images=test_images/255.0

# + colab={"base_uri": "https://localhost:8080/"} id="2cJodrRqbajO" outputId="31bf2331-b66e-470a-ce5c-485ffe995d32"
train_images[0].shape

# + id="ETpYA0QTba54"
# add a trailing channel dimension for the Conv2D layers: (n, 28, 28, 1)
train_images=train_images.reshape(len(train_images),28,28,1)
test_images=test_images.reshape(len(test_images),28,28,1)

# + id="I4xkni-bbaXU"
def build_model(hp):
    """Build a CNN whose filter counts, kernel sizes, dense width and
    learning rate are sampled from the given keras-tuner HyperParameters."""
    model = keras.Sequential([
        keras.layers.Conv2D(
            filters=hp.Int('conv_1_filter', min_value=32, max_value=128, step=16),
            kernel_size=hp.Choice('conv_1_kernel', values = [3,5]),
            activation='relu',
            input_shape=(28,28,1)
        ),
        keras.layers.Conv2D(
            filters=hp.Int('conv_2_filter', min_value=32, max_value=64, step=16),
            kernel_size=hp.Choice('conv_2_kernel', values = [3,5]),
            activation='relu'
        ),
        keras.layers.Flatten(),
        keras.layers.Dense(
            units=hp.Int('dense_1_units', min_value=32, max_value=128, step=16),
            activation='relu'
        ),
        keras.layers.Dense(10, activation='softmax')
    ])

    # sparse_categorical_crossentropy: labels are integer class ids, not one-hot
    model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model

# + id="HQNXM-jNb0QY"
# NOTE(review): `kerastuner` is the legacy package name; newer releases
# import as `keras_tuner` — confirm the installed version
from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

# + colab={"base_uri": "https://localhost:8080/"} id="mMm-TEngb0nL" outputId="c180be4b-6cf4-45b2-ca5a-7d1616ace2e1"
tuner_search=RandomSearch(build_model,
                          objective='val_accuracy',
                          max_trials=5,directory='output',project_name="Mnist Fashion")

# + colab={"base_uri": "https://localhost:8080/"} id="aPxq2GONb0HD" outputId="f0302b1d-2fdf-42c0-c652-dc8ed4b5fc95"
tuner_search.search(train_images,train_labels,epochs=3,validation_split=0.1)

# + colab={"base_uri": "https://localhost:8080/"} id="oReN_KpvvrIx" outputId="0c8d7f09-77ea-4990-f704-3306a99390c6"
# NOTE(review): this repeats the search call above — presumably a re-run
# Colab cell; the second call resumes/loads completed trials
tuner_search.search(train_images,train_labels,epochs=3,validation_split=0.1)

# + id="jCD6mp-dv1D_"
model=tuner_search.get_best_models(num_models=1)[0]

# + colab={"base_uri": "https://localhost:8080/"} id="ur8p3HXywBXb" outputId="c618d438-e9df-492e-8d02-e50342c7a1ee"
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="cWrdh6e5wJbW" outputId="b4cf794b-c952-41e9-f993-ffdd88043dcf"
# continue training the best model from epoch 3 up to epoch 10
model.fit(train_images, train_labels, epochs=10, validation_split=0.1, initial_epoch=3)
fashion_mnist_deepmandal1185_17thnov.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import pandas as pd # File to Load (Remember to Change These) school_data_to_load = "Resources/schools_complete.csv" student_data_to_load = "Resources/students_complete.csv" # Read School and Student Data File and store into Pandas DataFrames school_data = pd.read_csv(school_data_to_load) student_data = pd.read_csv(student_data_to_load) # Combine the data into a single dataset. school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"]) school_data_complete.head() # - # ## District Summary # # * Calculate the total number of schools # # * Calculate the total number of students # # * Calculate the total budget # # * Calculate the average math score # # * Calculate the average reading score # # * Calculate the percentage of students with a passing math score (70 or greater) # # * Calculate the percentage of students with a passing reading score (70 or greater) # # * Calculate the percentage of students who passed math **and** reading (% Overall Passing) # # * Create a dataframe to hold the above results # # * Optional: give the displayed data cleaner formatting # + #school_data_complete #school Number #Group by n_school= len(school_data['school_name']) n_school #total student n_stu= len(student_data['student_name']) n_stu #total budget allbud_school=sum(school_data['budget']) allbud_school #Average Math ave_math = student_data['math_score'].mean() ave_math #Reading Score ave_read = student_data['reading_score'].mean() ave_read #Pass Math grade_math=['P' if score/100 >= 0.7 else 'F' for 
score in student_data['math_score']] npass_math= 0 nfail_math=0 for grade in grade_math: if grade == 'P': npass_math += 1 else: nfail_math += 1 rpass_math = npass_math/len(grade_math) #read grade_read=['P' if score/100 >= 0.7 else 'F' for score in student_data['reading_score']] npass_read= 0 nfail_read=0 for grade in grade_read: if grade == 'P': npass_read += 1 else: nfail_read += 1 rpass_read = npass_read/len(grade_read) rpass_read #overall passing grade_df= pd.DataFrame({'Read':grade_read, 'Math':grade_math}) # (df['A'] < 5) & (df['B'] > 5) npass_all = len(grade_df[(grade_df['Read'] == 'P') & (grade_df['Math'] == 'P')]) rpass_all = npass_all/len(grade_read) total_df = pd.DataFrame({'Total Schools':n_school, 'Total Students':n_stu, 'Total Budget':allbud_school, 'Average Math Score':ave_math, 'Average Reading Score':ave_read, '% Math Pass':rpass_math*100, '% Reading Pass':rpass_read*100, '% Overall Pass':rpass_all*100}, index=[0]) total_df # - # ## School Summary # * Create an overview table that summarizes key metrics about each school, including: # * School Name # * School Type # * Total Students # * Total School Budget # * Per Student Budget # * Average Math Score # * Average Reading Score # * % Passing Math # * % Passing Reading # * % Overall Passing (The percentage of students that passed math **and** reading.) 
# # * Create a dataframe to hold the above results # + #School name sc_name_gb = school_data_complete.groupby('school_name') sc_name_df = sc_name_gb.count().reset_index() sc_score_df = sc_name_gb.mean().reset_index() sc_name_df = pd.DataFrame({'school_name': sc_name_df['school_name'], 'Total Students': sc_name_df['Student ID'], 'reading_score': sc_score_df['reading_score'], 'math_score': sc_score_df['math_score']}) sc_name_df #school type sc_summary = pd.merge(sc_name_df, school_data, how="left", on=["school_name", "school_name"]) del(sc_summary['size']) sc_summary # # + sc_studid_gb = school_data_complete.groupby('Student ID') stid_df= sc_studid_gb.sum().reset_index() stid_df # this gorupby dataframe contain only the Read_score that is PASS 70 r_score= stid_df.loc[stid_df['reading_score']> 69].groupby('School ID') r_scoredf = r_score.count().reset_index() del(r_scoredf['Student ID'],r_scoredf['math_score'],r_scoredf['size'], r_scoredf['budget']) r_scoredf # this gorupby dataframe contain only the Math that is PASS 70 m_score= stid_df.loc[stid_df['math_score']> 69].groupby('School ID') m_scoredf = m_score.count().reset_index() del(m_scoredf['Student ID'],m_scoredf['reading_score'],m_scoredf['size'], m_scoredf['budget']) m_scoredf # this gorupby dataframe contain only the both that is PASS 70 o_score_col =(stid_df['math_score']> 69) & (stid_df['reading_score']> 69) stid_df['Pass Overall']= o_score_col o_scoreloc= stid_df.loc[stid_df['Pass Overall'] == True].groupby('School ID') o_score_df=o_scoreloc.count().reset_index() del(o_score_df['Student ID'],o_score_df['reading_score'],o_score_df['size'], o_score_df['budget'],o_score_df['math_score'] ) # o_score_df # - # #Total Pass Poplacte sc_score = pd.merge(r_scoredf, m_scoredf, how="left", on=["School ID", "School ID",]) sc_score = pd.merge(sc_score,o_score_df, how="left", on=["School ID", "School ID",]) sc_score = sc_score.rename(columns={'reading_score':'Pass Read', 'math_score':'Pass Math'}) sc_score sc_summary = 
pd.merge(sc_summary, sc_score, how="left", on=["School ID"]) sc_summary['%Pass Read']=sc_summary['Pass Read']/sc_summary['Total Students']*100 sc_summary['%Pass Math']=sc_summary['Pass Math']/sc_summary['Total Students']*100 sc_summary['%Pass Overall']=sc_summary['Pass Overall']/sc_summary['Total Students']*100 sc_summary['Per Student Budget']=sc_summary['budget']/sc_summary['Total Students'] del(sc_summary['School ID'],sc_summary['Pass Read'],sc_summary['Pass Math'], sc_summary['Pass Overall']) sc_summary # ## Top Performing Schools (By % Overall Passing) # * Sort and display the top five performing schools by % overall passing. sc_summary.sort_values('%Pass Overall', ascending = False).head() # ## Bottom Performing Schools (By % Overall Passing) # * Sort and display the five worst-performing schools by % overall passing. sc_summary.sort_values('%Pass Overall', ascending = True).head() # ## Math Scores by Grade # * Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school. # # * Create a pandas series for each grade. Hint: use a conditional statement. 
# # * Group each series by school # # * Combine the series into a dataframe # # * Optional: give the displayed data cleaner formatting gr_df=school_data_complete.set_index("grade") gr_df # + m_ninegr_df = gr_df.loc['9th', ['math_score','school_name']].groupby('school_name').mean() m_tengr_df = gr_df.loc['10th', ['math_score','school_name']].groupby('school_name').mean() m_elegr_df = gr_df.loc['11th', ['math_score','school_name']].groupby('school_name').mean() m_twegr_df = gr_df.loc['12th', ['math_score','school_name']].groupby('school_name').mean() mgr_df=pd.DataFrame({'9th': m_ninegr_df['math_score'],'10th': m_tengr_df['math_score'], '11th':m_elegr_df['math_score'], '12th':m_twegr_df['math_score']}) mgr_df # - # ## Reading Score by Grade # * Perform the same operations as above for reading scores # + r_ninegr_df = gr_df.loc['9th', ['reading_score','school_name']].groupby('school_name').mean() r_tengr_df = gr_df.loc['10th', ['reading_score','school_name']].groupby('school_name').mean() r_elegr_df = gr_df.loc['11th', ['reading_score','school_name']].groupby('school_name').mean() r_twegr_df = gr_df.loc['12th', ['reading_score','school_name']].groupby('school_name').mean() rgr_df=pd.DataFrame({'9th': r_ninegr_df['reading_score'],'10th': r_tengr_df['reading_score'], '11th':r_elegr_df['reading_score'], '12th':r_twegr_df['reading_score']}) rgr_df # - # ## Scores by School Spending # * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. 
Include in the table each of the following: # * Average Math Score # * Average Reading Score # * % Passing Math # * % Passing Reading # * Overall Passing Rate (Average of the above two) # + # max(sc_summary['Per Student Budget']) =655 # min(sc_summary['Per Student Budget'])=578 # (655-578)/4 =19.25 pstubudg_bin =[0, 590, 610, 630, 660] pstubudg_label = ['570 - 590','591 - 610','611 - 630', '631 - 660'] pstubudg_df=pd.DataFrame.copy(sc_summary) pstubudg_df["Per Student Budget Group"]= pd.cut(pstubudg_df["Per Student Budget"], pstubudg_bin, labels=pstubudg_label) del(pstubudg_df['Total Students'],pstubudg_df['type'],pstubudg_df['budget'],pstubudg_df["Per Student Budget"]) fin_pstubudg_df=pstubudg_df.groupby('Per Student Budget Group').mean() fin_pstubudg_df # + #Check it with the Example Solution n_pstubudg_bin =[0, 584, 629, 644, 675] n_pstubudg_label = ['<585','585-629 ','630-644', '645-675'] n_pstubudg_df=pd.DataFrame.copy(sc_summary) n_pstubudg_df["Per Student Budget Group"]= pd.cut(n_pstubudg_df["Per Student Budget"], n_pstubudg_bin, labels=n_pstubudg_label) del(n_pstubudg_df['Total Students'],n_pstubudg_df['type'],n_pstubudg_df['budget'],n_pstubudg_df["Per Student Budget"],n_pstubudg_df['school_name']) n_fin_pstubudg_df=n_pstubudg_df.groupby('Per Student Budget Group').mean() n_fin_pstubudg_df # - # ## Scores by School Size # * Perform the same operations as above, based on school size. 
# + school_size=pd.DataFrame.copy(pstubudg_df) school_size['size']= school_data['size'] size_bin =[0, 1800, 2700, 3600, 5000] size_label = ['400-1800','1801-2700','2701-3600', '3600 - 5000'] school_size["size"]= pd.cut(school_size['size'], size_bin, labels=size_label) del(school_size['Per Student Budget Group']) school_size=school_size.groupby('size').mean() # # max(school_size['size'])= 4976 # # min(school_size['size']) = 427 # # (4976-427)/4 =1137.25 # school_size school_size # - # ## Scores by School Type # * Perform the same operations as above, based on school type # + school_type =pd.DataFrame.copy(sc_summary) school_type['type']= sc_summary['type'] del(school_type['Total Students'],school_type['budget'],school_type['Per Student Budget']) school_type=school_type.groupby('type').mean() school_type # + jupyter={"outputs_hidden": true} # -
PyCitySchools/PyCitySchools_starter.ipynb
# + # Linear regression using MLE with fixed variance and input-dependent variance. # Adapted from # https://colab.sandbox.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb#scrollTo=5zCEYpzu7bDX from __future__ import absolute_import from __future__ import division from __future__ import print_function from pprint import pprint import matplotlib.pyplot as plt import numpy as np import seaborn as sns try: import tensorflow.compat.v2 as tf except ModuleNotFoundError: # %pip install tensorflow import tensorflow.compat.v2 as tf tf.enable_v2_behavior() try: import tensorflow_probability as tfp except ModuleNotFoundError: # %pip install tensorflow-probability import tensorflow_probability as tfp import numpy as np import matplotlib.pyplot as plt import os figdir = "figures" def savefig(fname): plt.savefig(os.path.join(figdir, fname)) sns.reset_defaults() # sns.set_style('whitegrid') # sns.set_context('talk') sns.set_context(context="talk", font_scale=0.7) tfd = tfp.distributions # @title Synthesize dataset. 
w0 = 0.125 b0 = 5.0 x_range = [-20, 60] def load_dataset(n=150, n_tst=150): np.random.seed(43) def s(x): g = (x - x_range[0]) / (x_range[1] - x_range[0]) return 3 * (0.25 + g**2.0) x = (x_range[1] - x_range[0]) * np.random.rand(n) + x_range[0] eps = np.random.randn(n) * s(x) y = (w0 * x * (1.0 + np.sin(x)) + b0) + eps x = x[..., np.newaxis] x_tst = np.linspace(*x_range, num=n_tst).astype(np.float32) x_tst = x_tst[..., np.newaxis] return y, x, x_tst y, x, x_tst = load_dataset() plt.figure() # plt.figure(figsize=[8, 5]) # inches plt.plot(x, y, "b.", label="observed") savefig("linreg_1d_hetero_data.pdf") plt.show() ### Fixed output variance model = tf.keras.Sequential( [ tf.keras.layers.Dense(1), # 1 linear layer tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)), ] ) negloglik = lambda y, rv_y: -rv_y.log_prob(y) model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik) history = model.fit(x, y, epochs=1000, verbose=False) [print(np.squeeze(w.numpy())) for w in model.weights] yhat = model(x_tst) assert isinstance(yhat, tfd.Distribution) plt.plot(history.history["loss"], label="Train") # plt.plot(history.history['val_loss'], label='Val') plt.legend() plt.xlabel("Epoch") plt.ylabel("NLL") plt.show() plt.figure() plt.plot(x, y, "b.", label="observed") plt.plot(x_tst, yhat.mean(), "r", label="mean", linewidth=4) savefig("linreg_1d_hetero_mean.pdf") plt.show() plt.figure() plt.plot(x, y, "b.", label="observed") m = yhat.mean() s = yhat.stddev() plt.plot(x_tst, m, "r", linewidth=4, label="mean") plt.plot(x_tst, m + 2 * s, "g", linewidth=2, label=r"mean + 2 stddev") plt.plot(x_tst, m - 2 * s, "g", linewidth=2, label=r"mean - 2 stddev") savefig("linreg_1d_hetero_var_fixed.pdf") plt.show() # Data-dependent variance (heteroskedastic) # mu(x) = b + wx # sigma(x) = softplus( 0.05 * b' + w' x) model = tf.keras.Sequential( [ tf.keras.layers.Dense(1 + 1), # linear model for mean and variance tfp.layers.DistributionLambda( lambda t: 
tfd.Normal(loc=t[..., :1], scale=1e-3 + tf.math.softplus(0.05 * t[..., 1:])) ), ] ) model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik) model.fit(x, y, epochs=1000, verbose=False) yhat = model(x_tst) assert isinstance(yhat, tfd.Distribution) plt.figure() plt.plot(x, y, "b.", label="observed") m = yhat.mean() s = yhat.stddev() plt.plot(x_tst, m, "r", linewidth=4, label="mean") plt.plot(x_tst, m + 2 * s, "g", linewidth=2, label=r"mean + 2 stddev") plt.plot(x_tst, m - 2 * s, "g", linewidth=2, label=r"mean - 2 stddev") savefig("linreg_1d_hetero_var_adaptive.pdf") plt.show()
notebooks/book1/02/linreg_1d_hetero_tfp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 0. Dependências import numpy as np import pydotplus from IPython.display import Image from sklearn import tree # **Dependências:** # - conda install graphviz # - conda install -c conda-forge pydotplus # # *Em caso de erro*: # - *pip install graphviz* # - *adicionar a pasta "C:\Users\Arnaldo\Miniconda3\Library\bin\graphviz" ao path das variáveis de ambiente* # # 1. Introdução # **Árvores de Decisão** são um dos algoritmos de Machine Learning mais fáceis de implementar. A ideia principla é dividir o problema em sub-problemas mais simples até que se resolva o problema. Nas árvores, cada **nó de decisão** contém um teste em um atributo, cada **folha** representa uma classe ou um valor (no caso da regressão) e o percurso da raiz até uma folha representa uma **regra de classificação/regressão**. Um atributo pode aparecer mais de uma vez na árvore, porém com valores diferentes. # # As **condições de paradas** podem ser inúmeras: # - Os atributos acabaram (no caso em que os atributos não se repetem na árvore) # - Todos os exemplos são de uma mesma classe # - A altura da árvore atingiu um valor previamente definido # - O número de exemplos a serem divididos é menor que um valor definido # # **Vantagens:** # - Fáceis de entender e explicar. Mais fácil inclusive que regressão linear # - Algumas pessoas acreditam que ás arvores de decisão representam a tomada de decisão mais próxima dos seres humanos. # - Podem ser mostradas graficamente e facilmente interpretadas por não-especialistas # - Trabalha tanto com variáveis qualitativas e quantitativas sem precisar de pré-processamento # # **Desvantagens:** # - A precisão não é tão boa quanto outros algoritmos # - Não são robustas. Uma pequena mudança nos dados pode causar uma grande diferença na árvore final. 
# # **Algoritmo básico:** # 1. Escolher um atributo # 2. Dividir o (sub-)banco por uma valor específico do atributo # 3. Para cada folha: # # 3.1: Se todos os exemplos são da mesma classe, associar essa classe aos exemplos # # 3.2: Caso contrário, repetir os passos 1 a 3 # # ## Impureza e Ganho de Informação # # *Como escolher o melhor atributo?* Existem muitas medidas e algoritmos diferentes: # - **ID3 e C4.5**: utilizam *ganho da informação*. # - **CART**: utiliza *impureza de Gini*. # - **CHAID**: utilizam significância estatística. # # Em geral, todas as abordagens concordam em dois pontos: # - Uma divisão que mantém as proporções das classes é inútil # - Uma divisão onde todos os exemplos são da mesma classe, tem utilidade máxima # # ### Entropia # A **Entropia** caracteriza a impureza de uma coleção arbitrária de exemplos. # # Seja $S$ uma amostra de exemplos e $p_i$ a probabilidade de cada classe $i$. A entropia $E(S)$ é definida como: # # $$E(S) = \sum_i^n{p_i\ln{p_i}}$$ # # ### Ganho de Informação # # O **Ganho de Informação (GI)** é a diferença entre a impureza atual (entropia, gini, etc..) e a impureza ponderada dos dois novos grupos. Intuitivamente, o **GI representa a divisão que reduz a impureza, ou seja, obtém grupos mais homogêneos em comparação com o grupo antes da divisão**. Comparando o GI para várias divisões baseadas nas regras de divisão diferente nos permite escolher a "melhor" divisão. # # 2. 
Dados

# +
# Toy "play tennis"-style dataset: columns are [tempo, temperatura, umidade, vento].
x = np.array([
    ['Sol',     85, 85, 'Não'],
    ['Sol',     80, 90, 'Sim'],
    ['Nublado', 83, 86, 'Não'],
    ['Chuva',   70, 96, 'Não'],
    ['Chuva',   68, 80, 'Não'],
    ['Chuva',   65, 70, 'Sim'],
    ['Nublado', 64, 65, 'Sim'],
    ['Sol',     72, 95, 'Não'],
    ['Sol',     69, 70, 'Não'],
    ['Chuva',   75, 80, 'Não'],
    ['Sol',     75, 70, 'Sim'],
    ['Nublado', 72, 90, 'Sim'],
    ['Nublado', 81, 75, 'Não'],
    ['Chuva',   71, 91, 'Sim']
], dtype='object')

y = np.array(['Não', 'Não', 'Sim', 'Sim', 'Sim', 'Não', 'Sim', 'Não', 'Sim',
              'Sim', 'Sim', 'Sim', 'Sim', 'Não']).reshape(-1, 1)

print(x.shape, y.shape)
# -

# # 3. Implementação

# +
def most_frequent_class(y):
    """Return the most frequent label in *y* (majority vote)."""
    classes, class_counts = np.unique(y, return_counts=True)
    return classes[np.argmax(class_counts)]


def entropy(y):
    """Shannon entropy (base 2) of the class distribution in *y*; 0.0 for empty *y*."""
    if len(y) == 0:
        return 0.0
    _, class_counts = np.unique(y, return_counts=True)
    proportions = class_counts / y.shape[0]
    return -np.sum(proportions * np.log2(proportions))


def gini(y):
    """Gini impurity of the class distribution in *y*; 0.0 for empty *y*."""
    if len(y) == 0:
        return 0.0
    _, class_counts = np.unique(y, return_counts=True)
    proportions = class_counts / y.shape[0]
    return 1.0 - np.sum(proportions**2)


# +
class Node():
    """A single tree node: leaves carry an output, internal nodes a test."""

    def __init__(self, parent=None):
        self.parent = parent
        self.left_child = None   # less than a value (regression) OR not equal to a category (classification)
        self.right_child = None  # otherwise
        self.col_index = None    # attribute (column) tested at this node
        self.value = None        # threshold (numeric) or category (str) tested
        self.is_leaf = True
        self.output = None       # prediction stored on leaves


class DecisionTree():
    """CART-style decision tree for classification or regression.

    criterion         -- impurity function over a label array (gini or entropy)
    max_depth         -- maximum recursion depth (None = unbounded)
    min_samples_split -- minimum samples required to keep splitting a child
    is_classification -- majority-vote leaves if True, mean leaves otherwise
    """

    def __init__(self, criterion=gini, max_depth=None, min_samples_split=2, is_classification=True):
        self.criterion = criterion
        self.max_depth = np.inf if max_depth is None else max_depth
        self.min_samples_split = min_samples_split
        self.is_classification = is_classification
        self.root = Node()

    def fit(self, x, y):
        """Grow the tree from feature matrix *x* and labels *y*."""
        self.__build_tree(self.root, x, y)

    def predict(self, x):
        """Return one prediction per sample (row) of *x*."""
        return np.array([self.__compute_output(sample, self.root) for sample in x])

    def __build_tree(self, parent_node, x, y, depth=0):
        parent_node.col_index, parent_node.value, best_gain = self.__find_best_split(x, y)
        # Stop when no split improves impurity or the depth budget is spent.
        if best_gain == 0.0 or depth > self.max_depth:
            parent_node.output = most_frequent_class(y) if self.is_classification else np.mean(y)
            return
        left_child = Node(parent=parent_node)
        right_child = Node(parent=parent_node)
        parent_node.is_leaf = False
        x_left, y_left, x_right, y_right = self.__split_data_by_value(x, y, parent_node.col_index, parent_node.value)
        if len(x_left) >= self.min_samples_split:
            self.__build_tree(left_child, x_left, y_left, depth + 1)
        else:
            left_child.output = most_frequent_class(y_left) if self.is_classification else np.mean(y_left)
        if len(x_right) >= self.min_samples_split:
            self.__build_tree(right_child, x_right, y_right, depth + 1)
        else:
            right_child.output = most_frequent_class(y_right) if self.is_classification else np.mean(y_right)
        parent_node.left_child = left_child
        parent_node.right_child = right_child

    def __find_best_split(self, x, y):
        # Exhaustively try every (attribute, value) pair and keep the split
        # with the largest impurity reduction (information gain).
        best_gain, best_col, best_value = 0.0, None, None
        current_impurity = self.criterion(y)
        n_atts = x.shape[1]
        for att in range(n_atts):
            att_values = np.unique(x[:, att])
            for value in att_values:
                _, y_left, _, y_right = self.__split_data_by_value(x, y, att, value)
                left_impurity = self.criterion(y_left)
                right_impurity = self.criterion(y_right)
                p = len(y_left) / y.shape[0]
                gain = current_impurity - (p * left_impurity + (1 - p) * right_impurity)
                if gain > best_gain:
                    best_gain = gain
                    best_col = att
                    best_value = value
        return best_col, best_value, best_gain

    def __split_data_by_value(self, x, y, col_index, value):
        # BUGFIX: the original always used "x < value", which orders strings
        # lexicographically while prediction tested equality. Categorical
        # (str) values now split on ==/!=, matching __compute_output,
        # __print_tree, and the Node attribute comments.
        if isinstance(value, str):
            right_mask = x[:, col_index] == value
        else:
            right_mask = x[:, col_index] >= value
        left_mask = np.invert(right_mask)
        return x[left_mask], y[left_mask], x[right_mask], y[right_mask]

    def __compute_output(self, x, node):
        if node.is_leaf:
            return node.output
        is_discrete = isinstance(node.value, str)
        # BUGFIX: the numeric comparison was ">", but training sends samples
        # with x == value to the right subtree, so ">=" is the consistent test.
        right_condition = x[node.col_index] == node.value if is_discrete else x[node.col_index] >= node.value
        return self.__compute_output(x, node.right_child if right_condition else node.left_child)

    def __str__(self):
        return self.__print_tree(self.root)

    def __print_tree(self, node, indent=''):
        if node.is_leaf:
            return 'Leaf: y = {}\n'.format(node.output)
        is_discrete = isinstance(node.value, str)
        right_condition = 'x[{}] {} {} | '.format(node.col_index, '==' if is_discrete else '>=', node.value)
        left_condition = indent
        left_condition += 'x[{}] {} {} | '.format(node.col_index, '!=' if is_discrete else '< ', node.value)
        summary = right_condition + self.__print_tree(node.right_child, indent + ' ' * len(right_condition))
        summary += left_condition + self.__print_tree(node.left_child, indent + ' ' * len(right_condition))
        return summary
# -

# # 4. Teste

# +
x = np.array([
    ['Sol',     85, 85, 'Não'],
    ['Sol',     80, 90, 'Sim'],
    ['Nublado', 83, 86, 'Não'],
    ['Chuva',   70, 96, 'Não'],
    ['Chuva',   68, 80, 'Não'],
    ['Chuva',   65, 70, 'Sim'],
    ['Nublado', 64, 65, 'Sim'],
    ['Sol',     72, 95, 'Não'],
    ['Sol',     69, 70, 'Não'],
    ['Chuva',   75, 80, 'Não'],
    ['Sol',     75, 70, 'Sim'],
    ['Nublado', 72, 90, 'Sim'],
    ['Nublado', 81, 75, 'Não'],
    ['Chuva',   71, 91, 'Sim']
], dtype='object')

y = np.array(['Não', 'Não', 'Sim', 'Sim', 'Sim', 'Não', 'Sim', 'Não', 'Sim',
              'Sim', 'Sim', 'Sim', 'Sim', 'Não']).reshape(-1, 1)

dt = DecisionTree()
dt.fit(x, y)
print(dt)
# -

dt.predict([['Chuva', 70, 90, 'Não']])

# ### Comparação com o Scikit-learn

# +
x = np.array([
    [0, 85, 85, 0],
    [0, 80, 90, 1],
    [1, 83, 86, 0],
    [2, 70, 96, 0],
    [2, 68, 80, 0],
    [2, 65, 70, 1],
    [1, 64, 65, 1],
    [0, 72, 95, 0],
    [0, 69, 70, 0],
    [2, 75, 80, 0],
    [0, 75, 70, 1],
    [1, 72, 90, 1],
    [1, 81, 75, 0],
    [2, 71, 91, 1]
])

y = np.array([0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0]).reshape(-1, 1)

clf = tree.DecisionTreeClassifier(criterion='gini')
clf.fit(x, y)

# +
dot_data = tree.export_graphviz(clf, out_file=None,
                                feature_names=['Tempo', 'Temperatura', 'Umidade', 'Vento'],
                                class_names=['Não', 'Sim'],
                                filled=True, rounded=True,
                                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
# -

# ## 5. Referências
#
Decision Trees.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# import
import numpy as np
import json
from urllib.request import urlopen
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import pickle
import os.path

# Load the cached "user took" list from disk. The file contains JSON text,
# so open it in text mode with an explicit encoding rather than binary mode.
with open("user_took.txt", "r", encoding="utf-8") as lc:
    user_suggested = json.load(lc)
user_suggested
experiment_vs_uninstall.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # ZnTrack Parameters with dataclasses # # To structure the parameters used in a Node it can be useful to pass them as a dataclass. The following Notebook will illustrate a small Example. # + pycharm={"name": "#%%\n"} import pathlib from zntrack import config config.nb_name = "dataclasses_for_parameters.ipynb" # + pycharm={"name": "#%%\n"} from zntrack.utils import cwd_temp_dir temp_dir = cwd_temp_dir() # + pycharm={"name": "#%%\n"} # !git init # !dvc init # + pycharm={"name": "#%%\n"} import dataclasses from zntrack import Node, zn import random # + pycharm={"name": "#%%\n"} @dataclasses.dataclass class Parameter: start: int stop: int step: int = 1 class ComputeRandomNumber(Node): parameter: Parameter = zn.Method() number = zn.outs() def __init__(self, parameter: Parameter = None, **kwargs): super().__init__(**kwargs) self.parameter = parameter def run(self): self.number = random.randrange( self.parameter.start, self.parameter.stop, self.parameter.step ) # + pycharm={"name": "#%%\n"} ComputeRandomNumber(parameter=Parameter(start=100, stop=200)).write_graph(no_exec=False) # + pycharm={"name": "#%%\n"} print(ComputeRandomNumber.load().number) print(ComputeRandomNumber.load().parameter) # + [markdown] pycharm={"name": "#%% md\n"} # The arguments of the dataclass are saved in the `params.yaml` file and can also be modified there. # + pycharm={"name": "#%%\n"} print(pathlib.Path("params.yaml").read_text())
examples/dataclasses_for_parameters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 2D Linear Regression Using Ordinary Least Squares (OLS) # # Essentially we have two features instead of one influencing our system # # ## Process # # * Loading the data # * Exploring the data # * Adding bias to our input # * Solve for weights using OLS # * Make a Prediction # * Determining how well the model performed # # ## Equations # # The hypothesis $h_\theta(x)$ is given by the linear model # # $$ h_\theta(x) = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + ... + \theta_n x_n $$ # # where $\theta_0, \theta_1, \theta_2, ..., \theta_n$ are the weights of the model we need to solve for, # # $x_1, x_2, ..., x_n$ are the features of the model, and # # $n$ represents the number of features. # # Now, for convenience of notation a new feature is defined as $x_0 = 1$. # # This allows us to construct two $n + 1$ dimensional column vectors # # $$ \vec{x} = \begin{bmatrix} x_0 \\ x_1 \\ x_2 \\ \vdots \\ x_n \end{bmatrix}\qquad \vec{\theta} = \begin{bmatrix} \theta_0 \\ \theta_1 \\ \theta_2 \\ \vdots \\ \theta_n \end{bmatrix} $$ # # Now, the hypothesis can be written as # # $$ h_\theta(x) = \theta_0 x_0 + \theta_1 x_1 + \theta_2 x_2 + ... + \theta_n x_n $$ # # Note: $h_\theta(x)$ is still the same as the orginal representation since $x_0 = 1$. 
# # Now, we can simply write $h_\theta(x)$ as # # $$ h_\theta(x) = \vec{\theta}^T\vec{x} $$ # # where $\vec{\theta}^T$ is the transpose of $\vec{\theta}$, i.e., # # $$ \vec{\theta}^T = \begin{bmatrix} \theta_0 & \theta_1 & \theta_2 & \cdots & \theta_n \end{bmatrix} $$ # # Note: $h_\theta(x) = \vec{\theta}^T\vec{x}$ uses one training example # # Now, if we have m training examples, i.e., $(x^{(1)}, y^{(1)}), (x^{(2)}, y^{(2)}), ..., (x^{(m)}, y^{(m)})$ with $n$ features then we can write each feature vector as # # $$ \vec{x}^{(i)} = \begin{bmatrix} x_0^{(i)} \\ x_1^{(i)} \\ x_2^{(i)} \\ \vdots \\ x_n^{(i)} \end{bmatrix} $$ # # where $i = 1, 2, ..., m$. # # From here we can construct a feature matrix $X$ where the transpose of each feature vector corresponds to a row of $X$ starting with the first feature vector, i.e., $\vec{x}^{(1)}$ and ending with the $m^{th}$ feature vector, i.e., $\vec{x}^{(m)}$ # # $$ X = \begin{bmatrix} (x^{(1)})^T \\ (x^{(2)})^T \\ \vdots \\ (x^{(m)})^T \end{bmatrix} = \begin{bmatrix} x_0^{(1)} && x_1^{(1)} && x_2^{(1)} && \cdots && x_n^{(1)} \\ x_0^{(2)} && x_1^{(2)} && x_2^{(2)} && \cdots && x_n^{(2)} \\ && && \vdots \\ x_0^{(m)} && x_1^{(m)} && x_2^{(m)} && \cdots && x_n^{(m)} \end{bmatrix}$$ # # We can then construct the label or output vector $\vec{y}$ which consists of the output from each training example # # $$ \vec{y} = \begin{bmatrix} y^{(1)} \\ y^{(2)} \\ \vdots \\ y^{(m)} \end{bmatrix} $$ # # Now we can compute $\vec{\theta}$ by using OLS # # $$ \vec{\theta} = \left( X^T X\right)^{-1} X^T\vec{y}$$ # # where $X^T$ is the transpose of the X matrix and, $(X^TX)^{-1}$ is the inverse of the $X^TX$ matrix. # # Note: the way the feature matrix $X$ and the output vector $\vec{y}$ are constructed has the same form as the data in the text file except for the feature vector $\vec{x_0}$ which is appended to the feature matrix. 
# # Also, the way the equation for the hypothesis is written for multiple training examples is different when compared to the equation that was used for a single training example. This is because each feature vector was transposed from a $(n + 1)$ $x$ $1$ column vector to a $1$ $x$ $(n + 1)$ row vector that were then combined to construct an $m$ $x$ $(n + 1)$ feature matrix. So, multiplying the $m$ $x$ $(n + 1)$ feature matrix by the weight vector $\vec{\theta}$ which is a $(n + 1)$ $x$ $1$ column vector means the hypothesis equation must be
#
# $$ h_\theta(X) = X\vec{\theta}$$
#
# Note: here we will be performing 2D Linear Regression which means $n = 2$, and our training data has 47 rows which means $m = 47$

# +
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# %matplotlib inline
# -

# ## Loading the Data

# Each row of housing.txt holds: house size (sq ft), number of bedrooms, price.
data = np.loadtxt('data/housing.txt', delimiter=',')

m = len(data)          # number of training examples
n = len(data[0]) - 1   # number of features (last column is the label)

print("Shape of data", data.shape)
print("Number of samples", m)
print("Number of features", n)

X, y = data[:, :n], data[:, n]

# ## Exploring the Data
#
# ### Plotting the Data

# +
# Scatter Plot
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], y / 1000)
ax.xaxis.set_tick_params(labelsize=8)
ax.yaxis.set_tick_params(labelsize=8)
ax.zaxis.set_tick_params(labelsize=8)
plt.yticks(np.arange(1, 6, 1))
ax.set_xlabel('House Size (sq ft)')
ax.set_ylabel('Number of Bedrooms')
ax.set_zlabel('Price (in thousands of dollars)');
# -

# ## Visualizing the shape of our data
#
# $X$ is a feature matrix that is $m$ $x$ $(n + 1)$ where:
#
# * $m =$ number of training examples
# * $n =$ number of features
#
# $\vec{y}$ is our output vector that is $m$ $x$ $1$

print("The shape of X is", X.shape)
print("The shape of y is", y.shape)

# ## Adding bias to our input
#
# This is the process of adding the feature $x_0$ to
#
# $$ h_\theta(x) = \theta_0 + \theta_1 x_1 + \theta_2 x_2$$
#
# To do this we add a column of 1's to our feature matrix and call it $x_0$

# +
# here we add a column of 1's to X for our bias (intercept term); we'll use
# concatenate for numpy arrays with shape (m, n)
# Note: concatenate will add a column of 1's to X each time this cell is run
X = np.concatenate([np.ones((m, 1)), X], axis=1)

print("Shape of X is", X.shape)
print("Example of a feature vector", X[0])
# -

# ## Solve for weights using OLS
#
# Basically we just need to transform the following function into python code
#
# $$\vec{\theta} = \left( X^T X\right)^{-1} X^T\vec{y}$$

theta = np.linalg.pinv(X.T @ X) @ X.T @ y
print("Shape of theta is", theta.shape)
print("theta is", theta)

# ## Making a prediction
#
# Now we can create an input vector and get an estimated price for the home

# +
# input_vec = [size of house (sq ft), number of bedrooms]
input_vec = [1650, 3]

# add a bias since all inputs must begin with 1
input_vec_bias = np.append(1, input_vec)

# make a prediction (this is an estimated house price, not a "profit")
price = theta.T @ input_vec_bias

print("Input vector", input_vec)
print("Input vector with bias", input_vec_bias)
print("theta", theta)
print("Predicted price", price)
# -

# ## Determine how well the model performed
#
# We need some numerical measure to see how well our model performed
#
# For this we can use $R^2$ (R-Squared)
#
# We usually use this for any regression not just Linear Regression
#
# The definition for R-Squared is the following:
#
# $$R^2 = 1 - \frac{SS_{res}}{SS_{tot}}$$
#
# where:
#
# $SS_{res}$ is the sum of squared residuals
#
# and
#
# $SS_{tot}$ is the total sum of squares
#
# These are defined as:
#
# $$SS_{res} = \sum^m_{i=1}(y^{(i)} - h_\theta(X))^2$$
#
# <br />
#
# $$SS_{tot} = \sum^m_{i=1}(y^{(i)} - \bar{y}^{(i)})^2$$
#
# $R^2 = 1$ is a perfect model
#
# $R^2 = 0$ is basically the average (50%)
#
# $R^2 < 0$ is worse than just computing the average

# +
# Note: h can equivalently be computed as theta.T @ X.T or X @ theta.T,
# because theta has shape (3,) in numpy and so acts as either a row or a
# column vector; every form returns h with shape (47,)
h = X @ theta

SSres = np.sum((y - h) ** 2)
SStot = np.sum((y - y.mean()) ** 2)
R2 = 1 - SSres / SStot

print('SSres is: ', SSres)
print('SStot is:', SStot)
print('R-squared is: ', R2)
LinearRegression/04-2D-LR-OLS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Plot Fused-gromov-Wasserstein
#
# This example illustrates the computation of FGW for 1D measures [18].
#
# .. [18] <NAME>, <NAME>, <NAME>, <NAME>
#    and <NAME>
#    "Optimal Transport for structured data with application on graphs"
#    International Conference on Machine Learning (ICML). 2019.

# +
# Author: <NAME> <<EMAIL>>
#
# License: MIT License

import matplotlib.pyplot as pl
import numpy as np
import ot
from ot.gromov import gromov_wasserstein, fused_gromov_wasserstein
# -

# Generate data
# ---------
#
# We create two 1D random measures.  Each sample has a 1D coordinate
# (phi / phi2 plus Gaussian jitter) and a 1D feature value (a two-level
# step signal plus jitter), so FGW can exploit both the structure
# (coordinates) and the features.

# +
n = 20  # number of points in the first distribution
n2 = 30  # number of points in the second distribution
sig = 1  # std of first distribution
sig2 = 0.1  # std of second distribution

np.random.seed(0)  # reproducible draws

phi = np.arange(n)[:, None]
xs = phi + sig * np.random.randn(n, 1)
ys = np.vstack((np.ones((n // 2, 1)), 0 * np.ones((n // 2, 1)))) + sig2 * np.random.randn(n, 1)

phi2 = np.arange(n2)[:, None]
xt = phi2 + sig * np.random.randn(n2, 1)
yt = np.vstack((np.ones((n2 // 2, 1)), 0 * np.ones((n2 // 2, 1)))) + sig2 * np.random.randn(n2, 1)
# the target feature signal is reversed, so a feature-blind (pure Gromov)
# matching and a feature-aware matching will disagree
yt = yt[::-1, :]

p = ot.unif(n)   # uniform weights on the source samples
q = ot.unif(n2)  # uniform weights on the target samples
# -

# Plot data
# ---------

# +
pl.close(10)
pl.figure(10, (7, 7))

pl.subplot(2, 1, 1)
pl.scatter(ys, xs, c=phi, s=70)
pl.ylabel('Feature value a', fontsize=20)
pl.title('$\mu=\sum_i \delta_{x_i,a_i}$', fontsize=25, y=1)
pl.xticks(())
pl.yticks(())

pl.subplot(2, 1, 2)
pl.scatter(yt, xt, c=phi2, s=70)
pl.xlabel('coordinates x/y', fontsize=25)
pl.ylabel('Feature value b', fontsize=20)
pl.title('$\\nu=\sum_j \delta_{y_j,b_j}$', fontsize=25, y=1)
pl.yticks(())
pl.tight_layout()
pl.show()
# -

# Create structure matrices and across-feature distance matrix
# ---------

# C1 / C2: pairwise distances inside each space; M: feature distances across spaces
C1 = ot.dist(xs)
C2 = ot.dist(xt)
M = ot.dist(ys, yt)
w1 = ot.unif(C1.shape[0])
w2 = ot.unif(C2.shape[0])
# plain Wasserstein coupling on the feature cost only
# (empty weight lists mean uniform weights in ot.emd)
Got = ot.emd([], [], M)

# Plot matrices
# ---------

# +
cmap = 'Reds'

pl.close(10)
pl.figure(10, (5, 5))
fs = 15
l_x = [0, 5, 10, 15]
l_y = [0, 5, 10, 15, 20, 25]
gs = pl.GridSpec(5, 5)

ax1 = pl.subplot(gs[3:, :2])
pl.imshow(C1, cmap=cmap, interpolation='nearest')
pl.title("$C_1$", fontsize=fs)
pl.xlabel("$k$", fontsize=fs)
pl.ylabel("$i$", fontsize=fs)
pl.xticks(l_x)
pl.yticks(l_x)

ax2 = pl.subplot(gs[:3, 2:])
pl.imshow(C2, cmap=cmap, interpolation='nearest')
pl.title("$C_2$", fontsize=fs)
pl.ylabel("$l$", fontsize=fs)
pl.xticks(())
pl.yticks(l_y)
ax2.set_aspect('auto')

# bottom-right panel shares axes with the two structure-matrix panels
ax3 = pl.subplot(gs[3:, 2:], sharex=ax2, sharey=ax1)
pl.imshow(M, cmap=cmap, interpolation='nearest')
pl.yticks(l_x)
pl.xticks(l_y)
pl.ylabel("$i$", fontsize=fs)
pl.title("$M_{AB}$", fontsize=fs)
pl.xlabel("$j$", fontsize=fs)
pl.tight_layout()
ax3.set_aspect('auto')
pl.show()
# -

# Compute FGW/GW
# ---------

# +
# alpha is the FGW interpolation parameter between the feature cost M and the
# structure costs C1/C2 (see the POT fused_gromov_wasserstein documentation)
alpha = 1e-3

ot.tic()
Gwg, logw = fused_gromov_wasserstein(M, C1, C2, p, q, loss_fun='square_loss', alpha=alpha, verbose=True, log=True)
ot.toc()

# #%reload_ext WGW
Gg, log = gromov_wasserstein(C1, C2, p, q, loss_fun='square_loss', verbose=True, log=True)
# -

# Visualize transport matrices
# ---------

# +
cmap = 'Blues'
fs = 15
pl.figure(2, (13, 5))
pl.clf()

pl.subplot(1, 3, 1)
pl.imshow(Got, cmap=cmap, interpolation='nearest')
pl.ylabel("$i$", fontsize=fs)
pl.xticks(())
pl.title('Wasserstein ($M$ only)')

pl.subplot(1, 3, 2)
pl.imshow(Gg, cmap=cmap, interpolation='nearest')
pl.title('Gromov ($C_1,C_2$ only)')
pl.xticks(())

pl.subplot(1, 3, 3)
pl.imshow(Gwg, cmap=cmap, interpolation='nearest')
pl.title('FGW ($M+C_1,C_2$)')
pl.xlabel("$j$", fontsize=fs)
pl.ylabel("$i$", fontsize=fs)
pl.tight_layout()
pl.show()
_downloads/e5b34c8084f7e566453325162d1a0090/plot_fgw.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Scratch: approach 1 -- rebuild the integer from its digits, add one,
# then split the result back into a digit list.  Prints [1, 0].
digits = [9]
num = 0
for pos, digit in enumerate(digits):
    num += digit * pow(10, len(digits) - 1 - pos)
print([int(ch) for ch in str(num + 1)])
# -

# Scratch: approach 2 -- walk the digits right-to-left, turning 9s into 0s
# until some digit can absorb the carry.  Prints [1, 0, 0, 0].
d = [9, 9, 9]
for back in range(1, len(d) + 1):
    if d[-back] + 1 > 9:
        d[-back] = 0
        if d[0] == 0:
            d = [1] + d
            break
    else:
        d[-back] += 1
        break
print(d)

# Scratch: prepending an element to a list.  Prints [1, 1, 2, 3, 4].
d = [1, 2, 3, 4]
print([1] + d)


def plusOne(self, digits):
    """Increment the number represented by a big-endian digit list by one.

    The list is updated in place and also returned, e.g.
    [1, 2, 9] -> [1, 3, 0] and [9, 9] -> [1, 0, 0].

    :type digits: List[int]
    :rtype: List[int]
    """
    # Sweep from the least-significant digit: every trailing 9 rolls over
    # to 0, and the first non-9 digit absorbs the carry.
    pos = len(digits) - 1
    while pos >= 0:
        if digits[pos] == 9:
            digits[pos] = 0
            pos -= 1
        else:
            digits[pos] += 1
            break
    # If the whole number was 9...9, every digit is now 0: prepend the 1.
    if digits[0] == 0:
        digits.insert(0, 1)
    return digits
66. Plus One.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Objects
#
# In the last session you learned how to package up useful code into
# functions. This is a really useful idea, as it lets you re-use useful code
# in your own scripts, and to then share useful code with other people.
#
# However, it is normal for functions to rely on data. For example, consider
# the Morse code `encode` and `decode` functions in the last lesson. These
# only work because of the data contained in the `letter_to_morse`
# dictionary. The functions would break if anyone changes the data in this
# dictionary.

letter_to_morse = {
    'a': '.-',    'b': '-...',  'c': '-.-.',  'd': '-..',   'e': '.',
    'f': '..-.',  'g': '--.',   'h': '....',  'i': '..',    'j': '.---',
    'k': '-.-',   'l': '.-..',  'm': '--',    'n': '-.',    'o': '---',
    'p': '.--.',  'q': '--.-',  'r': '.-.',   's': '...',   't': '-',
    'u': '..-',   'v': '...-',  'w': '.--',   'x': '-..-',  'y': '-.--',
    'z': '--..',
    '0': '-----', '1': '.----', '2': '..---', '3': '...--', '4': '....-',
    '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.',
    ' ': '/',
}


def encode(message):
    """Translate message into a list of Morse-code strings, one per character."""
    return [letter_to_morse[char.lower()] for char in message]


encode("Hello")

# The above `encode("Hello")` has worked. However, if we change the data in
# `letter_to_morse`, e.g. swapping `l` from `.-..` to `-.--`, then we get
# `['....', '.', '-.--', '-.--', '---']`, which is wrong. We can make even
# larger changes, which would completely break the function...
# While such changes are easy to spot in this example, they become more
# difficult to find in larger programs. In addition, as you share code, you
# will find that people using your code will do weird things to the data on
# which it depends, which can introduce weird bugs and problems.
# # The solution is to package a function together with the data on which it
# depends into a single object. This idea is the foundation of object
# orientated programming. To explore this, let us start with a simple example
# that packages the `encode` function together with the `letter_to_morse`
# dictionary on which it depends.

class Morse:
    """Bundle the Morse-code lookup table with the encode function that uses it."""

    def __init__(self):
        # Each Morse object carries its own private copy of the translation table.
        self._letter_to_morse = {
            'a': '.-',    'b': '-...',  'c': '-.-.',  'd': '-..',   'e': '.',
            'f': '..-.',  'g': '--.',   'h': '....',  'i': '..',    'j': '.---',
            'k': '-.-',   'l': '.-..',  'm': '--',    'n': '-.',    'o': '---',
            'p': '.--.',  'q': '--.-',  'r': '.-.',   's': '...',   't': '-',
            'u': '..-',   'v': '...-',  'w': '.--',   'x': '-..-',  'y': '-.--',
            'z': '--..',
            '0': '-----', '1': '.----', '2': '..---', '3': '...--', '4': '....-',
            '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.',
            ' ': '/',
        }

    def encode(self, message):
        """Translate message into a list of Morse-code strings, one per character."""
        return [self._letter_to_morse[char.lower()] for char in message]


# Above, we have packaged the data (`letter_to_morse`) together with the
# `encode` function into what we call a `class`. A class describes how data
# and functions are combined together. An instance of a class is called an
# object, which we can create by calling `Morse()`.

m = Morse()

# `m` is an object of the class `Morse`. It has its own copy of
# `letter_to_morse` within it, and its own copy of the `encode` function. We
# can call `m`'s copy of the `encode` function by typing `m.encode(...)`, e.g.

m.encode("Hello World")

# To create a new class, you use the `class` keyword, followed by the name of
# your class. In this case, `class Morse` defined a new class called `Morse`.
# You then add a colon, and write all of the functions that should be part of
# the class indented below. At a minimum, you must define one function, called
# the constructor. This function has the signature
# `def __init__(self, arguments...)`. The first argument, `self`, is a
# special variable that allows an object of the class to access the data that
# belongs to itself.
# It is the job of the constructor to set up that data. For example, let's
# now create a new class that provides a simple guessing game.

class GuessGame:
    """A simple guessing game: each object remembers one secret to be guessed."""

    def __init__(self, secret):
        # The constructor stores the per-object data on self.
        self._secret = secret

    def guess(self, value):
        """Compare value against this object's secret and print the outcome."""
        if value != self._secret:
            print("Try again...")
        else:
            print("Well done - you have guessed my secret")


# In this class, the constructor `__init__(self, secret)` takes an extra
# argument after `self`. This argument is saved as the `_secret` variable
# that is part of the `self` of the object. Note that we always name
# variables that are part of a class with a leading underscore. We can
# construct different object instances of GuessGame that have different
# secrets, e.g.

g1 = GuessGame("cat")
g2 = GuessGame("dog")

# Here, the `self._secret` for `g1` equals "cat". The `self._secret` for
# `g2` equals "dog".
#
# When we call the function `g1.guess(value)`, it compares `value` against
# `self._secret` for `g1`.

g1.guess("dog")

g1.guess("cat")

# When we call the function `g2.guess(value)` it compares `value` against
# `self._secret` for `g2`.

g2.guess("cat")

g2.guess("dog")

# # Exercise
#
# ## Exercise 1
#
# Edit the below `GuessGame` example so that it records how many unsuccessful
# guesses have been performed. Add a function called `nGuesses()` that
# returns the number of unsuccessful guesses. Once you have made the changes,
# check your class by creating an object of your class and using it to make
# some successful and unsuccessful guesses.
class GuessGame:
    """Guessing game that also counts unsuccessful guesses (exercise 1 answer)."""

    def __init__(self, secret):
        self._secret = secret
        self._nguesses = 0  # number of wrong guesses made so far

    def guess(self, value):
        """Check value against the secret, counting and reporting failures."""
        if value != self._secret:
            self._nguesses += 1
            print("Try again...")
        else:
            print("Well done - you have guessed my secret")

    def nGuesses(self):
        """Return the number of unsuccessful guesses made so far."""
        return self._nguesses


g = GuessGame("cat")

g.nGuesses() == 0

g.guess("dog")

g.nGuesses() == 1

g.guess("horse")

g.nGuesses() == 2

g.guess("cat")

g.nGuesses() == 2

# ## Exercise 2
#
# Edit the constructor of your `GuessGame` class so that the user can
# optionally specify a maximum number of allowable guesses. If the maximum
# number of guesses is not supplied, then set the default value to 5.
#
# Create a `maxGuesses()` function that returns the maximum number of
# allowable guesses.
#
# Finally, edit the `guess()` function so that it will not let you make more
# than the maximum number of guesses (e.g. if the number of guesses exceeds
# the maximum number, then print out "Sorry, you have run out of guesses.").
#
# Check that you code works by creating an object of GuessGame that only
# allows three guesses, and see what happens if you guess incorrectly more
# than three times.

class GuessGame:
    """Guessing game with a cap on wrong guesses (exercise 2 answer)."""

    def __init__(self, secret, max_guesses=5):
        self._secret = secret
        self._nguesses = 0               # wrong guesses made so far
        self._max_guesses = max_guesses  # cap on wrong guesses

    def guess(self, value):
        """Check value against the secret unless the guess budget is spent."""
        if self.nGuesses() >= self.maxGuesses():
            print("Sorry, you have run out of guesses")
        elif value == self._secret:
            print("Well done - you have guessed my secret")
        else:
            self._nguesses += 1
            print("Try again...")

    def nGuesses(self):
        """Return the number of unsuccessful guesses made so far."""
        return self._nguesses

    def maxGuesses(self):
        """Return the maximum number of allowed unsuccessful guesses."""
        return self._max_guesses


g = GuessGame("fish", 3)

g.maxGuesses() == 3

g.guess("cat")

g.guess("dog")

g.guess("horse")

g.guess("gerbil")
answers/05_objects.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: .pptt # language: python # name: .pptt # --- # # Privacy Policies Through Time # # *An exploration of how privacy policies have evolved and reacted to different legislative and media events throughout the years.* # # # + # project imports import env from build_master_index import MASTER_CSV, PROBLEM_COMPANIES from wayback_search import POLICY_DIR # python & package imports from collections import defaultdict import matplotlib.pyplot as plt from datetime import date import seaborn as sns import pandas as pd import numpy as np import os import re # plot things # %matplotlib inline # autoreload modules # %load_ext autoreload # %autoreload 2 # - # ## Dataset Introduction # # This project has put together a historical dataset containing all of the privacy policy revisions of various companies with the help of the Internet Archive's [Wayback Machine](https://archive.org/web/). # # Here we import the master csv of the dataset and prepare it for analysis. 
# + # import and prepare the dataframe df = pd.read_csv(MASTER_CSV) df.policy_date = pd.to_datetime(df.policy_date) df.company = pd.Categorical(df.company) df['company_id'] = df.company.cat.codes # sanity-check: check for any policy paths that do not exist assert len(df[~pd.Series([os.path.exists(os.path.join(POLICY_DIR, x)) for x in df.policy_path])]) == 0 # sometimes duplicate csv rows appear because of overlap in the # dates of two configurations; they are the same policy most likely # so we just drop the duplicate print('Policy count before dropping duplicates: {}'.format(len(df))) df = df[~df.duplicated('policy_path')] # some companies proved more difficult than others when gathering # privacy policies, so we drop them here (the policies are left # as part of the original dataset because they are valid policies, # but we do not have all the revisions) print('Policy count before removing problematic companies: {}'.format(len(df))) print('Company count before removing problematic companies: {}'.format(len(df.company.unique()))) df = df[~df.company.str.contains('|'.join(PROBLEM_COMPANIES))] print('Final policy count: {}'.format(len(df))) # - # A quick look inside the dataframe shows the available columns, the mix of companies, and a range of policy dates df.sample(n=5) # ## Dataset Metadata # # Here we explore the dataset metadata and begin to explore what's inside. # How many and what companies are covered? companies = df.company.unique() print('Companies: {}'.format(', '.join(list(companies)))) print('Count: {}'.format(len(companies))) # How many policies are there? policies = df.policy_path.unique() # sanity-check: we removed duplicates so this should be the same assert len(df) == len(policies) print("Count of policies (number of rows in the dataset): {}".format(len(df))) # How many policies per company? 
grouped = df.groupby('company').policy_path.count()
_ = grouped.plot.bar(title='Privacy Policy Revisions by Company', figsize=(12, 5))

# What date range do the policies cover?

print('Oldest policy: {}'.format(min(df.policy_date).strftime('%Y-%m-%d')))
print('Most recent policy: {}'.format(max(df.policy_date).strftime('%Y-%m-%d')))

# How many policies by year?
#
# Warning: this plot makes it look like companies are making more revisions
# each year, but the positive trend might be the result of there simply being
# more active companies.

grouped = df.groupby([df.policy_date.dt.year]).policy_path.count()
_ = grouped.plot.bar(title='Privacy Policy Revisions by Year (All Companies)', figsize=(12, 5))

# Can we normalize by number of active companies?

# +
grouped = df.groupby([df.policy_date.dt.year, 'company']).policy_path.count()

# there's probably a better way to do this with pandas
# values: year -> total revision count; companies: year -> set of active companies
values = defaultdict(int)
companies = defaultdict(set)
for index, value in grouped.items():
    values[index[0]] += value
    companies[index[0]].add(index[1])

# divide each year's revision count by how many companies were active that year
normalized = dict()
for year, count in values.items():
    normalized[year] = count / len(companies[year])

pd.Series(normalized).plot.bar(
    title='Privacy Policy Revisions by Year Normalized by Active Company Count',
    figsize=(12, 5))
# -

# ## The Privacy Policies
#
# Now let's explore the policies themselves. We'll start by pulling all of
# the policy text into the dataframe, and then running some pandas cmds to
# gather metadata about the texts.

def get_policy_text(row):
    """
    Creates a pd.Series of full text from policies

    Args:
        row: pd.Series, row of policy index dataframe
    Returns:
        pd.Series, a new row with a 'policy_text' col holding the full
        contents of the policy file referenced by row['policy_path']
    """
    page = ''
    with open(os.path.join(POLICY_DIR, row['policy_path']), 'r', encoding='utf-8') as f:
        page = f.read()
    row['policy_text'] = page
    return row

# Looking inside again shows the same as before with the addition of the new
# text column

df = df.apply(get_policy_text, axis=1)
df.sample(n=10)

# How many total words?
# NOTE(review): policy_text.str.len() counts characters, not words -- the
# "Total words" label below (and the chart title further down) inherit that;
# confirm intent before renaming or switching to str.split().str.len().
print('Total words: {}'.format(df.policy_text.str.len().sum()))
print('Total occurences of "privacy": {}'.format(df.policy_text.str.count('privacy', flags=re.IGNORECASE).sum()))
print('Total occurences of "data": {}'.format(df.policy_text.str.count('data', flags=re.IGNORECASE).sum()))
print('Total occurences of "personal information": {}'.format(df.policy_text.str.count('personal information', flags=re.IGNORECASE).sum()))

# How many words per company?
#
# Warning: this might be slightly biased based on how long companies have
# been operating.
#
# Warning: this is biased by number of revisions.

df_tmp = df.copy()
df_tmp['policy_len'] = df_tmp.policy_text.str.len()
grouped = df_tmp.groupby('company').policy_len.sum()
_ = grouped.plot.bar(title='Word Count of Privacy Policies by Company', figsize=(12, 7))

# ## Key Privacy Event Analysis
#
# In the following sections, we investigate the impact that certain key
# legislative and popular events had on the privacy policies at large. We do
# this with a heatmap-derived visualization that shows us when certain
# terminology enters the vernacular of the policies per company.

def prepare_heatmap_data(data, ycol, xcol, valuecol, keep_last=True):
    """
    Pivot per-policy flags into a ycol x year-month matrix for heatmaps.

    1. Pivot data and extend a policy's np.nan/0/1 up until the next policy
       revision (originally, companies would have a 1 in the months where
       there were revisions followed by a string of np.nans and another 1 at
       the next revision)
    2. Fill the gaps between policy revision columns; create a new col for
       each year-month pair between the first and last policies in the
       dataset and extend between cols as before
    3. Rebuild new, complete dataframe

    Args:
        data: pd.DataFrame with one row per policy revision
        ycol: str, column to use as the heatmap rows (e.g. 'company')
        xcol: str, column to use as the heatmap columns (e.g. 'policy_date')
        valuecol: str, boolean/flag column to pivot into the cells
        keep_last: bool, carry each value forward until the next revision
    Returns:
        pd.DataFrame indexed by ycol with one column per year-month string
    """
    ### 1
    result = data.pivot(index=ycol, columns=xcol, values=valuecol)
    # NOTE(review): iteritems() is removed in pandas >= 2.0; items() is the
    # modern spelling if this notebook is ever upgraded.
    for i1, row in result.transpose().iteritems():
        last = np.nan
        for i2, value in row.iteritems():
            if np.isnan(value) and keep_last:
                # set it equal to last value
                value = last
            result.at[i1, i2] = value
            last = value

    ### 2
    min_date = result.columns.min()
    max_date = result.columns.max()
    companies = result.index
    last = None
    dates = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for year in range(min_date.year, max_date.year + 1):
        for month in range(1, 13):
            # create name of new col
            ym = date(year=year, month=month, day=1).strftime('%Y-%m')
            # get all old cols (y-m-d) that will need to be combined into this col
            col_matches = [x for x in result.columns if ym in str(x)]
            # if there are no revisions in this y-m, then just reuse last month's data
            if last and len(col_matches) == 0:
                col_matches = last
            # set data
            for col in col_matches:
                for company in companies:
                    dates[year][month][company] = result.at[company, col]
            last = col_matches

    ### 3
    new = pd.DataFrame(index=result.index)
    for year, months in dates.items():
        for month, companies in months.items():
            # new col
            ym = date(year=year, month=month, day=1).strftime('%Y-%m')
            for company, val in companies.items():
                if ym not in new.columns:
                    # create new col with np.nan
                    new[ym] = pd.Series([np.nan] * len(new.index))
                # set value
                new.at[company, ym] = val
    return new

def policy_heatmap(data, title, mask=True):
    """
    Draw the company x year-month heatmap produced by prepare_heatmap_data.

    Args:
        data: pd.DataFrame, matrix of flags (rows: companies, cols: year-month)
        title: str, plot title
        mask: bool, hide NaN cells (company had no published policy yet)
    Returns:
        matplotlib Axes with the rendered heatmap
    """
    # init plot
    fig, ax = plt.subplots(figsize=(12, 5))
    ax.set_title(title)
    # prepare mask for null values when a company does not yet have a published privacy policy
    if mask:
        # Bug fix: the mask was previously built from the module-level global
        # 'result' instead of the 'data' argument; that only worked because
        # every caller happened to pass 'result' in.  Build it from 'data'.
        mask = np.zeros_like(data)
        mask[np.isnan(data)] = True
    else:
        mask = None
    # do plot
    # https://stackoverflow.com/questions/37790429/seaborn-heatmap-using-pandas-dataframe
    sns.heatmap(data, ax=ax, fmt="g", cmap=sns.color_palette("RdBu", 7), mask=mask, cbar=False)
    # format and space xticks: keep every sixth label, trimmed to its date part
    xlocs, xlabels = plt.xticks()
    for i, x in enumerate(xlabels):
        text = ''
        if i % 6 == 0:
            text = x._text[:x._text.find('T')]
        xlabels[i]._text = text
    _ = ax.set_xticklabels(xlabels)
    fig.autofmt_xdate()
    return ax

def review_regex_matches(data, filter_col, filter_key, buffer=20):
    """
    Show filter_key matches in context for the rows flagged by filter_col.

    Args:
        data: pd.DataFrame with a 'policy_text' column and a boolean filter_col
        filter_col: str, column marking rows that matched (value 1)
        filter_key: str, regex fragment originally used for the match
        buffer: int, characters of surrounding context to keep on each side
    Returns:
        pd.DataFrame of extracted snippets, for eyeballing match quality
    """
    return data[data[filter_col] == 1].policy_text.str.extract(
        '(.{0,' + str(buffer) + '}' + str(filter_key) + '.{0,' + str(buffer) + '})',
        flags=re.IGNORECASE | re.DOTALL)

def key_match_data(data, filter_key, extra_filters=None):
    """
    Args:
        data: pd.DataFrame
        filter_key: str, identifier of filter; will be used to create new boolean col
        extra_filters: list of str, additional filters joined by '|' to allow
            for permutations and additional words when filtering
    Returns:
        (pd.DataFrame with an additional boolean col signaling if a filter is
        found in the row, str name of that new column)
    """
    df_copy = data.copy()
    filter_col = 'mentions_{}'.format(filter_key)
    # https://chrisalbon.com/python/data_wrangling/pandas_create_column_using_conditional/
    if extra_filters:
        filters = '|'.join([filter_key] + extra_filters)
    else:
        filters = filter_key
    df_copy[filter_col] = np.where(df_copy.policy_text.str.contains(filters, flags=re.IGNORECASE), 1, 0)
    print('{} / {} policies mention {}'.format(len(df_copy[df_copy[filter_col] == 1]), len(df_copy), filter_key))
    return df_copy, filter_col

# ### Children's Online Privacy Protection Act (COPPA)
#
# From [Wikipedia](https://en.wikipedia.org/wiki/Children%27s_Online_Privacy_Protection_Act):
#
# "The act, effective April 21, 2000, applies to the online collection of
# personal information by persons or entities under U.S. jurisdiction about
# children under 13 years of age or children with disabilities. **It details
# what a website operator must include in a privacy policy**, when and how to
# seek verifiable consent from a parent or guardian, and what
# responsibilities an operator has to protect children's privacy and safety
# online including restrictions on the marketing of those under 13."
# # Based on the above, we should expect to see privacy policies mention
# their practices with children's data starting in April of 2000.

coppa, filter_col = key_match_data(df, 'children', ['children', 'child', 'minor', 'underage', 'teenager', 'Online Privacy Protection Act', 'coppa'])
result = prepare_heatmap_data(coppa, 'company', 'policy_date', filter_col)
ax = policy_heatmap(result, 'Reference of COPPA & Related Terms in Privacy Policy')
# add vertical line for the COPPA effective date
_ = ax.vlines(11, .8, 20, color='purple')
_ = plt.text(6, 0.5, 'April 21, 2000: COPPA Begins', color='purple', fontsize=12)

# Double-checking that our matches are reasonable

review_regex_matches(coppa, 'mentions_children', 'children').sample(n=10)

# ## International Safe Harbor Privacy Principles
#
# From [Wikipedia](https://en.wikipedia.org/wiki/International_Safe_Harbor_Privacy_Principles),
#
# "The International Safe Harbor Privacy Principles or Safe Harbour Privacy
# Principles were principles **developed between 1998 and 2000** in order to
# prevent private organizations within the European Union or United States
# which store customer data from accidentally disclosing or losing personal
# information. **They were overturned on October 6, 2015 by the European
# Court of Justice (ECJ)**, which enabled some US companies to comply with
# privacy laws protecting European Union and Swiss citizens. US companies
# storing customer data could self-certify that they adhered to 7 principles,
# to comply with the EU Data Protection Directive and with Swiss
# requirements. The US Department of Commerce developed privacy frameworks in
# conjunction with both the European Union and the Federal Data Protection
# and Information Commissioner of Switzerland."
#
# Let's see if "Safe Harbor" begins to disappear from policies around the
# year 2015.

safeharbor, filter_col = key_match_data(df, 'safe\s*harbor')
result = prepare_heatmap_data(safeharbor, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of Safe Harbor & Related Terms in Privacy Policy')
# add vertical line for safe harbor
_ = ax.vlines(208, .8, 20, color='purple')
_ = plt.text(140, 0.5, 'Oct 6, 2015: Safe Harbor Ends', color='purple', fontsize=12)

review_regex_matches(safeharbor, filter_col, 'safe\s*harbor').sample(n=10)

# ### EU-US Privacy Shield
#
# From [Wikipedia](https://en.wikipedia.org/wiki/EU%E2%80%93US_Privacy_Shield),
#
# "In October 2015 the European Court of Justice declared the previous
# framework called the International Safe Harbor Privacy Principles invalid.
# Soon after this decision the European Commission and the U.S. Government
# started talks about a new framework and **on February 2, 2016 they reached
# a political agreement**. The European Commission published the 'adequacy
# decision' draft, declaring principles to be equivalent to the protections
# offered by EU law."
#
# As we observed, references to "Safe Harbor" begin to disappear after 2015
# to be replaced by this EU-US Privacy Shield, but how many websites
# complied?

shield, filter_col = key_match_data(df, 'shield', ['EU-US\s*Privacy\s*Shield', 'EU-US'])
result = prepare_heatmap_data(shield, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of EU-US Privacy Shield & Related Terms in Privacy Policy')
# add vertical line for the Privacy Shield start date
_ = ax.vlines(202, .8, 20, color='purple')
_ = plt.text(113, 0.5, 'Feb 2, 2016: EU-US Privacy Shield Begins', color='purple', fontsize=12)

review_regex_matches(shield, filter_col, 'shield').sample(n=10)

# ### Popularity of "Personal Information"

personalinfo, filter_col = key_match_data(df, 'personal\s*information')
result = prepare_heatmap_data(personalinfo, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of "Personal Information" in Privacy Policy')
# add vertical line for filter
#_ = ax.vlines(180, .8, 20, color='purple')
#_ = plt.text(100, 0.5, 'Feb 2, 2016: EU-US Privacy Shield Begins', color='purple', fontsize=12)

review_regex_matches(personalinfo, filter_col, 'personal\s*information').sample(n=10)

# ### Popularity of "Data Privacy"

dataprivacy, filter_col = key_match_data(df, 'data\s*privacy')
result = prepare_heatmap_data(dataprivacy, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of "Data Privacy" in Privacy Policy')
# add vertical line for filter
#_ = ax.vlines(180, .8, 20, color='purple')
#_ = plt.text(100, 0.5, 'Feb 2, 2016: EU-US Privacy Shield Begins', color='purple', fontsize=12)

review_regex_matches(dataprivacy, filter_col, 'data\s*privacy').sample(n=10)

# ### Do Not Track
#
# From [Wikipedia](https://en.wikipedia.org/wiki/Do_Not_Track_legislation),
#
# "**On December 1, 2010**, the U.S. Federal Trade Commission (FTC) published
# a preliminary report highlighting the consumers' right to prevent websites
# from tracking their online behaviors. The central plank of the bill was to
# adopt a Do Not Track opt-out function to web browsers. The FTC judged that
# online marketers' pervasive collection of personal information could
# possibly violate privacy. **This issue began to surface again in 2012**
# after Google announced its new privacy policy. Reps. <NAME>, <NAME>, and
# <NAME> asked the FTC to investigate the legality of Google's change of
# privacy policy; they sent a letter to the FTC regarding Google's changed
# privacy policy."
#
# From the above, we imagine that we might begin to see Do Not Track
# references in 2010/2011 with more starting to surface in and after 2012.

dnt_re = '(do\s*not\s*track|dnt)'
dnt, filter_col = key_match_data(df, dnt_re)
result = prepare_heatmap_data(dnt, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of "Do Not Track" in Privacy Policy')
# add vertical line for filter
_ = ax.vlines(152, .8, 20, color='purple')
_ = plt.text(146, 0.5, '2012', color='purple', fontsize=12)

review_regex_matches(dnt, filter_col, dnt_re).sample(n=10)

# ### General Data Protection Regulation (GDPR)
#
# From [Wikipedia](https://en.wikipedia.org/wiki/General_Data_Protection_Regulation),
#
# "The General Data Protection Regulation (EU) 2016/679 ('GDPR') is a
# regulation in EU law on data protection and privacy for all individuals
# within the European Union (EU) and the European Economic Area (EEA). It
# also addresses the export of personal data outside the EU and EEA areas.
# The GDPR aims primarily to give control to individuals over their personal
# data and to simplify the regulatory environment for international business
# by unifying the regulation within the EU.... **The GDPR was adopted on 14
# April 2016, and became enforceable beginning 25 May 2018.** As the GDPR is
# a regulation, not a directive, it is directly binding and applicable, but
# does provide flexibility for certain aspects of the regulation to be
# adjusted by individual member states."
#
# We should therefore see GDPR references in 2018.

gdpr_re = 'gdpr|(General Data Protection Regulation)'
gdpr, filter_col = key_match_data(df, 'gdpr', ['General Data Protection Regulation'])
result = prepare_heatmap_data(gdpr, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of GDPR & Related Terms in Privacy Policy')
# add vertical line for the GDPR enforcement date
_ = ax.vlines(231, -5, 20, color='purple')
_ = plt.text(180, -2, 'May 25, 2018: GDPR Begins', color='purple', fontsize=12)

# Interestingly, there are very few references to GDPR...

review_regex_matches(gdpr, filter_col, gdpr_re).sample(n=7)

# Considering the flurry of privacy policy revision activity that supposedly
# occurred before GDPR, perhaps we can visualize that by not connecting the
# heatmap between revisions... Below is the attempt to do exactly that and
# unfortunately it does not yield anything interesting.

gdpr_re = 'gdpr|(General Data Protection Regulation)'
gdpr, filter_col = key_match_data(df, 'gdpr', ['General Data Protection Regulation'])
# keep_last=False leaves the gaps between revisions as NaN instead of
# carrying each flag forward
result = prepare_heatmap_data(gdpr, 'company', 'policy_date', filter_col, keep_last=False)
result.head()

ax = policy_heatmap(result, 'Reference of GDPR & Related Terms in Privacy Policy')
# add vertical line for the GDPR enforcement date
_ = ax.vlines(231, -5, 20, color='purple')
_ = plt.text(180, -2, 'May 25, 2018: GDPR Begins', color='purple', fontsize=12)

# ### Popularity of "Delete"
#
# With GDPR came the notion of the "right to be forgotten" where you can
# request your data to be removed from a company's data stores. Let's see if
# GDPR adoption is signaled by the word "delete" making its way into privacy
# policies.

delete_re = 'delet'
delete, filter_col = key_match_data(df, delete_re)
result = prepare_heatmap_data(delete, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of "Delete" in Privacy Policy')
# add vertical line for filter
#_ = ax.vlines(105, .8, 20, color='purple')
#_ = plt.text(100, 0.5, '2012', color='purple', fontsize=12)

# Surprisingly, many companies supported deleting data before GDPR. Of
# course, this is not a complete check. This does not tell us to what extent
# these companies allowed you to delete your data.

review_regex_matches(delete, filter_col, delete_re).sample(n=10)

# ### Popularity of "Forgotten"

forgot_re = 'forgotten'
forgot, filter_col = key_match_data(df, forgot_re)
result = prepare_heatmap_data(forgot, 'company', 'policy_date', filter_col)
result.head()

ax = policy_heatmap(result, 'Reference of "Forgotten" in Privacy Policy')
# add vertical line for filter
#_ = ax.vlines(105, .8, 20, color='purple')
#_ = plt.text(100, 0.5, '2012', color='purple', fontsize=12)

review_regex_matches(forgot, filter_col, forgot_re)

# ### TRUSTe

# ## Google

google = df[df.company == 'google']
google.head()

google = google.apply(get_policy_text, axis=1)
# NOTE(review): policy_text.str.len() is a character count, not a word count
google['policy_text_len'] = google.policy_text.str.len()
google[['policy_date', 'policy_text_len']].plot.bar('policy_date', title="Google's Privacy Policy Length By Revision", figsize=(12, 5))

# ## Readability Metrics
#
# To limit noise and increase the value of this analysis, it was not
# performed on all the companies in the dataset and was instead restricted to
# the companies commonly referred to as FAANG (Facebook, Apple, Amazon,
# Netflix, Google) as these companies are large players in the world of data.
# +
import nltk
nltk.download('punkt')  # sentence-tokenizer models used by sent_tokenize below
from nltk.tokenize import sent_tokenize


def prepare_line_data(data, ycol, xcol, valuecol, entities, keep_last=True):
    """
    Pivot per-policy metric values into one column per entity for line plots.

    Args:
        data: pd.DataFrame, one row per policy revision
        ycol: str, column holding the x-axis values (e.g. 'policy_date')
        xcol: str, column holding the entity names (e.g. 'company')
        valuecol: str, metric column to plot
        entities: list of str, entity names expected in xcol
        keep_last: bool, carry each entity's value forward over gaps
    Returns:
        pd.DataFrame with one column per entity plus a ycol column
    """
    result = data.pivot(index=ycol, columns=xcol, values=valuecol)
    # one carried-forward value per entity (company)
    lasts = {c:np.nan for c in entities}
    # NOTE(review): iteritems() is removed in pandas >= 2.0 (use items()).
    for i1, row in result.transpose().iteritems():
        last = np.nan  # unused here; carry-forward state lives in `lasts`
        for i2, value in row.iteritems():
            if np.isnan(value) and keep_last:
                # set it equal to last value
                value = lasts[i2]
            result.at[i1, i2] = value
            lasts[i2] = value
    # make index a dedicated column so that it'll play nice with the line plot later
    result[ycol] = result.index
    return result


def policy_line_plot(data, xcol, entities, colors, title, ylabel, xlabel='Policy Publication Date'):
    """
    Draw one line per entity from a frame produced by prepare_line_data.

    Args:
        data: pd.DataFrame, data to plot
        xcol: str, name of column to plot on xaxis
        entities: list of str, name of entities to appear in legend and as lines
        colors: dict, map between entity name and line color
        title: str, plot title
        ylabel: str, y axis label
        xlabel: str, x axis label (optional)
    Returns:
        matplotlib Axes with the rendered plot
    """
    f, ax = plt.subplots(1, figsize=(12, 7))
    for ent in entities:
        data.plot.line(x=xcol, y=ent, color=colors[ent], ax=ax)
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
# -

faang_companies = ['facebook', 'apple', 'amazon', 'netflix', 'google']
faang = df[df['company'].str.contains('|'.join(faang_companies))]
print(len(faang))
print(faang.company.unique())
faang.sample(n=10)

# init colors now for standardization across plots
colors = ['red', 'orange', 'violet', 'green', 'blue']
faang_colors = {x:colors[i] for i, x in enumerate(faang_companies)}

# ### Lexicon Count
#
# Number of words

# quick check to make sure that split isn't going to have a problem with newline character
faang['policy_text'].str.split().str.contains('\n').unique()

faang['lexicon_count'] = faang['policy_text'].str.split().str.len()
faang['lexicon_count'].describe()

result = prepare_line_data(faang, 'policy_date', 'company', 'lexicon_count', faang_companies)
_ = policy_line_plot(result, 'policy_date',
                     faang_companies, faang_colors,
                     'FAANG Privacy Policy Lexicon Count Over Time',
                     'Policy Lexicon Count')

# ### Syllables Count
#
# Number of syllables

# +
# https://stackoverflow.com/questions/46759492/syllable-count-in-python
def syllable_count(word):
    """Approximate the number of syllables in word by counting vowel groups."""
    word = word.lower()
    count = 0
    vowels = "aeiouy"
    if word[0] in vowels:
        count += 1
    for index in range(1, len(word)):
        # a vowel that follows a non-vowel starts a new syllable group
        if word[index] in vowels and word[index - 1] not in vowels:
            count += 1
    if word.endswith("e"):
        # a trailing 'e' is usually silent
        count -= 1
    if count == 0:
        # every word has at least one syllable
        count += 1
    return count

syllable_count('banana')
# -

# +
# brute force it: sum syllable_count over every whitespace-separated token
# NOTE(review): pd.Series([0] * len(faang)) carries a fresh 0..n-1 index, but
# the loop below overwrites every row via faang.at, so alignment is moot.
sylcounts = list()
faang['syllable_count'] = pd.Series([0] * len(faang))
for index, row in faang.iterrows():
    split = row['policy_text'].split()
    counts = list(map(syllable_count, split))
    sumcounts = sum(counts)
    #print(row['company'], sumcounts)
    sylcounts.append(sumcounts)
    faang.at[index, 'syllable_count'] = sumcounts

faang['syllable_count'].describe()
# -

result = prepare_line_data(faang, 'policy_date', 'company', 'syllable_count', faang_companies)
_ = policy_line_plot(result, 'policy_date', faang_companies, faang_colors, 'FAANG Privacy Policy Syllable Count Over Time', 'Policy Syllable Count')

# ### Sentence Count
#
# Number of sentences

# https://stackoverflow.com/questions/15228054/how-to-count-the-amount-of-sentences-in-a-paragraph-in-python
faang['sentence_count'] = faang['policy_text'].apply(sent_tokenize).str.len()
faang['sentence_count'].describe()

result = prepare_line_data(faang, 'policy_date', 'company', 'sentence_count', faang_companies)
_ = policy_line_plot(result, 'policy_date', faang_companies, faang_colors, 'FAANG Privacy Policy Sentence Count Over Time', 'Policy Sentence Count')

# ### Passive Voice Index
#
# Percentage of sentences with passive verb forms

# ### Flesch Kincaid Measures
#
# Readability measure presented as U.S. grade level.
# The test is also available as a "reading ease" test, but here the grade level
# measure is used as it is more intuitive (Kincaid et al., 1981)
# [[Wikipedia](https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests)].
#
# Flesch Kincaid Reading Ease Test
#
# ![equation](https://wikimedia.org/api/rest_v1/media/math/render/svg/bd4916e193d2f96fa3b74ee258aaa6fe242e110e)
#
# Flesch Kincaid Grade Level Test
#
# ![equation](https://wikimedia.org/api/rest_v1/media/math/render/svg/8e68f5fc959d052d1123b85758065afecc4150c3)

# +
def flesch_kincaid_reading_ease(words, sentences, syllables):
    """Flesch reading-ease score (higher = easier to read).

    Arguments are per-document totals; the call pattern below uses pandas
    Series, so the arithmetic is element-wise. Defined for reference; the
    grade-level variant is the one actually used in this notebook.
    """
    return 206.835 - 1.015 * (words / sentences) - 84.6 * (syllables / words)


def flesch_kincaid_grade_level(words, sentences, syllables):
    """Flesch-Kincaid grade level (approximate U.S. school grade).

    Arguments are per-document totals (scalars or pandas Series).
    """
    return 0.39 * (words / sentences) + 11.8 * (syllables / words) - 15.59
# -

# Element-wise grade-level score per policy revision, from the counts
# computed in the cells above.
faang['flesch_kincaid'] = flesch_kincaid_grade_level(faang['lexicon_count'], faang['sentence_count'], faang['syllable_count'])
faang['flesch_kincaid'].describe()

result = prepare_line_data(faang, 'policy_date', 'company', 'flesch_kincaid', faang_companies)
_ = policy_line_plot(result, 'policy_date', faang_companies, faang_colors, 'FAANG Privacy Policy Flesch Kincaid Score Over Time', 'Policy Flesch Kincaid Score')

# ### Dale-Chall Readability Score
#
# Assess readability based on the use of 3000 words reliably understood by groups of fourth-grade American students (Dale and Chall, 1948) [[Wikipedia](https://en.wikipedia.org/wiki/Dale%E2%80%93Chall_readability_formula).
# # ![equation](https://wikimedia.org/api/rest_v1/media/math/render/svg/0541f1e629f0c06796c5a5babb3fac8d100a858c) # # Computed with help from the [textstat](https://pypi.org/project/textstat/) python package import textstat faang['dale_chall'] = faang['policy_text'].apply(textstat.dale_chall_readability_score) faang['dale_chall'].describe() result = prepare_line_data(faang, 'policy_date', 'company', 'dale_chall', faang_companies) _ = policy_line_plot(result, 'policy_date', faang_companies, faang_colors, 'FAANG Privacy Policy Dale Chall Readability Score Over Time', 'Policy Dale Chall Score')
notebooks/privacy-policies-through-time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %matplotlib inline from __future__ import division, print_function from collections import defaultdict import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.ticker import FormatStrFormatter from matplotlib.colors import ListedColormap import matplotlib.gridspec as gridspec import seaborn.apionly as sns import healpy as hp from sklearn.model_selection import KFold import dask from dask import delayed, multiprocessing, compute from dask.diagnostics import ProgressBar import pyprind from scipy.stats import chi2 from scipy.special import erfcinv from icecube import astro import comptools as comp import comptools.analysis.plotting as plotting import comptools.anisotropy.anisotropy as anisotropy color_dict = comp.analysis.get_color_dict() # + config = ['IC86.2011', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'] years_str = '2011-2015' composition='all' n_side = 64 scale = 3 smooth = 0.0 n_bins = 36 # decmax = -75 # decmax = -60 decmax = -55 decmin = -90 low_energy = True # + def get_proj_nbins_df(bins, data=None, ref=None, composition='all'): dipole_dict = defaultdict(list) for n_bins in bins: dipole_dict['n_bins'].append(n_bins) kwargs_relint_radius = {'config': config, 'low_energy': low_energy, 'smooth': smooth, 'scale': None, 'decmax': decmax, 'decmin': decmin} if data is None: data = anisotropy.get_map(name='data', composition=composition, **kwargs_relint_radius) if ref is None: ref = anisotropy.get_map(name='ref', composition=composition, **kwargs_relint_radius) # relint = anisotropy.get_map(name='relint', composition=composition, **kwargs_relint_radius) # relint_err = anisotropy.get_map(name='relerr', composition=composition, **kwargs_relint_radius) # ri, ri_err, ra, ra_err = 
anisotropy.get_proj_relint(relint, relint_err, n_bins=n_bins, # decmin=decmin, decmax=decmax) ri, ri_err, ra, ra_err = anisotropy.get_binned_relint(data, ref, n_bins=n_bins, decmin=decmin, decmax=decmax) n_dof = ri.shape[0] chi2_all = np.sum(ri**2 / ri_err**2) pval = chi2.sf(chi2_all, n_dof, loc=0, scale=1) sig = erfcinv(2*pval)*np.sqrt(2) dipole_dict['ri'].append(ri) dipole_dict['ri_err'].append(ri_err) dipole_dict['ra'].append(ra) dipole_dict['pval'].append(pval) dipole_dict['sig'].append(sig) return pd.DataFrame.from_records(dipole_dict, index='n_bins') # + # proj_light_df = get_proj_nbins_df(bins, composition='light') # proj_heavy_df = get_proj_nbins_df(bins, composition='heavy') # - bins = np.arange(1, 72+1, 1, dtype=int) proj_all_df = get_proj_nbins_df(bins, composition='all') proj_light_df = get_proj_nbins_df(bins, composition='light') proj_heavy_df = get_proj_nbins_df(bins, composition='heavy') for proj_df, composition in zip([proj_all_df, proj_light_df, proj_heavy_df], ['total', 'light', 'heavy']): fig, axarr = plt.subplots(3, 3, figsize=(10, 6), sharex=True, sharey=False) # for n_bins, ax in zip(proj_df.index[::10], axarr.flatten()): for n_bins, ax in zip([1, 4, 6, 10, 20, 24, 36, 60, 72], axarr.flatten()): proj_nbins = proj_df.loc[n_bins] ra_bins = np.linspace(0, 360, n_bins + 1) plotting.plot_steps(ra_bins, proj_nbins['ri'], yerr=proj_nbins['ri_err'], color=color_dict[composition], label=composition, fillalpha=0.2, ax=ax) # label='{}$\\sigma$'.format(proj_nbins['sig']), ax=ax) ax.axhline(0, marker='None', ls='-.', c='k') ax.set_title(str(n_bins)+' RA bins') # ax.set_ylabel('$\mathrm{\langle RI \\rangle }$') # ax.set_xlabel('RA [ $^{\circ}$]') ax.grid() # ax.set_ylim(-4.0e-3, 4.0e-3) ax.set_xlim(0, 360) ax.invert_xaxis() ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) fig.text(0.5, -0.025, 'RA [ $^{\circ}$]', ha='center', fontsize=16) fig.text(-0.025, 0.5, '$\mathrm{\langle RI \\rangle }$', va='center', rotation='vertical', fontsize=16) 
plt.tight_layout() proj_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, 'anisotropy', 'proj_vs_nbins_{}.png'.format(composition)) comp.check_output_dir(proj_vs_nbins_outfile) plt.savefig(proj_vs_nbins_outfile) plt.show() # + fig, ax = plt.subplots() ax.plot(proj_all_df.index, proj_all_df['sig'], ls='None', label='Significance', color='C2') ax.axhline(0, marker='None', ls='-.', color='k', lw=1) rolling_mean = proj_all_df['sig'].rolling(window=10,center=True).mean() ax.plot(rolling_mean.index, rolling_mean, marker='None', ls='-', color='C2', label='Rolling mean\n(+/- 5 bins window)') ax.fill_between(rolling_mean.index, rolling_mean+1, rolling_mean-1, color='C2', alpha=0.2) ax.set_xlabel('Number RA bins') ax.set_ylabel('Anisotropy significance [$\\sigma$]') ax.set_ylim(0) ax.set_xlim(0) ax.grid() ax.legend() sig_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, 'anisotropy', 'sig_vs_nbins_all.png') comp.check_output_dir(sig_vs_nbins_outfile) plt.savefig(sig_vs_nbins_outfile) plt.show() # + fig, ax = plt.subplots() for proj_df, composition in zip([proj_light_df, proj_heavy_df], ['light', 'heavy']): ax.plot(proj_df.index, proj_df['sig'], ls='None', label=composition, color=color_dict[composition]) # ax.axhline(0, marker='None', ls='-.', color='k', lw=1) rolling_mean = proj_df['sig'].rolling(window=10,center=True).mean() ax.plot(rolling_mean.index, rolling_mean, marker='None', ls='-', color=color_dict[composition], label='') ax.fill_between(rolling_mean.index, rolling_mean+1, rolling_mean-1, color=color_dict[composition], alpha=0.2, label='') ax.set_xlabel('Number RA bins') ax.set_ylabel('Anisotropy significance [$\\sigma$]') ax.set_ylim(0) ax.set_xlim(0) ax.grid() ax.legend() sig_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, 'anisotropy', 'sig_vs_nbins_comp.png') comp.check_output_dir(sig_vs_nbins_outfile) plt.savefig(sig_vs_nbins_outfile) plt.show() # - # The heavy projected relative intensities (for large number of RA bins) looks like 
fluxuations, but is still ~4-sigma away from the null hypothesis. That's weird. # # Scramble data in right acension to see if this feature goes away... kwargs_data = {'config': config, 'low_energy': low_energy, 'smooth': smooth, 'scale': None, 'decmax': decmax} data_heavy = anisotropy.get_map(name='data', composition='heavy', **kwargs_data) ref_heavy = anisotropy.get_map(name='ref', composition='heavy', **kwargs_data) data_heavy # Bin in declination theta, phi = hp.pix2ang(n_side, range(len(data_heavy))) thetamax = np.deg2rad(90 - decmin) thetamin = np.deg2rad(90 - decmax) # dec_mask = (theta <= thetamax) & (theta >= thetamin) n_dec_bins = 30 dec_bins= np.linspace(thetamin, thetamax, n_dec_bins+1, dtype=float) theta_bin_num = np.digitize(theta, dec_bins) - 1 theta_bin_num data_heavy_RAscrambled = data_heavy.copy() for idx in range(n_dec_bins): theta_bin_mask = (theta_bin_num == idx) unseen_mask = data_heavy == hp.UNSEEN combined_mask = theta_bin_mask & ~unseen_mask data_in_dec_bin = data_heavy.copy() data_in_dec_bin = data_in_dec_bin[combined_mask] data_series = pd.Series(data_in_dec_bin) print(idx) shuffled_data = data_series.sample(frac=1.0, random_state=2).values data_heavy_RAscrambled[combined_mask] = shuffled_data # np.random.shuffle(data_in_dec_bin) # data_heavy_RAscrambled[combined_mask] = data_in_dec_bin def get_noisy_proj_sig(composition, random_state): # Set random state for trials np.random.seed(random_state) kwargs_data = {'config': config, 'low_energy': low_energy, 'smooth': smooth, 'scale': None, 'decmax': decmax} ref = anisotropy.get_map(name='ref', composition=composition, **kwargs_data) unseen_mask = ref == hp.UNSEEN ref_poisson_noise = ref.copy() ref_poisson_noise[~unseen_mask] = np.random.poisson(ref_poisson_noise[~unseen_mask]) proj_df = get_proj_nbins_df(bins, data=ref_poisson_noise, ref=ref) return proj_df['sig'] n_noise_trials = 1000 sig_ref_noise = [delayed(get_noisy_proj_sig)('all', random_state) for random_state in range(n_noise_trials)] 
sig_ref_noise = delayed(pd.concat)(sig_ref_noise) # sig_ref_noise = sig_ref_noise.divide(n_noise_trials) with ProgressBar(): # sig_ref_noise = sig_ref_noise.compute(get=dask.get) sig_ref_noise = sig_ref_noise.compute(get=multiprocessing.get, num_works=25) grouped_nbins = sig_ref_noise.groupby(sig_ref_noise.index) def gaussian(x, mu=0, sigma=1): return np.exp(-(x - mu)**2/(2*sigma**2))/np.sqrt(2*np.pi*sigma**2) # + sig_bins, sig_step = np.linspace(-5, 5, 50, retstep=True) sig_midpoints = (sig_bins[1:] + sig_bins[:-1]) / 2 fig, axarr = plt.subplots(3, 3, figsize=(10, 6), sharex=True, sharey=True) for n_bins, ax in zip([1, 4, 6, 10, 20, 24, 36, 60, 72], axarr.flatten()): df_noise_nbins = grouped_nbins.get_group(n_bins) label_mean = '$\mu = {:0.2f}$'.format(df_noise_nbins.mean()) label_std = '$\sigma = {:0.2f}$'.format(df_noise_nbins.std()) df_noise_nbins.plot(kind='hist', bins=sig_bins, histtype='stepfilled', alpha=0.5, lw=1.5, color=color_dict['total'], ax=ax, label=label_mean + '\n ' + label_std) ax.plot(sig_midpoints, n_noise_trials*sig_step*gaussian(sig_midpoints), marker='None', label='Gaussian') ax.set_ylabel('') ax.set_title('{} RA bins'.format(n_bins)) ax.grid() ax.legend() fig.text(0.5, -0.025, 'Anisotropy significance [$\\sigma$]', ha='center', fontsize=16) fig.text(-0.025, 0.5, 'Counts', va='center', rotation='vertical', fontsize=16) plt.tight_layout() sig_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, 'anisotropy', 'sig_vs_nbins_all.png') comp.check_output_dir(sig_vs_nbins_outfile) plt.savefig(sig_vs_nbins_outfile) plt.show() # + fig, ax = plt.subplots() for n_bins in grouped_nbins.indices.keys(): df_noise_nbins = grouped_nbins.get_group(n_bins) # label_mean = '$\mu = {:0.2f}$'.format(df_noise_nbins.mean()) # label_std = '$\sigma = {:0.2f}$'.format(df_noise_nbins.std()) # df_noise_nbins.plot(kind='hist', bins=sig_bins, histtype='stepfilled', alpha=0.5, lw=1.5, # color=color_dict['total'], ax=ax, label=label_mean + '\n ' + label_std) mean = 
df_noise_nbins.mean() err = df_noise_nbins.std() ax.errorbar(n_bins, mean, yerr=err, marker='.', color=color_dict['total']) # ax.fill_between(n_bins, mean-err, mean+err) ax.set_ylabel('Anisotropy significance [$\\sigma$]') ax.set_xlabel('Number RA bins') ax.grid() ax.legend() # fig.text(0.5, -0.025, 'Anisotropy significance [$\\sigma$]', ha='center', fontsize=16) # fig.text(-0.025, 0.5, 'Counts', va='center', rotation='vertical', fontsize=16) plt.tight_layout() # sig_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, # 'anisotropy', 'sig_vs_nbins_all.png') # comp.check_output_dir(sig_vs_nbins_outfile) # plt.savefig(sig_vs_nbins_outfile) plt.show() # - fig, ax = plt.subplots() sig_ref_noise.plot(kind='hist', bins=20, histtype='stepfilled', alpha=0.5, lw=1.5, color=color_dict['total'], ax=ax) ax.set_ylabel('Counts') ax.set_xlabel('Anisotropy significance [$\\sigma$]') ax.grid() plt.show() def get_RAscrambled_proj_sig(composition, random_state): kwargs_data = {'config': config, 'low_energy': low_energy, 'smooth': smooth, 'scale': None, 'decmax': decmax, 'decmin': decmin} ref = anisotropy.get_map(name='ref', composition=composition, **kwargs_data) data = anisotropy.get_map(name='data', composition=composition, **kwargs_data) # Bin in declination theta, phi = hp.pix2ang(n_side, range(len(data))) thetamax = np.deg2rad(90 - decmin) thetamin = np.deg2rad(90 - decmax) n_dec_bins = 20 theta_bins= np.linspace(thetamin, thetamax, n_dec_bins+1) theta_bin_num = np.digitize(theta, theta_bins) - 1 data_ra_scrambled = data.copy() unseen_mask = data_ra_scrambled == hp.UNSEEN for idx in range(n_dec_bins): theta_bin_mask = (theta_bin_num == idx) combined_mask = theta_bin_mask & ~unseen_mask data_in_dec_bin = data_ra_scrambled[combined_mask] shuffled_data = pd.Series(data_in_dec_bin).sample(frac=1.0, random_state=random_state).values data_ra_scrambled[combined_mask] = shuffled_data proj_df = get_proj_nbins_df(bins, data=data_ra_scrambled, ref=ref) return proj_df # + def 
get_RAscrambled_data_dists(composition, random_state): kwargs_data = {'config': config, 'low_energy': low_energy, 'smooth': smooth, 'scale': None, 'decmax': decmax, 'decmin': decmin} data = anisotropy.get_map(name='data', composition=composition, **kwargs_data) # Bin in declination theta, phi = hp.pix2ang(n_side, range(len(data))) thetamax = np.deg2rad(90 - decmin) thetamin = np.deg2rad(90 - decmax) n_dec_bins = 20 theta_bins= np.linspace(thetamin, thetamax, n_dec_bins+1) theta_bin_num = np.digitize(theta, theta_bins) - 1 # data_ra_scrambled = data.copy() data_dists = {} for idx in range(n_dec_bins): data_ra_scrambled = data.copy() unseen_mask = data_ra_scrambled == hp.UNSEEN theta_bin_mask = (theta_bin_num == idx) combined_mask = theta_bin_mask & ~unseen_mask data_in_dec_bin = data_ra_scrambled[combined_mask] shuffled_data = pd.Series(data_in_dec_bin).sample(frac=1.0, random_state=random_state).values data_ra_scrambled[combined_mask] = shuffled_data proj, proj_err, ra, ra_err = anisotropy.get_RA_proj_map(data_ra_scrambled, decmin=decmin, decmax=decmax, n_bins=10) data_dists[idx] = proj, proj_err, ra, ra_err return data_dists # - # Bin in declination theta, phi = hp.pix2ang(n_side, range(hp.nside2npix(n_side))) thetamax = np.deg2rad(90 - decmin) thetamin = np.deg2rad(90 - decmax) n_dec_bins = 20 theta_bins= np.linspace(thetamin, thetamax, n_dec_bins+1) theta_bin_num = np.digitize(theta, theta_bins) - 1 theta_bins def get_RAscrambled_proj(composition, random_state, n_ra_bins=10): kwargs_data = {'config': config, 'low_energy': low_energy, 'smooth': smooth, 'scale': None, 'decmax': decmax, 'decmin': decmin} data = anisotropy.get_map(name='data', composition=composition, **kwargs_data) ref = anisotropy.get_map(name='ref', composition=composition, **kwargs_data) # Bin in declination theta, phi = hp.pix2ang(n_side, range(len(ref))) thetamax = np.deg2rad(90 - decmin) thetamin = np.deg2rad(90 - decmax) n_dec_bins = 20 theta_bins= np.linspace(thetamin, thetamax, 
n_dec_bins+1) theta_bin_num = np.digitize(theta, theta_bins) - 1 dists = [] for idx in range(n_dec_bins): projections = {} data_ra_scrambled = data.copy() unseen_mask = data_ra_scrambled == hp.UNSEEN theta_bin_mask = (theta_bin_num == idx) combined_mask = theta_bin_mask & ~unseen_mask data_in_dec_bin = data_ra_scrambled[combined_mask] shuffled_data = pd.Series(data_in_dec_bin).sample(frac=1.0, random_state=random_state).values data_ra_scrambled[combined_mask] = shuffled_data data_ra_scrambled[~combined_mask] = hp.UNSEEN data_proj, data_proj_err, ra, ra_err = anisotropy.get_RA_proj_map(data_ra_scrambled, decmin=decmin, decmax=decmax, n_bins=n_ra_bins) ref_proj, ref_proj_err, ra, ra_err = anisotropy.get_RA_proj_map(ref, decmin=decmin, decmax=decmax, n_bins=n_ra_bins) projections['data_proj'] = data_proj projections['data_proj_err'] = data_proj_err projections['ref_proj'] = ref_proj projections['ref_proj_err'] = ref_proj_err projections['ra'] = ra dists.append(projections) return pd.DataFrame.from_records(dists) # data n_ra_scramble_trials = 1 ra_scambled_dists = [delayed(get_RAscrambled_proj)('all', random_state, n_ra_bins=30) for random_state in range(n_ra_scramble_trials)] ra_scambled_dists = delayed(pd.concat)(ra_scambled_dists) with ProgressBar(): ra_scambled_dists = compute(ra_scambled_dists, get=multiprocessing.get, num_works=min(n_ra_scramble_trials, 25))[0] ra_scambled_dists # + # with sns.color_palette('Blues_d', 20): data_colors = sns.color_palette('Blues_d', len(ra_scambled_dists)+1).as_hex() ref_colors = sns.color_palette('Greens_d', len(ra_scambled_dists)+1).as_hex() # fig, ax = plt.subplots() fig, ax = plt.subplots(figsize=(10, 8)) for dec_bin_idx, proj_df in ra_scambled_dists.iterrows(): ax.errorbar(proj_df['ra'], proj_df['data_proj'], yerr=proj_df['data_proj_err'], marker='.', ls=':', label=str(dec_bin_idx), color=data_colors[dec_bin_idx]) # ax.errorbar(proj_df['ra'], proj_df['ref_proj'], yerr=proj_df['ref_proj_err'], marker='.', ls='-', 
label=str(dec_bin_idx), # color='C2') # print(proj_df.iloc[n_ra_bins]) ax.grid() # ax.set_yscale('log', nonposy='clip') # ax.set_ylim(0e6, 2e6) ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) ax.invert_xaxis() ax.set_xlabel('RA [ $^{\circ}$]') ax.set_ylabel('Counts') ax.legend() # ax.set_ylabel('$\mathrm{\langle RI \\rangle }$') plt.show() # + # n_ra_scramble_trials = 10 # sig_ra_scambled = [delayed(get_RAscrambled_proj_sig)('all', random_state) # for random_state in range(n_ra_scramble_trials)] # sig_ra_scambled = delayed(pd.concat)(sig_ra_scambled) # - with ProgressBar(): sig_ra_scambled = sig_ra_scambled.compute(get=multiprocessing.get, num_works=min(n_ra_scramble_trials, 25)) grouped_nbins = sig_ra_scambled.groupby(sig_ra_scambled.index) grouped_nbins.get_group(n_bins).ri.mean() fig, axarr = plt.subplots(3, 3, figsize=(10, 6), sharex=True, sharey=True) for n_bins, ax in zip([1, 4, 6, 10, 20, 24, 36, 60, 72], axarr.flatten()): df_scambled_nbins = grouped_nbins.get_group(n_bins) ax.errorbar(df_scambled_nbins['ra'].mean(), df_scambled_nbins['ri'].mean(), yerr=None, marker='.', ls=':') ax.axhline(0, marker='None', ls=':', color='k', lw=1.5) ax.set_title('{} RA bins'.format(n_bins)) ax.grid() ax.invert_xaxis() fig.text(0.5, -0.025, 'RA [ $^{\circ}$]', ha='center', fontsize=16) fig.text(-0.025, 0.5, '$\mathrm{\langle RI \\rangle }$', va='center', rotation='vertical', fontsize=16) plt.tight_layout() scrambled_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, 'anisotropy', 'scrambled_nbins_all.png') comp.check_output_dir(scrambled_vs_nbins_outfile) plt.savefig(scrambled_vs_nbins_outfile) plt.show() # + # sig_ra_scambled.replace([np.inf, -np.inf], np.nan, inplace=True) # - grouped_nbins = sig_ra_scambled.groupby(sig_ra_scambled.index) # + sig_bins, sig_step = np.linspace(-5, 5, 50, retstep=True) sig_midpoints = (sig_bins[1:] + sig_bins[:-1]) / 2 fig, axarr = plt.subplots(3, 3, figsize=(10, 6), sharex=True, sharey=False) for n_bins, ax in zip(range(1, 
72), axarr.flatten()): # for n_bins, ax in zip([1, 4, 6, 10, 20, 24, 36, 60, 72], axarr.flatten()): df_noise_nbins = grouped_nbins.get_group(n_bins) print(df_noise_nbins) label_mean = '$\mu = {:0.2f}$'.format(df_noise_nbins.mean()) label_std = '$\sigma = {:0.2f}$'.format(df_noise_nbins.std()) df_noise_nbins.plot(kind='hist', bins=sig_bins, histtype='stepfilled', alpha=0.5, lw=1.5, color=color_dict['total'], ax=ax, label=label_mean + '\n ' + label_std) # ax.plot(sig_midpoints, n_noise_trials*sig_step*gaussian(sig_midpoints), # marker='None', label='Gaussian') ax.set_ylabel('') ax.set_title('{} RA bins'.format(n_bins)) ax.grid() # ax.legend() fig.text(0.5, -0.025, 'Anisotropy significance [$\\sigma$]', ha='center', fontsize=16) fig.text(-0.025, 0.5, 'Counts', va='center', rotation='vertical', fontsize=16) plt.tight_layout() # sig_vs_nbins_outfile = os.path.join(comp.paths.figures_dir, # 'anisotropy', 'sig_vs_nbins_all.png') # comp.check_output_dir(sig_vs_nbins_outfile) # plt.savefig(sig_vs_nbins_outfile) plt.show() # -
notebooks/anisotropy-smoothing-radius.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="rht6XRY8_c61" # ## # # Taller parte 1 # ### Parte 1: # ## Autor:<NAME> # ### Fecha; 02-05-2021 # ### Objetivo: Cree un autoencoder para el conjunto de datos Fashion MNIST, usando una red convolucional. Los datos puede obtenerlos directamente en keras como se hizo el conjunto MNIST de dígitos. # # Carga el conjunto de datos MNIST. # Se trata de un conjunto de datos de 60.000 imágenes # en escala de grises de 28x28 de los 10 dígitos, # junto con un conjunto de prueba de 10.000 imágenes. # # **Returns** # # Tuple of NumPy arrays: (x_train, y_train), (x_test, y_test). # # **x_train**: uint8 NumPy array of grayscale image data with shapes (60000, 28, 28), # containing the training data. Pixel values range from 0 to 255. # # **y_train**: uint8 NumPy array of digit labels (integers in range 0-9) # with shape (60000,) for the training data. # # **x_test**: uint8 NumPy array of grayscale image data with shapes (10000, 28, 28), # containing the test data. Pixel values range from 0 to 255. # # **y_test**: uint8 NumPy array of digit labels (integers in range 0-9) # with shape (10000,) for the test data. 
# # # + [markdown] id="S_B09MopAPDE" # ## Importa módulos # # + id="I45D3SdfAIys" import numpy as np import matplotlib.pyplot as plt import os from tensorflow.keras.layers import Dense, Input, Conv2D, Conv2DTranspose, Flatten, Reshape, Activation from tensorflow.keras.models import Model from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint from tensorflow.keras.datasets import cifar100 from tensorflow.keras.utils import plot_model import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras.optimizers import Adam from tensorflow.keras.losses import MeanSquaredError # + [markdown] id="v6Z1y1oVAo7g" # ## Leer datos # # + colab={"base_uri": "https://localhost:8080/"} id="nHDUaUNrAzcT" outputId="c0a16eaa-cd5d-4038-9573-75a2f80e5784" mnist_fashion = tf.keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = mnist_fashion.load_data() print("Forma de los datos de entrenamiento: ", train_images.shape) print("Forma de los datos de test:",test_images.shape) # + [markdown] id="S9ksW2AOWAuu" # ## Visualización de algunos datos # + colab={"base_uri": "https://localhost:8080/", "height": 589} id="_BEyPHsXWMQj" outputId="1a7959be-e39c-446e-f475-a677fd910369" class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat','Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] plt.figure(figsize=(10,10)) for i in range(36): plt.subplot(6,6,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) plt.xlabel(class_names[train_labels[i]]) plt.show() # + [markdown] id="FprchpBgBMdZ" # ## Normaliza los datos # + id="_-rwChOvBXMT" train_images = train_images.astype('float32') / 255. test_images = test_images.astype('float32') / 255. 
# + [markdown] id="H_RQ0Xh8Ba6_" # ## Configuración - Parametros de la red # + id="J0sEqxfeBgfT" """rows = x_train.shape[1] cols = x_train.shape[2] #channels = x_train.shape[3] input_shape = (rows, cols, 1) batch_size = 256 kernel_size = 3 latent_dim = 256 layer_filters = [64, 128, 256] """ input_shape = (28*28,) # 784 intermediate_dim_1 = 128 intermediate_dim_2 = 64 intermediate_dim_3 = 32 intermediate_dim_4 = 16 latent_dim = 3 # + [markdown] id="m0tabyIWBk7k" # ## Encoder # # + id="AT3Dl-mKBrpy" """ inputs = Input(shape = input_shape) x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(inputs) x = layers.MaxPooling2D((2, 2), padding="same")(x) x = layers.Dropout(0.2)(x) x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x) x = layers.MaxPooling2D((2, 2), padding="same")(x) """ # Define encoder model inputs = Input(shape=input_shape,name='encoder_input') x = Activation('relu')(inputs) x = Dense(intermediate_dim_1, name='hidden_layer_1_e')(x) x = Activation('relu')(x) x = Dense(intermediate_dim_2, name='hidden_layer_2_e')(x) x = Activation('relu')(x) x = Dense(intermediate_dim_3, name='hidden_layer_3_e')(x) x = Activation('relu')(x) x = Dense(intermediate_dim_4, name='hidden_layer_4_e')(x) x = Activation('relu')(x) outputs = Dense(latent_dim, name='latent_space')(x) #outputs = LayerNormalization(axis=1)(x) encoder = Model(inputs=inputs, outputs=outputs, name='dense_encoder') # + [markdown] id="CQlBwozTBx6D" # # ## Summary ENCODER # # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="1qwyUrHQB1k0" outputId="ebe22a55-bfbf-4643-895c-9f82d35c54c0" encoder.summary() plot_model(encoder, to_file='./encoder_dense_fashion_minist.png',show_shapes=True) # + [markdown] id="XGXg78CbB5aQ" # ## Decoder # + id="pGD5m-NHB-0C" """ x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x) x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x) x = layers.Conv2D(1, (3, 3), 
activation="sigmoid", padding="same")(x) decoder = Model (inputs=latent_input, outputs=x, name='decoder') """ # Define decoder model latent_input = Input(shape = (latent_dim,), name='decoder_input') x = Activation('relu')(latent_input) x = Dense(intermediate_dim_4 , name='hidden_layer_de_4_d')(latent_input) x = Activation('relu')(x) x = Dense(intermediate_dim_3 , name='hidden_layer_de_3_d')(x) x = Activation('relu')(x) x = Dense(intermediate_dim_2 , name='hidden_layer_de_2_d')(x) x = Activation('relu')(x) x = Dense(intermediate_dim_1 , name='hidden_layer_de_1_d')(x) x = Activation('relu')(x) x = Dense(input_shape[0], name='original_space')(x) x = Activation('sigmoid')(x) decoder = Model (inputs=latent_input, outputs=x, name='decoder') # + [markdown] id="lM99WFwtCTLj" # ## Summary DECODER # # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="D9C2cF24CU_H" outputId="cfd20bb1-9066-4d33-ad47-b7b79d70ef5f" decoder.summary() plot_model(decoder, to_file='./decoder_dense_fashion_minist.png',show_shapes=True) # + [markdown] id="MxMAV61ECtN0" # ## Autoencoder # # + id="mZ2jaN2BCiNO" # Autoencoder Model autoencoder = Model(inputs=inputs, outputs=decoder(encoder(inputs)), name='autoencoder_mnist_fashion_model') # + [markdown] id="55sivo62CzXd" # ## Summary AUTOENCODER # + colab={"base_uri": "https://localhost:8080/", "height": 564} id="-2qubgVuC36c" outputId="d1f5172d-d4b7-4771-9b21-01980fe8d429" autoencoder.summary() plot_model(autoencoder, to_file='./autoenautoencoder_mnist_fashion_model.png',show_shapes=True) # + [markdown] id="F2ZEW1JUC8uE" # ## Callbacks # Crea una clase derivada de tf.keras.callbacks.Callback # Se usa para pasar funciones de control al algoritmo de estimación. # Aquí la usaremos para que el entrenamiento pare cuando se alcance # un determinado accuracy (Presiciòn) con los datos de entrenamiento. 
# + id="As2EUm_TDIm7"
class MNIST_Callback(tf.keras.callbacks.Callback):
    """Stops training once training accuracy exceeds 0.99.

    NOTE(review): the autoencoder below is compiled with a loss only (no
    metrics), so 'accuracy' is not present in `logs`; the original code
    would raise a TypeError (`None > 0.99`) if this callback were ever
    attached to `fit`. The lookup is now guarded.
    """

    # Inherited Keras hook, called at the end of every epoch with the
    # current metric values in `logs`.
    def on_epoch_end(self, epoch, logs=None):  # FIX: avoid mutable default argument
        acc = (logs or {}).get('accuracy')
        if acc is not None and acc > 0.99:
            # FIX: message now matches the 0.99 threshold (it said "99.9%").
            print("\nSe alcanzó un 99% de precisión en el entrenamiento! Cancelando Entrenamiento...")
            self.model.stop_training = True


# create an instance of the callback class
accu_callback = MNIST_Callback()

# + [markdown] id="nOvbolvODVxL"
# ## Compile

# + id="-l6II5-wDX9Y"
optimizer = Adam(learning_rate=1e-3)
loss_fn = MeanSquaredError()
autoencoder.compile(optimizer=optimizer, loss=loss_fn)

# + [markdown] id="sbqjle2HgLbL"
# ## Training
#

# + colab={"base_uri": "https://localhost:8080/"} id="CWxdIVTxgQqr" outputId="a555846b-709c-4ff6-aba5-40c50091b9e9"
# Flatten the images to 784-long vectors to match the dense autoencoder input.
train_images = train_images.reshape(60000, 784).astype("float32")
test_images = test_images.reshape(10000, 784).astype("float32")
epochs = 200
batch_size = 64
history = autoencoder.fit(train_images, train_images, epochs=epochs,
                          batch_size=batch_size, validation_split=0.1)

# + [markdown] id="BQmX3twaDcg7"
# ## Visualize the loss and val_loss history

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="rrde3Be_DhvJ" outputId="96cb37e4-5920-48c2-b317-546b162622b9"
import matplotlib.pyplot as plt
# FIX: pandas was never imported in this notebook, so pd.DataFrame below
# raised NameError at runtime.
import pandas as pd

hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()

# + [markdown] id="V7hm3Q6IDkMu"
# ## Loss curves

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="ZoA7SrfSDolS" outputId="a71b4c22-ac55-4292-8e17-78d88d9dfde6"
plt.figure()
plt.xlabel('Epoca')
plt.ylabel('Loss: MSE')
plt.plot(hist['epoch'], hist['loss'], label='Error en entrenamiento')
plt.plot(hist['epoch'], hist['val_loss'], label='Error en validación')
plt.ylim([0.015,0.03])
plt.legend()
plt.show()

# + [markdown] id="j-pImeZ2mSmu"
# ## Autoencoder evaluation

# + colab={"base_uri": "https://localhost:8080/"} id="vIjAMvXumYR3" outputId="da5aa5a0-7987-4b19-dae6-07863a17cac5"
autoencoder.evaluate(test_images, test_images)
autoencoder.evaluate(train_images, train_images)

# + [markdown] id="1A3PfZA3mnHT"
# ## Visualizing a few elements
#

# + colab={"base_uri": "https://localhost:8080/", "height": 200} id="OOkSzqi_rL1V" outputId="f4d0bca5-6669-4432-b434-f2a68eb8e4e2"
# FIX: `decoded_imgs_test` was used below but never defined anywhere in this
# notebook (NameError). It is the autoencoder's reconstruction of the test set.
decoded_imgs_test = autoencoder.predict(test_images)

n = 20
plt.figure(figsize=(20, 4))
for i in range(1, n + 1):
    # Display original (top row)
    ax = plt.subplot(2, n, i)
    plt.imshow(test_images[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Display reconstruction (bottom row)
    ax = plt.subplot(2, n, i + n)
    plt.imshow(decoded_imgs_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

# + [markdown] id="6-p0_jJXvYN4"
# # Show reconstructions

# + id="1ouVj3dZvf-_"
def display(array1, array2, n):
    """
    Display `n` random image pairs: images from `array1` on the top row and
    the images at the same indices from `array2` on the bottom row.

    Args:
        array1: array of flattened 28x28 images (originals)
        array2: array of flattened 28x28 images (reconstructions), indexed
            in parallel with array1
        n: int, number of image pairs to show

    NOTE(review): this shadows IPython's built-in `display` in a notebook.
    """
    indices = np.random.randint(len(array1), size=n)
    images1 = array1[indices, :]
    images2 = array2[indices, :]
    plt.figure(figsize=(10, 4))
    for i, (image1, image2) in enumerate(zip(images1, images2)):
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(image1.reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(image2.reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()

# + [markdown] id="dk919CNOv5Gu"
#
# ## Predictions

# + colab={"base_uri": "https://localhost:8080/", "height": 252} id="-0ZgYiS6vghW" outputId="187fedaf-8c4a-4134-a615-ce25723feb7c"
num = 3  # number of elements to display
predictions = autoencoder.predict(test_images)
display(test_images, predictions, num)
Taller_5_Autoencoders/Taller_5_AutoEncoder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# scoring_function: project-local RPS-style metric (lower is better);
# defined in utils.py, not visible here.
from utils import scoring_function

pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', 100)

plt.rcParams['figure.figsize'] = (15, 8)
plt.rc('axes', axisbelow=True)
# -

# Validation target: one value per sku.
y_val = pd.read_feather('y_val.f').set_index('sku').iloc[:, 0]
y_val

# +
# Per-model class probabilities (30 classes each), columns prefixed so the
# three models can live side by side in one frame.
df_compare = pd.concat([
    pd.read_csv('2.1-probs-tuned-lgbm.csv', index_col=0).rename(
        columns={str(i): f'lgbm_{i}' for i in range(30)}
    ),
    pd.read_csv('3.1-probs-nn-one-layer-32.csv', index_col=0).rename(
        columns={str(i): f'nn_one_32_{i}' for i in range(30)}
    ),
    pd.read_csv('3.2-probs-nn-two-layers-16-16.csv', index_col=0).rename(
        columns={str(i): f'nn_two_16_16_{i}' for i in range(30)}
    )
], axis=1)
df_compare

# +
# diversity: per-class correlation between LGBM and the one-layer NN.
corrs = []
for i in range(30):
    corrs.append(df_compare[[f'lgbm_{i}', f'nn_one_32_{i}']].corr().values[1][0])

plt.bar(range(1, 31), corrs)
plt.xticks(range(1, 31))
plt.yticks(np.arange(0, 1, 0.05))
plt.grid();

# +
# diversity: LGBM vs the two-layer NN.
corrs = []
for i in range(30):
    corrs.append(df_compare[[f'lgbm_{i}', f'nn_two_16_16_{i}']].corr().values[1][0])

plt.bar(range(1, 31), corrs)
plt.xticks(range(1, 31))
plt.yticks(np.arange(0, 1, 0.05))
plt.grid();

# +
# diversity: the two NNs against each other.
corrs = []
for i in range(30):
    corrs.append(df_compare[[f'nn_one_32_{i}', f'nn_two_16_16_{i}']].corr().values[1][0])

plt.bar(range(1, 31), corrs)
plt.xticks(range(1, 31))
plt.yticks(np.arange(0, 1, 0.02))
plt.grid();
# -

# Grid of (w, 1-w) weight pairs to sweep over for the blend.
weights = list(zip(1 - np.linspace(0, 0.99, 100), np.linspace(0, 0.99, 100)))
weights[:10]

# +
# %%time
# Weighted average of LGBM and the two-layer NN; score each weight pair.
# (The one-layer NN is deliberately left out -- see the commented line.)
rps_weights = []
for ws in weights:
    rps_weights.append(
        scoring_function(
            y_val + 1,
            np.average([
                np.array(df_compare.iloc[:, :30].values),  # LGBM
                # np.array(df_compare.iloc[:, 30:60].values),  # NN one layer (32)
                np.array(df_compare.iloc[:, 60:].values)  # NN two layers (16, 16)
            ], axis=0, weights=ws)
        )
    )
# -

# Score as a function of the weight index; red line marks the best blend.
plt.plot(rps_weights)
plt.axvline(x=np.argmin(rps_weights), c='red', linestyle='--')
plt.text(x=50, y=3.662,
         s='weights = ' + str(weights[np.argmin(rps_weights)]) + '\n'
           + str(rps_weights[np.argmin(rps_weights)]))
plt.grid();

rps_weights[np.argmin(rps_weights)]

# Single-model baselines, for comparison with the blended score above:
# - LGBM: 3.6679233366046873
# - NN one layer (32): 3.678533428175507
# - NN two layers (16, 16): 3.671092105352449

# ## Averaging submissions

# +
# Blend the LGBM and second-NN submission files with the best weights found.
sub = np.average([
    pd.read_csv('2-tuned-lgbm.csv.gz', header=None).values,
    pd.read_csv('3.2-nn-two-layers-16-16.csv.gz', header=None).values
], axis=0, weights=[0.52, 0.48])

X_test_sub = pd.DataFrame(sub).astype(float).round(4)
X_test_sub
# -

# Sanity check: per-row probability sums (should cluster near 1.0).
X_test_sub.sum(axis=1).value_counts().sort_index()

# Distribution of the argmax class across test rows.
X_test_sub.idxmax(axis=1).plot.hist(bins=30)
plt.xticks(range(1, 31))
plt.grid();

# +
# %%time
X_test_sub.to_csv(
    '4-average_predictions-2-tuned-lgbm-3.2-nn-two-layers-16-16-weights-052-048.csv.gz',
    compression='gzip',
    index=False,
    header=False
)
# -

# Results for this blended submission:
# - CV: 3.65223
# - Public LB: 3.76937
# - Private LB: 3.77443
4 - Average predictions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 (''.venv'': venv)'
#     language: python
#     name: python3
# ---

# # Macrostructure pre-processor

# +
import json

# Known mojibake characters produced by the PDF-extraction step, mapped to
# their intended replacements (an empty string deletes the character).
# The original dict listed "\u00a8" twice with the same value; the duplicate
# key was silently discarded by Python and has been removed here.
ESCAPE_CHARS = {
    "\u000b": "ff",
    "\r": "le",
    "\u0014": "c",
    "\u0012": "è",
    "\u0013": "é",
    "\f": "fil",
    "\u0018": "c",
    "\u0015": "",
    "\u00f1": "",
    "\u00fa": "",
    "\u00e6": "",
    "\u0019": "",
    "\u007f": "",
    "\u00a8": "",
    "\u00b4": "",
}

# Pre-built translation table: every key is a single character, so one
# str.translate() pass replaces the chain of .replace() calls.
_ESCAPE_TABLE = str.maketrans(ESCAPE_CHARS)


def clean_author_text(text):
    """Return *text* with every known dirty character replaced per ESCAPE_CHARS."""
    return text.translate(_ESCAPE_TABLE)


def main():
    """Load the raw macrostructure, clean it, and write resources/macrostructure.json."""
    # Load the raw macrostructure.
    with open("resources/raw_macrostructure.json", "r") as raw_macrostructure_file:
        raw_macrostructure = json.load(raw_macrostructure_file)

    # Keep only the bodies of successful responses.
    clean_macrostructure = [
        json_item["full_response"]["body"]
        for json_item in raw_macrostructure
        if json_item["status"] == "Success"
    ]

    # Escape dirty characters everywhere (authors fields included) by
    # round-tripping the whole structure through its JSON text form.
    # The original's .encode("utf-8").decode("utf-8") round-trip was a
    # no-op and has been dropped.
    clean_macrostructure = json.loads(
        clean_author_text(json.dumps(clean_macrostructure))
    )

    # TODO(review): stripping "[54] "-style prefixes from author names and
    # clearing the References authors field were sketched (commented out)
    # in the original notebook but never implemented.

    # Save the clean macrostructure to a file.
    with open("resources/macrostructure.json", "w") as clean_macrostructure_file:
        clean_macrostructure_file.write(
            json.dumps(clean_macrostructure, indent=4, sort_keys=True)
        )


if __name__ == "__main__":
    main()
data_preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="EDSz0laxc5yk" # # Data PreProcessing- # + id="-Go1dmzjteM_" #importing Required Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime ,timedelta # + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 72} id="JsrQAa05t4m5" outputId="2481f906-c5a7-4418-ceab-9e4cfd7662fb" #load the data from google.colab import files #upload the data on the colab notebook uploded = files.upload() # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="YLQAZ-P8uIib" outputId="bac99b0c-e177-43a0-82a2-54ee55b4c7fc" #store the data into dataframe df= pd.read_csv('train_MpHjUjU.csv' ) # reading the data #printing the first 10 rows df.head(5) # + colab={"base_uri": "https://localhost:8080/"} id="Jw93bc2dvOLS" outputId="1c01ddd6-a0b8-418a-cb99-44166ce8b318" df.shape #checking no. 
of column & rows # + colab={"base_uri": "https://localhost:8080/"} id="wBrYQfxswGi0" outputId="db602c6a-458f-43da-cf7e-4f529df39a0f" df.dtypes #checking datatypes of the column # + colab={"base_uri": "https://localhost:8080/"} id="BDjf2NKFwT5a" outputId="c0e94ff9-6db8-4c8e-dd69-165b45de1c94" #get count of the empty value of each column df.isna().sum() # + colab={"base_uri": "https://localhost:8080/"} id="m3M1uyBKwpcT" outputId="96a86e84-3471-40e6-aa86-d7952b18444b" #check for any missing value df.isnull().values.any() # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="DkXgloJ8w4Ub" outputId="9a054084-4cae-4b02-b7e6-06959d4c467a" #Checking some statistics df.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="VatPqyOEQ9dJ" outputId="082fe19e-c5b6-4202-f4d4-a9848bd37836" #Deletig for dublicate values df = df.drop_duplicates(subset=['Emp_ID' ,'Age', 'Gender','City','Education_Level', 'Salary','Dateofjoining'],keep = 'last') df.head() # + [markdown] id="90WXiTIihFTM" # # Feature Engineering # + [markdown] id="6AMLkPeKdEGa" # # # # # * Adding two new feature # # # 1. Attrition - From last working date clumn # # 2. 
Year = Number of years employee worked in the company # # # # # # # # + id="_WPBf3-Fxgr0" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="c3ea631b-1a6c-4c22-dee7-23371dce48cb" #adding one new column attrition form last ''last working date 'column df['Attrition'] = df['LastWorkingDate'] df['Attrition'] = df['Attrition'].fillna(0) #Replacing NAN values to 0 y = pd.DataFrame(df['Attrition'], columns = ['attrition']) y['attrition']= df['Attrition'] y['attrition'] = y['attrition'].str.isnumeric() def datetime_to_int(y): return int(y['attrition'].strftime("%Y-%m-%d")) #replace date to false value y =y.replace(to_replace=False,value = 1) #reaplce false value to the in 1 for all of those who have left the organisation y= y.fillna(0) ##making new feature attrition from lastworkingday column y.head() # + id="GTGg4fI3N9a4" colab={"base_uri": "https://localhost:8080/"} outputId="6fb7b34a-83ab-4bc8-defd-bb9f110b8f53" #Adding new feature attrition from lastworkingday column df['Attrition']=y['attrition'] df['Attrition'].value_counts() # + id="eAqxRPT4R4iy" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="8ae14f4d-8b36-44f1-bd30-d1d711e222f7" #the number of employees that stayed and left the company sns.countplot(df['Attrition']) # + colab={"base_uri": "https://localhost:8080/"} id="XrOlhDGxhf_k" outputId="5dac71b5-fe95-4c22-966d-4a4f9ae737f6" df.shape # + [markdown] id="gTVNoo00hmrS" # * We have unique 3786 data points and 14 column,Out of them 2170 data point belonging to Class NO(0) and 1616 data point belonging to class 'Yes(1)' # # # # # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="PakPmJv2ZoL8" outputId="375657d3-e397-45dc-fd5b-cb283510a145" plt.subplots(figsize=(12,4)) sns.countplot(x='Age',hue='Attrition',data=df, palette='colorblind') # + [markdown] id="rc5P4Iv7bR_N" # * From 33 onwards age group, most of the employees did not leave job and majority in attrition is contributed by age group of 31 & 32 # + 
[markdown] id="Jvx4OCg38usP" # . # + [markdown] id="uPLVuWHnebvW" # **Replacing NaN values to prediction date date so we can calculate number of years employee work** # + colab={"base_uri": "https://localhost:8080/", "height": 310} id="u1bIE920Ea-J" outputId="2e14d25c-f5ca-4200-d590-6858f5eb5cd6" #filling Na/NaN values df['LastWorkingDate'] = df['LastWorkingDate'].fillna('2018-01-01') #filling nan values to prediction date date so we can calculate number of years employee work df.head(5) # + [markdown] id="crs3qbpyeQk7" # **Introducing new feature using first working day and last working day variables** # + colab={"base_uri": "https://localhost:8080/"} id="iQXqRJ2zMrpC" outputId="af7e3438-0e2f-4f80-88fe-5adeeda5acc9" #Introducing new feature using first working day and last working day variables- df["Dateofjoining"] = pd.to_datetime(df["Dateofjoining"]) #handling timeseries data df["LastWorkingDate"] = pd.to_datetime(df["LastWorkingDate"]) df4 = pd.DataFrame( columns = ['day','year']) #creating 2 new features number of days employee work and number of years employee work df4['day'] = df['LastWorkingDate'] - df['Dateofjoining'] df4['year'] = df4["day"] / timedelta(days=365) print(df4) # + [markdown] id="78g7kEWP5K2i" # # # # # # Dropping unwanted column from Dataset - # # # # # # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="3rJgUAo8SE3f" outputId="342b4f93-b71f-42be-b058-21de93ba2c4a" df=df.drop(labels=['MMM-YY','Dateofjoining','LastWorkingDate','Gender','City','Education_Level'],axis=1) # dropping the table because they are useless in our prediction #adding new features to our data df['year'] = df4['year'] df.head() # + id="Fa2Kdbtl9EVX" df.to_csv('test matching data.csv') # creating csv before dropping emp_id column so we can merge test csv with train csv for prediction # + colab={"base_uri": "https://localhost:8080/"} id="M0sF9P0EKZY6" outputId="22e81932-b77d-447b-e3f3-3af3ca8e92b8" df.isna().sum() #checking the na values # + [markdown] 
id="HbUnnmrSfYhk" # **Dropping id table-** # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="uJgAzh7me0RO" outputId="c169097c-6205-45c5-ff72-462c8f4f541f" #dropping id table df = df.drop (labels='Emp_ID',axis = 1) df['attrition']= df['Attrition'] df= df.drop(labels='Attrition',axis = 1) #shifting attrition table at last df.head() # + [markdown] id="i6TyBLJRfnv9" # # Visualizing our Variables- # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="sOxcfQ3cst8z" outputId="6639c693-2bcb-4cc9-bf22-a926f6f49830" #get the correlation df.corr() # + colab={"base_uri": "https://localhost:8080/", "height": 432} id="Gf6sG9CutxwD" outputId="096181c7-28fe-46fa-a0aa-0c07cd1fbc78" #visualize the correlation plt.subplots(figsize=(12,5)) sns.heatmap(df.corr(), annot=True,fmt ='.0%') # + id="XZCs0qEYdqWq" df.to_csv('processedtrain.csv') # processed trainnig data csv # + colab={"base_uri": "https://localhost:8080/"} id="MjhILHJiU6-2" outputId="e6103580-24ec-4f75-bbf7-1a38067aa88a" df.shape #checking shape of data # + [markdown] id="y-DGG_r2pcOs" # # # Dividing the Data into Two Parts "TRAINING" and "TESTING" - # # # # + id="FW3viCLvV2aC" colab={"base_uri": "https://localhost:8080/"} outputId="b3de749b-a9ea-41ce-be80-314bac281d84" #split the data x = df.iloc[:,0:7].values # splitting all the rows from column 0 to 6 in x y = df.iloc[: ,7].values #splitting attrtion column in y print('x{}'.format(x)) print('================================================') print('y:{}'.format(y)) # + [markdown] id="UtVaB2ASp5vw" # # Building The model from "TRAINING DATA SET"- # + id="lRqPz05xCONp" #split the data into 25% testing and 75% training from sklearn.model_selection import train_test_split #importing train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.20,train_size= 0.80 , random_state =0 ) #splitting data , 80-20=train-test split # + id="12mtQ2ghsFQH" #using KNN Classifier as our model from sklearn.neighbors import 
KNeighborsClassifier #importing knnClassifier knn =KNeighborsClassifier(n_neighbors=7,weights='uniform',algorithm='auto') # n=7 knn.fit(x_train, y_train) #fitting the model y_pred = knn.predict(x_test) # predicting cross validation test data # + [markdown] id="1NkYm67rb2Zv" # # Evaluating KNNclassifier using macro f1_score metrics - # + colab={"base_uri": "https://localhost:8080/"} id="ub_1-_2-n_73" outputId="3fd56a94-e74a-4423-a8ba-949f1a88c9ac" from sklearn.metrics import f1_score #importing f1_score from sklearn f1 = f1_score(y_test,y_pred) #getting the f1_score print(f1) # + [markdown] id="XE6JBQ9Z2gNR" # # 4. Test data: # # # + [markdown] id="Xm66b6cdW-RF" # # # * Creating test file from given 'test_hXY9mYw.csv' # # # # + id="_VvYFOQf2oQS" #Creating test file from given 'test_hXY9mYw.csv' df1=pd.read_csv('/content/test matching data.csv') df2=pd.read_csv('/content/test_hXY9mYw.csv') test_data = pd.merge(df2,df1) #merginf df1 and df2 using EMP_ID column # + id="aoERsQMP4NVK" #dropping duplicate values test_data = test_data.drop(labels=['Attrition','Unnamed: 0'],axis=1) test_data=test_data.drop_duplicates(subset= 'Emp_ID',keep='last') test_data.to_csv('test_data.csv') # + colab={"base_uri": "https://localhost:8080/"} id="WLc0LgrO__yl" outputId="d026c5d5-e2f1-4b8c-ce37-e5834c2d45ea" test_data.shape # + [markdown] id="J8SGlNaaud2s" # ### Dropping the Emp_ID column from test data # + id="yfD7XeboXc8M" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="de6ba113-bb6d-4ada-d127-641aa52ff128" #dropping the Emp_ID column - test_data= test_data.drop(labels='Emp_ID',axis=1) test_data.head() # + [markdown] id="1QSlzAFpYjhZ" # ### **Predicting the given test-data point** :- # + colab={"base_uri": "https://localhost:8080/"} id="k7TNuTQPYgDJ" outputId="92d0dbda-3546-4c60-98f5-fee818f3b68f" Final_result= knn.predict(test_data) #Predicting the given test-data point where how many employee will leave the company in given jan-2018 quarter # + 
colab={"base_uri": "https://localhost:8080/", "height": 677} id="1Tef34wjXG72" outputId="ebc28bef-0def-498d-a4e2-471bb34e8d84" #Creating submission file of the final result- df6=pd.read_csv('/content/test_data.csv') df6=df6.drop(labels='Unnamed: 0', axis=1) test= pd.DataFrame(columns=['Emp_ID','Target']) test['Emp_ID']=df6['Emp_ID'] test['Target']=Final_result test.head(20) # + colab={"base_uri": "https://localhost:8080/"} id="kUBDPKfiRd7e" outputId="c1720272-1325-42c7-891a-c5786b286d8b" test['Target'].value_counts() #checking predicted values_count # + [markdown] id="T8W5k4NgY905" # # # Conclusion :- # * From above result we can assume that out of 741 employees ,we can expect that 160 employees will leave the company in the upcoming two quarters (01 Jan 2018 - 01 July 2018) and 581 employees will be remain at their designation. # # # # # # # # + [markdown] id="AlZJlx5vZ7FG" # # Suggestion:- # * As data scientist i would suggest From above Analysis and Prediction that ,company should offer more salary to more experience # # 1. company should offer more salary to more experience person. # # 2. Company should give better designation to old employees. # # # # # + [markdown] id="cJZN8iAGYJN5" # ## Submission File :- # + id="jU5yV8MuwZN7" test.to_csv('Final_result.csv') #submission file
Employee_Attrition_notebook_.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# +
# %matplotlib inline
import os
import sys
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

warnings.filterwarnings('ignore')

# Project layout: make the repo's src/ package importable from the notebook.
basepath = os.path.expanduser('~/Desktop/src/AllState_Claims_Severity/')
sys.path.append(os.path.join(basepath, 'src'))

np.random.seed(2016)
# -

# load files
train = pd.read_csv(os.path.join(basepath, 'data/raw/train.csv'))
test = pd.read_csv(os.path.join(basepath, 'data/raw/test.csv'))
sample_sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv'))

# ** Anonymized Features **
# * What is required ?
# > We have to predict the loss that would be incurred on a particular insurance.
# * How are the different features related to other features ?
# > * Plotted relationships of continuous features with the target variable.
#     * Need to figure out how to do that for categorical variables.
# * How is the target variable related to other features ?
# > We can check for this by plotting relationship between different features and the target variable.

train.head()

# ** Relationship between target variable and continuous variables **

cont_features = [col for col in train.columns[1:-1] if 'cont' in col]

# +
# One scatter panel per continuous feature against the target `loss`.
# The original walked (row, col) indices by hand; zipping over axes.flat
# fills the same 14 panels in the same order (the 15th stays empty).
fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(16, 8), sharey=True)

for ax, feat in zip(axes.flat, cont_features[:14]):
    ax.scatter(train[feat], train.loss)
    ax.set_xlabel(feat)

plt.tight_layout();
# -

plt.scatter(train.id, train.loss);

# Rows with extreme losses (> 40k) are compared against the rest below.
strange_data = train.loc[train.loss > 4e4]


def summarize_dataset(df, features):
    """Print a quick per-feature summary of *df*.

    For object (categorical) columns, prints the relative frequency of each
    level; for numeric columns, prints ``describe()`` statistics. Purely a
    side-effect (printing) helper; returns None.
    """
    for feat in features:
        print(f'Feature Name: {feat}\n')
        # BUGFIX: the deprecated alias `np.object` was removed in NumPy 1.24;
        # the builtin `object` is the documented equivalent.
        if df[feat].dtype == object:
            counts = df[feat].value_counts()
            # Relative frequencies (value_counts computed once, not twice).
            print(counts / counts.sum())
        else:
            print(df[feat].describe())
        print('=' * 50 + '\n')


features = train.columns[1:-1]

# The original notebook repeated summarize_dataset() pairwise for the slices
# [:10], [10:20], ..., [110:116] -- i.e. only the first 116 features, ten at
# a time. The loop below reproduces exactly those slices and pairings.
normal_data = train.loc[train.loss <= 4e4]
for start in range(0, 116, 10):
    chunk = features[start:min(start + 10, 116)]
    summarize_dataset(normal_data, chunk)
    summarize_dataset(strange_data, chunk)

# ** Observation: Frequency counts might play a role in identifying high losses vs average losses. **
notebooks/DataExploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Arrays" data-toc-modified-id="Arrays-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Arrays</a></span><ul class="toc-item"><li><span><a href="#Array-indexing" data-toc-modified-id="Array-indexing-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Array indexing</a></span></li><li><span><a href="#Datatypes" data-toc-modified-id="Datatypes-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Datatypes</a></span></li><li><span><a href="#Array-math" data-toc-modified-id="Array-math-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Array math</a></span></li><li><span><a href="#Broadcasting" data-toc-modified-id="Broadcasting-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Broadcasting</a></span></li></ul></li><li><span><a href="#SciPy" data-toc-modified-id="SciPy-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>SciPy</a></span></li><li><span><a href="#Image-operations" data-toc-modified-id="Image-operations-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Image operations</a></span></li><li><span><a href="#Distance-between-points" data-toc-modified-id="Distance-between-points-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Distance between points</a></span></li></ul></div> # - # # NumPy # * Numpy: Arrays, Array indexing, Datatypes, Array math, Broadcasting # * Matplotlib: Plotting, Subplots, Images # * Jupyter Notebook: Creating notebooks, Typical workflows # %matplotlib inline import os from IPython.core.display import HTML def load_style(directory = '../', name='customMac.css'): styles = open(os.path.join(directory, name), 'r').read() return HTML(styles) load_style() # NumPy is the core 
library for scientific computing in Python. It provides a high-performance multi-dimensional array object, and tools for working with these arrays. If you are already familiar with MATLAB, you might find this [tutorial](https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html) useful to get started with NumPy. # To use NumPy, we first need to `import` the `numpy` package: import numpy as np # ## Arrays # A numpy array is a grid of values, all of the same type, and is indexed by a tuple of non-negative integers. The number of dimensions is the rank of the array; the shape of an array is a `tuple` of integers giving the size of the array along each dimension. # We can initialize numpy arrays from nested Python lists, and access elements using square brackets: # + arr_r1 = np.array([1, 2, 3]) # Create a rank 1 array print(type(arr_r1), arr_r1.shape, arr_r1[0], arr_r1[1], arr_r1[2]) arr_r1[0] = 5 # Change an element of the array print(arr_r1) # - arr_r2 = np.array([[1,2,3], [4,5,6]]) # Create a rank 2 array print(arr_r2) print(arr_r2.shape) print(arr_r2[0, 0], arr_r2[0, 1], arr_r2[1, 0]) # Numpy also provides many functions to create arrays: arr = np.zeros((2,2)) # Create an array of all zeros print(arr) arr = np.ones((1,2)) # Create an array of all ones print(arr) arr = np.full((2,2), 7) # Create a constant array print(arr) arr = np.eye(2) # Create a 2x2 identity matrix print(arr) arr = np.random.random((2,2)) # Create an array filled with random values print(arr) # ### Array indexing # Numpy offers several ways to index into arrays. # Slicing: Similar to Python lists, numpy arrays can be sliced. 
Since arrays may be multi-dimensional, you must specify a slice for each dimension of the array: # + import numpy as np # Create the following rank 2 array with shape (3, 4) # [[ 1, 2, 3, 4] # [ 5, 6, 7, 8] # [ 9, 10, 11, 12]] a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) # Use slicing to pull out the subarray consisting of the first 2 rows # and columns 1 and 2; b is the following array of shape (2, 2): # [[2, 3] # [6, 7]] b = a[:2, 1:3] print(b) # - # A slice of an array is a view into the same data, so modifying it will modify the original array. print(a[0, 1]) b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1] print(a[0, 1]) # You can also mix integer indexing with slice indexing. However, doing so will yield an array of lower rank than the original array. Note that this is quite different from the way that MATLAB handles array slicing: # Create the following rank 2 array with shape (3, 4) a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) print(a) # Two ways of accessing the data in the middle row of the array. # Mixing integer indexing with slices yields an array of lower rank, # while using only slices yields an array of the same rank as the # original array: # + row_r1 = a[1, :] # Rank 1 view of the second row of a row_r2 = a[1:2, :] # Rank 2 view of the second row of a print(row_r1, row_r1.shape) # Prints "[5 6 7 8] (4,)" print(row_r2, row_r2.shape) # Prints "[[5 6 7 8]] (1, 4)" # + # We can make the same distinction when accessing columns of an array: col_r1 = a[:, 1] col_r2 = a[:, 1:2] print(col_r1, col_r1.shape) # Prints "[ 2 6 10] (3,)" print(col_r2, col_r2.shape) # Prints "[[ 2] # [ 6] # [10]] (3, 1)" # - # Integer array indexing: When you index into numpy arrays using slicing, the resulting array view will always be a subarray of the original array. In contrast, integer array indexing allows you to construct arbitrary arrays using the data from another array. 
Here is an example: # + a = np.array([[1,2], [3, 4], [5, 6]]) # An example of integer array indexing. # The returned array will have shape (3,) and print(a[[0, 1, 2], [0, 1, 0]]) # The above example of integer array indexing is equivalent to this: print(np.array([a[0, 0], a[1, 1], a[2, 0]])) # + # When using integer array indexing, you can reuse the same # element from the source array: print(a[[0, 0], [1, 1]]) # Equivalent to the previous integer array indexing example print(np.array([a[0, 1], a[0, 1]])) # - # One useful trick with integer array indexing is selecting or mutating one element from each row of a matrix: # Create a new array from which we will select elements a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) print(a) # + # Create an array of indices b = np.array([0, 2, 0, 1]) # Select one element from each row of a using the indices in b print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]" # - # Mutate one element from each row of a using the indices in b a[np.arange(4), b] += 10 print(a) # Boolean array indexing: Boolean array indexing lets you pick out arbitrary elements of an array. Frequently this type of indexing is used to select the elements of an array that satisfy some condition. Here is an example: # + a = np.array([[1,2], [3,4], [5,6]]) bool_idx = (a > 2) # Find the elements of a that are bigger than 2; # this returns a numpy array of Booleans of the same # shape as a, where each slot of bool_idx tells # whether that element of a is > 2. print(bool_idx) # + # We use boolean array indexing to construct a rank 1 array # consisting of the elements of a corresponding to the True values # of bool_idx print(a[bool_idx]) # We can do all of the above in a single concise statement: print(a[a > 2]) # - # For brevity, we have left out a lot of details about numpy array indexing; if you want to know more, you should read the [documentation](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html). 
# ### Datatypes # Every numpy array is a grid of elements of the same type. Numpy provides a large set of numeric datatypes that you can use to construct arrays. Numpy tries to guess a datatype when you create an array, but functions that construct arrays usually also include an optional argument to explicitly specify the datatype. Here is an example: # + x = np.array([1, 2]) # Let numpy choose the datatype y = np.array([1.0, 2.0]) # Let numpy choose the datatype z = np.array([1, 2], dtype=np.int64) # Force a particular datatype print(x.dtype, y.dtype, z.dtype) # - # You can read all about numpy datatypes in the [documentation](https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html). # ### Array math # Basic mathematical functions operate element-wise on arrays, and are available both as operator overloads and as functions in the numpy module: # + x = np.array([[1,2], [3,4]], dtype=np.float64) y = np.array([[5,6], [7,8]], dtype=np.float64) # Element-wise sum; both produce the array print(x + y) print(np.add(x, y)) # - # Element-wise difference; both produce the array print(x - y) print(np.subtract(x, y)) # Element-wise product; both produce the array print(x * y) print(np.multiply(x, y)) # Element-wise division; both produce the array # [[ 0.2 0.33333333] # [ 0.42857143 0.5 ]] print(x / y) print(np.divide(x, y)) # Element-wise square root; produces the array # [[ 1. 1.41421356] # [ 1.73205081 2. ]] print(np.sqrt(x)) # Note that unlike MATLAB, `*` is element-wise multiplication, not matrix multiplication. We instead use the `dot` function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. 
`dot` is available both as a function in the numpy module and as an instance method of array objects: # + x = np.array([[1,2], [3,4]]) y = np.array([[5,6], [7,8]]) v = np.array([9,10]) w = np.array([11,12]) # Inner product of vectors; both produce 219 print(v.dot(w)) print(np.dot(v, w)) # - # Matrix / vector product; both produce the rank 1 array [29 67] print(x.dot(v)) print(np.dot(x, v)) # Matrix / matrix product; both produce the rank 2 array # [[19 22] # [43 50]] print(x.dot(y)) print(np.dot(x, y)) # Numpy provides many useful functions for performing computations on arrays; one of the most useful is `sum`: # + x = np.array([[1,2], [3,4]]) print(np.sum(x)) # Compute sum of all elements; prints "10" print(np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]" print(np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]" # - # You can find the full list of mathematical functions provided by numpy in the [documentation](https://docs.scipy.org/doc/numpy/reference/routines.math.html). # # Apart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data in arrays. The simplest example of this type of operation is transposing a matrix; to transpose a matrix, simply use the `T` attribute of an array object: print(x) print(x.T) # + v = np.array([[1,2,3]]) print(v) print(v.T) # - # ### Broadcasting # Broadcasting is a powerful mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array. # # For example, suppose that we want to add a constant vector to each row of a matrix. 
# We could do it like this:

# +
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x)   # Create an empty matrix with the same shape as x

# Add the vector v to each row of the matrix x with an explicit loop.
# We loop once per row: x.shape[0] == 4 rows here.
# (Fix: an earlier version iterated with np.ndenumerate(x), which visits
# every *element* of x -- 12 iterations instead of 4 -- and its (i, j)
# index tuple used as y[idx, :] triggers fancy indexing that redundantly
# re-assigns two whole rows per iteration. A plain per-row index is the
# intended "explicit loop" being contrasted with broadcasting below.)
for i in range(x.shape[0]):
    y[i, :] = x[i, :] + v

print(y)
# -

# This works; however when the matrix `x` is very large, computing an explicit loop in Python could be slow. Note that adding the vector v to each row of the matrix `x` is equivalent to forming a matrix `vv` by stacking multiple copies of `v` vertically, then performing elementwise summation of `x` and `vv`. We could implement this approach like this:

vv = np.tile(v, (4, 1))  # Stack 4 copies of v on top of each other
print(vv)                # Prints "[[1 0 1]
                         #          [1 0 1]
                         #          [1 0 1]
                         #          [1 0 1]]"

y = x + vv  # Add x and vv element-wise
print(y)

# Numpy broadcasting allows us to perform this computation without actually creating multiple copies of v. Consider this version, using broadcasting:

# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v  # Add v to each row of x using broadcasting
print(y)

# The line `y = x + v` works even though `x` has shape `(4, 3)` and `v` has shape `(3,)` due to broadcasting; this line works as if v actually had shape `(4, 3)`, where each row was a copy of `v`, and the sum was performed elementwise.
#
# Broadcasting two arrays together follows these rules:
#
# 1. If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length.
# 2. The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension.
# 3. The arrays can be broadcast together if they are compatible in all dimensions.
# 4. After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays.
# 5. In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension
#
# If this explanation does not make sense, try reading the explanation from the [documentation](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) or this [explanation](https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc).
#
# Functions that support broadcasting are known as universal functions. You can find the list of all universal functions in the [documentation](https://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs).
#
# Here are some applications of broadcasting:

# +
# Compute outer product of vectors
v = np.array([1,2,3])  # v has shape (3,)
w = np.array([4,5])    # w has shape (2,)
# To compute an outer product, we first reshape v to be a column
# vector of shape (3, 1); we can then broadcast it against w to yield
# an output of shape (3, 2), which is the outer product of v and w:
print(np.reshape(v, (3, 1)) * w)

# +
# Add a vector to each row of a matrix
x = np.array([[1,2,3], [4,5,6]])
# x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3),
# giving the following matrix:
print(x + v)

# +
# Add a vector to each column of a matrix
# x has shape (2, 3) and w has shape (2,).
# If we transpose x then it has shape (3, 2) and can be broadcast
# against w to yield a result of shape (3, 2); transposing this result
# yields the final result of shape (2, 3) which is the matrix x with
# the vector w added to each column. Gives the following matrix:
print((x.T + w).T)
# -

# Another solution is to reshape w to be a row vector of shape (2, 1);
# we can then broadcast it directly against x to produce the same
# output.
print(x + np.reshape(w, (2, 1))) # Multiply a matrix by a constant: # x has shape (2, 3). Numpy treats scalars as arrays of shape (); # these can be broadcast together to shape (2, 3), producing the # following array: print(x * 2) # Broadcasting typically makes your code more concise and faster, so you should strive to use it where possible. # This brief overview has touched on many of the important things that you need to know about numpy, but is far from complete. Check out the [numpy reference](https://docs.scipy.org/doc/numpy/reference/) to find out much more about numpy. # ## SciPy # Numpy provides a high-performance multidimensional array and basic tools to compute with and manipulate these arrays. [SciPy](http://docs.scipy.org/doc/scipy/reference/) builds on this, and provides a large number of functions that operate on numpy arrays and are useful for different types of scientific and engineering applications. # The best way to get familiar with SciPy is to [browse the documentation](http://docs.scipy.org/doc/scipy/reference/index.html). We will highlight some parts of SciPy that you might find useful for this class. # ## Image operations # SciPy provides some basic functions to work with images. For example, it has functions to read images from disk into numpy arrays, to write numpy arrays to disk as images, and to resize images. Here is a simple example that showcases these functions: # + import scipy from scipy.misc import imread, imsave, imresize # Read an JPEG image into a numpy array img = imread('../imgs/cat.jpg') # print(img.dtype, img.shape) # Prints "uint8 (400, 248, 3)" # scipy(__version__) # + # We can tint the image by scaling each of the color channels # by a different scalar constant. The image has shape (400, 248, 3); # we multiply it by the array [1, 0.95, 0.9] of shape (3,); # numpy broadcasting means that this leaves the red channel unchanged, # and multiplies the green and blue channels by 0.95 and 0.9 # respectively. 
img_tinted = img * [1, 0.95, 0.9] # Resize the tinted image to be 300 by 300 pixels. img_tinted = imresize(img_tinted, size=(300, 300)) img_tinted = np.uint8(img_tinted) # Write the tinted image back to disk imsave('imgs/cat_tinted.jpg', img_tinted) # - # | The original image | The tinted and resized image | # | ------------- |:-------------:| # | ![alt-text-1](assets/cat.jpg "title-1") | ![alt-text-2](assets/cat_tinted.jpg "title-2") | # ## Distance between points # [SciPy](https://docs.scipy.org/doc/scipy/reference/) defines some useful functions for computing distances between sets of points. # The function `scipy.spatial.distance.pdist` computes the distance between all pairs of points in a given set: # + import numpy as np from scipy.spatial.distance import pdist, squareform # Create the following array where each row is a point in 2D space: # [[0 1] # [1 0] # [2 0]] x = np.array([[0, 1], [1, 0], [2, 0]]) print(x) # Compute the Euclidean distance between all rows of x. # d[i, j] is the Euclidean distance between x[i, :] and x[j, :], # and d is the following array: # [[ 0. 1.41421356 2.23606798] # [ 1.41421356 0. 1. ] # [ 2.23606798 1. 0. ]] d = squareform(pdist(x, 'euclidean')) print(d) # - # You can read all the details about this function in the [documentation](http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html). # A similar function (`scipy.spatial.distance.cdist`) computes the distance between all pairs across two sets of points; you can read about it in the [documentation](http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html).
Numpy/Numpy01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="FAzvhBnvzxS5" colab_type="text" # # Räkna sjöar i norra Sverige # # I den här uppgiften ska vi räkna hur många vattendrag det finns i en satellibild av en del av Sverige. # # Programmeringsmässigt går den här uppgiften rätt långt på djupet, där vi lär oss om djupetförst-sökning (DFS). # # ![Bild på en sjö](https://cdn.pixabay.com/photo/2019/08/05/11/48/reflection-4385834_1280.jpg) # [Foto](https://pixabay.com/sv/photos/reflektion-sj%C3%B6-vatten-liggande-4385834/) av vrolanas / [Pixabay License](https://pixabay.com/sv/service/license/) # + [markdown] id="Zm52asP-DMHQ" colab_type="text" # Genom att köra den dolda kodrutan nedan kan du se vilket område vi kommer titta på i satellitbilden: # + id="JDenTv_rERJL" colab_type="code" cellView="form" colab={} #@title Kod för att rita karta import folium m = folium.Map( location=[66,17.75], tiles='Stamen Terrain', zoom_start=9 ) folium.Rectangle( bounds=[(65.79,17.22), (66.26,18.31)], fill=False ).add_to(m) m # + [markdown] id="mxoYj1zBHLEG" colab_type="text" # Zooma ut så att du ser var i Sverige bilden är tagen. # # Uppgiften går ut på att vi ska försöka räkna hur många sjöar det är i området. Utzoomat kanske det ser ut som att det bara är en eller några få sjöar. Zoomar man in så ser man dock att det dyker upp betydligt fler och att de som tidigare såg ut att hänga ihop inte gör det. # # Det kommer i princip vara omöjligt att räkna antalet sjöar för hand, därför ska vi ta hjälp av programmering! # + [markdown] id="PDCdAXf8CIUe" colab_type="text" # Det vi kollade på ovan var en karta. För att ladda in den riktiga satellitbilden behöver vi köra koden här nedanför. 
# + id="jBevuXN1MI4w" colab_type="code" colab={} # !wget https://github.com/lunduniversity/schoolprog-satellite-data/raw/master/lakes/lakes_data.npz --quiet import numpy as np import matplotlib.pyplot as plt bands = np.load('lakes_data.npz') # Visa skalor i ovankant av grafer: plt.rcParams['xtick.bottom'] = plt.rcParams['xtick.labelbottom'] = False plt.rcParams['xtick.top'] = plt.rcParams['xtick.labeltop'] = True # + [markdown] id="KapKeij4CXi0" colab_type="text" # Innehållet i filen vi laddade ner har lagts in i variabeln `bands`. Satellitbilden är inte en bild, utan flera bilder för olika band. Ett band betyder att man har tagit en bild men bara för ljus med vissa våglängder. # # `Bands` är en dictionary som innehåller mätningar för de olika banden. Vi kan se vilka band som finns genom att skriva ut nycklarna. # + id="EW1LN0gzN7Ol" colab_type="code" colab={} print(list(bands.keys())) # + [markdown] id="BArGCa8VCsaH" colab_type="text" # Vi har rött, grönt, blått och nära infrarött ljus att jobba med. Det nära infraröda ljuset kallar vi för ```nir``` (near infrared). Nära infrarött ljus kan vi inte se med våra mänskliga ögon men det kan vara till stor hjälp ändå. I denna uppgift kommer vi utnyttja att vatten inte reflekterar nära infrarött men reflekterar små mängder rött ljus. Detta gör att vi kommer kunna skilja vatten från växtlighet och bebyggelse. # # Vi sparar varje band i varsin variabel så att det blir lättare att jobba med. # + id="jJAoibKjOXJ_" colab_type="code" colab={} red =bands["red"] blue =bands["blue"] green=bands["green"] nir =bands["nir"] # + [markdown] id="CFd8uJ4g1Ecl" colab_type="text" # Så vad innehåller de här banden? Skriv ut något av dem för att se vad de innehåller. # + id="RVrrl9353-Q-" colab_type="code" colab={} # Skriv ut något av banden print(...) # + [markdown] id="4nbjREmu4EMA" colab_type="text" # Varje band verkar bara vara en stor tabell med tal. 
Kanske verkar det konstigt att detta kan vara en bild, men det är faktiskt så det funkar. # # Varje tal i tabellen motsvarar en pixel, alltså en liten ruta. Talet säger oss hur ljust det ska vara i just den pixeln, för ljusfrekvensen som bandet har mätt på. # # Vi kan göra en bild av till exempel det gröna bandet med koden nedan. # # # + id="Q-uXsCkCOkSU" colab_type="code" colab={} plt.figure(figsize=(10,10)) plt.imshow(green) plt.ylim(green.shape[0], 0) plt.xlim(0, green.shape[0]) plt.show() # + [markdown] id="JHptK1X45tlE" colab_type="text" # Det ser inte riktigt ut som en vanlig kamerabild, men du kanske kan se att det är samma område som på kartan längre upp. # # Att det inte ser ut som en vanlig bild beror på att vi bara kollat på det gröna ljuset. Sen har `matplotlib.pyplot`, eller `plt`, valt en egen färgskala när den visar bilden. # + [markdown] id="9ElMVzi8746B" colab_type="text" # För att få en vanlig bild behöver vi använda rött, grönt och blått. För var och en av färgerna kan vi välja ett tal mellan 0 och 255 som är hur mycket den färgen ska vara med i en pixel. Genom att kombinera olika mycket av de olika färgerna så kan man skapa alla färger. Till exempel om man har 128 rött, 0 grönt och 128 blått så får man lila. 0 i allt blir svart. 255 i allt blir vitt. Såhär funkar i princip alla färgskärmar, troligen skärmen du använder just nu. # # I princip så motsvarar intervallet 0 till 255 också talen som är uppmätta för varje band. Som du kanske såg så var dock talen uppmätta för banden betydligt större (det beror lite på vilket band du skrev ut i en tidigare uppgift, men det finns i alla fall tal som är större än 1000). Därför måste vi omvandla de stora talen till mindre. Det gör koden nedan. Kör den så får du en bild. Det kan ta 10 - 20 sekunder. # + id="vmcxwLUsO_Ft" colab_type="code" colab={} from PIL import Image rgb = np.zeros([1000, 1000, 3], dtype=np.uint8) divide = 8 # Testa att ändra! Bör hållas mellan 3 och 30 ungefär. 
for x in range(1000): for y in range(1000): r = min(255, red[x][y]/divide) g = min(255, green[x][y]/divide) b = min(255, blue[x][y]/divide) rgb[x][y] = [r,g,b] img = Image.fromarray(rgb) display(img) # + [markdown] id="ssTCjh3I-Zw5" colab_type="text" # Testa att ändra variabeln `divide` ovan. Vad händer med bilden? # # <details> # <summary markdown="span"> # Svar # </summary> # <p> # Hur ljus bilden är ändras. Om <code>divide</code> är liten blir bilden väldigt ljus, om <code>divide</code> är stor blir den väldigt mörk. <code>divide</code> styr i princip hur stor skillnad de är på de olika färgerna i bilden. # # För att få en sanningsenlig bild ska <code>divide</code> väljas till 14, då blir dock bilden ganska mörk. För 8 är det lättare att se de olika nyanserna i bilden. # </p> # </details> # + [markdown] id="wnS0Uo22A0rf" colab_type="text" # Eftersom vi ska räkna sjöar i bilden vill vi ha ett bra sätt att veta vad som är vatten. Vi kommer använda oss av NDVI som du bör ha stött på i tidigare uppgifter. NDVI kollar på hur det nära infraröda bandet skiljer sig från det röda bandet. # + id="3ziYX7SVx5Md" colab_type="code" colab={} ndvi = (nir-red)/(nir+red) #Skapar en ny variabel 'ndvi' med samma format som de andra banden. plt.figure(figsize=(10,10)) plt.pcolormesh(ndvi, cmap='PiYG') plt.ylim(ndvi.shape[0], 0) plt.clim(-1.0, 1.0) plt.colorbar(label='NDVI') plt.show() # + [markdown] id="pSs7_QdoBgrn" colab_type="text" # NDVI blir ett tal mellan -1 och 1. I bilden ser vi att vatten blir rosa, det vill säga NDVI är litet för vatten. En regel vi kan ha är att om NDVI är mellan -1 och -0.1 så är det vatten i pixeln. # + [markdown] id="w8JoTc9WCqRG" colab_type="text" # Varje pixel i bilden motsvarar ett område som är $50*50$ meter. Som man kan se av skalan så är det $1000*1000$ pixlar i bilden. # # Hur brett och högt är området på bilden? 
# # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # Varje pixel är $50m$ bred, totalt är området $50*1000 = 50000 m = 50 km$ brett. Det samma gäller höjden på bilden. # </p> # </details> # + [markdown] id="d2KDf7BNFRaj" colab_type="text" # Hur stor är arean av hela området? # # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # $50 km * 50 km = 2500 km^2$ # </p> # </details> # + [markdown] id="u_Kjee6pF-u0" colab_type="text" # Ungefär hur många sjöar/vattendrag tror du att det är? # # <details> # <summary markdown="span"> # Svar # </summary> # <p> # Detta kan vi förstås inte skriva ett svar på här, det är ju det vi ska räkna ut i resten av uppgiften! Dock kanske du kan konstatera att det verkar vara väldigt många, fler än du skulle kunna/vilja räkna för hand. # </p> # </details> # + [markdown] id="BiddOScfHHA3" colab_type="text" # För att slippa tänka på NDVI i fortsättningen, vill vi göra en tabell som är `True` för pixlar det är vatten, och `False` annars. Fyll i koden nedan så att tabellen `water` beskriver just detta. # + id="-I7etrmTHfkJ" colab_type="code" colab={} water = np.zeros([1000, 1000], dtype=np.bool) for x in range(1000): for y in range(1000): if ...: # Skriv ett vilkor så att vi vet om det är vatten här eller inte. water[x][y] = True else: water[x][y] = False # + [markdown] id="D_ZT1wLpH2AL" colab_type="text" # <details> # <summary markdown="span"> # Tips # </summary> # <p> # Givet värdet på NDVI, hur vet vi om det är vatten då? # </p> # </details> # # <details> # <summary markdown="span"> # Svar # </summary> # <p> # # ```python # water = np.zeros([1000, 1000], dtype=np.bool) # for x in range(1000): # for y in range(1000): # if ndvi[x][y] < -0.1: # Skriv ett vilkor så att vi vet om det är vatten här eller inte. # water[x][y] = True # else: # water[x][y] = False # ``` # </p> # </details> # + [markdown] id="m2M3nFk8BaNW" colab_type="text" # Vi kan nu göra om tabellen till en svartvit bild. 
# + id="z7GZ7r3cX7I_" colab_type="code" colab={} waterpic = np.zeros([1000, 1000], dtype=np.uint8) for x in range(1000): for y in range(1000): if water[x,y]: waterpic[x,y]=255 img = Image.fromarray(waterpic) display(img) # + [markdown] id="9axankeok-Kr" colab_type="text" # Nu ska vi släppa vattendragen en liten stund och istället gå in lite på djupet i hur vi ska lösa det här problemet med programmering. # + [markdown] id="Lr5R2ESbI5kp" colab_type="text" # ### Att hitta komponenter med hjälp av DFS # # I uppgifterna ovan har vi kommit fram till en tabell som i varje pixel säger oss om det är vatten där eller inte. Många pixlar med vatten kommer att "sitta ihop" och tillsammans bilda en sjö. Vi behöver nu på något sätt klumpa ihop pixlarna så att vi vet vilka pixlar som sitter ihop med varandra. Vi kan kalla pixlar som sitter ihop för en komponent. # # Vi kan tänka att vi ska börja i en pixel med vatten, och sen gå ut från den så att vi hittar alla pixlar den sitter ihop med. I varje steg så har vi en nuvarande pixel och tittar på alla dess närliggande pixlar. De som vi inte varit på innan och som är vatten lägger vi till i en lista som håller reda på vilka pixlar vi har kvar att besöka. Sen påbörjar vi ett nytt steg där vi tar bort den sista pixeln i listan och väljer till vår nuvarande punkt. Nu har vi en ny nuvarande punkt och fortsätter vi på det här sättet # # # + [markdown] id="pTDK2v3kUBdg" colab_type="text" # Vi börjar med att göra en stor tabell som för varje pixel håller reda på vilken sjö den tillhör. I början är alla värden 0 i tabellen vilket betyder att pixeln inte hamnat i en sjö än. # + id="FwRuuZLiT6Wj" colab_type="code" colab={} lakenumbers = np.zeros([1000, 1000], dtype=np.uint16) # + [markdown] id="r_3dWBpnWJ_i" colab_type="text" # Skriv en hjälp-funktion som kollar om en punkt ligger i bilden. Tänk på att bilden är $1000*1000$ pixlar stor. Hjälpfunktionen kommer vi använda längre fram. 
# + id="TMnKLKUlWWPu" colab_type="code" colab={} #Ska returnera True om punkten (x,y) ligger i bilden, annars False def in_picture(x,y): # Skriv din kod här: # + [markdown] id="01s95sTpWgjG" colab_type="text" # <details> # <summary markdown="span"> # Tips # </summary> # <p> # Både x och y ska vara ett värde mellan 0 och 999. # </p> # </details> # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # # ```python # def in_picture(x,y): # return 0 <= x < 1000 and 0 <= y < 1000 # ``` # </p> # </details> # + [markdown] id="8C5b_fInbcpY" colab_type="text" # Nu ska du snart skriva en funktion som utgår från en punkt och hittar alla punkter med vatten som sitter ihop med den och markerar dessa med talet lakenumber. # # Det kan vara några begrepp du inte sett innan eller som du behöver friska upp minnet för. Läs igenom och gör uppgifterna. # + [markdown] id="nR6Njeizbnqz" colab_type="text" # #### Begrepp som kan vara användbara # + [markdown] id="xXanVSvUdk66" colab_type="text" # Tuplar är ett enkelt sätt att gruppera saker i python. # # * De fungerar ungefär som listor men kan inte ändras. # * Skriver man `a = (5,2,"hejsan")` så blir `a` en tupel med talen 5 och 2 samt strängen `"hejsan"`. # * Man kan få ut respektive element på samma sätt som med en lista: `print(a[1])` skriver ut `2`. # * Man kan dock inte ändra i tupeln: `a[2] = "tjena"` ger ett fel. # * Man kan 'packa upp' tupeln genom att skriva: `x,y,z = a`, så blir `x`, `y` och `z` varsitt av elementen som ligger i `a`. # # Testa att packa upp tuplen nedan i rätt ordning. # + id="tYZQgBAIefGk" colab_type="code" colab={} a = ("dig!", "på", "Hej") #Skriv kod här för att packa upp tuplen i rätt ordning! print(x,y,z) # Utskriften ska bli: Hej på dig! 
# + [markdown] id="vuBCQsI5Qv3H" colab_type="text" # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # Uppackningen kan göras med: # # ```python # z, y, x = a # ``` # </p> # </details> # # + [markdown] id="8IbGDIcZfhpb" colab_type="text" # Pop är en funktion som tar ut det sista elementet i en lista. # # * `last = list.pop()` Tar bort det sista elementet ur `list` och lägger i variabeln `last`. # # Vad skriver följande kodcell ut? Förstår du varför? # + id="UyN8WkAffhak" colab_type="code" colab={} a = ["dig!", "på", "Hej"] print(a.pop(), a.pop(), a.pop()) # + [markdown] id="5ioozvqjgL_O" colab_type="text" # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # Då pop plockar ut det sista elementet ur listan kommer det vara ett nytt element som ligger sist i listan <code>a</code> varje gång vi anropar pop. # </p> # </details> # + [markdown] id="3B1k_Oj9Wy3U" colab_type="text" # # # # # # # En while-loop körs så länge ett visst villkor är uppfyllt. # Till exempel: # ``` # i = 10 # while i > 5: # print(i) # i = i-1 # ``` # # Skriver ut talen `10, 9, 8, 7, 6` på varsin rad. # # Kan du skriva ett villkor till while-loopen nedan så att den kör tills listan är tom? # + id="5AbLkFS5hKE6" colab_type="code" colab={} a = ["!","g","i","d"," ","å","p"," ","j","e","H"] while ... : #Byt ut ... mot ett villkor. print(a.pop()) # + [markdown] id="qBraphw-iWNx" colab_type="text" # <details> # <summary markdown="span"> # Tips # </summary> # <p> # Du kan exempelvis kolla om längden av listan <code>a</code> är större än 0. # </p> # </details> # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # # ```python # while len(a) > 0 : # print(a.pop()) # ``` # </p> # </details> # + [markdown] id="ZCmtSDjZbuOP" colab_type="text" # #### Skriv funktion för att hitta komponenter med DFS # + [markdown] id="_8s797JGbxaz" colab_type="text" # Nu ska du skriva koden som hittar en komponent genom en djupet-först sökning. 
# # Det finns så kallad psuedokod inskriven, som hjälper dig veta vad som ska göras i varje steg. # # Fyll i kod som gör det som kommentarerna säger. # + id="TZ4mSTO9Ts4Z" colab_type="code" colab={} def find_component(startx, starty, lakenumber): #Skapa en lista points och stoppa in tupeln (startx,starty) i den #Sätt att lakenumbers[startx][starty] är lakenumber #så länge points inte är tom gör följande: #Plocka bort den sista punkten i points och lägg i variablerna x och y #Skapa en lista med alla närliggande punkter som gränsar till (x, y) (upp,ner,vänster,höger) #Skapa en loop som går igenom listan av grannar: #kalla den nya punkten för (nx, ny) #om (nx,ny) ligger i bilden, är vatten, och det står 0 i lakenumbers[nx][ny]: #Sätt att lakenumbers[nx][ny] är lakenumber #Lägg in tupeln (nx,ny) längst bak i listan points. # + [markdown] id="9eCxGTa1cjW6" colab_type="text" # <details> # <summary markdown="span"> # Om det är för svårt kan du istället börja med följande kodskelett # </summary> # <p> # # ```python # def find_component(startx, starty, lakenumber): # points = [...] # lakenumbers[startx][starty] = ... # while ...: # x,y = ... # neighbours = [(... , ...),(... , ...),(... , ...),(... , ...)] # for neighbour in ...: # nx = ... # ny = ... # if ... and ... and ... : # lakenumbers[nx][ny] = ... # points.append(...) # ``` # </p> # </details> # <details> # <summary markdown="span"> # Tips # </summary> # <p> # Begreppen som vi tidigare kollade på kan komma till stor användning. # Vi kan använda tuplar för att hålla koll på x och y kordinater, och packa upp dem när det behövs. # Vi såg att pop kan användas för att plocka ut det sista elementet. # En while-loop kan användas för att göra något flera gånger så länge ett villkor är uppfyllt. 
# # </p> # </details> # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # # ```python # def find_component(startx, starty, lakenumber): # points = [(startx,starty)] # lakenumbers[startx][starty] = lakenumber # while len(points) > 0: # x,y = points.pop() # neighbours = [(x-1,y),(x+1,y),(x,y-1),(x,y+1)] # for neighbour in neighbours: # nx = neighbour[0] # ny = neighbour[1] # if in_picture(nx,ny) and water[nx][ny] and lakenumbers[nx][ny] == 0: # lakenumbers[nx][ny] = lakenumber # points.append((nx,ny)) # ``` # </p> # </details> # + [markdown] id="btXBZFp1ez18" colab_type="text" # När koden är färdig kan vi testa att funktionen fungerar som den ska. Följande kod borde visa hur vi hittar en sjö uppe till vänster. Tar det väldigt lång tid utan att hända något kan din funktion ovanför vara felaktig och hamna i en oändlig loop. I så fall får du gå upp och fundera vad det kan bero på eller ta hjälp av lösningsförslaget/tipset. # + id="UzklpPoqe5NV" colab_type="code" colab={} lakenumbers = np.zeros([1000, 1000], dtype=np.uint8) find_component(0,30,255) img = Image.fromarray(lakenumbers) display(img) # + [markdown] id="o-mHOX8I3hNv" colab_type="text" # Det hade varit smidigt om vi visste hur stor sjön vi hittar är när vi anropar `find_component`. Längre fram vill vi nämligen inte räkna de komponenter som är för små för att räknas som sjöar. # # Kopiera in din tidigare kod för `find_component` och ändra så att den returnerar antalet pixlar i komponenten som hittades. # # <details> # <summary markdown="span"> # Tips # </summary> # <p> # # Ha en variabel som räknar upp 1 varje gång en punkt/pixel läggs in i listan points. Returnera variabeln i slutet av funktionen. # </p> # </details> # # # + id="UUaTcVKo4Z3t" colab_type="code" colab={} # Kopiera in din tidigare kod här, och ändra den. 
# + [markdown] id="6jmzdJL54cGk" colab_type="text" # <details> # <summary markdown="span"> # Lösning # </summary> # <p> # # ```python # def find_component(startx, starty, lakenumber): # points = [(startx,starty)] # component_size = 1 # lakenumbers[startx][starty] = lakenumber # while len(points) > 0: # x,y = points.pop() # steps = [(1,0),(-1,0),(0,1),(0,-1)] # for step in steps: # nx = x+step[0] # ny = y+step[1] # if in_picture(nx,ny) and water[nx][ny] and lakenumbers[nx][ny] == 0: # lakenumbers[nx][ny] = lakenumber # points.append((nx,ny)) # component_size += 1 # return component_size # ``` # </p> # </details> # + [markdown] id="s8RqrJob61sf" colab_type="text" # Vad är en sjö och vad är en pöl? I sverige har vi satt gränsen vid 1 hektar. Det vill säga om vattensamlingen är större än ett område som är $100 * 100$ meter så räknas det som en sjö. # Om du minns så representerade pixlarna i bilden ett område på $50 * 50$ meter. Hur många pixlar krävs för att vattensamlingen ska räknas som en sjö? Fyll i i koden nedan och kör för att se hur många sjöar det är på bilden. # + id="1kX9iJ29k_uu" colab_type="code" colab={} lakenumbers = np.zeros([1000, 1000], dtype=np.uint16) min_number_pixels = ??? # Fyll i minsta antal pixlar för en sjö lakenumber = 1 lakesfound = 0 for x in range(1000): for y in range(1000): if water[x][y] and lakenumbers[x][y] == 0: if find_component(x,y,lakenumber)>=min_number_pixels: lakesfound+=1 lakenumber += 1 print ("Antal sjöar hittade:") print (lakesfound) # + [markdown] id="6oOoUCBX-nVh" colab_type="text" # Hur många sjöar finns det totalt i bilden? # # <details> # <summary markdown="span"> # Svar # </summary> # <p> # Det ska finnas 674 sjöar totalt. # </p> # </details> # # # # + [markdown] id="Tm7-GcdmAQrv" colab_type="text" # Testa att ändra gränsen för hur stor en sjö måste vara. Hur många sjöar blir det om det räcker med en pixel för att räknas som en sjö? 
# # Enligt [SMHI](https://www.smhi.se/kunskapsbanken/hydrologi/sveriges-sjoar-1.4221) finns det totalt nästan 100000 sjöar i Sverige. Jämför detta med antalet sjöar vi hittade i vår bild. Verkar det rimligt? Du kan scrolla upp och titta på kartan längst upp för att se hur stort område satellitbilden täckte. # # <details> # <summary markdown="span"> # Svar # </summary> # <p> # De 674 sjöarna vi hittade motsvarar knappt 1% av alla sveriges sjöar. Om vi scrollar upp och tittar på kartan över Sverige verkar detta rimligt, satellitbilden täcker ungefär så stor del av Sveriges yta. # </p> # </details> # + [markdown] id="kOPE7RjpSl7j" colab_type="text" # Nu har du lärt dig att hitta sjöar eller avgränsade områden med hjälp av programmering. Att räkna just sjöar kanske inte fyller ett större syfte än att vara en rolig övning, men idén skulle kunna användas i andra sammanhang. Till exempel skulle man kunna undersöka hur mycket höjningen av havsnivån får storleken på en ö att sjunka eller om bönderna bygger fler snirkliga vattendrag för att hinna filtrera jordbruksvattnet innan det när ut till havet. # + [markdown] id="2pXgiKkLSTMz" colab_type="text" # # Minns du vad du har gått igenom? Nu är det dags för ett quiz! # + id="8cWuPthLST3D" colab_type="code" cellView="form" colab={} #@title Kör rutan för att öppna quizet! # !wget https://raw.githubusercontent.com/lunduniversity/schoolprog-satellite/master/for_developers/quiz.py --quiet # !wget https://raw.githubusercontent.com/lunduniversity/schoolprog-satellite/master/exercises/lakes/quiz_lakes.json --quiet import json import quiz with open("quiz_lakes.json") as f: quiz_dict = json.load(f) quiz.main(quiz_dict) # + [markdown] id="y5JDaxYQ9zOZ" colab_type="text" # ## Fördjupning / Projektidéer # # * Ta reda på storleken på den största sjön i bilden. Skapa en bild som endast visar den sjön. Du kan ta inspiration från kodrutan som hittar en sjö uppe till vänster i bilden, och/eller kodrutan som visar det gröna bandet som en bild. 
Som utökning skulle du kunna visa de 10 största sjöarna, i varsin färg. # * Man kan räkna antalet öar i bilden på ett liknande sätt som antalet sjöar. Byt ut villkor om att endast besöka rutor med vatten till att endast besöka rutor utan vatten. Nästa steg är att skilja vanliga landmassor från öar. Ett sätt skulle kunna vara att vanliga landmassor alltid gränsar till kanten av bilden. Modifiera `find_component` för att använda detta och räkna slutligen ut antalet öar på bilden. # #
exercises/lakes/rakna_sjoar.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Ruby 2.3.1
#     language: ruby
#     name: ruby
# ---

# _Note:_ this tutorial is from the Daru visualization tutorial : http://nbviewer.jupyter.org/github/SciRuby/sciruby-notebooks/blob/master/Visualization/Visualizing%20data%20with%20daru%20DataFrame.ipynb

# ## Creating Visualizations with DataFrame
#
# Using nyaplot in the background to generate interactive plots, which can be viewed in your browser.
#
# In this tutorial we'll see how we can create some interesting plots with Daru::DataFrame using the Daru::View::Plot function.

require 'daru/view'

# Set a default plotting library
Daru::View.plotting_library = :nyaplot

# ### Scatter Plot
#

# A simple scatter of 100 points: x runs 0..99, y is uniform random.
df = Daru::DataFrame.new({
  a: Array.new(100) {|i| i},
  b: 100.times.map{rand}
})
scatter_1 = Daru::View::Plot.new(df, type: :scatter, x: :a, y: :b)
scatter_1.show_in_iruby

# Just specifying the options to plot yields a very simple graph without much customization.
#
# But what if you want to enhance your scatter plot with colors, add tooltips for each point and change the label of the X and Y axes. Also you may be faced with a situation where you want to see two different scatter plots on the same graph, each with a different color.
#
# All this can be done by combining #plot with a block. The #plot method yields the corresponding Nyaplot::Plot and Nyaplot::Diagram objects for the graph, which can be used for many varied customizations. Let's see some examples:

# +
# DataFrame denoting Ice Cream sales of a particular food chain in a city
# according to the maximum recorded temperature in that city. It also lists
# the staff strength present in each city.
df = Daru::DataFrame.new({
  :temperature => [30.4, 23.5, 44.5, 20.3, 34, 24, 31.45, 28.34, 37, 24],
  :sales => [350, 150, 500, 200, 480, 250, 330, 400, 420, 560],
  :city => ['Pune', 'Delhi']*5,
  :staff => [15,20]*5
})
df

# +
# Generating a scatter plot with tool tips, colours and different shapes.
scatter_2 = Daru::View::Plot.new(df, type: :scatter, x: :temperature, y: :sales).chart

# NOTE: Object#tap yields the receiver as its single block argument; the
# previous version declared an extra `diagram` parameter, which was always nil.
scatter_2.tap do |plot|
  plot.x_label "Temperature"
  plot.y_label "Sales"
  plot.yrange [100, 600]
  plot.xrange [15, 50]
  plot.diagrams[0].tooltip_contents([:city, :staff])
  plot.diagrams[0].color(Nyaplot::Colors.qual) # set the color scheme for this diagram. See Nyaplot::Colors for more info.
  plot.diagrams[0].fill_by(:city)  # Change color of each point WRT to the city that it belongs to.
  plot.diagrams[0].shape_by(:city) # Shape each point WRT to the city that it belongs to.
end
# Move the mouse pointer over the points to see the tool tips.
# -

# Array of diagrams
scatter_2.diagrams

# ### Bar Graph

# Generating a bar graph requires passing `:bar` into the `:type` option.

# +
# A Bar Graph denoting the age at which various Indian Kings died.
df = Daru::DataFrame.new({
  name: ['<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Krishnadevaraya'],
  age: [72,63,57,53,58]
}, order: [:name, :age])
df.sort!([:age])
# -

Daru::View::Plot.new(df, type: :bar, x: :name, y: :age).chart.tap do |plot|
  plot.x_label "Name"
  plot.y_label "Age"
  plot.yrange [20,80]
end

# **It is also possible to simply pass in the `:x ` parameter if you want to plot the frequency of occurrence of each element in a Vector.**

# +
# 1000 random draws from four letters; the bar chart shows per-letter counts.
a = ['A', 'C', 'G', 'T']
v = 1000.times.map { a.sample }
puts "v : ", v
df = Daru::DataFrame.new({ a: v })

Daru::View::Plot.new(df, type: :bar, x: :a).chart.tap do |plot|
  plot.yrange [0,350]
  plot.y_label "Frequency"
  plot.x_label "Letter"
end
# -

# ### Box Plots
#
# A box plot can be generated from the numerical vectors in the DataFrame by simply passing `:box` to the `:type` argument.
#
# To demonstrate, I'll prepare some data using the distribution gem to get a bunch of normally distributed random variables. We'll then plot in a Box plot after creating a DataFrame with the data.

# +
require 'distribution'

rng = Distribution::Normal.rng
# Daru.lazy_update = false

# Four shifted/scaled variants of the same 1000 normal samples.
arr = []
1000.times {arr.push(rng.call)}
arr1 = arr.map{|val| val/0.8-2}
arr2 = arr.map{|val| val*1.1+0.3}
arr3 = arr.map{|val| val*1.3+0.3}

df = Daru::DataFrame.new({
  a: arr,
  b: arr1,
  c: arr2,
  d: arr3
})
box_1 = Daru::View::Plot.new(df, type: :box)
box_1.show_in_iruby
# -

# ### Line Graphs
#
# Line graphs can be easily generated by passing `:line` to the `:type` option.
#
# For example, let's plot a simple line graph showing the temperature of New York City over a week.

# +
df = Daru::DataFrame.new({
  temperature: [43,53,50,57,59,47],
  day: [1,2,3,4,5,6]
})

line_1 = Daru::View::Plot.new(df,type: :line, x: :day, y: :temperature).chart
line_1.tap do |plot|
  plot.x_label "Day"
  plot.y_label "Temperature"
  plot.yrange [20,60]
  plot.xrange [1,6]
  plot.legend true
  plot.diagrams[0].title "Temperature in NYC"
end
# -

# ### Histogram
#
# Specifying `:histogram` as the `:type` will make a histogram from the data.
#
# Histograms don't need an X axis label (because they show the frequency of elements in each bin) so you need to specify the name of the vector you want to plot by passing its name into the `:x` option.

# +
v = 1000.times.map { rand }
df = Daru::DataFrame.new({ a: v })

Daru::View::Plot.new(df, type: :histogram, x: :a).chart.tap do |plot|
  plot.yrange [0,150]
  plot.y_label "Frequency"
  plot.x_label "Bins"
end
# -

# ### Multiple Diagrams on the same Plot
#
# ##### Scatter Diagrams on the same Plot
#
# Daru allows you to plot as many columns of your dataframe as you want on the same plot.
#
# This can allow you to plot data from the dataframe onto the same graph and visually compare results from observations. You can individually set the color or point shape of each diagram on the plot.
#
# As a first demonstration, let's create a DataFrame of the temperatures of three different cities over the period of a week. Then, we'll plot them all on the same graph by passing options to the plot method which tell it the Vectors that are to be used for each of the diagrams.

# +
df = Daru::DataFrame.new({
  nyc_temp: [43,53,50,57,59,47],
  chicago_temp: [23,30,35,20,26,38],
  sf_temp: [60,65,73,67,55,52],
  day: [1 ,2 ,3 ,4 ,5 , 6]
})

# As you can see, the options passed denote the x and y axes that are to be used by each diagram.
# You can add as many x and y axes as you want, just make sure the relevant vectors are present
# in your DataFrame!
#
# Here's an explanation of all the options passed:
#
# * type - The type of graph to be drawn. All the diagrams will be of the same type in this case.
# * x1/x2/x3 - The Vector from the DataFrame that is to be treated as the X axis for each of the
#   three diagrams. In this case all of them need the :day Vector.
# * y1/y2/y3 - The Vector from the DataFrame that is to be treated as the Y axis for each of the
#   three diagrams. As you can see the 1st diagram will plot nyc_temp, the 2nd chicago_temp and
#   the 3rd sf_temp.
#
# The values yielded in the block are also slightly different in this case.
# The first argument ('plot') is the same as in all the above examples (Nyaplot::Plot), but the
# second argument ('diagrams') is now an Array of Nyaplot::Diagram objects. Each of the elements
# in the Array represents the diagrams that you want to plot according to the sorting sequence
# of the options specifying the axes.
graph = Daru::View::Plot.new(df,
  type: :scatter,
  x1: :day, y1: :nyc_temp,
  x2: :day, y2: :chicago_temp,
  x3: :day, y3: :sf_temp)

graph.chart.tap do |plot|
  nyc     = plot.diagrams[0]
  chicago = plot.diagrams[1]
  sf      = plot.diagrams[2]

  # Titles previously read "Temprature" -- fixed spelling in the legend text.
  nyc.title "Temperature in NYC"
  nyc.color "#00FF00"

  chicago.title "Temperature in Chicago"
  chicago.color "#FFFF00"

  sf.title "Temperature in SF"
  sf.color "#0000FF"

  plot.legend true
  plot.yrange [0,100]
  plot.x_label "Day"
  plot.y_label "Temperature"
end
# -

# #### Scatter and Line Diagram on the same Plot
#
# It is also possible to plot two different kinds of diagrams on the same plot. To show you how this works, I will plot a scatter graph and a line graph on the same plot.
#
# To elaborate, we will be plotting a set of points on a scatter plot along with their line of best fit.

df = Daru::DataFrame.new({
  burger: ["Hamburger","Cheeseburger","Quarter Pounder","Quarter Pounder with Cheese","Big Mac","Arch Sandwich Special","Arch Special with Bacon","Crispy Chicken","Fish Fillet","Grilled Chicken","Grilled Chicken Light"],
  fat: [9,13 ,21 ,30 ,31 ,31 ,34 ,25 ,28 ,20 ,5],
  calories: [260,320,420,530,560,550,590,500,560,440,300]
}, order: [:burger, :fat, :calories])

# We'll now write a small algorithm to compute the slope of the line of best fit by placing the fat content as the X co-ordinates and calories as Y co-ordinates.
#
# The line of best fit will be a line graph of red color and the fat and calorie contents will be plotted as usual using a scatter plot.

# +
# Algorithm for computing the line of best fit (least squares)
sum_x = df[:fat].sum
sum2_x = (df[:fat]*df[:fat]).sum
sum_xy = (df[:fat]*df[:calories]).sum
mean_x = df[:fat].mean
mean_y = df[:calories].mean

slope = (sum_xy - sum_x * mean_y) / (sum2_x - sum_x * mean_x)
yint = mean_y - slope * mean_x

# Assign the computed Y co-ordinates of the line of best fit to a column
# in the DataFrame called :y_coords
df[:y_coords] = df[:fat].map {|f| f*slope + yint }

# As you can see the options passed into plot are slightly different this time.
#
# Instead of passing Vector names into :x1, :x2... separately, this time we pass
# the relevant names of the X and Y axes co-ordinates as an Array into the :x and
# :y options. This is a simpler and easier way to plot multiple diagrams.
#
# As is demonstrated in the previous example, the first argument yields a Nyaplot::Plot
# object and the second an Array of Nyaplot::Diagram objects. The diagrams are ordered
# according to the types specified in the `:type` option.
graph = Daru::View::Plot.new(df,
  type: [:scatter, :line],
  x: [:fat, :fat],
  y: [:calories, :y_coords])

graph.chart.tap do |plot|
  plot.x_label "Fat"
  plot.y_label "Calories"
  plot.xrange [0,50]

  scatter = plot.diagrams[0]
  line    = plot.diagrams[1]

  line.color "#FF0000"               # set color of the line to 'red'
  scatter.tooltip_contents [:burger] # set tool tip to :burger
end
# -
spec/dummy_iruby/.ipynb_checkpoints/Nyaplot | Creating Visualizations with DataFrame | from daru examples-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # ## Racial data vs. Congressional districts # # We are now awash with data from different sources, but pulling it all together to gain insights can be difficult for many reasons. In this notebook we show how to combine data of very different types to show previously hidden relationships: # # * **"Big data"**: 300 million points indicating the location and racial or ethnic category of each resident of the USA in the 2010 census. See the [datashader census notebook](https://anaconda.org/jbednar/census) for a detailed analysis. Most tools would need to massively downsample this data before it could be displayed. # * **Map data**: Image tiles from ArcGIS showing natural geographic boundaries. Requires alignment and overlaying to match the census data. # * **Geographic shapes**: 2015 Congressional districts for the USA, downloaded from census.gov. Requires reprojection to match the coordinate system of the image tiles. # # Few if any tools can alone handle all of these data sources, but here we'll show how freely available Python packages can easily be combined to explore even large, complex datasets interactively in a web browser. The resulting plots make it simple to explore how the racial distribution of the USA population corresponds to the geographic features of each region and how both of these are reflected in the shape of US Congressional districts. 
For instance, here's an example of using this notebook to zoom in to Houston, revealing a very precisely [gerrymandered](https://en.wikipedia.org/wiki/Gerrymandering_in_the_United_States) [Hispanic district](https://green.house.gov/about/our-district): # # ![Houston district 29](../assets/images/houston_district29.png) # # Here the US population is rendered using racial category using the key shown, with more intense colors indicating a higher population density in that pixel, and the geographic background being dimly visible where population density is low. Racially integrated neighborhoods show up as intermediate or locally mixed colors, but most neighborhoods are quite segregated, and in this case the congressional district boundary shown clearly follows the borders of this segregation. # # If you run this notebook and zoom in on any urban region of interest, you can click on an area with a concentration of one racial or ethnic group to see for yourself if that district follows geographic features, state boundaries, the racial distribution, or some combination thereof. 
# # Numerous Python packages are required for this type of analysis to work, all coordinated using [conda](http://conda.pydata.org):
#
# * [Numba](http://numba.pydata.org): Compiles low-level numerical code written in Python into very fast machine code
# * [Dask](http://dask.pydata.org): Distributes these numba-based workloads across multiple processing cores in your machine
# * [Datashader](http://datashader.readthedocs.io): Using Numba and Dask, aggregates big datasets into a fixed-sized array suitable for display in the browser
# * [GeoViews](http://geo.holoviews.org/) (using [Cartopy](http://scitools.org.uk/cartopy)): Project longitude, latitude shapes into Web Mercator and create visible objects
# * [HoloViews](http://holoviews.org/): Flexibly combine each of the data sources into a just-in-time displayable, interactive plot
# * [Bokeh](http://bokeh.pydata.org/): Generate JavaScript-based interactive plot from HoloViews declarative specification
#
# Each package is maintained independently and focuses on doing one job really well, but they all combine seamlessly and with very little code to solve complex problems.

# +
import holoviews as hv
from holoviews import opts
import geoviews as gv
import datashader as ds
import dask.dataframe as dd
from cartopy import crs
from holoviews.operation.datashader import datashade

hv.extension('bokeh', width=95)

# Plot defaults shared by every figure below: a fixed 1200x682 canvas with no
# axes or grid (the map tiles supply spatial context), district shapes drawn
# as unfilled outlines responding to the "tap" tool, and semi-transparent
# tiles so the census raster remains visible underneath.
opts.defaults(
    opts.Points(apply_ranges=False, ),
    opts.RGB(width=1200, height=682, xaxis=None, yaxis=None, show_grid=False),
    opts.Shape(fill_alpha=0, line_width=1.5, apply_ranges=False, tools=['tap']),
    opts.WMTS(alpha=0.5)
)
# -

# In this notebook, we'll load data from different sources and show it all overlaid together.
# First, let's define a color key for racial/ethnic categories:

# +
# Single-letter census category codes mapped to display colors and labels.
color_key = {'w':'blue', 'b':'green', 'a':'red', 'h':'orange', 'o':'saddlebrown'}
races = {'w':'White', 'b':'Black', 'a':'Asian', 'h':'Hispanic', 'o':'Other'}

# One dummy point per category, used only to populate the plot legend.
color_points = hv.NdOverlay(
    {races[k]: gv.Points([0,0], crs=crs.PlateCarree()).opts(color=v)
     for k, v in color_key.items()})
# -

# Next, we'll load the 2010 US Census, with the location and race or ethnicity of every US resident as of that year (300 million data points), and define a plot using datashader to show this data with the given color key:

df = dd.io.parquet.read_parquet('../data/census.snappy.parq')
df = df.persist()  # materialize the dask dataframe so later renders reuse it
census_points = hv.Points(df, kdims=['easting', 'northing'], vdims=['race'])

# Now we can datashade and render these points, coloring the points by race:

x_range, y_range = ((-13884029.0, -7453303.5), (2818291.5, 6335972.0)) # Continental USA

# Shared datashader settings: viewport, sampling resolution, output size,
# and a categorical count aggregated over the 'race' column.
shade_defaults = dict(x_range=x_range, y_range=y_range, x_sampling=10, y_sampling=10,
                      width=1200, height=682, color_key=color_key,
                      aggregator=ds.count_cat('race'),)

shaded = datashade(census_points, **shade_defaults)
shaded

# Next, we'll load congressional districts from a publicly available [shapefile](https://www.census.gov/geo/maps-data/data/cbf/cbf_cds.html) and project them into Web Mercator format using GeoViews (which in turn calls Cartopy):

shape_path = '../data/cb_2015_us_cd114_5m.shp'
districts = gv.Shape.from_shapefile(shape_path, crs=crs.PlateCarree())
districts = gv.project(districts)

# Finally, we'll define some image tiles to use as a background, using any publicly available Web Mercator tile set:

tiles = gv.WMTS('https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg')

# Each of these data sources can be visualized on their own (just type their name in a separate cell), but they can also easily be combined into a single overlaid plot to see the relationships:

opts.defaults(
    opts.Polygons(fill_alpha=0))

# Re-shade and compose the final overlay: tiles at the back, then the census
# raster, the legend points, and the district outlines on top.
shaded = datashade(census_points, **shade_defaults)
tiles * shaded * color_points * districts

# You should now be able to interactively explore these three linked datasets, to see how they all relate to each other. In a live notebook, this plot will support a variety of interactive features:
#
# * Pan/zoom: Select the "wheel zoom" tool at the left, and you can zoom in on any region of interest using your scroll wheel. The shapes should update immediately, while the map tiles will update as soon as they are loaded from the external server, and the racial data will be updated once it has been rendered for the current viewport by datashader. This behavior is the default for any HoloViews plot using a Bokeh backend.
# * Tapping: click on any region of the USA and the Congressional district for that region will be highlighted (and the rest dimmed). This behavior was enabled for the shape outlines by specifying the "tap" tool in the options above.
#
# Most of these interactive features are also available in the static HTML copy visible at [anaconda.org](https://anaconda.org/jbednar/census-hv-dask), with the restriction that because there is no Python process running, the racial/population data will be limited to the resolution at which it was initially rendered, rather than being dynamically re-rendered to fit the current zoom level. Thus in a static copy, the data will look pixelated, whereas in the live server you can zoom all the way down to individual datapoints (people) in each region.
examples/topics/gerrymandering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="MuyRMqVLi30R" colab_type="code" colab={}
# clone binary-classification-vMYield repo
# !git clone https://github.com/lbborkowski/binary-classification-vMYield.git
# %cd /content/binary-classification-vMYield/

# + id="j_1zUlYp-VPs" colab_type="code" colab={}
# imports
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
from numpy import loadtxt

# + id="nS4FrjUZGrNp" colab_type="code" colab={}
# total number of train and test data
trainArrSize=500000

# + id="k0JkbpYUB-Ar" colab_type="code" colab={}
# create random array of length trainArrSize for three stress values (sig_rr, sig_tt, sig_rt)
sigArr=np.random.rand(trainArrSize,3)

# + id="RpFBPfoWW9M7" colab_type="code" colab={}
# range of stress components sig_rr, sig_tt, sig_rt obtained from running Octave file PlateWithHole.m
# this is used to create bounds for the training data
range_sig_rr=np.array([-0.03809,0.9024])
range_sig_tt=np.array([-1,3])
range_sig_rt=np.array([-0.6661,0.6661])

# + id="IA1A9oJnYrE6" colab_type="code" colab={}
# extend range by 1% beyond max and min to ensure all possible training stress values are sampled
range_sig_rrExt=1.01*range_sig_rr
range_sig_ttExt=1.01*range_sig_tt
range_sig_rtExt=1.01*range_sig_rt

# + id="BS55yqXHWa2g" colab_type="code" colab={}
# create array of random numbers between max and min stress values using following formula:
# r = a + (b-a).*rand(N,1) where a and b are the min and max bounds, respectively
sigArr[:,0]=range_sig_rrExt[0]+(range_sig_rrExt[1]-range_sig_rrExt[0])*sigArr[:,0]
sigArr[:,1]=range_sig_ttExt[0]+(range_sig_ttExt[1]-range_sig_ttExt[0])*sigArr[:,1]
sigArr[:,2]=range_sig_rtExt[0]+(range_sig_rtExt[1]-range_sig_rtExt[0])*sigArr[:,2]

# + id="JDFuHILnNlDB" colab_type="code" colab={}
# check to ensure min and max of random array extend beyond stress component ranges
# (a component fails when its sampled minimum lies above the required lower bound
# OR its sampled maximum lies below the required upper bound)
if np.amin(sigArr[:,0])>range_sig_rr[0] or np.amax(sigArr[:,0])<range_sig_rr[1]:
    print('Training data does not extend beyond sig_rr range. Rerun with larger training array size')
    raise SystemExit("Execution halted")
elif np.amin(sigArr[:,1])>range_sig_tt[0] or np.amax(sigArr[:,1])<range_sig_tt[1]:
    print('Training data does not extend beyond sig_tt range. Rerun with larger training array size')
    raise SystemExit("Execution halted")
elif np.amin(sigArr[:,2])>range_sig_rt[0] or np.amax(sigArr[:,2])<range_sig_rt[1]:
    print('Training data does not extend beyond sig_rt range. Rerun with larger training array size')
    raise SystemExit("Execution halted")

# + id="_ScJqxOYcvwy" colab_type="code" colab={}
# assign training/test array columns to appropriate stress variables in cylindrical coordinate system
sig_rr=sigArr[:,0]
sig_tt=sigArr[:,1]
sig_rt=sigArr[:,2]

# + id="54e4s_5gcpUD" colab_type="code" colab={}
# calculate von Mises stress from the three cylindrical stress components
sig_vM=np.sqrt(sig_rr**2-sig_rr*sig_tt+sig_tt**2+3*sig_rt**2)

# + id="kIg4Ao2keCwn" colab_type="code" colab={}
# calculate yield based on von Mises yield criterion
# yield occurs when von Mises stress is equal to or greater than 115% of applied stress
yld=np.zeros(sig_vM.shape)
yld[sig_vM>=1.15]=1

# + id="UP6PwqDtF_-o" colab_type="code" colab={}
# build the binary classification model
# input: stress values (3)
# output: yield (0 or 1) - i.e., binary classification problem

# + id="LB2VWegfLoVB" colab_type="code" colab={}
# separate train and test data (80/20 split)
sigArr_train=sigArr[:int(trainArrSize*0.8)]
sigArr_test=sigArr[int(trainArrSize*0.8):]
yld_train=yld[:int(trainArrSize*0.8)]
yld_test=yld[int(trainArrSize*0.8):]

# + id="bEFqCyMmQ1-L" colab_type="code" colab={}
# define the model: one hidden ReLU layer, sigmoid output for binary classification
model = Sequential()
model.add(Dense(12, input_dim=3, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# + id="MR2uymt_KfWL" colab_type="code" colab={}
# compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# + id="g903pJwidxkk" colab_type="code" colab={}
# print summary of model including total number of parameters
model.summary()

# + id="o4odP2WWKomZ" colab_type="code" colab={}
# fit model using training data
# %%time
# fit the model on the dataset without progress bars
model.fit(sigArr_train, yld_train, epochs=2, batch_size=5, verbose=0)

# + id="FWnyKPeeK59h" colab_type="code" colab={}
# evaluate the model training accuracy
_, accuracy_train = model.evaluate(sigArr_train, yld_train, verbose=0)

# + id="7IpNKrOiLLhl" colab_type="code" colab={}
print('Training accuracy: %.2f%%' % (accuracy_train*100))

# + id="b7COYfxuLVbE" colab_type="code" colab={}
# evaluate the model testing accuracy
_, accuracy_test = model.evaluate(sigArr_test, yld_test, verbose=0)

# + id="yH7He09qNcbD" colab_type="code" colab={}
print('Testing accuracy: %.2f%%' % (accuracy_test*100))

# + id="VJ6PN64L6V_B" colab_type="code" colab={}
# load 2D Cartesian coordinates of nodes in the plate
nodes_plate=loadtxt('nodes.txt', delimiter=',')

# + id="u-H_DI9NBhNk" colab_type="code" colab={}
# load stress values for each of the three components (sig_rr, sig_tt, sig_rt) at every node in the plate
stress_plate=loadtxt('stress.txt', delimiter=',')

# + id="6HKpAb_5H6YB" colab_type="code" colab={}
# load the yield value (0 or 1) for every node in the plate
yield_plate=loadtxt('yield.txt', delimiter=',')

# + id="9ax_7cG7K3iH" colab_type="code" colab={}
# now run the previously fit model on the plate data (validation data) to
# evaluate the model validation accuracy
_, accuracy_valid = model.evaluate(stress_plate, yield_plate, verbose=0)

# + id="QFu6IPH_LM7Z" colab_type="code" colab={}
print('Validation accuracy: %.2f%%' % (accuracy_valid*100))

# + id="iJ0yHJ3JLTGs" colab_type="code" colab={}
# make prediction using model for plate with a hole problem
# model will predict which nodes have yielded to compare with the analytical (baseline) solution
# NOTE: Sequential.predict_classes was deprecated and removed in TF/Keras >= 2.6;
# for a single sigmoid output the documented replacement is thresholding predict at 0.5.
predictions_valid = (model.predict(stress_plate) > 0.5).astype("int32")

# + id="ZG2EocH8Lkgy" colab_type="code" colab={}
# plot comparison between analytical model (baseline) and the neural network model
# %matplotlib inline
plt.figure(figsize=(26,10))
plt.subplot(1, 2, 1)
plt.scatter(nodes_plate[:,0],nodes_plate[:,1],c=yield_plate)
plt.axis('off')
plt.title('Baseline model',fontsize=20)
plt.jet()
plt.colorbar()
plt.subplot(1, 2, 2)
plt.scatter(nodes_plate[:,0],nodes_plate[:,1],c=predictions_valid)
plt.axis('off')
plt.title('Neural network model',fontsize=20)
plt.jet()
plt.colorbar()
plt.show()
vMBinaryClassification_200726.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Now You Code 1: Vote or Retire? # # # Part 1 # # Write a program to ask for your age as input, then output 1) whether or not you can vote and 2) whether your not you can retire. Let's assume the voting age is 18 or higher, and the retirement age is 65 or higher. # # **NOTE:** This program is making two seprate decisions, and thus should have two separate if else statements. # # Example Run: # # ``` # Enter your age: 45 # You can vote. # You cannot retire. # ``` # # ## Step 1: Problem Analysis # # Inputs: # # Outputs: # # Algorithm (Steps in Program): # # # # + #Step 2: write code here # - # # Part 2 # # Now that you have it working, re-write your code to handle bad input using Python's `try... except` statement: # # Example run: # # ``` # Enter your age: threve # That's not an age! # ``` # # **Note:** Exception handling is not part of our algorithm. It's a programming concern, not a problem-solving concern! ## Step 2 (again): write code again but handle errors with try...except # ## Step 3: Questions # # 1. What specific Python Error are we handling (please provide the name of it)? # 2. What happens when you enter an age of `-50`? Does the program still run? Fix your program so that it says `That's not an age` when a value less than zero is entered. # 3. How many times (at minimum) must we execute this program and check the results before we can be reasonably assured it is correct? # # ## Reminder of Evaluation Criteria # # 1. Was the problem attempted (analysis, code, and answered questions) ? # 2. Was the problem analysis thought out? (does the program match the plan?) # 3. Does the code execute without syntax error? # 4. Does the code solve the intended problem? # 5. Is the code well written? 
(easy to understand, modular, and self-documenting, handles errors) #
content/lessons/04/Now-You-Code/NYC1-Vote-Or-Retire.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="CLATnUvpdftH" colab_type="text"
# # jax-bayes CIFAR10 Example --- Traditional ML Approach
#
# ## Set up the environment

# + id="UfMSaNHlceB7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="d04f814f-8140-4a5c-f67a-d3035258bc14"
#see https://github.com/google/jax#pip-installation
# !pip install --upgrade https://storage.googleapis.com/jax-releases/cuda101/jaxlib-0.1.51-cp36-none-manylinux2010_x86_64.whl
# !pip install --upgrade jax
# !pip install git+https://github.com/deepmind/dm-haiku
# !pip install git+https://github.com/jamesvuc/jax-bayes

# + id="l70DSI0ajQJq" colab_type="code" colab={}
import haiku as hk
import jax.numpy as jnp
from jax.experimental import optimizers
import jax

import sys, os, math, time
import numpy as np

# quiet TensorFlow's C++ logging before tensorflow_datasets is imported
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow_datasets as tfds

# + [markdown] id="B686kNdCzFEP" colab_type="text"
# ## Build the dataset loader and CNN

# + id="AzgaUa2owIqg" colab_type="code" colab={}
def load_dataset(split, is_training, batch_size, repeat=True, seed=0):
    """Return a CIFAR-10 split as an iterator of numpy batch dicts.

    repeat=True yields an endless stream of batches; is_training enables
    shuffling with the given seed.
    """
    if repeat:
        ds = tfds.load('cifar10', split=split).cache().repeat()
    else:
        ds = tfds.load('cifar10', split=split).cache()
    if is_training:
        ds = ds.shuffle(10 * batch_size, seed=seed)
    ds = ds.batch(batch_size)
    return tfds.as_numpy(ds)

# build a 32-32-64-32 CNN with max-pooling
# followed by a 128-10-n_classes MLP
class Net(hk.Module):
    def __init__(self, dropout=0.1, n_classes=10):
        super(Net, self).__init__()
        self.conv_stage = hk.Sequential([
            #block 1
            hk.Conv2D(32, kernel_shape=3, stride=1, padding='SAME'),
            jax.nn.relu,
            hk.MaxPool(window_shape=(1,2,2,1),
                       strides=(1,1,1,1), padding='VALID'),
            # block 2
            hk.Conv2D(32, kernel_shape=3, stride=1, padding='SAME'),
            jax.nn.relu,
            hk.MaxPool(window_shape=(1,2,2,1),
                       strides=(1,1,1,1), padding='VALID'),
            # block 3
            hk.Conv2D(64, kernel_shape=3, stride=1, padding='SAME'),
            jax.nn.relu,
            hk.MaxPool(window_shape=(1,2,2,1),
                       strides=(1,1,1,1), padding='VALID'),
            # block 4
            hk.Conv2D(32, kernel_shape=3, stride=1, padding='SAME')
        ])
        self.mlp_stage = hk.Sequential([
            hk.Flatten(),
            hk.Linear(128),
            jax.nn.relu,
            hk.Linear(n_classes)
        ])
        # dropout probability applied between the conv and MLP stages
        self.p_dropout = dropout

    def __call__(self, x, use_dropout=True):
        x = self.conv_stage(x)
        # rate falls back to 0.0 (no-op) when use_dropout is False
        dropout_rate = self.p_dropout if use_dropout else 0.0
        x = hk.dropout(hk.next_rng_key(), dropout_rate, x)
        return self.mlp_stage(x)

# standard normalization constants
mean_norm = jnp.array([[0.4914, 0.4822, 0.4465]])
std_norm = jnp.array([[0.247, 0.243, 0.261]])

#define the net-function
# NOTE(review): dropout=0.0 makes the dropout machinery a no-op even when
# use_dropout=True -- confirm whether a nonzero rate (e.g. the class default
# of 0.1) was intended here.
def net_fn(batch, use_dropout):
    net = Net(dropout=0.0)
    x = batch['image']/255.0
    x = (x - mean_norm) / std_norm
    return net(x, use_dropout)

# + id="eo-Gypdbo0wY" colab_type="code" colab={}
# hyperparameters
lr = 1e-3
reg = 1e-4

# instantiate the network
net = hk.transform(net_fn)

# build the optimizer
opt_init, opt_update, opt_get_params = optimizers.rmsprop(lr)

# standard L2-regularized crossentropy loss function
def loss(params, rng, batch):
    logits = net.apply(params, rng, batch, use_dropout=True)
    labels = jax.nn.one_hot(batch['label'], 10)

    l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))
    softmax_crossent = - jnp.mean(labels * jax.nn.log_softmax(logits))

    return softmax_crossent + reg * l2_loss

@jax.jit
def accuracy(params, batch):
    # fixed rng key is fine here: dropout is disabled at evaluation time
    preds = net.apply(params, jax.random.PRNGKey(101), batch, use_dropout=False)
    return jnp.mean(jnp.argmax(preds, axis=-1) == batch['label'])

@jax.jit
def train_step(i, opt_state, rng, batch):
    # one optimizer update; returns (loss value, new optimizer state)
    params = opt_get_params(opt_state)
    fx, dx = jax.value_and_grad(loss)(params, rng, batch)
    opt_state = opt_update(i, dx, opt_state)
    return fx, opt_state

# + [markdown] id="rfv4Mldkdt40" colab_type="text"
# ## Load the Initialization, Val and Test Batches & Do the Optimization

# + id="NYeEembgpgSk" colab_type="code" colab={}
# NOTE(review): val_batches is drawn from the *train* split, so the reported
# "val acc" is measured on training data -- verify this is intentional.
init_batches = load_dataset("train", is_training=True, batch_size=256)
val_batches = load_dataset("train", is_training=False, batch_size=1_000)
test_batches = load_dataset("test", is_training=False, batch_size=1_000)

# + id="-MDzIu4uxmeD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 397} outputId="240f3443-5529-4e12-9ad8-7af870738e3d"
# %%time

# initialize the parameters
params = net.init(jax.random.PRNGKey(42), next(init_batches), use_dropout=True)
opt_state = opt_init(params)

# initialize a key for the dropout
rng = jax.random.PRNGKey(2)

for epoch in range(100):
    #generate a shuffled epoch of training data
    train_batches = load_dataset("train", is_training=True, batch_size=256,
                                 repeat=False, seed=epoch)
    for batch in train_batches:
        # run an optimization step
        train_loss, opt_state = train_step(epoch, opt_state, rng, batch)

        # make more rng for the dropout
        rng, _ = jax.random.split(rng)

    if epoch % 5 == 0:
        params = opt_get_params(opt_state)
        val_acc = accuracy(params, next(val_batches))
        test_acc = accuracy(params, next(test_batches))
        print(f"epoch = {epoch}"
              f" | train loss = {train_loss:.4f}"
              f" | val acc = {val_acc:.3f}"
              f" | test acc = {test_acc:.3f}")
examples/deep/cifar10/cifar10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ray Tune - Search Algorithms and Schedulers # # © 2019-2021, Anyscale. All Rights Reserved # # ![Anyscale Academy](../images/AnyscaleAcademyLogo.png) # # This notebook introduces the concepts of search algorithms and schedulers which help optimize HPO. We'll see an example that combines the use of one search algorithm and one schedulers. # # The full set of search algorithms provided by Tune is documented [here](https://docs.ray.io/en/latest/tune/api_docs/suggestion.html), along with information about implementing your own. The full set of schedulers provided is documented [here](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html). # We need to install a few libraries. We'll explain what they are below. # !pip install hpbandster ConfigSpace # !python --version # > **NOTE:** If you are see **Python 3.6** in the output from the previous cell, run remove the `#` in the following cell and run it. This will fix a dependency bug needed for this notebook. # > # > Afterwards, **restart the kernel for this notebook**, using the circular error in the tool bar. After that, proceed with the rest of the notebook. # > # > If you have **Python 3.7** or later, skip these steps. # + # #!pip install statsmodels -U --pre # - # ## About Search Algorithms # # Tune integrates many [open source optimization libraries](https://docs.ray.io/en/latest/tune/api_docs/suggestion.html), each of which defines the parameter search space in its own way. Hence, you should read the corresponding documentation for an algorithm to understand the particular details of using it. 
# # Some of the search algorithms supported include the following: # # * [Bayesian Optimization](https://github.com/fmfn/BayesianOptimization): This constrained global optimization process builds upon bayesian inference and gaussian processes. It attempts to find the maximum value of an unknown function in as few iterations as possible. This is a good technique for optimization of high cost functions. # * [BOHB (Bayesian Optimization HyperBand](https://github.com/automl/HpBandSter): An algorithm that both terminates bad trials and also uses Bayesian Optimization to improve the hyperparameter search. It is backed by the [HpBandSter](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#tune-scheduler-bohb) library. BOHB is intended to be paired with a specific scheduler class: [HyperBandForBOHB](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#tune-scheduler-bohb). # * [HyperOpt](http://hyperopt.github.io/hyperopt): A Python library for serial and parallel optimization over awkward search spaces, which may include real-valued, discrete, and conditional dimensions. # * [Nevergrad](https://github.com/facebookresearch/nevergrad): HPO without computing gradients. # # These and other algorithms are described in the [documentation](https://docs.ray.io/en/latest/tune/api_docs/suggestion.html). # # A limitation of search algorithms used by themselves is they can't affect or stop training processes, for example early stopping of trail that are performing poorly. The schedulers can do this, so it's common to use a compatible search algorithm with a scheduler, as we'll show in the first example. # ## About Schedulers # # Tune includes distributed implementations of several early-stopping algorithms, including the following: # # * [Median Stopping Rule](https://research.google.com/pubs/pub46180.html): It applies the simple rule that a trial is aborted if the results are trending below the median of the previous trials. 
# * [HyperBand](https://arxiv.org/abs/1603.06560): It structures search as an _infinite-armed, stochastic, exploration-only, multi-armed bandit_. See the [Multi-Armed Bandits lessons](../ray-rllib/multi-armed-bandits/00-Multi-Armed-Bandits-Overview.ipynb) for information on these concepts. The infinite arms correspond to the tunable parameters. Trying values stochastically ensures quick exploration of the parameter space. Exploration-only is desirable because for HPO, we aren't interested in _exploiting_ parameter combinations we've already tried (the usual case when using MABs where rewards are the goal). Instead, we need to explore as many new parameter combinations as possible. # * [ASHA](https://openreview.net/forum?id=S1Y7OOlRZ). This is an asynchronous version of HyperBand that improves on the latter. Hence it is recommended over the original HyperBand implementation. # # Tune also includes a distributed implementation of [Population Based Training (PBT)](https://deepmind.com/blog/population-based-training-neural-networks). When the PBT scheduler is enabled, each trial variant is treated as a member of the _population_. Periodically, top-performing trials are checkpointed, which means your [`tune.Trainable`](https://docs.ray.io/en/latest/tune/api_docs/trainable.html#tune-trainable) object (e.g., the `TrainMNIST` class we used in the previous exercise) has to support save and restore. # # Low-performing trials clone the checkpoints of top performers and perturb the configurations in the hope of discovering an even better variation. PBT trains a group of models (or RLlib agents) in parallel. So, unlike other hyperparameter search algorithms, PBT mutates hyperparameters during training time. This enables very fast hyperparameter discovery and also automatically discovers good [annealing](https://en.wikipedia.org/wiki/Simulated_annealing) schedules. 
# # See the [Tune schedulers](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html) for a complete list and descriptions. # ## Examples # # Let's initialize Ray as before: import ray from ray import tune ray.init(ignore_reinit_error=True) # ### BOHB # # BOHB (Bayesian Optimization HyperBand) is an algorithm that both terminates bad trials and also uses Bayesian Optimization to improve the hyperparameter search. The [Tune implementation](https://docs.ray.io/en/latest/tune/api_docs/suggestion.html#bohb-tune-suggest-bohb-tunebohb) is backed by the [HpBandSter library](https://github.com/automl/HpBandSter), which we must install, along with [ConfigSpace](https://automl.github.io/HpBandSter/build/html/quickstart.html#searchspace), which is used to define the search space specification: # We use BOHB with the scheduler [HyperBandForBOHB](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#bohb-tune-schedulers-hyperbandforbohb). # Let's try it. We'll use the same MNIST example from the previous lesson, but this time, we'll import the code from a file in this directory, `mnist.py`. Note that the implementation of `TrainMNIST` in the file has enhancements not present in the previous lesson, such as methods to support saving and restoring checkpoints, which are required to be used here. See the code comments for details. from mnist import ConvNet, TrainMNIST, EPOCH_SIZE, TEST_SIZE, DATA_ROOT # Import and configure the `ConfigSpace` object we need for the search algorithm. import ConfigSpace as CS from ray.tune.schedulers.hb_bohb import HyperBandForBOHB from ray.tune.suggest.bohb import TuneBOHB # + config_space = CS.ConfigurationSpace() # There are also UniformIntegerHyperparameter and UniformFloatHyperparameter # objects for defining integer and float ranges, respectively. 
For example: # config_space.add_hyperparameter( # CS.UniformIntegerHyperparameter('foo', lower=0, upper=100)) config_space.add_hyperparameter( CS.CategoricalHyperparameter('lr', choices=[0.001, 0.01, 0.1])) config_space.add_hyperparameter( CS.CategoricalHyperparameter('momentum', choices=[0.001, 0.01, 0.1, 0.9])) config_space # + experiment_metrics = dict(metric="mean_accuracy", mode="max") search_algorithm = TuneBOHB(config_space, max_concurrent=4, **experiment_metrics) scheduler = HyperBandForBOHB( time_attr='training_iteration', reduction_factor=4, max_t=200, **experiment_metrics) # - # Through experimentation, we determined that `max_t=200` is necessary to get good results. For the smallest learning rate and momentum values, it takes longer for training to converge. analysis = tune.run(TrainMNIST, scheduler=scheduler, search_alg=search_algorithm, num_samples=12, # Force it try all 12 combinations verbose=1 ) stats = analysis.stats() secs = stats["timestamp"] - stats["start_time"] print(f'{secs:7.2f} seconds, {secs/60.0:7.2f} minutes') print("Best config: ", analysis.get_best_config(metric="mean_accuracy")) analysis.dataframe().sort_values('mean_accuracy', ascending=False).head() analysis.dataframe()[['mean_accuracy', 'config/lr', 'config/momentum']].sort_values('mean_accuracy', ascending=False) # The runs in the previous lesson, for the class-based and the function-based Tune APIs, took between 12 and 20 seconds (on my machine), but we only trained for 20 iterations, where as here we went for 100 iterations. That also accounts for the different results, notably that a much smaller momentum value `0.01` and `0.1` perform best here, while for the the previous lesson `0.9` performed best. This is because a smaller momentum value will result in longer training times required, but more fine-tuned iterating to the optimal result, so more training iterations will favor a smaller momentum value. 
Still, the mean accuracies among the top three or four combinations are quite close. # ## Exercise - Population Based Training # # Read the [documentation](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#population-based-training-tune-schedulers-populationbasedtraining) on _population based training_ to understand what it is doing. The next cell configures a PBT scheduler and defines other things you'll need. # # See also the discussion for the results in the [solutions](solutions/03-Search-Algos-and-Schedulers-Solutions.ipynb). # # > **NOTE:** For a more complete example using MNIST and PyTorch, see [this example code](https://github.com/ray-project/ray/blob/master/python/ray/tune/examples/mnist_pytorch_lightning.py). # + from ray.tune.schedulers import PopulationBasedTraining pbt_scheduler = PopulationBasedTraining( time_attr='training_iteration', perturbation_interval=10, # Every N time_attr units, "perturb" the parameters. hyperparam_mutations={ "lr": [0.001, 0.01, 0.1], "momentum": [0.001, 0.01, 0.1, 0.9] }, **experiment_metrics) # Note: This appears to be needed to avoid a "key error", but in fact these values won't change # in the analysis.dataframe() object, even though they will be tuned by the PBT scheduler. # So when you look at the analysis.dataframe(), look at the `experiment_tag` to see the actual values! config = { "lr": 0.001, # Use the lowest values from the previous definition "momentum": 0.001 } # - # Now run the following cell, modified from above, which makes these changes: # 1. Uses the new scheduler. # 2. Removes the search_alg argument. # 3. Adds the `config` argument. # 4. Don't allow it to keep going past `0.97` accuracy for `600` iterations. # 5. Use `1` for the `verbose` argument to reduce the "noise". # # Then run it. # # > **WARNING:** This will run for a few minutes. 
# + analysis = tune.run(TrainMNIST, scheduler=pbt_scheduler, config=config, stop={"mean_accuracy": 0.97, "training_iteration": 600}, num_samples=8, verbose=1 ) stats = analysis.stats() secs = stats["timestamp"] - stats["start_time"] print(f'{secs:7.2f} seconds, {secs/60.0:7.2f} minutes') # - # Look at the `analysis` data of interest, as done previously. (You might want to focus on other columns in the dataframe.) How well does PBT work? # The final lesson in this tutorial discusses the new Ray SGD library. ray.shutdown() # "Undo ray.init()".
ray-tune/03-Search-Algos-and-Schedulers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Accessing and managing users # Users are an indispensable part of your web GIS. As the number of users grows, you can see value in automating your management tasks such as provisioning licenses, privileges, creating and removing user accounts etc. The `gis` module provides you with `User` and `UserManager` classes to represent users as objects and help you accomplish the most common tasks. In this guide, we will learn about: # # * [About your account](#about-your-account) # * [Properties of a `User` object](#properties-of-a-user-object) # * [Searching for user accounts](#searching-for-user-accounts) # * [Creating new user accounts](#creating-new-user-accounts) # * [About user roles](#about-user-roles) # * [Managing user roles](#managing-user-roles) # * [Creating new roles](#creating-new-roles) # * [Creating a user with a custom role](#creating-a-user-with-a-custom-role) # * [Listing all the custom roles in an org](#listing-all-the-custom-roles-in-an-org) # * [Deleting user accounts](#deleting-user-accounts) # * [Accessing user content](#accessing-user-content) # * [Reassigning user content](#reassigning-user-content) # # As you might have seen the pattern with `ContentManager` and `Item` objects, the `UserManager` object is a resource manager that gives you access to `User` objects. You access a `UserManager` object not by instantiating that class through its constructor, but by accessing the `users` property of your `GIS` object. This is the typical pattern of usage throughout the `gis` module. # <a id="about-your-account"></a> # ## About your account # Let us get to know a bit about our logged in account before we observe how to manage other user accounts. 
Let us import the `GIS` class from `gis` module and connect to an ArcGIS Enterprise: from arcgis.gis import GIS gis = GIS("portal url", "username", "password") # You can access your user account by accessing `me` property as shown below: me = gis.users.me me # Similar to `Item` objects, when using the Jupyter notebook IDE, you can visualize `User` objects in rich HTML representation with thumbnails and attribute information. # <a id="properties-of-a-user-object"></a> # ## Properties of a `User` object # You can query much more information about the user account as properties on the `User` object: me.access # You can find out when an account was last active and determine if an account was abandoned and remove it if necessary. # + import time # convert Unix epoch time to local time created_time = time.localtime(me.created/1000) print("Created: {}/{}/{}".format(created_time[0], created_time[1], created_time[2])) last_accessed = time.localtime(me.lastLogin/1000) print("Last active: {}/{}/{}".format(last_accessed[0], last_accessed[1], last_accessed[2])) # - # Let us print some more information about this account print(me.description, " ", me.email, " ", me.firstName, " ", me.lastName, " ", me.fullName) print(me.level, " ", me.mfaEnabled, " ", me.provider, " ", me.userType) # You can determine how much storage is being used by this account quota = me.storageQuota used = me.storageUsage pc_usage = round((used / quota)*100, 2) print("Usage: " + str(pc_usage) + "%") # You can determine the groups the user is a member of: # + user_groups = me.groups print("Member of " + str(len(user_groups)) + " groups") # groups are returned as a dictionary. Lets print the first dict as a sample user_groups[0] # - # <a id="searching-for-user-accounts"></a> # ## Searching for user accounts # The `search()` method of `UserManager` class helps you search for users of the org. 
The `query` parameter in the `search()` method accepts standard [ArcGIS REST API queries](http://resources.arcgis.com/en/help/arcgis-rest-api/#/Search_reference/02r3000000mn000000/) and behaves similar to the search method on `ContentManager` and `GroupManager` classes. To illustrate this better, let us search ArcGIS Online as there are many more users available there. # anonymous connection to ArcGIS Online ago_gis = GIS() # search the users whose email address ends with esri.com esri_public_accounts = ago_gis.users.search(query='email = <EMAIL>') len(esri_public_accounts) # Each element in the list returned is a `User` object that you can query. # lets filter out Esri curator accounts from this list curator_accounts = [acc for acc in esri_public_accounts if acc.username.startswith('Esri_Curator')] curator_accounts curator_accounts[0] # Once you know a user's username, you can access that object using the **`get()`** method. Let us access the Esri curator account for historical maps esri_hist_maps = ago_gis.users.get(username='Esri_Curator_Historical') esri_hist_maps # <a id="creating-new-user-accounts"></a> # ## Creating new user accounts # You can add new users to the org using either the `signup()` or `create()` methods available on the `UserManager` class. The `signup()` method is limited in scope as it can be used only for adding built-in accounts to an ArcGIS Enterprise instance and not for an org that is hosted on ArcGIS Online. However, you can call the `signup()` anonymously and does not require admin privileges unlike the `create()` method. # > Note, you can disable self-signup in your ArcGIS Enterprise which would render the `signup()` unusable if you wanted to turn the org invite-only. # # You need admin privileges to call the `create()` method. This method is very powerful in an instance of ArcGIS Enterprise, as it allows you to create new accounts from either the arcgis built-in credential store or your enterprise's credential store. 
For an ArcGIS Online Organization, you can only create users that will use the built-in credential store. For the case of accounts from a built-in credential store, you would provide a password when the account is created. The user can change it at any time once they login. For accounts from your enterprise's credential store, you can ignore the `password` parameter and your users will authenticate through that credential store. # # In addition to `role` that can be set, a `level` can be used to allocate accounts based on the privileges that members need. The level determines which privileges are available to the member. The enterprise offers two levels of membership. Level 1 membership is for members who only need privileges to **view** content, such as maps and apps, that has been shared with them through the organization, as well as join groups within the organization. Level 2 membership is for members who need to view, create, and share content and own groups, in addition to other tasks. # # Let us log in to an ArcGIS Enterprise and create some users: # let us create a built-in account with username: demo_user1 with org_user privilege demo_user1 = gis.users.create(username = 'demo_user1', password = '<PASSWORD>', firstname = 'demo', lastname = 'user', email = '<EMAIL>', description = 'Demonstrating how to create users using ArcGIS Python API', role = 'org_user', level = 2, provider = 'arcgis') demo_user1 # > Note that we specified `arcgis` as the `provider` argument. If you were creating accounts from your enterprise credential store, you would specify this value as `enterprise` and use the `idpUsername` parameter to specify the username of the user in that credential store. To learn more about this configuration, refer to this help topic on [setting up enterprise logins](http://enterprise.arcgis.com/en/portal/latest/administer/windows/about-configuring-portal-authentication.htm#ESRI_SECTION1_83F7B85FEF594A6B96997AF3CADF3D38). 
# # Note, the `role` parameter was specified as `org_user`. This takes us to the next section on `Role` and `RoleManager` objects. # <a id="about-user-roles"></a> # ### About user roles # ArcGIS provides a security concept called roles which defines the privileges a user has within an organization. By default, your org has 3 roles - `org_user`, `org_publisher` and `org_admin`. You can refer to [this topic on organizational roles](http://doc.arcgis.com/en/arcgis-online/reference/roles.htm) to learn about these three roles and their privileges. In summary, a user role can be an active user of the org, create items, join groups and share content. A publisher role has all of user privileges and can create hosted content and perform analysis. An administrator role has all possible privileges. # # Depending on the size of your org and the security needs, you can customize this and create any number of roles with fine grained privileges. For reference on custom roles in an org, refer to [this doc](http://doc.arcgis.com/en/arcgis-online/reference/roles.htm#ESRI_SECTION1_7071F89DE04B448CA833A4164A98DF94) # # To know about the role of a `User` object, you can query the `role` property: demo_user1_role = demo_user1.role print(type(demo_user1_role)) print(demo_user1_role) # Since this user was created with a built in role specified as a string, we get back a string with value `org_user`. # <a id ="managing-user-roles"></a> # ### Managing user roles # Let us create a new role that can only publish tile layers. This role should have none of admin privileges and can have only some of user privileges, namely creating new items and joining groups. # # <a id = "creating-new-roles"></a> # #### Creating new roles # To create a new role, call the `create()` on `RoleManager` class. As with any resource manager, you should access it through the `roles` property on a `UserManager` object. You should access the `UserManager` object in turn through the `users` property of your `GIS` object. 
# + # create a tiles publisher role privilege_list = ['portal:publisher:publishTiles', 'portal:user:createItem', 'portal:user:joinGroup'] tiles_pub_role = gis.users.roles.create(name = 'tiles_publisher', description = 'User that can publish tile layers', privileges = privilege_list) tiles_pub_role # - # inspect the privileges of this role tiles_pub_role.privileges # **Note**: the `privileges` parameter was provided a list of strings specifying each individual privilege. Refer to the [api ref doc on the `privileges` parameter](http://esri.github.io/arcgis-python-api/apidoc/html/arcgis.gis.html#arcgis.gis.Role.privileges) to know about the finite list of strings you can use. # <a id="creating-a-user-with-a-custom-role"></a> # #### Creating a user with a custom role # Now that we created a `Role` object with desired privileges, let us create a new user with this role. The workflow here is to create a user account with one of 3 default roles then use the `update_role()` method of the `User` object to update to a custom role. # + tiles_pub_user = gis.users.create(username='tiles_publisher', password = '<PASSWORD>', firstname = 'tiles', lastname = 'publisher', email = '<EMAIL>', description = 'custom role, can only publish tile layers', role = 'org_user') #org_user as thats the closest. tiles_pub_user # - # Querying the `privileges` property of a `User` object returns a list of strings with fine grained privileges. When creating a `Role` object, you can pick and choose from this or refer to the [api ref doc](http://esri.github.io/arcgis-python-api/apidoc/html/arcgis.gis.html#arcgis.gis.Role.privileges). tiles_pub_user.privileges # Let us update this user's privileges tiles_pub_user.update_role(role = tiles_pub_role) # query the privileges to confirm tiles_pub_user.privileges # Querying the `roleId` property of a `User` returns you the custom roles' ID. 
You can use this to search for that role to know more details or create another user with the same role: tiles_pub_user.roleId searched_role = gis.users.roles.get_role(tiles_pub_user.roleId) searched_role.description # <a id ="listing-all-the-custom-roles-in-an-org"></a> # #### Listing all the custom roles in an org # When migrating users from one org to another or even to duplicate an org on new infrastructure, you would go through the process of cloning the users and their roles. For this, you can get the list of roles using the `all()` method on the `RolesManager` resource object: gis.users.roles.all(max_roles=50) # <a id = "deleting-user-accounts"></a> # ## Deleting user accounts # You can delete user accounts by calling the `delete()` method on a `User` object from an account that has administrator privileges. However, deleting raises important questions such as what happens to the content owned by that user? Further, ArcGIS does not allow you to delete users until you have dealt with that users' items and groups. Thus as an administrator, it becomes useful to list and view the content owned by any user in your org. # # <a id = "accessing-user-content"></a> # ### Accessing user content # Once you have a `User` object, you can view the folders and items owned by the user by querying the `folders` property and calling the `items()` method. 
# let us access an account named publisher1 publisher1 = gis.users.get('publisher1') publisher1 #list all folders as dictionaries publisher1_folder_list = publisher1.folders publisher1_folder_list # + # list all items belonging to this user publisher1_item_list_rootfolder = publisher1.items() print("Total number of items in root folder: " + str(len(publisher1_item_list_rootfolder))) #access the first item for a sample publisher1_item_list_rootfolder[0] # - # list all items in the first folder publisher1.items(folder = publisher1_folder_list[0]) # Thus using a `GIS` object created with an account that has admin privileges, you were able to query the contents of another user without knowing that user's password or logging in as that user. # <a id="reassigning-user-content"></a> # ### Reassigning user content # As an administrator, you have the privileges to list and view other users' content. When the time comes to delete a user account, you can filter these items and choose to preserve some of them and delete the rest. # # Let us delete the `tiles_pub_user` account we created earlier in this guide. # list the items owned by the user tiles_pub_user_items = tiles_pub_user.items() tiles_pub_user_items # You can reassign specific items to another user by calling the `reassign_to()` method on that `Item` object. Let us reassign the tile layer named `Transport_tiles` to `publisher1` account from earlier. We can get rid of the redundant ocean_tiles items and reassign the rest, to the account `arcgis_python_api`. Since this user does not have privilege to create groups, we do not have to worry about that. We can then delete this user safely. # reassign Transport_tiles to publisher1 transport_tiles_item = tiles_pub_user_items[2] transport_tiles_item # the reassign_to() method accepts user name as a string. 
We can also specify a destination folder name transport_tiles_item.reassign_to(target_owner = 'publisher1', target_folder= 'f1_english') # now let us get rid of redundant ocean tiles items tiles_pub_user_items[1].delete() tiles_pub_user_items[-1].delete() # an index of -1 in a list refers to the last item # Now we are left with a few more items which should all go to user `arcgis_python_api`. We can either call `reassign_to()` method of the `User` object or call the `delete()` method of the `User` object and pass this information to the `reassign_to` parameter. Let's do that: tiles_pub_user.delete(reassign_to='arcgis_python_api') # Thus, we have successfully deleted a user after taking care of that user's content.
guide/03-the-gis/accessing-and-managing-users.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV

# import the data and set to pets dataframe
pets = pd.read_csv('train.csv', index_col='AnimalID')
pets.head()

# target column: inspect the class balance of the animal outcomes
pets['OutcomeType'].value_counts(normalize=True)

# One-hot encode the target so each outcome class becomes its own 0/1 column
# (use_cat_names=True names the columns after the category values).
ohe = OneHotEncoder(use_cat_names=True)
target = ohe.fit_transform(pets['OutcomeType'])
target.head()

# +
y = target
# BUG FIX: the original `pets.drop(columns=target)` passed the one-hot-encoded
# DataFrame as the column labels, which fails with a KeyError because its
# generated column names (e.g. 'OutcomeType_Adoption') do not exist in `pets`.
# The intent is to drop the raw target column so X holds only the features.
X = pets.drop(columns=['OutcomeType'])
# -
Antony_Farag_pre_buildweek_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src='./img/LogoWekeo_Copernicus_RGB_0.png' align='right' width='20%'></img> # # Tutorial on climate data access through WEkEO # This tutorial focusses on how to access climate data through [WEkEO](https://www.wekeo.eu/), the EU Copernicus DIAS (Data and Information Access Service) reference service for environmental data. In addition to data, WEkEO provides virtual processing environments and skilled user support. # # WEkEO offers access to satellite data from the Sentinel missions, and many products produced by the Copernicus climate change, atmosphere, marine and land monitoring services. # # The Copernicus Climate Change service provides authoritative information about the past, present and future climate. Its product portfolio includes the following: # # - **Satellite and in-situ observations** # - **Reanalysis** # - **Seasonal forecasts** # - **Climate projections** # - **Climate indices** # # This data is accessible through the [C3S Climate Data Store (CDS)](https://cds.climate.copernicus.eu/), and in future also through WEkEO. While some C3S datasets are available through WEkEO, this is still work in progress. This Jupyter Notebook demonstrates how to access data through WEkEO, but please be aware that for the time being, C3S data should be accessed via the CDS. # # Users can access data from WEkEO either directly from the [WEkEO web platform](https://www.wekeo.eu/), or through the [Harmonised Data Access (HDA) API](https://www.wekeo.eu/documentation/using_jupyter_notebooks), which is a REST interface. # # This Jupyter Notebook is a step-by-step guide on how to search for and download data from WEkEO using the `HDA API`. It makes use of functions stored in the notebook [hda_api_functions](./hda_api_functions.ipynb). 
# # The tutorial consists of the following steps: # # 1. [Search for datasets on WEkEO](#wekeo_search) # 2. [Get your WEkEO API key](#wekeo_api_key) # 3. [Initialise the WEkEO Harmonised Data Access request](#wekeo_hda_request) # 4. [Load data descriptor file and request data](#wekeo_request) # 5. [Download requested data](#wekeo_download) # # Having downloaded data, follow the step below to view it and create a simple plot: # # 6. [View and plot data](#wekeo_view) # #### Load required libraries # + import os import sys import json from zipfile import ZipFile import time import base64 from IPython.core.display import HTML import requests import warnings warnings.filterwarnings('ignore') import numpy as np import xarray as xr # - # #### Load helper functions from hda_api_functions import * # <hr> # ### <a id='wekeo_search'></a>1. Search for datasets on WEkEO # Under [WEkEO DATA](https://wekeo.eu/data?view=catalogue). Clicking the + to add a layer, opens a catalogue search. Here you can use free text, or you can use the filter options on the left to refine your search and look by satellite plaform, sensor, Copernicus service, area (region of interest), general time period (past or future), as well as through a variety of flags. # # You can click on the dataset you are interested in and you will be guided to a range of details including the dataset temporal and spatial extent, collection ID, and metadata. # # Now search for the product `Sea level daily gridded data for the global ocean from 1993 to present`. You can find it more easily by selecting 'C3S (Climate)' in the 'COPERNICUS SERVICE' filter group. # # Once you have found it, select 'Details' to read the dataset description. 
# # <br> # # <div style='text-align:center;'> # <figure><img src='./img/WEkEO_data.png' width='70%' /> # <figcaption><i>WEkEO interface to search for datasets</i></figcaption> # </figure> # </div> # The dataset description provides the following information: # - **Abstract**, containing a general description of the dataset, # - **Classification**, including the Dataset ID # - **Resources**, such as a link to the Product Data Format Specification guide, and JSON metadata # - **Contacts**, where you can find further information about the data source from its provider. # # You need the `Dataset ID` to request data from the Harmonised Data Access API. # # <br> # # <div style='text-align:center;'> # <figure><img src='./img/SeaLevel_info.png' width='40%' /> # <figcaption><i>Dataset information on WEkEO</i></figcaption> # </figure> # </div> # <br> # # Let's store the Dataset ID as a variable called `dataset_id` to be used later. dataset_id = "EO:ECMWF:DAT:SEA_LEVEL_DAILY_GRIDDED_DATA_FOR_GL" # Now select `Add to map` in the data description to add the selected dataset to the list of layers in your map view. Once the dataset appears as a layer, select the `subset and download` icon. This will enable you to specify the variables, temporal and in some cases geographic extent of the data you would like to download. Select `2019` as year, `August` as month, and `15` as day. Then select `Zip file` as format. # # Now select `Show API request`. This will show the details of your selection in `JSON` format. If you now select `Copy`, you can copy these details to the clipboard then paste it either into a text file to create a `JSON` file (see example [here](./SeaLevel_data_descriptor.json)), or paste it directly into the cell below. # # The Harmonised Data Access API can read this information, which is in the form of a dictionary with the following keys: # - `datasetID`: the dataset's collection ID # - `stringChoiceValues`: type of dataset, e.g. 
'Non Time Critical' # - `dataRangeSelectValues`: time period you would like to retrieve data # - `boundingBoxValues`: optional to define a subset of a global field # # <br> # # <div style='text-align:center;'> # <figure><img src='./img/SeaLevel_params_json.png' width='60%' /> # <figcaption><i>Displaying a JSON query from a request made to the Harmonised Data Access API through the data portal</i></figcaption> # </figure> # </div> # <br> # If you created a `JSON` file, you can load it with `json.load()`: try: with open('./SeaLevel_data_descriptor.json', 'r') as f: data = json.load(f) print('Your JSON file:') print(data) except: print('Your JSON file is not in the correct format, or is not found, please check it!') # Alternatively, you can paste the dictionary describing your data into a cell, as done below: data = { "datasetId": "EO:ECMWF:DAT:SEA_LEVEL_DAILY_GRIDDED_DATA_FOR_GLOBAL_OCEAN_1993_PRESENT", "multiStringSelectValues": [ { "name": "variable", "value": [ "all" ] }, { "name": "year", "value": [ "2019" ] }, { "name": "month", "value": [ "08" ] }, { "name": "day", "value": [ "15" ] } ], "stringChoiceValues": [ { "name": "format", "value": "zip" } ] } # ### <a id='wekeo_api_key'></a>2. Get the WEkEO API key # In order to interact with WEkEO's Harmonised Data Access API, each user requires an `API token`. This token can be generated from an `API Key`. Your current token can be found in your [WEkEO Dashboard](https://www.wekeo.eu/web/guest/dashboard) under Settings, but it is better to do it in a programmatic way, as shown below, as tokens have a limited lifetime of 1 hour! # # The `API key` can generated by encoding your `username` and `password` to Base64. You can use the function [generate_api_key](./hda_api_functions.ipynb#generate_api_key) to programmatically generate your Base64-encoded api key. For this, you have to replace the 'username' and 'password' strings with your WEkEO username and password in the cell below. 
# # Alternatively, you can go to this [website](https://www.base64encode.org/) that allows you to manually encode your `username:password` combination. An example of an encoded key is `wekeo-test:wekeo-test`, which is encoded to `d2VrZW8tdGVzdDp3ZWtlby10ZXN0`. # your WEkEO API username and password (needs to be in ' ') user_name = 'USERNAME' password = 'PASSWORD' api_key = generate_api_key(user_name, password) display(HTML('Your API key is: <b>'+api_key+'</b>')) # #### Alternative: enter manually the generated api key api_key = # ### <a id='wekeo_hda_request'></a>3. Initialise the Harmonised Data Access (HDA) API request # In order to initialise an API request, you have to initialise a dictionary that contains information on `dataset_id`, `api_key` and `download_directory_path`. # # Please enter the path of the directory where the data shall be downloaded to. # Enter here the directory path where you want to download the data to download_dir_path = os.getcwd() # With `dataset_id`, `api_key` and `download_dir_path`, you can initialise the dictionary with the function [init](./hda_api_functions.ipynb#init). hda_dict = init(dataset_id, api_key, download_dir_path) # #### Request access token # Once initialised, you can request an access token with the function [get_access_token](./hda_api_functions.ipynb#get_access_token). The access token is stored in the `hda_dict` dictionary. hda_dict = get_access_token(hda_dict) # #### Accept Terms and Conditions (if applicable) # You might need to accept the Terms and Conditions, which you can do with the function [acceptTandC](./hda_api_functions.ipynb#acceptTandC). hda_dict = acceptTandC(hda_dict) # ### <a id='wekeo_request'></a>4. Request data # #### Initiate the request by assigning a job ID # The function [get_job_id](./hda_api_functions.ipynb#get_job_id) will launch your data request and your request is assigned a `job ID`. 
hda_dict = get_job_id(hda_dict, data) # #### Build list of file names to be ordered and downloaded # The next step is to gather a list of file names available, based on your assigned `job ID`. The function [get_results_list](./hda_api_functions.ipynb#get_results_list) creates the list. hda_dict = get_results_list(hda_dict) # #### Create an `order ID` for each file to be downloaded # The next step is to create an `order ID` for each file name to be downloaded. You can use the function [get_order_ids](./hda_api_functions.ipynb#get_order_ids). hda_dict = get_order_ids(hda_dict) # ### <a id='wekeo_download'></a>5. Download requested data # As a final step, you can use the function [download_data](./hda_api_functions.ipynb#download_data) to initialize the data download and to download each file that has been assigned an `order ID`. hda_dict = download_data(hda_dict) # ### <a id='wekeo_view'></a>6. View and plot data # First we need to unzip the file we downloaded: # + zip_file = r'dataset-satellite-sea-level-global-48ffff29-fc03-4dc0-bf38-6e071421c012.zip' # Create a ZipFile Object and load sample.zip in it with ZipFile(zip_file, 'r') as zipObj: # Extract all the contents of zip file in current directory zipObj.extractall() # - # Having unzipped the file, notice that the data is in NetCDF format (.nc file). This is a commonly used format for array-oriented scientific data. # # To read and view this data we will make use of the Xarray library. Xarray is an open source project and Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun! We will read the data from our NetCDF file into an Xarray **"dataset"** nc_file = r'dt_global_twosat_phy_l4_20190815_vDT2018.nc' ds = xr.open_dataset(nc_file) # Now we can query our newly created Xarray dataset: ds # We see that the dataset has multiple variables and coordinates. We would like to plot a map of the absolute dynamic topography (the sea surface height above geoid). 
The variable for this is **'adt'**. # # While an Xarray **dataset** may contain multiple variables, an Xarray **data array** holds a single multi-dimensional variable and its coordinates. To make the processing of the **adt** data easier, we convert it into an Xarray data array. da = ds['adt'] # We can now use the "plot" function of Xarray to create a simple plot of this variable. da.plot() # <hr> # <p><img src='./img/all_partners_wekeo.png' align='left' alt='Logo EU Copernicus' width='100%'></img></p>
climate/WEkEO_climate_training_1_data_access.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### FactRuEval nmt evaluation # + # %reload_ext autoreload # %autoreload 2 # %matplotlib inline import warnings import sys sys.path.append("../") warnings.filterwarnings("ignore") # + import os data_path = "/home/lis/ner/ulmfit/data/factrueval/" train_path = os.path.join(data_path, "train_with_pos.csv") valid_path = os.path.join(data_path, "valid_with_pos.csv") model_dir = " /datadrive/models/multi_cased_L-12_H-768_A-12/" init_checkpoint_pt = os.path.join("/datadrive/models/multi_cased_L-12_H-768_A-12/", "pytorch_model.bin") bert_config_file = os.path.join("/datadrive/bert/multi_cased_L-12_H-768_A-12/", "bert_config.json") vocab_file = os.path.join("/datadrive/bert/multi_cased_L-12_H-768_A-12/", "vocab.txt") # - import torch torch.cuda.set_device(1) torch.cuda.is_available(), torch.cuda.current_device() # ### 1. Create dataloaders from modules import BertNerData as NerData data = NerData.create(train_path, valid_path, vocab_file) # For factrueval we use the following sample of labels: print(data.label2idx) # ### 2. Create model # For creating pytorch model we need to create `NerModel` object. from modules.models.bert_models import BertBiLSTMAttnNMT model = BertBiLSTMAttnNMT.create(len(data.label2idx), bert_config_file, init_checkpoint_pt, enc_hidden_dim=128, dec_hidden_dim=128, dec_embedding_dim=16) model.decoder model.get_n_trainable_params() # ### 3. Create learner # # For training our pytorch model we need to create `NerLearner` object. from modules import NerLearner num_epochs = 100 learner = NerLearner(model, data, best_model_path="/datadrive/models/factrueval/final_attn_cased_nmt.cpt", lr=0.01, clip=1.0, sup_labels=data.id2label[5:], t_total=num_epochs * len(data.train_dl)) # ### 4. 
Learn your NER model # Call `learner.fit` learner.fit(num_epochs, target_metric='prec') # ### 5. Evaluate # Create new data loader from existing path. from modules.data.bert_data import get_bert_data_loader_for_predict dl = get_bert_data_loader_for_predict(data_path + "valid_with_pos.csv", learner) learner.load_model() preds = learner.predict(dl) # IOB precision from modules.train.train import validate_step print(validate_step(learner.data.valid_dl, learner.model, learner.data.id2label, learner.sup_labels)) # Span precision from modules.utils.plot_metrics import get_bert_span_report clf_report = get_bert_span_report(dl, preds) print(clf_report)
examples/factrueval-nmt.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime

# +
import quandl

# Daily OHLCV data for Infosys (NSE) from Quandl; requires network access.
df = quandl.get("NSE/INFY", start_date="2013-04-04")
# -

# Reference close price, used later to express the forecast as a return.
main_date=df.loc['2018-04-04']['Close']

# +
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression

#Visualization
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline

# +
#Moving Average
def MA(df, n):
    """Add a simple moving average of 'Close' to df as column 'SMA_<n>' (in place).

    Parameters
    ----------
    df : pandas.DataFrame with a 'Close' column.
    n  : int, rolling window length in trading days.

    Returns the same (mutated) DataFrame for convenience.
    """
    name = 'SMA_' + str(n)
    # pd.rolling_mean() was removed in pandas 0.23; Series.rolling().mean()
    # is the supported equivalent and produces identical values.
    df[name] = df['Close'].rolling(window=n).mean()
    return df

#Exponential Moving Average
def EMA(df, n):
    """Add an exponential moving average of 'Close' to df as column 'EMA_<n>' (in place).

    Parameters
    ----------
    df : pandas.DataFrame with a 'Close' column.
    n  : int, span of the exponential window.

    Returns the same (mutated) DataFrame for convenience.
    """
    name = 'EMA_' + str(n)
    # pd.ewma() was removed in pandas 0.23; Series.ewm().mean() is the
    # supported equivalent and produces identical values.
    df[name] = df['Close'].ewm(span=n, min_periods=n - 1).mean()
    return df

# +
# Add 30/40/50-day simple and exponential moving averages as model features.
for i in [30,40,50]:
    MA(df,i)
for i in [30,40,50]:
    EMA(df,i)

# +
#Make array of dates
#Last 30 dates will be used for forecasting.
# dates = np.array(df["Date"])
# dates_check = dates[-30:]
# dates = dates[:-30]

# Quandl returns the date as the index, not as a 'Date' column, so take it
# from the index. The final 30 dates are held out for forecasting.
dates = np.array(df.index.values)
print(dates)
dates_check = dates[-30:]
dates = dates[:-30]

# define a new feature, HL_PCT: daily high-low spread as a percentage of the low.
# NOTE: fixed parenthesisation — the original divided by (Low*100), which
# yields the ratio scaled down by 1e4 instead of a percentage.
df['HL_PCT'] = (df['High'] - df['Low'])/df['Low']*100

# define a new feature percentage change: intraday open-to-close move in percent.
df['PCT_CHNG'] = (df['Close'] - df['Open'])/df['Open']*100

df = df[['Close', 'HL_PCT', 'PCT_CHNG', 'Total Trade Quantity','SMA_30', 'SMA_40', 'SMA_50', 'EMA_30', 'EMA_40', 'EMA_50']]

# Leading rows of the moving-average columns are NaN (window warm-up).
df.fillna( value=0, inplace=True)
df.isnull().sum()
# -

forecast_out = int(30)  # predicting 30 days into future

# Label column: the close price 30 trading days ahead.
df['Prediction'] = df[['Close']].shift(-forecast_out)

# Positional axis argument to drop() was removed in pandas 2.0; use columns=.
X = np.array(df.drop(columns=['Prediction']))
X = preprocessing.scale(X)
X_forecast = X[-forecast_out:]  # set X_forecast equal to last 30
X = X[:-forecast_out]  # remove last 30 from X

y = np.array(df['Prediction'])
y = y[:-forecast_out]

# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)

# Training
from sklearn.ensemble import RandomForestRegressor
clf = RandomForestRegressor()
clf.fit(X_train,y_train)

# Testing: R^2 score on the held-out 20%.
confidence = clf.score(X_test, y_test)
print("confidence: ", confidence)

forecast_prediction = clf.predict(X_forecast)
print(forecast_prediction)

#Make the final DataFrame containing Dates, ClosePrices, and Forecast values
actual = pd.DataFrame(dates, columns = ["Date"])
actual.set_index("Date", inplace = True)
actual["ClosePrice"] = df["Close"]
actual
actual["Forecast"] = np.nan

forecast = pd.DataFrame(dates_check, columns=["Date"])
forecast["Forecast"] = forecast_prediction
forecast["ClosePrice"] = np.nan
forecast.set_index("Date", inplace = True)

# Stack the historical closes and the forecast on a single time axis.
result = pd.concat([actual, forecast])  #This is the final DataFrame
result.info()

#Plot the results
result.plot(figsize=(20,10), linewidth=1.5)
plt.legend(loc=2, prop={'size':20})
plt.xlabel('Date')
plt.ylabel('Price')
a=result['ClosePrice'].iloc[-31]
b=result['Forecast'].iloc[-1]

# Predicted 30-day return in percent, relative to the 2018-04-04 close.
ret=((b-main_date)/main_date)*100
ret

sub=pd.read_csv('submission.csv')
sub

sub.iloc[2,2]

# Write the predicted return into the row for this symbol.
# Iterate over however many rows the file has rather than a hard-coded 12.
for i in range(len(sub)):
    # .loc[i, 'Symbol'] replaces the chained indexing sub.loc[i]['Symbol'],
    # which can silently operate on a copy of the row.
    if sub.loc[i, 'Symbol']=='INFY.NS':
        sub.iloc[i,2]="{0:.2f}".format(ret)

sub.to_csv('submission.csv',index=False)
sub
FORECASTING MODELS/real data stock price model/INFY_REAL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Importing Neccesary Libraries and Data import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns VaccineTypes2020 = pd.read_csv("2020VAERSVAX.csv") VaccineData2020 = pd.read_csv("2020VAERSData.csv") VaccineSymptoms2020 = pd.read_csv("2020VAERSSYMPTOMS.csv") VaccineTypes2021 = pd.read_csv("2021VAERSVAX.csv") VaccineData2021 = pd.read_csv("2021VAERSData.csv") VaccineSymptoms2021 = pd.read_csv("2021VAERSSYMPTOMS.csv") TotalVaccinationsOverTime=pd.read_csv("Vaccinations - OWID covid-19-data.csv") # # Merging Data Into Composite DataFrame Vac2020A=pd.merge(VaccineTypes2020,VaccineData2020,how='inner',on='VAERS_ID') Vac2020Complete=pd.merge(Vac2020A,VaccineSymptoms2020,how='inner',on='VAERS_ID') Vac2021A=pd.merge(VaccineTypes2021,VaccineData2021,how='inner',on='VAERS_ID') Vac2021Complete=pd.merge(Vac2021A,VaccineSymptoms2021,how='inner',on='VAERS_ID') COVID2021=Vac2021Complete[Vac2021Complete['VAX_TYPE']=='COVID19'] COVID2020=Vac2020Complete[Vac2020Complete['VAX_TYPE']=='COVID19'] COVIDOverall=pd.concat([COVID2020,COVID2021],axis=0) # # Generating COVID-19 Deaths By Age Graph COVIDOverall['DIED'].value_counts() COVIDDeaths=COVIDOverall[COVIDOverall['DIED']=='Y'] # + fig, ax = plt.subplots(figsize = ( 12 , 4 ),dpi=600) g = sns.histplot(data=COVIDDeaths,x='AGE_YRS',bins=50,kde=True,color='red') plt.title('COVID-19 Vaccine Deaths By Age') # Set label for x-axis ax.set_xlabel( "Age at Vaccination" , size = 12 ) # Set label for y-axis ax.set_ylabel( "Number of Deaths" , size = 12 ) plt.xlim(20,120) fig.savefig('vaccinedeaths.jpg', bbox_inches='tight', dpi=1000) # - # # Estimating Adverse COVID-19 Reaction Ratio len(COVIDOverall[COVIDOverall['DIED']=='Y']) TotalVaccinationsOverTime['people_vaccinated'].max() 
# Adverse-event reports per 1000 vaccinated people: all COVID-19 vaccine
# reports divided by the peak number of people vaccinated.
1000*(len(COVIDOverall)/TotalVaccinationsOverTime['people_vaccinated'].max())

# # Estimating Lethality Ratio Between COVID-19 And Its Vaccine

# Approximate COVID-19 case fatality rate used for the comparison.
# Named once so the magic number 0.021 is not repeated below.
COVID_CFR = 0.021

# VAERS-reported deaths per person vaccinated (an upper-bound proxy, since
# VAERS reports are not confirmed causal).
VaccineDeathRate=len(COVIDOverall[COVIDOverall['DIED']=='Y'])/TotalVaccinationsOverTime['people_vaccinated'].max()

#COVID-19 CFR / VaccineDeathRate
COVID_CFR/VaccineDeathRate

VaccineDeathRate

DeathOdds=pd.DataFrame({'Cause of Death': ['COVID-19 Vaccine', 'COVID-19'],'Odds of Death': [VaccineDeathRate, COVID_CFR]})

plt.figure(figsize=(12,4),dpi=200)
sns.barplot(data=DeathOdds,y='Cause of Death',x='Odds of Death')

DeathOdds
Vaccine Visualizations - Final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ## Analytic Solutions Convergence Tests # # This notebook runs series of simulations across different resolutions to extract error convergence information. Analytic Stokes flow solutions are used as a basis for error estimation. # # + from collections import OrderedDict as OD regress_solns = [ # ("name", {soln_params}, {solver_tolerances}, graph_result ) ("A", OD(), {"itol":1.e-6, "otol":1.e-6}, True ), ("Cx", OD(), {"itol":1.e-9, "otol":1.e-9}, True ), ("Kx", OD(), {"itol":1.e-4, "otol":1.e-4}, True ), ("NL", OD(), {"itol":1.e-7, "otol":1.e-7}, True ), ] import os if "UW_LONGTEST" in os.environ: regress_solns += [ ("B", OD(), {"itol":1.e-5, "otol":1.e-5}, False ), ("C", OD(), {"itol":1.e-5, "otol":1.e-5}, False ), ("DA", OD(), {"itol":1.e-7, "otol":1.e-7}, True ), ("DB2d", OD(), {"itol":1.e-6, "otol":1.e-6}, False ), ("DB3d", OD(), {"itol":1.e-8, "otol":1.e-8}, False ), # ("H", OD([("nmodes",240),]), {"itol":1.e-6, "otol":1.e-6}, True ), # needs parallel for analytic ("Kz", OD(), {"itol":1.e-4, "otol":1.e-4}, False ), ("M", OD(), {"itol":1.e-6, "otol":1.e-6}, False ), ] do_analysis = True graph_all = True two_d_only = False regress_res = [8,16,32] orders = [1,2] if len(regress_res)<2: raise RuntimeError("At least 2 resolutions required for regression analysis.") # + import underworld as uw import glucifer as glucifer from underworld import function as fn import math import numpy as np import collections uw.matplotlib_inline() # - # Find all available solutions. 
# Use ordered dict to preserve alphabetical ordering solns_avail = collections.OrderedDict() for soln_name in dir(fn.analytic): if soln_name[0] == "_": continue # if private member, ignore # get soln class soln = getattr(fn.analytic,soln_name) # check if actually soln if issubclass(soln, fn.analytic._SolBase): print("Solution added: {}".format(soln_name)) solns_avail[soln_name] = soln solns = collections.OrderedDict() for sol in regress_solns: solname = "Sol"+sol[0] solns[solname] = solns_avail[solname.split(',')[0]](**sol[1]) solns[solname].parameters = sol[1] # record this for the table later solns[solname].tolerances = sol[2] # record required tolerances as well solns[solname].graph = True if graph_all else sol[3] # record if should add to graph # + # Create the tools we need def get_numerical( soln, res=32, order=1, itol=1.e-6, otol=1.e-6 ): ''' Creates the numerical system corresponding to the provided analytic system. Parameters ---------- soln : uw.function.analytic._SolBase The analytic system res : int System resolution. Same resolution is used for each axis. order : int System numerical order. itol : float Inner solve tolerance. otol : float Outer solve tolerance. ''' if order == 1: els = "Q1/dQ0" elif order == 2: els = "Q2/dPc1" else: raise ValueError("Provided system order should be 1 or 2.") dim = soln.dim mesh = uw.mesh.FeMesh_Cartesian(elementType=els, elementRes=(res,)*dim,minCoord=(0.,)*dim,maxCoord=(1.,)*dim) vel = uw.mesh.MeshVariable(mesh,dim) press = uw.mesh.MeshVariable(mesh.subMesh, 1) vel.data[:] = (0.,)*dim press.data[:] = 0. 
bcs = soln.get_bcs(vel) visc = soln.fn_viscosity if soln.nonlinear==True: visc = soln.get_viscosity_nl(vel,press) stokes = uw.systems.Stokes(vel, press, fn_viscosity=visc, fn_bodyforce=soln.fn_bodyforce, conditions=[bcs,]) solver = uw.systems.Solver(stokes) if uw.mpi.size==1: solver.set_inner_method("lu") solver.set_inner_rtol(itol) solver.set_outer_rtol(otol) # if nonlinear, lets first grab a const visc approx soln if soln.nonlinear==True: stokes.fn_viscosity = 1. solver.solve() stokes.fn_viscosity = visc return mesh, vel, press, solver def normalise_press(press): intSwarm = uw.swarm.GaussIntegrationSwarm(mesh,3) # use 3 point gauss swarms for efficiency av_press = uw.utils.Integral( press, mesh, integrationSwarm=intSwarm, integrationType=None).evaluate()[0] return press - av_press def rms_error(numeric, analytic, mesh): ''' Calculates the rms error. Returns ------- abs, abs_scaled: float The absolute and scaled absolute errors. ''' delta = analytic - numeric delta_dot = fn.math.dot(delta,delta) analytic_dot = fn.math.dot(analytic,analytic) # l2 norms intSwarm = uw.swarm.GaussIntegrationSwarm(mesh,3) # use 3 point gauss swarms for efficiency rms_err_abs = np.sqrt(uw.utils.Integral( delta_dot, mesh, integrationSwarm=intSwarm, integrationType=None ).evaluate()[0]) rms_sol_ana = np.sqrt(uw.utils.Integral( analytic_dot, mesh, integrationSwarm=intSwarm, integrationType=None ).evaluate()[0]) rms_err_sca = rms_err_abs / rms_sol_ana return rms_err_abs, rms_err_sca # - # ### Now perform convergence rate tests # + velocity_key = "Velocity" pressure_key = "Pressure" resolutions = regress_res dx = np.reciprocal(resolutions,dtype='double') # + soln_results = collections.OrderedDict() for soln_name in solns.keys(): solnguy = solns[soln_name] if (solnguy.dim == 3) and two_d_only: continue for order in orders: if (soln_name,order,velocity_key) in soln_results: err_pre = soln_results[ (soln_name,order,pressure_key) ] err_vel = soln_results[ (soln_name,order,velocity_key) ] else: 
err_pre = collections.OrderedDict() err_vel = collections.OrderedDict() for res in resolutions: # do not rerun existing results if res in err_vel: continue mesh, vel, press, solver = get_numerical( solnguy, res, order=order, **solnguy.tolerances ) if uw.mpi.rank==0: print("Performing simulations for solution: {} {} {}".format(soln_name, order, res), flush=True) if solnguy.nonlinear!=True: solver.solve() else: solver.solve(nonLinearIterate=True,nonLinearTolerance=1.e-7) pressn = normalise_press(press) pressa = normalise_press(solnguy.fn_pressure) err_vel[res] = rms_error( vel, solnguy.fn_velocity, mesh ) err_pre[res] = rms_error( pressn, pressa, mesh ) # lets store these for future comparison err_vel.parameters = solnguy.parameters err_vel.tolerances = solnguy.tolerances soln_results[(soln_name,order,velocity_key)] = err_vel soln_results[(soln_name,order,pressure_key)] = err_pre # - if (uw.mpi.rank==0) and do_analysis: print("Performing analysis.", flush=True) errtype = 1 # {0:"absolute", 1:"scaled absolute"} fitfn = lambda x,a,b: a+b*x def get_linear_fit(x,y): ''' Returns best fit (a,b) for $ln(y)=a+b*ln(x)$ for provided set of points (x,y). ''' import scipy.optimize return scipy.optimize.curve_fit(fitfn, np.log(x), np.log(y)) def get_fit_line(dx, fit): ''' Evaluates fit across a set of points. 
''' dxmin = 0.9*dx.min() dxmax = 1.1*dx.max() xpts = np.linspace(dxmin,dxmax,20) ypts = np.exp(fitfn(np.log(xpts),*fit)) return xpts, ypts import matplotlib.pyplot as plt fig = plt.figure(dpi=200, figsize=(8.27, 11.69/2.)) plt.subplots_adjust(wspace=.0) # create some consistent colours & linestyles from matplotlib.pyplot import cm colours = cm.tab10(np.linspace(0,1,len(solns.keys()))) scheme = {} for it,sol in enumerate(solns.keys()): scheme[(sol,pressure_key)] = (colours[it],'--') scheme[(sol,velocity_key)] = (colours[it],'-') def create_ax(pos, title=None, other_ax=None): ax = plt.subplot(1,2,pos,xscale='log', yscale='log', sharey=other_ax) ax.set_title(title,fontsize=8) ax.invert_xaxis() ax.xaxis.set_ticks(dx) ax.xaxis.set_ticklabels(["$ {{ {} }}^{{-1}}$".format(x) for x in resolutions]) ax.grid(axis="y", which="both",linestyle=':',linewidth=0.25) ax.tick_params(axis='both', which='major', labelsize=8) # ax.set_xlabel("dx", fontsize=8) if not other_ax: ax.set_ylabel("error", fontsize=8) # disable minor ticks marks on axis for tic in ax.xaxis.get_minor_ticks() + ax.yaxis.get_minor_ticks(): tic.tick1On = tic.tick2On = False tic.label1On = tic.label2On = False for tic in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks(): tic.label.set_fontsize(6) # disable tick marks on rhs of other axis if other_ax: for tic in ax.yaxis.get_major_ticks(): tic.tick1On = tic.tick2On = False tic.label1On = tic.label2On = False return ax axes = {} axes[1] = create_ax(1, title="Q1/dQ0") axes[2] = create_ax(2, title="Q2/dPc1", other_ax=axes[1] ) # get fit results now so we can set plot labels fits = {} errs = {} for key, err in soln_results.items(): soln_name = key[0] if (soln_name not in solns): #or (solns[soln_name].graph==False): continue fits[key], errs[key] = get_linear_fit(np.reciprocal(list(err.keys()),dtype='double'), [errval[errtype] for errval in err.values()] ) # keep set of lines for legend lines = collections.OrderedDict() for key, err in sorted(soln_results.items(), 
key=lambda x: x[0][0]): soln_name = key[0] if (soln_name not in solns) or (solns[soln_name].graph==False): continue order = key[1] velpres = key[2] ax = axes[order] fit = fits[key] fitdata = get_fit_line(np.reciprocal(list(err.keys()),dtype='double'),fit) expected_order = order if (key[2]==pressure_key) else order+1 if not np.isclose(fit[1],expected_order,rtol=1.5e-1): raise RuntimeError("Rejecting {} fit = {}, expected = {}.".format(soln_name,fit[1],expected_order)) col,ls = scheme[(soln_name,velpres)] line = ax.plot(*fitdata, linewidth=1., color=col, linestyle=ls) if velpres == velocity_key: lines[soln_name] = line ax.plot(np.reciprocal(list(err.keys()),dtype='double'), [errval[errtype] for errval in err.values()], 'o', markersize=1., color='black') lbls = [] lns = [] for soln_name in lines.keys(): vel_1 = fits[(soln_name,1,velocity_key)][1] pre_1 = fits[(soln_name,1,pressure_key)][1] vel_2 = fits[(soln_name,2,velocity_key)][1] pre_2 = fits[(soln_name,2,pressure_key)][1] lbls.append("{} ({: .2f},{: .2f}), ({: .2f},{: .2f})".format(soln_name[3:].ljust(4), vel_1, pre_1, vel_2, pre_2)) lns.append(lines[soln_name][0]) leg = fig.legend( lns, lbls, loc = (0.15, 0.15), prop={'family': 'monospace', 'size':6}) leg.set_title("Q1 dQ0 Q2 dPc1 ", {'family': 'monospace', 'size':6 }) leg._legend_box.align = "right" #fig.savefig("Analytic_Convergence_Graph.png")
docs/test/Analytic Soln Convergence Tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Natural parameterized quantum circuit for multi-parameter sensing # "Natural parameterized quantum circuit" by <NAME>, <NAME> # # The Natural parameterized quantum circuit is a parameterized quantum circuit which has euclidean quantum geometry. That means that the quantum Fisher information metric is the identity for a particular parameter set, which we call the reference parameter. This NPQC is very useful for various applications. # - Training variational quantum algorithms # - Multi-parameter quantum sensing # - Preparation of superposition states # # Here, we study multi-parameter sensing using the NPQC. The goal is determine the unknown parameters of the NPQC by measuring the quantum state. We can sense many parameters at the same time by sampling in the computational basis. 
# # The implementation is based on qutip # # @author: <NAME>, github txhaug # Imperial College London # # + import qutip as qt from functools import partial import operator from functools import reduce import numpy as np import scipy import matplotlib.pyplot as plt # - # Set parameters for NPQC here # + n_qubits=6 #number qubits depth=6 #number of layers, is the number of layers of parameterized single qubit rotations type_circuit=1##0: natural parameterized quantum circuit (NPQC), 1: natural parameterized quantum circuit with y rotations only for sensing initial_angles=1 ##0: random angles 1: reference parameters \theta_r that has QFIM =I distance_parameters_estimation=0.4 # norm of parameters to be estimated random_seed=1#seed of random generator n_samples=10**7 ##number of measurements for sensing # + def prod(factors): return reduce(operator.mul, factors, 1) def flatten(l): return [item for sublist in l for item in sublist] #tensors operators together def genFockOp(op,position,size,levels=2,opdim=0): opList=[qt.qeye(levels) for x in range(size-opdim)] opList[position]=op return qt.tensor(opList) #construct from parameter 1D list a 2D array with [depth,n_qubits], ignore unused rotations where paulis2d=0 def construct_2d_parameters(angles,paulis2d,extraangles=0): depth,n_qubits=np.shape(paulis2d) angles2d=np.zeros([depth,n_qubits]) counter=0 for i in range(depth): for j in range(n_qubits): if(paulis2d[i,j]>0): #only take parameters where paulis is greater 0, indicating they are variable parameters angles2d[i,j]=angles[counter] counter+=1 if(extraangles==0): return angles2d else: return angles2d,angles[counter:] #take parameters as a 2D array with [depth,n_qubits] to do 1D list, ignore unused rotations where paulis2d=0 def construct_1d_parameters(angles2d,paulis2d): depth,n_qubits=np.shape(paulis2d) angles1d=[] for i in range(depth): for j in range(n_qubits): if(paulis2d[i,j]>0): #only take parameters where paulis is greater 0, indicating they are variable 
parameters angles1d.append(angles2d[i,j]) return np.array(angles1d) # + if(n_qubits%2==1): raise NameError("Only even number of qubits allowed") #random generator used rng = np.random.default_rng(random_seed) #define angles for circuit ini_angles=np.zeros([depth,n_qubits]) if(initial_angles==0): ini_angles=rng.random([depth,n_qubits])*2*np.pi elif(initial_angles==1): #choose angles as \theta_r as defined in paper ini_angles[1:depth:2,:]=0 ini_angles[0:depth:2,:]=np.pi/2 #note that not all angles are actually used, the ones where ini_pauli=0 are ignored #define rotations for circuit in each layer, 0: identity, 1: X, 2:Y 3:Z ini_pauli=np.zeros([depth,n_qubits],dtype=int) ##set initial layer of pauli rotations if(type_circuit==0):#NPQC #set first and second layer, rest comes later ini_pauli[0,:]=2 #y rotation if(depth>1): ini_pauli[1,:]=3 #z rotation elif(type_circuit==1): #NPQC with y rotations only for sensing #set first and second layer, rest comes later ini_pauli[0,0:n_qubits:2]=2 #y rotation ini_pauli[0,1:n_qubits:2]=-22 #fix y pi/2 rotation on odd qubit index ##define entangling layers and add more pauli rotations if(type_circuit==0 or type_circuit==1): #construct natural parameterized circuit entangling_gate_index_list=[[] for i in range(depth)] ##stores where entangling gates are placed orderList=[] for i in range(n_qubits//2): if(i%2==0): orderList.append(i//2) else: orderList.append((n_qubits-i)//2) if(n_qubits>1): shiftList=[orderList[0]] else: shiftList=[] for i in range(1,n_qubits//2): shiftList.append(orderList[i]) shiftList+=shiftList[:-1] #this list gives which entangling gates are applied in which layer for j in range(min(len(shiftList),int(np.ceil(depth/2))-1)): entangling_gate_index_list[1+2*j]=[[2*i,(2*i+1+2*shiftList[j])%n_qubits,3] for i in range(n_qubits//2)] #this is the 2 qubit entangling operation, it is a pi/2 y rotation on first qubit with CPHASE gate 
U_entangling=qt.qip.operations.csign(2,0,1)*qt.tensor(qt.qip.operations.ry(np.pi/2),qt.qeye(2)) for i in range(len(entangling_gate_index_list)-1): if(len(entangling_gate_index_list[i])>0): for j in range(len(entangling_gate_index_list[i])): ini_pauli[i+1,entangling_gate_index_list[i][j][0]]=2 if(i+2<depth and type_circuit==0):##add z rotations, but not for sensing NPQC ini_pauli[i+2,entangling_gate_index_list[i][j][0]]=3 #operators for circuit levels=2# opZ=[genFockOp(qt.sigmaz(),i,n_qubits,levels) for i in range(n_qubits)] opX=[genFockOp(qt.sigmax(),i,n_qubits,levels) for i in range(n_qubits)] opY=[genFockOp(qt.sigmay(),i,n_qubits,levels) for i in range(n_qubits)] opId=genFockOp(qt.qeye(levels),0,n_qubits) opZero=opId*0 zero_state=qt.tensor([qt.basis(levels,0) for i in range(n_qubits)]) #construct unitaries for entangling layer all_entangling_layers=[] for ind in range(len(entangling_gate_index_list)): if(type_circuit==0 or type_circuit==1): entangling_gate_index=entangling_gate_index_list[ind] if(len(entangling_gate_index)==0): entangling_layer=opId else: entangling_layer=prod([qt.qip.operations.gate_expand_2toN(U_entangling,n_qubits,j,k) for j,k,n in entangling_gate_index[::-1]]) all_entangling_layers.append(entangling_layer) #calculate number of parameters n_parameters=len(construct_1d_parameters(ini_angles,ini_pauli)) ##check which paulis at what depth and qubit is identitity or not parameter_where=np.zeros([n_parameters,2],dtype=int) counter=0 for i in range(depth): for j in range(n_qubits): if(ini_pauli[i,j]>0): #count only paulis with entry greater zero, indicating its a parameter parameter_where[counter]=[i,j] counter+=1 #save single qubit rotations unitary with fixed ini_angles. 
Use them later for the adjoint circuit needed for sensing save_initial_rot_op=[] for j in range(depth): rot_op=[] for k in range(n_qubits): angle=ini_angles[j][k] type_pauli=ini_pauli[j][k] if(type_pauli==1): rot_op.append(qt.qip.operations.rx(angle)) elif(type_pauli==2): rot_op.append(qt.qip.operations.ry(angle)) elif(type_pauli==3): rot_op.append(qt.qip.operations.rz(angle)) elif(type_pauli==0): rot_op.append(qt.qeye(2)) elif(type_pauli==-22): #fixed rotation around y axis rot_op.append(qt.qip.operations.ry(np.pi/2)) save_initial_rot_op.append(qt.tensor(rot_op)) ##H=opZ[0]*opZ[1] #local Hamiltonian to calculate energy and gradient from print("Number of parameters of PQC",n_parameters) # - ##calc_mode #0: calc all gradients 1: calc frame potential only 2: calc both, 3: only get gradient ##can apply adjoint unitary with fixed angles "add_adjoint_unitary" for sensing def do_calc(input_angles,input_paulis,get_gradients=True,add_adjoint_unitary=False): initial_state_save=qt.tensor([qt.basis(levels,0) for i in range(n_qubits)]) #save here quantum state of gradient for qfi grad_state_list=[] #list of values of gradient gradient_list=np.zeros(n_parameters) save_rot_op=[] #save single-qubit rotations here so we can reuse them for j in range(depth): rot_op=[] for k in range(n_qubits): angle=input_angles[j][k] type_pauli=input_paulis[j][k] if(type_pauli==1): rot_op.append(qt.qip.operations.rx(angle)) elif(type_pauli==2): rot_op.append(qt.qip.operations.ry(angle)) elif(type_pauli==3): rot_op.append(qt.qip.operations.rz(angle)) elif(type_pauli==0): rot_op.append(qt.qeye(2)) elif(type_pauli==-22): rot_op.append(qt.qip.operations.ry(np.pi/2)) save_rot_op.append(qt.tensor(rot_op)) #p goes from -1 to n_parameters-1. 
-1 is to calculate quantum state, rest for gradient if(get_gradients==True): #calculate gradients by doing n_parameters+1 calcuations n_p=n_parameters else: #without gradient, need only one calculation n_p=0 for p in range(-1,n_p): initial_state=qt.Qobj(initial_state_save) for j in range(depth): apply_rot_op=save_rot_op[j] #for p>=0, we are calculating gradients. Here, we need to add derivative of repsective parameter if(p!=-1 and j==parameter_where[p][0]): which_qubit=parameter_where[p][1] type_pauli=input_paulis[j][which_qubit] if(type_pauli==1): apply_rot_op=apply_rot_op*(-1j*opX[which_qubit]/2) elif(type_pauli==2): apply_rot_op=apply_rot_op*(-1j*opY[which_qubit]/2) elif(type_pauli==3): apply_rot_op=apply_rot_op*(-1j*opZ[which_qubit]/2) #apply single qubit rotations initial_state=apply_rot_op*initial_state #apply entangling layer initial_state=all_entangling_layers[j]*initial_state #after constructing the circuit, apply inverse with parameters fixed to ini_angles if(add_adjoint_unitary==True):#apply inverse of circuit for sensing for j in np.arange(depth)[::-1]: initial_state=all_entangling_layers[j].dag()*initial_state initial_state=save_initial_rot_op[j].dag()*initial_state if(p==-1): #calculate loss circuit_state=qt.Qobj(initial_state)#state generated by circuit if(loss_hamiltonian==True): #loss is hamiltonian loss=qt.expect(H,circuit_state) else: #loss is infidelity with target state H_state loss=1-np.abs(circuit_state.overlap(H_state))**2 else: #calculate gradient grad_state_list.append(qt.Qobj(initial_state))#state with gradient applied for p-th parameter if(loss_hamiltonian==True): gradient_list[p]=2*np.real(circuit_state.overlap(H*initial_state)) else: gradient_list[p]=2*np.real(circuit_state.overlap(initial_state)-circuit_state.overlap(H_state)*H_state.overlap(initial_state)) return circuit_state,grad_state_list,loss,gradient_list # + #construct parameters of state to be estimated loss_hamiltonian=False #loss is inifidelity 1-F #we shift parameterized 
quantum circuit from initial parameters by a fixed distance. #we know approximatly what distance corresponds to what fidelity #get random normalized parameter vector random_vector_opt_normed=(2*rng.random(np.shape(ini_pauli))-1)*(ini_pauli>0) random_vector_opt_normed=construct_1d_parameters(random_vector_opt_normed,ini_pauli) random_vector_opt_normed=random_vector_opt_normed/np.sqrt(np.sum(np.abs(random_vector_opt_normed)**2)) random_vector_opt_normed=construct_2d_parameters(random_vector_opt_normed,ini_pauli) #shift parameters by the following distance,. We use resulting state for estimation factor_rand_vector=distance_parameters_estimation #construct parameter of state to be learned target_angles=ini_angles+random_vector_opt_normed*factor_rand_vector H_state=zero_state #set so do_calc runs properly # + #quantum fisher information metric #calculated as \text{Re}(\braket{\partial_i \psi}{\partial_j \psi}-\braket{\partial_i \psi}{\psi}\braket{\psi}{\partial_j \psi}) ##get gradients for quantum state circuit_state,grad_state_list,energy,gradient_list=do_calc(ini_angles,ini_pauli,get_gradients=True) #first, calculate elements \braket{\psi}{\partial_j \psi}) single_qfi_elements=np.zeros(n_parameters,dtype=np.complex128) for p in range(n_parameters): #print(circuit_state.overlap(grad_state_list[p])) single_qfi_elements[p]=circuit_state.overlap(grad_state_list[p]) #calculcate the qfi matrix qfi_matrix=np.zeros([n_parameters,n_parameters]) for p in range(n_parameters): for q in range(p,n_parameters): qfi_matrix[p,q]=np.real(grad_state_list[p].overlap(grad_state_list[q])-np.conjugate(single_qfi_elements[p])*single_qfi_elements[q]) #use fact that qfi matrix is real and hermitian for p in range(n_parameters): for q in range(p+1,n_parameters): qfi_matrix[q,p]=qfi_matrix[p,q] # - ##plot the quantum Fisher information metric (QFIM) #should be a diagonal with zero off-diagonal entries for initial_angles=1 plt.imshow(qfi_matrix) # + if(type_circuit==1): #NPQC with y rotations 
only for sensing hilbertspace=2**n_qubits ##get reference state and gradients to determine which parameter belongs to which computational state circuit_state_reuse,grad_state_list_reuse,_,gradient_list=do_calc(ini_angles,ini_pauli,get_gradients=True,add_adjoint_unitary=True) ##first, figure out which parameter changes which computational basis state parameter_which_state=np.zeros(n_parameters,dtype=int) #tells us which state belongs to which parameter state_which_parameter=np.ones(hilbertspace,dtype=int)*-1 for i in range(n_parameters): grad_abs=np.abs(grad_state_list_reuse[i].data.toarray()[:,0])**2 index=(np.arange(hilbertspace)[grad_abs>10**-14]) if(len(index)!=1): raise NameError("More than one direction!") else: parameter_which_state[i]=index[0] state_which_parameter[index[0]]=i #check if a computational basis states belongs to more than one parameter if(len(np.unique(parameter_which_state))!=len(parameter_which_state)): raise NameError("Double occupations of computational states for sensing!") #get difference between target angles and reference angles. We now want to estimate this from measurements! 
exact_sensing_parameters=construct_1d_parameters(target_angles-ini_angles,ini_pauli) norm_sensing_parameters=np.sqrt(np.sum(np.abs(exact_sensing_parameters)**2)) print("Norm of parameters to be sensed",norm_sensing_parameters) ##get state that we use for sensing and want to know its parameters target_state,_,energy,_=do_calc(target_angles,ini_pauli,get_gradients=False,add_adjoint_unitary=True) #sample from target state, then identify parameters probs=np.abs(target_state.data.toarray()[:,0])**2 print("Probability zero state",probs[0]) #get exact probability term assoicate with each parameter prob_parameters=np.zeros(n_parameters) for i in range(n_parameters): prob_parameters[i]=probs[parameter_which_state[i]] #now sample probabilities to simulate measurements with finite number of measurements ##get sampled probabilities for each sensing parameter sampled_probs=np.zeros(n_parameters) sample_index = np.random.choice(hilbertspace,n_samples,p=probs) for k in range(n_samples): index_parameter=state_which_parameter[sample_index[k]] if(index_parameter>=0): sampled_probs[index_parameter]+=1 sampled_probs/=n_samples ##parameters we estimated by sampling state sampled_estimation_parameters=2*np.sqrt(sampled_probs) MSE_bound=n_parameters/n_samples ##parameters as estimated by our protocol for infinite number of shots infinite_shots_estimation_parameters=2*np.sqrt(prob_parameters) ##error for infinite sampling MSE_infinite=np.mean(np.abs(infinite_shots_estimation_parameters-np.abs(exact_sensing_parameters))**2) rel_RMSE_error_infinite=np.sqrt(MSE_infinite)/np.mean(np.abs(exact_sensing_parameters)) MSE_sampled=np.mean(np.abs(sampled_estimation_parameters-np.abs(exact_sensing_parameters))**2) rel_RMSE_error_sampled=np.sqrt(MSE_sampled)/np.mean(np.abs(exact_sensing_parameters)) #MSE_sampled=np.mean(np.abs(sampled_estimation_parameters-np.abs(infinite_shots_estimation_parameters))**2) print("Sensing",n_parameters,"parameters with",n_samples) print("Mean-square error of infinite 
samples",MSE_infinite) print("MSE of infinite samples relative to exact norm of exact parameters",rel_RMSE_error_infinite) print("Mean-square error of finite samples",MSE_sampled) print("MSE sampled with finite shots relative to norm of exact parameters",rel_RMSE_error_sampled) # -
Natural_PQC_sensing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tune GPT2 to generate positive reviews
# > Optimise GPT2 to produce positive IMDB movie reviews using a BERT sentiment classifier for rewards.

# <div style="text-align: center">
# <img src='images/gpt2_bert_training.png' width='600'>
# <p style="text-align: center;"> <b>Figure:</b> Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face. </p>
# </div>
#
#
# In this notebook we fine-tune GPT2 (small) to generate positive movie reviews based on the IMDB dataset. The model gets 5 tokens from a real review and is tasked to produce positive continuations. To reward positive continuations we use a BERT classifier to analyse the sentiment of the produced sentences and use the classifier's outputs as reward signals for PPO training.

# ## Setup experiment

# ### Import dependencies

# %load_ext autoreload
# %autoreload 2

# +
import torch
import wandb
import time
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
tqdm.pandas()  # registers DataFrame.progress_apply (tqdm progress bar for apply)

from transformers import GPT2Tokenizer
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from trl.gpt2 import GPT2HeadWithValueModel, respond_to_batch
from trl.ppo import PPOTrainer
from trl.core import build_bert_batch_from_txt
# -

# ### Configuration

# PPO/model hyperparameters. "steps" is the total number of rollout samples;
# the loop below runs ceil(steps / batch_size) epochs.
config = {
    "lm_name": "lvwerra/gpt2-imdb",          # policy model to be optimised
    "ref_lm_name": "lvwerra/gpt2-imdb",      # frozen reference model for the KL penalty
    "cls_model_name": "lvwerra/bert-imdb",   # sentiment classifier providing rewards
    "tk_name": "gpt2",
    "steps": 25600,
    "batch_size": 256,
    "forward_batch_size": 16,                # sub-batch size for generation / sentiment passes
    "ppo_epochs": 4,
    "txt_in_len": 5,                         # query length in tokens
    "txt_out_len": 15,                       # response length in tokens
    "lr": 1.41e-5,
    "init_kl_coef": 0.2,
    "target": 6,
    "horizon": 10000,
    "gamma": 1,
    "lam": 0.95,
    "cliprange": .2,
    "cliprange_value": .2,
    "vf_coef": .1,
}

# You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the huggingface [script](https://github.com/huggingface/transformers/blob/master/examples/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper ["Fine-Tuning Language Models from Human Preferences"](
# https://arxiv.org/pdf/1909.08593.pdf). This model as well as the BERT model is available in the Huggingface model zoo [here](https://huggingface.co/models). The following code should automatically download the models.

# ### Initialize W&B logger
# We use `wandb` to log all the metrics during training.

wandb.init(name='run-42', project='gpt2-sentiment', config=config)

# ## Load data and models

# ### Load IMDB dataset
# The IMDB dataset contains 50k movie reviews annotated with "positive"/"negative" feedback indicating the sentiment. It can be downloaded from Kaggle ([link](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews)). We load the IMDB dataset into a DataFrame and filter for comments that are at least 500 characters long and take the first 1000 characters of each comment. The first filter we apply to avoid comments that are less than `txt_in_len` token long and the second to avoid tokenizing way more text than we actually need.

# +
# makes sure you download the imdb-dataset in the data folder
df = pd.read_csv('../data/imdb-dataset.csv')

# make sure the comments are long enough
df = df.loc[df['review'].str.len() > 500]

# make sure comments are not too long
df['review'] = df['review'].apply(lambda x: x[:1000])

df.head()
# -

# ### Load BERT classifier
# We load a BERT classifier fine-tuned on the IMDB dataset.

sentiment_model = AutoModelForSequenceClassification.from_pretrained(config["cls_model_name"])
sentiment_tokenizer = AutoTokenizer.from_pretrained(config["cls_model_name"])

# The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model.

text = 'this movie was really bad!!'
output = sentiment_model.forward(sentiment_tokenizer.encode(text, return_tensors="pt"))
output

text = 'this movie was really good!!'
output = sentiment_model.forward(sentiment_tokenizer.encode(text, return_tensors="pt"))
output

# The resulting reward signal:

# logit of the positive class for the first (only) sequence in the batch
output[0][0, 1]

# ### Load pre-trained GPT2 language models
# We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model.

gpt2_model = GPT2HeadWithValueModel.from_pretrained(config['lm_name'])
gpt2_model_ref = GPT2HeadWithValueModel.from_pretrained(config['ref_lm_name'])
gpt2_tokenizer = GPT2Tokenizer.from_pretrained(config['tk_name'])

# ### Watch model with wandb
# This wandb magic logs the gradients and weights of the model during training.

wandb.watch(gpt2_model, log='all')

# ### Move models to GPU
# If `cuda` is available move the computations to the GPU.

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_ = gpt2_model.to(device)
_ = sentiment_model.to(device)
_ = gpt2_model_ref.to(device)

# ### Tokenize IMDB reviews
# We tokenize all IMDB in advance to avoid tokenizing twice. In the first step we encode the queries and slice the first `txt_in_len` tokens. In a second step we decode these tokens back to text for later display.

df['tokens'] = df['review'].progress_apply(lambda x: gpt2_tokenizer.encode(x, return_tensors="pt").to(device)[0, :config['txt_in_len']])
df['query'] = df['tokens'].progress_apply(lambda x: gpt2_tokenizer.decode(x))

# ## Optimize model

# **Steps**
#
# The training loop consists of the following steps:
# 1. Get a batch of queries
# 2. Get the query responses from the policy
# 3. Join query and responses and tokenize for BERT analysis
# 4. Get sentiments for query/responses from BERT
# 5. Optimize policy with PPO using the (query, response, reward) triplet
# 6. Log all the training statistics
#
# **Forward batching**
#
# Since the models can be fairly big and we want to rollout large PPO batches this can lead to out-of-memory errors when doing the forward passes for text generation and sentiment analysis. We introduce the parameter `forward_batch_size` to split the forward passes into smaller batches. Although this hurts performance a little this is negligible compared to the computations of the backward passes when optimizing the model. The same parameter is used in the `PPOTrainer` when doing forward passes. The `batch_size` should be a multiple of `forward_batch_size`.
#
# **Training time**
#
# This step takes **~2h** on a P6000 GPU with the above specified settings.

# +
ppo_trainer = PPOTrainer(gpt2_model, gpt2_model_ref, **config)
fbs = config['forward_batch_size']
# NOTE: int(batch_size/fbs) truncates — assumes batch_size is an exact multiple
# of forward_batch_size (stated in the markdown above), otherwise samples are dropped.

for epoch in tqdm(range(int(np.ceil(config["steps"]/config['batch_size'])))):
    torch.cuda.empty_cache()
    logs = dict()
    game_data = dict()
    timing = dict()
    t0 = time.time()

    #### get a batch from the dataset
    df_batch = df.sample(config['batch_size'])
    game_data['query'] = df_batch['query'].tolist()
    query_tensors = torch.stack(df_batch['tokens'].tolist())

    #### get response from gpt2 (generated in forward_batch_size chunks to limit memory)
    t = time.time()
    total_length = config['txt_in_len']+config['txt_out_len']
    response_tensors = []
    for i in range(int(config['batch_size']/fbs)):
        response = respond_to_batch(gpt2_model, query_tensors[i*fbs:(i+1)*fbs],
                                    txt_len=config['txt_out_len'])
        response_tensors.append(response)
    response_tensors = torch.cat(response_tensors)
    game_data['response'] = [gpt2_tokenizer.decode(response_tensors[i, :]) for i in range(config['batch_size'])]
    timing['time/get_response'] = time.time()-t

    #### tokenize text for sentiment analysis
    t = time.time()
    texts = [q + r for q, r in zip(game_data['query'], game_data['response'])]
    sentiment_inputs, attention_masks = build_bert_batch_from_txt(texts, sentiment_tokenizer, device)
    timing['time/build_input_sentiment'] = time.time()-t

    #### get sentiment score (positive-class logit) as the reward
    t = time.time()
    rewards = []
    for i in range(int(config['batch_size']/fbs)):
        res = sentiment_model.forward(sentiment_inputs[i*fbs:(i+1)*fbs],
                                      attention_masks[i*fbs:(i+1)*fbs])[0][:, 1].detach()
        rewards.append(res)
    rewards = torch.cat(rewards)
    timing['time/get_sentiment_preds'] = time.time()-t

    #### Run PPO training
    t = time.time()
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
    timing['time/optimization'] = time.time()-t

    #### Log everything
    timing['time/epoch'] = time.time()-t0
    table_rows = [list(r) for r in zip(game_data['query'], game_data['response'], rewards.cpu().tolist())]
    logs.update({'game_log': wandb.Table(
        columns=['query', 'response', 'reward'],
        rows=table_rows)})
    logs.update(timing)
    logs.update(stats)
    logs['env/reward_mean'] = torch.mean(rewards).cpu().numpy()
    logs['env/reward_std'] = torch.std(rewards).cpu().numpy()
    logs['env/reward_dist'] = rewards.cpu().numpy()
    wandb.log(logs)
# -

# ### Training progress
# If you are tracking the training progress with Weights&Biases you should see a plot similar to the one below. Check out the interactive sample report on wandb.ai: [link](https://app.wandb.ai/lvwerra/trl-showcase/runs/1jtvxb1m/).
#
# <div style="text-align: center">
# <img src='images/gpt2_tuning_progress.png' width='800'>
# <p style="text-align: center;"> <b>Figure:</b> Reward mean and distribution evolution during training. </p>
# </div>
#
# One can observe how the model starts to generate more positive outputs after a few optimisation steps.
#
# > Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher initial coefficient.

# ## Model inspection
# Let's inspect some examples from the IMDB dataset. We can use `gpt2_model_ref` to compare the tuned model `gpt2_model` against the model before optimisation.

# +
#### get a batch from the dataset
bs = 16
game_data = dict()
df_batch = df.sample(bs)
game_data['query'] = df_batch['query'].tolist()
query_tensors = torch.stack(df_batch['tokens'].tolist())

#### get response from gpt2 and gpt2_ref
total_length = config['txt_in_len']+config['txt_out_len']
response_tensors_ref = respond_to_batch(gpt2_model_ref, query_tensors, txt_len=config['txt_out_len'])
game_data['response (before)'] = [gpt2_tokenizer.decode(response_tensors_ref[i, :]) for i in range(bs)]

response_tensors = respond_to_batch(gpt2_model, query_tensors, txt_len=config['txt_out_len'])
game_data['response (after)'] = [gpt2_tokenizer.decode(response_tensors[i, :]) for i in range(bs)]

#### sentiment analysis of query/response pairs before/after
texts = [q + r for q, r in zip(game_data['query'], game_data['response (before)'])]
sentiment_inputs, attention_masks = build_bert_batch_from_txt(texts, sentiment_tokenizer, device)
rewards = sentiment_model.forward(sentiment_inputs, attention_masks)[0][:, 1].detach()
game_data['rewards (before)'] = rewards.cpu().numpy()

texts = [q + r for q, r in zip(game_data['query'], game_data['response (after)'])]
sentiment_inputs, attention_masks = build_bert_batch_from_txt(texts, sentiment_tokenizer, device)
rewards = sentiment_model.forward(sentiment_inputs, attention_masks)[0][:, 1].detach()
game_data['rewards (after)'] = rewards.cpu().numpy()

# store results in a dataframe
df_results = pd.DataFrame(game_data)
df_results
# -

# Looking at the reward mean/median of the generated sequences we observe a significant difference.

print('mean:')
display(df_results.mean())
print()
print('median:')
display(df_results.median())

# ## Save model
# Finally, we save the model to disk for later usage.

# NOTE(review): os.makedirs raises FileExistsError on a re-run since the
# directory already exists (no exist_ok=True) — confirm intended.
os.makedirs('gpt2-imdb-pos')
gpt2_model.save_pretrained('gpt2-imdb-pos')
gpt2_tokenizer.save_pretrained('gpt2-imdb-pos')
nbs/04-gpt2-sentiment-ppo-training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Trying out different learning rate schedules
#
# Using the default network configuration, the goal is to get to the reference valid loss as quickly as possible.
#
# 1. Typical learning rate decay
# 2. One cycle LR

# + jupyter={"outputs_hidden": true}
# %load_ext autoreload
# %autoreload 2

# + jupyter={"outputs_hidden": true}
from src.score import *
from src.utils import *
from src.networks import *
from src.data_generator import *
import matplotlib.pyplot as plt
# -

import seaborn as sns
sns.set_style('whitegrid')

# + jupyter={"outputs_hidden": true}
# pin to GPU 0; limit_mem presumably caps GPU memory growth — defined in src.*
os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
limit_mem()

# + jupyter={"outputs_hidden": true}
# Experiment configuration: Z500 + T850 inputs, 72 h lead time, 5-layer CNN.
datadir = '/data/stephan/WeatherBench/5.625deg/'
var_dict = {'geopotential': ('z', [500]), 'temperature': ('t', [850])}
output_vars = None
filters = [64, 64, 64, 64, 2]
kernels = [5, 5, 5, 5, 5]
lead_time = 72
batch_size = 128
activation = 'elu'

# + jupyter={"outputs_hidden": true}
ds = xr.merge([xr.open_mfdataset(f'{datadir}/{var}/*.nc', combine='by_coords') for var in var_dict.keys()])

# + jupyter={"outputs_hidden": true}
# ds_train = ds.sel(time=slice('1979', '2015'))
ds_train = ds.sel(time=slice('2000', '2015'))
ds_valid = ds.sel(time=slice('2016', '2016'))
ds_test = ds.sel(time=slice('2017', '2018'))

# + jupyter={"outputs_hidden": true}
# scratch arithmetic cell (sample count / subsample), no effect on state
324336 /100000

# + jupyter={"outputs_hidden": true}
# validation/test generators reuse the training normalisation statistics
dg_train = DataGenerator(ds_train, var_dict, lead_time, batch_size=batch_size,
                         output_vars=output_vars, norm_subsample=10000)
dg_valid = DataGenerator(ds_valid, var_dict, lead_time, batch_size=batch_size,
                         mean=dg_train.mean, std=dg_train.std, shuffle=False, output_vars=output_vars)
dg_test = DataGenerator(ds_test, var_dict, lead_time, batch_size=batch_size,
                        mean=dg_train.mean, std=dg_train.std, shuffle=False, output_vars=output_vars)
print(f'Mean = {dg_train.mean}; Std = {dg_train.std}')

# + jupyter={"outputs_hidden": true}
dg_train.mean, dg_train.std
# -

# ## Learning rate decay

# + Collapsed="false"
class LRUpdate(object):
    """Step-decay schedule for keras.callbacks.LearningRateScheduler.

    Returns init_lr * (1/divide) ** floor(epoch / step), i.e. the learning
    rate is divided by `divide` every `step` epochs.
    """
    def __init__(self, init_lr, step, divide):
        # From goo.gl/GXQaK6
        self.init_lr = init_lr  # learning rate for epoch 0
        self.step = step        # epochs between decays
        self.drop = 1. / divide  # multiplicative decay factor

    def __call__(self, epoch):
        lr = self.init_lr * np.power(self.drop, np.floor((epoch) / self.step))
        print(f'Learning rate = {lr}')
        return lr


# + Collapsed="false"
lrs = LRUpdate(1e-3, 2, 5)

# + Collapsed="false"
# visual sanity check of the schedule over 10 epochs
plt.plot(lrs(np.arange(10)))
plt.yscale('log')

# + Collapsed="false"
model = build_cnn(filters, kernels, input_shape=(32, 64, len(dg_train.data.level)), activation=activation)
model.compile(keras.optimizers.Adam(), 'mse')

# + Collapsed="false"
model.summary()

# + Collapsed="false"
model.fit_generator(dg_train, epochs=100, validation_data=dg_valid,
                    callbacks=[keras.callbacks.LearningRateScheduler(lrs)])
# -

# ## One Cycle

# + jupyter={"outputs_hidden": true}
from src.clr import LRFinder
import seaborn as sns
sns.set_style('darkgrid')

# + jupyter={"outputs_hidden": true}
dg_train.n_samples, batch_size

# + jupyter={"outputs_hidden": true}
# LR range test: sweep the learning rate exponentially over one epoch
lrf = LRFinder(
    dg_train.n_samples,
    batch_size,
    minimum_lr=1e-4,
    maximum_lr=10,
    lr_scale='exp',
    save_dir='./', verbose=0)

# + jupyter={"outputs_hidden": true}
model = build_cnn(filters, kernels, input_shape=(32, 64, len(dg_train.data.level)), activation=activation)
model.compile(keras.optimizers.Adam(), 'mse')

# + jupyter={"outputs_hidden": "false"}
model.fit_generator(dg_train, epochs=1, callbacks=[lrf])

# + jupyter={"outputs_hidden": true}
# lrf.lrs are log10 values, hence the 10** — loss vs learning rate
plt.plot(10**lrf.lrs, lrf.losses)
# plt.yscale('log')
plt.ylim(6, 10)

# + jupyter={"outputs_hidden": true}
max_lr = 2.5e-4
# max_lr = 5e-4

# + jupyter={"outputs_hidden": true}
from src.clr import OneCycleLR

# + jupyter={"outputs_hidden": true}
one_cycle = OneCycleLR(max_lr,
                       end_percentage=0.1,
                       scale_percentage=None,
                       # maximum_momentum=None,
                       minimum_momentum=None
                       )

# + jupyter={"outputs_hidden": true}
model = build_cnn(filters, kernels, input_shape=(32, 64, len(dg_train.data.level)), activation=activation)
model.compile(keras.optimizers.SGD(), 'mse')

# + jupyter={"outputs_hidden": true}
model.fit_generator(dg_train, epochs=5, validation_data=dg_valid, callbacks=[one_cycle])

# + jupyter={"outputs_hidden": true}
model.fit_generator(dg_train, epochs=5, validation_data=dg_valid, callbacks=[one_cycle])

# + jupyter={"outputs_hidden": true}
# same cycle but with momentum scheduling fully disabled (for Adam below)
one_cycle = OneCycleLR(max_lr,
                       end_percentage=0.1,
                       scale_percentage=None,
                       maximum_momentum=None,
                       minimum_momentum=None
                       )

# + jupyter={"outputs_hidden": true}
model = build_cnn(filters, kernels, input_shape=(32, 64, len(dg_train.data.level)), activation=activation)
model.compile(keras.optimizers.Adam(), 'mse')

# + jupyter={"outputs_hidden": true}
model.fit_generator(dg_train, epochs=5, validation_data=dg_valid, callbacks=[one_cycle])

# + jupyter={"outputs_hidden": true}
model.fit_generator(dg_train, epochs=5, validation_data=dg_valid, callbacks=[one_cycle])

# + jupyter={"outputs_hidden": true}
plt.plot(one_cycle.history['lr'])

# + jupyter={"outputs_hidden": true}
plt.plot(one_cycle.history['momentum'])
# -

# ## Compare different schedules

# + Collapsed="false"
from src.utils import *

# + Collapsed="false"
# !ls /home/stephan/data/myWeatherBench/predictions/saved_models/*hist*

# + Collapsed="false"
# load keras History dicts of earlier experiment runs
h01 = read_pickle(
    '/home/stephan/data/myWeatherBench/predictions/saved_models/01.1-default_no_es_history.pkl')
h05 = read_pickle(
    '/home/stephan/data/myWeatherBench/predictions/saved_models/05-2.5e-4_no_decay_history.pkl')
h06 = read_pickle(
    '/home/stephan/data/myWeatherBench/predictions/saved_models/06-decay_10_5_history.pkl')
h07 = read_pickle(
    '/home/stephan/data/myWeatherBench/predictions/saved_models/07-decay_40_10_history.pkl')
h08 = read_pickle(
    '/home/stephan/data/myWeatherBench/predictions/saved_models/08-2.5e-4_es_history.pkl')

# + Collapsed="false"
# solid = training loss, dashed = validation loss
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
for h, name, c in zip([h01, h05, h06, h07, h08], ['01', '05', '06', '07', '08'],
                      ['b', 'r', 'g', 'orange', 'black']):
    plot_hist(h, ax, (4.5, 8), name, valid=False, c=c)
    plot_hist(h, ax, (4.5, 8), name, train=False, c=c, ls='--')

# + Collapsed="false"
# best validation loss and the epoch it occurred at, per run
for h in [h01, h05, h06, h07, h08]:
    print(np.min(h['val_loss']), np.argmin(h['val_loss']))
# -

# ## LR schedule for big network

# + jupyter={"outputs_hidden": true}
h10 = read_pickle(
    '/home/stephan/data/myWeatherBench/predictions/saved_models/10-big_fc_history.pkl')

# + jupyter={"outputs_hidden": true}
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
for h, name, c in zip([h08, h10], ['08', '10'], ['b', 'r']):
    plot_hist(h, ax, (4, 8), name, valid=False, c=c)
    plot_hist(h, ax, (4, 8), name, train=False, c=c, ls='--')

# + jupyter={"outputs_hidden": true}
lrf = LRFinder(
    dg_train.n_samples,
    batch_size,
    minimum_lr=1e-5,
    maximum_lr=1,
    lr_scale='exp',
    save_dir='./', verbose=0)

# + jupyter={"outputs_hidden": true}
# larger network for the big-model LR range test
filters = [128, 128, 128, 128, 128, 128, 2]
kernels = [5, 5, 5, 5, 5, 5, 5]

# + jupyter={"outputs_hidden": true}
model = build_cnn(filters, kernels, input_shape=(32, 64, len(dg_train.data.level)), activation=activation)
model.compile(keras.optimizers.Adam(), 'mse')

# + jupyter={"outputs_hidden": "false"}
model.fit_generator(dg_train, epochs=1, callbacks=[lrf])

# + jupyter={"outputs_hidden": true}
plt.plot(10**lrf.lrs, lrf.losses)
plt.ylim(7, 11)

# + jupyter={"outputs_hidden": true}
# NOTE(review): this callback is constructed but not attached to any fit call here
tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto',
    restore_best_weights=True
)
# -

# ## NEW: Check learning curves

h13 = read_pickle(
    '/home/rasp/data/myWeatherBench/predictions/saved_models/13-resnet_bn_dr_0.1_history.pkl')
h17 = read_pickle(
    '/home/rasp/data/myWeatherBench/predictions/saved_models/17-resnet_deeper_bn_dr_0.1_history.pkl')

??plot_hist

from glob import glob

path = '/home/rasp/data/myWeatherBench/predictions/saved_models/'
glob(f'{path}14*pkl')

sns.palettes.color_palette(n_colors=5)


def plot_losses(path, exp_ids, plot_lrs=True, ylim=None):
    """Plot train (solid) and validation (dashed) loss curves for experiments.

    Histories are located as pickles matching `{path}{exp_id}*.pkl`. With
    plot_lrs=True a second panel shows the LR history of the last experiment.

    NOTE(review): with plot_lrs=False, plt.subplots(1, 1) returns a single
    Axes (not an array), so axs[0] would raise — only the default is used
    in this notebook. Also assumes at least one file matches each exp_id.
    """
    exp_ids = [str(exp_id) for exp_id in exp_ids]
    fig, axs = plt.subplots(2 if plot_lrs else 1, 1, figsize=(10, 10 if plot_lrs else 5))
    colors = sns.palettes.color_palette(n_colors=len(exp_ids))
    for exp_id, c, in zip(exp_ids, colors):
        fn = glob(f'{path}{exp_id}*.pkl')[0]
        h = read_pickle(fn)
        plot_hist(h, axs[0], name=exp_id, valid=False, c=c)
        plot_hist(h, axs[0], name=exp_id, train=False, c=c, ls='--')
    axs[0].set_ylim(ylim)
    if plot_lrs:
        # h and c intentionally carry over from the last loop iteration
        axs[1].plot(h['lr'], c=c)


plot_losses(path, [15, 18], ylim=(0.01, 0.03))

fig, ax = plt.subplots(1, 1, figsize=(10, 6))
for h, name, c in zip([h13, h17], ['13', '17'], ['b', 'r']):
    plot_hist(h, ax, name=name, valid=False, c=c)
    plot_hist(h, ax, name=name, train=False, c=c, ls='--')
plt.ylim(0.01, 0.03)
# plt.yscale('log')

plt.plot(np.array((h17['lr']))*1e3)

plt.plot(np.array((h13['lr']))*1e3)

h29 = read_pickle(
    '/home/rasp/data/myWeatherBench/predictions/saved_models/29-resnet_hr_history.pkl')
h33 = read_pickle(
    '/home/rasp/data/myWeatherBench/predictions/saved_models/33-resnet_hr_pre_history.pkl')

h33

fig, ax = plt.subplots(1, 1, figsize=(10, 6))
for h, name, c in zip([h29, h33], ['29', '33'], ['b', 'r']):
    plot_hist(h, ax, name=name, valid=False, c=c)
    plot_hist(h, ax, name=name, train=False, c=c, ls='--')

# ## Again: Check learning curves. Does it need to take so long!?

path = '/home/rasp/data/myWeatherBench/predictions/saved_models/'

plot_losses(path, ['13-', 42, 46], ylim=(0.024, 0.026))

plot_losses(path, [40], ylim=(0.026, 0.028))

plot_losses(path, [44, 45], ylim=(0.013, 0.015))

# NOTE(review): opening this history pickle with mode 'wb' TRUNCATES the file
# on disk without writing anything back — almost certainly unintended; confirm
# before re-running this cell.
fn = '/home/rasp/data/myWeatherBench/predictions/saved_models/29-resnet_hr_history.pkl'
with open(fn, 'wb') as f:
    print('a')
devlog/03-lr-schedule-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/denzilseason/Linear-Algebra-58019/blob/main/Final_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="hv_shAfEHDii" # ##Problem 1. Student A, Student B, and Student C have a total of Php 89 cash in their banks. Student A has 6 less than Student C. Student B has 3 times what Student C has. How much does each student have? (30 points) # + id="IVhPE1Y-HLFC" colab={"base_uri": "https://localhost:8080/"} outputId="dc3527ff-4099-4a40-910f-0b8701972390" import numpy as np A = np.array([[1,1,1],[1,0,4],[0,0,5]]) #First equation B = np.array([[89],[89],[95]]) #Second equation X = np.linalg.inv(A).dot(B) #Amount each student have print(X) # + [markdown] id="SX53AJTGI9Tk" # ##Problem 2. Solve each system: # 3x - y + z = 5 # 9x - 3y +3z = 15 # -12x + 4y -4z = -20 # + colab={"base_uri": "https://localhost:8080/", "height": 467} id="N_iWT2YYKUQr" outputId="77996282-2611-4c9f-91e7-19a1ca30f81d" import numpy as np from scipy.linalg import solve A=np.array([[3,-1,1],[9,-3,3],[-12,4,-4]]) print(A) B=np.array([[5],[15],[-20]]) print(B) X=solve(A,B) print(X) #Problem is not answerable because matrix is singular, so codes show error. 0 determinant does not have an inverse. # + [markdown] id="2rz7zMjcX9Jg" # ##Problem 3. # + colab={"base_uri": "https://localhost:8080/"} id="YmJjztE0X_ma" outputId="6d0f032a-12b9-4547-ba91-44ea6c7fd422" import numpy as np from scipy.linalg import solve #from numpy.linalg import eig A = np.array([[8,5,-6],[-12,-9,12],[-3,-3,5]]) print(A) a,b = np.linalg.eig(A) print("The eigenvalues is/are:",a) print("The right eigenvectors are:",b) #x = v.round() #print(x)
Final_Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %%capture import numpy as np from prfpy.stimulus import PRFStimulus2D from prfpy.model import Iso2DGaussianModel from prfpy.fit import Iso2DGaussianFitter prf_stim = PRFStimulus2D(screen_size_cm=screen_size_cm, screen_distance_cm=screen_distance_cm, design_matrix=dm_full, TR=TR, task_lengths=task_lengths, task_names=task_names, late_iso_dict=late_iso_dict) # + gg = Iso2DGaussianModel(stimulus=prf_stim, hrf=hrf, filter_predictions=filter_predictions, filter_type=filter_type, filter_params=filter_params, normalize_RFs=normalize_RFs) gf = Iso2DGaussianFitter(data=data, model=gg, n_jobs=n_jobs, fit_hrf=fit_hrf) # - gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes, verbose=verbose, n_batches=n_batches, pos_prfs_only=pos_prfs_only) gf.iterative_fit(rsq_threshold=rsq_threshold, verbose=verbose, bounds=gauss_bounds, constraints=constraints, xtol=xtol, ftol=xtol)
test/example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # | [&gt; Notebook Popularity](1_Popularity.ipynb) # + [markdown] toc-hr-collapsed=false # # Public GitHub Notebook Corpus Research Collaboration # - # ## Project Description # As an extension of work done by [Rule et al.](https://blog.jupyter.org/we-analyzed-1-million-jupyter-notebooks-now-you-can-too-guest-post-8116a964b536) in 2017, the goal of this project is to collect and analyze all public Jupyter Notebooks on GitHub ([~1 million in 2017](https://library.ucsd.edu/dc/collection/bb6931851t), [now nearly 5 million](https://github.com/parente/nbestimate/blob/master/estimate.ipynb)). This analysis will help designers, developers, and researchers in the Jupyter community quantitatively assess how people use notebooks, with an emphasis on applications in data science, machine learning, and information visualization. This GitHub repository holds the scripts used to search, download, and process notebooks from GitHub, and the set of notebooks used to analyze the corpus. # # The results of this research can complement qualitative user studies and inform challenging UX questions to focus development on real user needs. This understanding of notebook applications is crucial to user-centered design. Because many of the notebooks hosted publicly on GitHub are created as part of educational endeavours such as online and in-person courses, these insights may be particularly valuable for the Jupyter education community. # ## Questions Addressed in this Research # # 1. [How has the popularity of Jupyter changed over time?](1_Popularity.ipynb) # 2. [What programming languages are people using and how has this changed over time?](2_Languages.ipynb) # 3. [Who are the owners of Jupyter notebooks on GitHub?](3_Owners.ipynb) # 4. 
[How are notebooks organized?](4_Organization.ipynb) # 5. [How collaborative are repositories with Jupyter notebooks?](5_Collaboration.ipynb) # 6. [What Python packages (e.g. numpy, pandas) are people using and how is that usage related to the machine learning framework choice?](6_Packages.ipynb) # 7. [What machine learning/data science frameworks are people using and how has this changed over time?](7_Frameworks.ipynb) # 8. [What machine learning models are people using?](8_Models.ipynb) # 9. [What visualization packages are people using and what kinds of visualizations are they creating with them?](9_Visualizations.ipynb) # 10. [What are they struggling the most with?](10_Struggles.ipynb) # 11. [What can we learn about API design for data science?](11_APIs.ipynb) # 12. [How are IPython special features (magic, bash, documentation) used?](12_Magic.ipynb) # 13. [How do the uses of Jupyter Notebooks live up to the intentions of its creators?](13_Standards.ipynb) # 14. [What steps of analysis are users devoting the most code to (e.g. data manipulation, visualization, modeling)?](14_Time.ipynb) # 15. [Exploring interactive subsets.](15_Interactive.ipynb) # 16. [Jupyter Notebook Hall of Fame!](16_HallOfFame.ipynb) # ## Collaboration # # The initial collaboration will involve several individuals and institutions: # - <NAME>: The original author of the work, and currently a postdoc at OHSU. # - <NAME>: An AWS summer data science intern and contributor to Project Jupyter. # - <NAME>: A Project Jupyter software engineering and data science intern. # - <NAME>: An AWS Principal TPM and co-founder of Project Jupyter. # - <NAME>: The UX Designer/Research for Project Jupyter. # - The broader Jupyter open source community. There is an emerging community of HCI and researchers in Jupyter's open source community that we will engage with. # # | [&gt; Notebook Popularity](1_Popularity.ipynb)
analysis_notebooks/0_TableOfContents.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Building Data Genome Project 2.0 # ## Exploratory data analysis of metadata # Biam! (<EMAIL>) # + # data and numbers import numpy as np import pandas as pd # Visualization import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.pylab as pylab # %matplotlib inline import seaborn as sns sns.set_style("darkgrid") mpl.style.use('ggplot') import gc # + [markdown] toc-hr-collapsed=false # # Dataset # - # **Buildings metadata** # # * <code>building_id</code>: building code-name with the structure <i>UniqueFirstName_SiteID_primaryspaceusage</i>. # * <code>site_id</code>: animal-code-name for the site. # * <code>primaryspaceusage</code>: Primary space usage of all buildings is mapped using the <a href="https://www.energystar.gov/buildings/facility-owners-and-managers/existing-buildings/use-portfolio-manager/identify-your-property-type" rel="nofollow">energystar scheme building description types</a>. # * <code>sqft</code>: Floor area of building in square feet (sq ft). # * <code>lat</code>: Latitude of building location to city level. # * <code>lng</code>: Longitude of building location to city level. # * <code>electricity</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>hotwater</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>chilledwater</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>steam</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>water</code>: presence of this kind of meter in the building. 
<code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>irrigation</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>solar</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative. # * <code>gas</code>: presence of this kind of meter in the building. <code>Yes</code> if affirmative, <code>NaN</code> if negative./li> # * <code>yearbuilt</code>: Year corresponding to when building was first constructed, in the format YYYY. # * <code>numberoffloors</code>: Number of floors corresponding to building. # * <code>date_opened</code>: Date building was opened for use, in the format D/M/YYYY. # * <code>sub_primaryspaceusage</code>: <a href="https://www.energystar.gov/buildings/facility-owners-and-managers/existing-buildings/use-portfolio-manager/identify-your-property-type" rel="nofollow">energystar scheme building description types</a> subcategory. # * <code>energystarscore</code>: Rating of building corresponding to building energystar scheme ([Energy Star Score](https://www.energystar.gov/buildings/facility-owners-and-managers/existing-buildings/use-portfolio-manager/understand-metrics/how-1-100)). # * <code>eui</code>: [Energy use intensity](https://www.energystar.gov/buildings/facility-owners-and-managers/existing-buildings/use-portfolio-manager/understand-metrics/what-energy) of the building (kWh/year/m2). # * <code>heatingtype</code>: Type of heating in corresponding building. # * <code>industry</code>: Industry type corresponding to building. # * <code>leed_level</code>: LEED rating of the building ([Leadership in Energy and Environmental Design](https://en.wikipedia.org/wiki/Leadership_in_Energy_and_Environmental_Design")), most widely used green building rating system. # * <code>occupants</code>: Usual number of occupants in the building. # * <code>rating</code>: Other building energy ratings. 
# * <code>site_eui</code>: Energy (Consumed/Purchased) use intensity of the site (kWh/year/m2).
# * <code>source_eui</code>: Total primary energy consumption normalized by area (Takes into account conversion efficiency of primary energy into secondary energy).
# * <code>sqm</code>: Floor area of the building in squared meters.
# * <code>subindustry</code>: More detailed breakdown of Industry type corresponding to building.
# * <code>timezone</code>: site time zone.

# Forward slashes work on every OS (Python normalises them on Windows);
# the previous "..\\data\\metadata\\" form only worked on Windows.
path = "../data/metadata/"

# Buildings data
metadata = pd.read_csv(path + "metadata.csv")

metadata.info()

# # Exploratory Data Analysis

# ## Missing values

# Percentage of missing values in each feature
round(metadata.isna().sum()/len(metadata)*100,2)

# ## Categories

# +
# For each categorical feature collect [n_unique, value1, value2, ...]
cat = ["site_id","primaryspaceusage","sub_primaryspaceusage","industry","subindustry","timezone"]

col = []
for feature in cat:
    col_list = list(metadata[feature].unique())
    # reuse the list we already built instead of recomputing unique()
    col_list.insert(0, len(col_list))
    col.append(col_list)
# -

cat_df = pd.DataFrame.from_records(col).T.rename(
    columns={
        0: "site_id",
        1: "primaryspaceusage",
        2: "sub_primaryspaceusage",
        3: "industry",
        4: "subindustry",
        5: "timezone",
    }
)
cat_df

cat_df.to_csv("cat_df.csv", index=False)

# ## Sites location

import geopandas as gpd
from shapely.geometry import Point, Polygon

# World map
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
# Exclude Antartica
world = world[(world.name != "Antarctica") & (world.name != "Fr. S. Antarctic Lands")]

# Coordinate reference system used in this map
world.crs

# Latitude and longitude are of the site location (all buildings from the same location shares <code>lng</code> and <code>lat</code> values).
# All the sites
sites = metadata[["site_id","lat","lng"]].groupby("site_id").median()

# Converts lat and lng to geometry objects
geometry = [Point(xy) for xy in zip (sites["lng"], sites["lat"])]

# Creates geoDataFrame
geo_sites = gpd.GeoDataFrame(sites, crs = world.crs, geometry = geometry)
geo_sites

# Plot
sns.set(font_scale = 1)
fig, ax = plt.subplots(figsize = (15,15))
world.plot(ax = ax, alpha = 0.4, color = "grey")
geo_sites.plot(ax = ax, alpha = 0.8, color = "dodgerblue")
# Portable forward-slash paths: the previous "..\\figures\\..." form only
# worked on Windows.
fig.savefig("../figures/map.pdf", bbox_inches='tight')
fig.savefig("../figures/map.png", dpi=72, bbox_inches='tight')

# Zoom Plot
fig, ax = plt.subplots(figsize = (15,15))
world.plot(ax = ax, alpha = 0.4, color = "grey")
geo_sites.plot(ax = ax, color = "dodgerblue")
ax.set_xlim([-125, 25])
ax.set_ylim([20, 60])

# ## Features frequency plots

len(np.unique(metadata["building_id"]))

# +
# colors = ["crimson","mediumvioletred","orangered","gold","yellowgreen","lightseagreen","royalblue","rebeccapurple","slategray"]

# +
sns.set(rc={'figure.figsize':(36,21)})
sns.set(font_scale = 2)
f, axes = plt.subplots(3, 3)
axes = axes.flatten()
color = "yellowgreen"

# primary use category countplot in decreasing order
# Temporary dataset
top5 = list(metadata['primaryspaceusage'].value_counts().iloc[:5].index)
temp = metadata[["primaryspaceusage"]].copy()
# ~isin(...) replaces the non-idiomatic "== False" comparison
temp.loc[~temp.primaryspaceusage.isin(top5), "primaryspaceusage"] = "Other"
# Plot
ax1 = axes[0]
g1 = sns.countplot(data=temp, y="primaryspaceusage", color= color, orient="h", ax=ax1, order = temp['primaryspaceusage'].value_counts().index)
ax1.title.set_text('Primary use category')
ax1.set(ylabel="", xlabel="", xlim=(0,1600))
# adds percentage
total = float(len(metadata)) # number of buildings
for p in g1.patches:
    width = p.get_width()
    g1.text(width + 150, p.get_y() + p.get_height()/1.5, '{:1.2%}'.format(width/total), ha="center")
del(top5, temp)

# primary use subcategory countplot in decreasing order
# Temporary dataset
top5 = list(metadata['sub_primaryspaceusage'].value_counts().iloc[:8].index)
temp = metadata[["sub_primaryspaceusage"]].copy()
temp.loc[~temp.sub_primaryspaceusage.isin(top5), "sub_primaryspaceusage"] = "Other"
# Plot
ax2 = axes[1]
g2 = sns.countplot(data=temp, y="sub_primaryspaceusage", color= color, orient="h", ax=ax2, order = temp['sub_primaryspaceusage'].value_counts().iloc[:16].index)
ax2.title.set_text('Primary use subcategory')
ax2.set(ylabel="", xlabel="", xlim=(0,1600))
# adds percentage
total = float(len(metadata)) # number of buildings
for p in g2.patches:
    width = p.get_width()
    g2.text(width + 150, p.get_y() + p.get_height()/1.5, '{:1.2%}'.format(width/total), ha="center")
del(top5, temp)

# industry countplot in decreasing order
ax3 = axes[2]
g3 = sns.countplot(data=metadata, y="industry", color=color, ax=ax3, orient="h", order = metadata['industry'].value_counts().index)
ax3.title.set_text('Industry category (65% missing values)')
ax3.set(ylabel="", xlabel="", xlim=(0,1600))
# adds percentage
total = float(len(metadata)) # number of buildings
for p in g3.patches:
    width = p.get_width()
    g3.text(width + 150, p.get_y() + p.get_height()/1.5, '{:1.2%}'.format(width/total), ha="center")

# subindustry countplot in decreasing order
# Temporary dataset
top5 = list(metadata['subindustry'].value_counts().iloc[:5].index)
temp = metadata[["subindustry"]].copy()
temp.loc[~temp.subindustry.isin(top5), "subindustry"] = "Other"
# Plot
ax4 = axes[3]
g4 = sns.countplot(data=temp, y="subindustry", color=color, ax=ax4, orient="h", order = temp['subindustry'].value_counts().index)
ax4.title.set_text('Subindustry category (65% missing values)')
ax4.set(ylabel="", xlabel="", xlim=(0,1600))
# adds percentage
total = float(len(metadata)) # number of buildings
for p in g4.patches:
    width = p.get_width()
    g4.text(width + 150, p.get_y() + p.get_height()/1.5, '{:1.2%}'.format(width/total), ha="center")
del(top5, temp)

# timezone countplot in decreasing order
ax5 = axes[4]
g5 = sns.countplot(data=metadata, y="timezone", color=color, ax=ax5, orient="h", order = metadata['timezone'].value_counts().index)
ax5.title.set_text('Timezone')
ax5.set(ylabel="", xlabel="", xlim=(0,1600))
# adds percentage
total = float(len(metadata)) # number of buildings
for p in g5.patches:
    width = p.get_width()
    g5.text(width + 150, p.get_y() + p.get_height()/1.5, '{:1.2%}'.format(width/total), ha="center")

# Meters type frequency
ax6 = axes[5]
# Temporal datafram
temp = pd.melt(metadata[["building_id","electricity","hotwater","chilledwater","steam","water","irrigation","gas","solar"]],id_vars = "building_id", var_name="meter")
# plot
g6 = sns.countplot(data=temp.loc[temp['value']=="Yes"], y='meter', color= color, ax=ax6, orient="h", order = temp.loc[temp['value']=="Yes"]["meter"].value_counts().index)
g6.title.set_text('Meter type frequency')
g6.set(ylabel="", xlabel="", xlim=(0,1600))
# adds percentage
total = temp.loc[temp['value']=="Yes"]["value"].value_counts()[0] # number of meters
for p in g6.patches:
    width = p.get_width()
    g6.text(width + 150, p.get_y() + p.get_height()/1.5, '{:1.2%}'.format(width/total), ha="center")
del(temp)

# NOTE(review): sns.distplot is deprecated since seaborn 0.11; these three
# panels will need histplot/displot when seaborn is upgraded.

# "sqft" histogram
ax7 = axes[6]
g7 = sns.distplot(metadata["sqft"], ax=ax7, color=color)
g7.set(ylabel="", xlabel="")
ax7.set_title('Building Area')

# "yearbuilt" histogram
ax8 = axes[7]
g8 = sns.distplot(metadata["yearbuilt"].dropna(), ax=ax8, color=color)
g8.set(ylabel="", xlabel="")
ax8.set_title('Year built (11% missing values)')

# "occupants" histogram
ax9 = axes[8]
g9 = sns.distplot(metadata["occupants"].dropna(), ax=ax9, color=color)
g9.set(ylabel="", xlabel="")
ax9.set_title('Ocuppants (85% missing values)')

plt.tight_layout()
# -

# Portable forward-slash paths (see note above).
f.savefig("../figures/metadata_features.pdf", bbox_inches='tight')
f.savefig("../figures/metadata_features.png", bbox_inches='tight')

# ### Number of buildings in each site

metadata.groupby("site_id").building_id.count()
notebooks/01_EDA-metadata.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np


def _county_frame(df, index_name, county_name):
    """Reshape a two-column county CSV into a single county-labelled row.

    Renames the columns to (index_name, county_name), indexes on index_name
    and transposes, so the categories become columns and the one remaining
    row is labelled with the county name.
    """
    out = df.copy()
    out.columns = [index_name, county_name]
    return out.set_index(index_name).transpose()


# +
# read in csv files

# crimes data
crimes_la = pd.read_csv('crime_data/crimes_la.csv')
crimes_ventura = pd.read_csv('crime_data/crimes_ventura.csv')
crimes_orange = pd.read_csv('crime_data/crimes_orange.csv')

# personal justice data
criminal_justice_la = pd.read_csv('criminal_justice_data/criminal_justice_la.csv')
criminal_justice_ventura = pd.read_csv('criminal_justice_data/criminal_justice_ventura.csv')
criminal_justice_orange = pd.read_csv('criminal_justice_data/criminal_justice_orange.csv')

# +
# Rename the columns, set the index and transpose — one helper call per
# county instead of the previous copy-pasted three-step block.
crimes_la = _county_frame(crimes_la, 'Crime Types', 'LA County')
crimes_ventura = _county_frame(crimes_ventura, 'Crime Types', 'Ventura County')
crimes_orange = _county_frame(crimes_orange, 'Crime Types', 'Orange County')
# -

# Merge the crime dataframes for all three counties
crimes_df = pd.concat([crimes_la, crimes_ventura, crimes_orange])
crimes_df = crimes_df.rename_axis(None, axis=1).rename_axis('', axis=0)
crimes_df

# +
# Do the same thing for the personal justice data
criminal_justice_la = _county_frame(criminal_justice_la, 'Personal Justice #', 'LA County')
criminal_justice_ventura = _county_frame(criminal_justice_ventura, 'Personal Justice #', 'Ventura County')
criminal_justice_orange = _county_frame(criminal_justice_orange, 'Personal Justice #', 'Orange County')
# -

# Merge the personal justice dataframes for all three counties
personal_justice_df = pd.concat([criminal_justice_la, criminal_justice_ventura, criminal_justice_orange])
personal_justice_df = personal_justice_df.rename_axis(None, axis=1).rename_axis('', axis=0)
personal_justice_df

# +
# Crimes Dataframe Initial EDA
# -

crimes_df.shape

crimes_df.columns

# Keep only the headline crime categories
crimes_df_undetailed = crimes_df[['Violent Crimes','Homicide', 'Rape (Forcible Rape prior to 2014)', 'Robbery', 'Aggravated Assault', 'Property Crimes', 'Burglary', 'Motor Vehicle Theft', 'Larceny-Theft', 'Arson']]
crimes_df_undetailed

personal_justice_df.columns

# Drop aggregate / staffing-detail columns before the merge
personal_justice_df = personal_justice_df[personal_justice_df.columns[~personal_justice_df.columns.isin(['Total', 'Sworn', 'Civilian', 'Other', 'Attorneys', 'Investigators', 'Clerical', 'Probation Officers', 'CA Highway Patrol'])]]
personal_justice_df

# One row per county: crime counts side by side with justice staffing
full_crime_data = pd.concat([crimes_df_undetailed, personal_justice_df], axis=1)

full_crime_data.to_csv('full_crime_data.csv', encoding='utf-8', index=True)

full_crime_data
homees/Crimes Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Using `pyirf` to calculate IRFs from the FACT Open Data # # # **Note** In FACT, we used a different terminology, partly because of being a monoscopic telescope or out of confusion witht the CTA terms, in this context DL3 are reconstructed events, but not necessarily already with the IRF import numpy as np import astropy.units as u import matplotlib.pyplot as plt # %matplotlib inline # ## Download Data # ! curl -z gamma_test_dl3.hdf5 -LO https://factdata.app.tu-dortmund.de/dl3/FACT-Tools/v1.1.2/gamma_test_dl3.hdf5 # ## Read in the data # from astropy.table import QTable import astropy.units as u import tables # ### Simulated Event Info # # Currently, pyirf only works with powerlaw simulated events, like CORSIKA does it. # We want to also support arbitrary histograms / event distributions, but that is not yet implemented. # # This can be created from a file with that information, but I will just create it here. # + from pyirf.simulations import SimulatedEventsInfo simulation_info = SimulatedEventsInfo( energy_min=200 * u.GeV, energy_max=50 * u.TeV, spectral_index=-2.7, n_showers=12600000, max_impact=300 * u.m, viewcone=0 * u.deg, ) # - # ### DL2 Event List # # # `pyirf` does not prescribe or use a specific DL2 *file* format. # You need to read the data into an `astropy.table.QTable` following our conventions, detailed in the docs here: # # https://cta-observatory.github.io/pyirf/introduction.html#dl2-event-lists # # The FACT-Tools / aict-tools analysis chain uses a column-oriented hdf5 file written using h5py. # Unfortunately, units have to be known and are not in the metadata. 
# + gammas = QTable() # mapping of <target column name>: (<column in the file, unit>) columns = { 'obs_id': ('run_id', None), 'event_id': ('event_num', None), 'reco_energy': ('gamma_energy_prediction', u.GeV), 'true_energy': ('corsika_event_header_total_energy', u.GeV), 'true_az': ('source_position_az', u.deg), 'pointing_az': ('pointing_position_az', u.deg), 'theta': ('theta_deg', u.deg), 'gh_score': ('gamma_prediction', None), } with tables.open_file('gamma_test_dl3.hdf5', mode='r') as f: events = f.root.events for col, (name, unit) in columns.items(): if unit is not None: gammas[col] = u.Quantity(events[name][:], unit, copy=False) else: gammas[col] = events[name][:] gammas['true_alt'] = u.Quantity(90 - events['source_position_zd'][:], u.deg, copy=False) gammas['pointing_alt'] = u.Quantity(90 - events['pointing_position_zd'][:], u.deg, copy=False) # make it display nice for col in gammas.colnames: if gammas[col].dtype == float: gammas[col].info.format = '.2f' # - gammas[:10] # ### Apply Event Selection # # We remove likely hadronic events by requiring a minimal `gh_score`. # # We will calculate point-like IRFs, that means selecting events in a radius around the # assumed source position. # + gammas['selected_gh'] = gammas['gh_score'] > 0.8 gammas['selected_theta'] = gammas['theta'] < 0.16 * u.deg gammas['selected'] = gammas['selected_gh'] & gammas['selected_theta'] np.count_nonzero(gammas['selected']) / len(gammas) # - # ## Calculate IRFs # # ### Effective area # # We only have point-like simulations at a specific wobble offset (0.6° for FACT), # so we calculate the effective area for all events at once, equivalent to a single fov offset bin. 
# # # #### Create the binning from pyirf.binning import create_bins_per_decade, bin_center # + true_energy_bins = create_bins_per_decade(simulation_info.energy_min, simulation_info.energy_max, 5) # single offset bin around the wobble distance # since we are dealing with point-like simulations wobble_offset = 0.6 * u.deg fov_offset_bins = [0.59, 0.61] * u.deg # - # ### Calculate effective area # # # Effective area is calculated before and after cuts, for the IRF, we only need after the event selection # has been applied. # # The difference between point-like IRFs and Full-Enclosure IRFs is if a theta cut has been applied or not. # + from pyirf.irf import effective_area_per_energy aeff_all = effective_area_per_energy(gammas, simulation_info, true_energy_bins) aeff_selected = effective_area_per_energy(gammas[gammas['selected']], simulation_info, true_energy_bins) # - # Let's use gammapy to plot the IRF # + # utility function to converet pyirf Quantities to the gammapy classes from pyirf.gammapy import create_effective_area_table_2d plt.figure() for aeff, label in zip((aeff_all, aeff_selected), ('All Events', 'Selected Events')): aeff_gammapy = create_effective_area_table_2d( # add a new dimension for the single fov offset bin effective_area=aeff[..., np.newaxis], true_energy_bins=true_energy_bins, fov_offset_bins=fov_offset_bins, ) aeff_gammapy.plot_energy_dependence(label=label, offset=[wobble_offset]) plt.xlim(true_energy_bins.min().to_value(u.GeV), true_energy_bins.max().to_value(u.GeV)) plt.yscale('log') plt.legend() print(aeff_gammapy) # - # ### Point Spread Function # # The point spread function describes how well the direction of the gamma rays is estimated. 
# +
from pyirf.irf import psf_table
from pyirf.utils import calculate_source_fov_offset

# per-event offset of the true source position within the field of view
gammas['true_source_fov_offset'] = calculate_source_fov_offset(gammas)

source_offset_bins = np.linspace(0, 3, 100) * u.deg

# calculate this only for the events after the gamma/hadron separation
psf = psf_table(gammas[gammas['selected_gh']], true_energy_bins, source_offset_bins, fov_offset_bins)
# -

psf.shape

# Again, let's use gammapy to plot:

# +
from pyirf.gammapy import create_psf_3d

psf_gammapy = create_psf_3d(psf, true_energy_bins, source_offset_bins, fov_offset_bins)

plt.figure()
psf_gammapy.plot_psf_vs_rad(offset=[wobble_offset], energy_true=[1., 10.]*u.TeV)
plt.legend(plt.gca().lines, ['1 TeV', '10 TeV'])
# -

# ### Energy Dispersion
#
# Describes how well the energy is estimated

# +
from pyirf.irf import energy_dispersion

# logarithmic space, is "symmetric" in terms of ratios 0.1 is a factor of 10 from 1 is a factor of 10 from 10
migration_bins = np.geomspace(0.1, 10, 100)

edisp = energy_dispersion(
    gammas[gammas['selected']],
    true_energy_bins=true_energy_bins,
    fov_offset_bins=fov_offset_bins,
    migration_bins=migration_bins,
)
# -

# Plot edisp

# +
from gammapy.irf import EnergyDispersion2D

plt.figure()
# 2D map of migration (reco/true energy ratio) vs true energy, first fov bin
plt.pcolormesh(
    true_energy_bins.to_value(u.GeV),
    migration_bins,
    edisp[:, :, 0].T,
    cmap='inferno'
)
plt.xlabel('$E_\mathrm{true} / \mathrm{GeV}$')
plt.ylabel('$\mu$')
plt.yscale('log')
plt.xscale('log')
# -

# ## Export to GADF FITS files
#
# We use the classes and methods from `astropy.io.fits` and `pyirf.io.gadf` to write files following the GADF
# specification, which can be found here:
#
# https://gamma-astro-data-formats.readthedocs.io/en/latest/

# +
from pyirf.io.gadf import create_aeff2d_hdu, create_energy_dispersion_hdu, create_psf_table_hdu
from astropy.io import fits
from astropy.time import Time
from pyirf import __version__

# set some common meta data for all hdus
meta = dict(
    CREATOR='pyirf-v' + __version__,
    TELESCOP='FACT',
    INSTRUME='FACT',
    DATE=Time.now().iso,
)

hdus = []
# every fits file has to have an Image HDU as first HDU.
# GADF only uses Binary Table HDUs, so we need to add an empty HDU in front
hdus.append(fits.PrimaryHDU(header=fits.Header(meta)))
hdus.append(create_aeff2d_hdu(aeff_selected, true_energy_bins, fov_offset_bins, **meta))
hdus.append(create_energy_dispersion_hdu(edisp, true_energy_bins, migration_bins, fov_offset_bins, **meta))
hdus.append(create_psf_table_hdu(psf, true_energy_bins, source_offset_bins, fov_offset_bins, **meta))

fits.HDUList(hdus).writeto('fact_irf.fits.gz', overwrite=True)
# -
docs/notebooks/fact_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: myKernel
#     language: python
#     name: venv
# ---

# Generate MTurk experiments

import os
import glob as glob
import itertools
import pandas as pd
import numpy as np
import json
from scipy.stats import wilcoxon, norm
import math

# Condition folder whose rendered chart images are compared in this study.
folder = 'UserC1'


def getFilename(path):
    """Map a local image path to its public URL (host + basename)."""
    return "https://xxxxxxxxx/" + os.path.split(path)[1]


# URLs of the four chart variants, aligned by sorted filename.
humanC = [getFilename(x) for x in glob.glob(os.path.join(folder, "human", "*.png"))]
excelC = [getFilename(x) for x in glob.glob(os.path.join(folder, "excelFig", "*.png"))]
optC = [getFilename(x) for x in glob.glob(os.path.join(folder, "ours", "*.png"))]
randomC = [getFilename(x) for x in glob.glob(os.path.join(folder, "random", "*.png"))]
humanC.sort()
excelC.sort()
optC.sort()
randomC.sort()

# All 6 unordered pairings of the 4 variants per chart, flattened and
# shuffled so pair order is randomised across HITs.
fourwayZip = list(zip(humanC, excelC, optC, randomC))
pairwiseCompare = [list(itertools.combinations(cc, 2)) for cc in fourwayZip]
pairs = list(itertools.chain(*pairwiseCompare))
np.random.shuffle(pairs)


def zipForCompare(pairs, perBatch = 9):
    """Arrange image pairs into an MTurk batch-CSV layout.

    Each output row holds perBatch pairs flattened into columns
    img1_1, img1_2, ..., img<perBatch>_1, img<perBatch>_2.
    If len(pairs) is not a multiple of perBatch, the last row is padded by
    repeating pairs from the start of the list.

    Fix: operate on a copy — the previous version padded the caller's list
    in place via list.extend().
    """
    batch = list(pairs)
    if len(batch) % perBatch != 0:
        remaining = perBatch - len(batch) % perBatch
        batch.extend(batch[:remaining])
    zipPairs = [list(itertools.chain(*batch[i:i+perBatch])) for i in range(0, len(batch), perBatch)]
    columnNames = []
    for i in range(1, perBatch + 1):
        columnNames.append('img' + str(i) + '_1')
        columnNames.append('img' + str(i) + '_2')
    return pd.DataFrame(zipPairs, columns=columnNames)


zipForCompare(pairs)
user-study/MTurk.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/dimred/pca.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="SSmXaJG75K0A"
# Standard Python libraries
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import time
import numpy as np
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
from IPython import display

import sklearn

import seaborn as sns; sns.set(style="ticks", color_codes=True)

import pandas as pd

# Use the fully-qualified option name: the bare 'precision' alias was
# deprecated in pandas 1.4 and removed in pandas 2.0.
pd.set_option('display.precision', 2)  # 2 decimal places
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 30)
pd.set_option('display.width', 100)  # wide windows

# + [markdown] id="lb1yP0QR5bSi"
# ## Dimensionality reduction of iris data using PCA <a class="anchor" id="PCA-iris"></a>
#
# In this section, we show how to find low dimensional structure
# in an unlabeled version of the Iris dataset by fitting a PCA model.
# We will use sklearn.
# + id="Bp1Q5QjO5XhB"
# Visualize raw 3d data
from sklearn.datasets import load_iris
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D

#https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html

iris = load_iris()
X = iris.data
y = iris.target

# + id="Qc2b7TU0wTfP"
# gca(projection=...) was deprecated in matplotlib 3.4 and removed in 3.6;
# add_subplot(projection='3d') is the supported way to obtain a 3D Axes.
ax3d = plt.figure().add_subplot(projection='3d')
ax3d.scatter(X[:,0], X[:,1], X[:,2])
ax3d.set_xlabel('sepal length')
ax3d.set_ylabel('sepal width')
ax3d.set_zlabel('petal length')
plt.show()

# + id="svUstZ6a5g8X"
# 2d projection of points
from sklearn.decomposition import PCA

X = iris.data[:,0:3]
pca_xy = PCA(n_components=2).fit_transform(X)
fig, ax = plt.subplots()
ax.scatter(pca_xy[:,0], pca_xy[:,1])
#save_fig("iris-pca")
plt.show()

# + id="YXZ4NhzJ5jkr"
# plot latent 2d projection of points in ambient 3d feature space
pca = PCA(n_components=2)
mu = np.mean(X, axis=0)
Xc = X - mu  # center the data
pca.fit(Xc)
W = pca.components_.T  # D*K
Z = np.dot(Xc, W)  # N * K latent scores
Xrecon = np.dot(Z, W.T) + mu  # N*D

# span the latent space in area covered by data
a = np.min(Z[:,0])
b = np.max(Z[:,0])
c = np.min(Z[:,1])
d = np.max(Z[:,1])
z0 = np.linspace(a, b, 10)
z1 = np.linspace(c, d, 10)
ZZ0, ZZ1 = np.meshgrid(z0, z1)
Zgrid = np.c_[ZZ0.ravel(), ZZ1.ravel()]  # 100x2
plane = np.dot(Zgrid, W.T) + mu  # N*D

latent_corners = np.array([ [a,c], [a,d], [b,c], [b,d] ])  # 4x2
recon_corners = np.dot(latent_corners, W.T) + mu  # 4x3

# add_subplot replaces the removed gca(projection=...) form here as well
ax3d = plt.figure().add_subplot(projection='3d')
scatterplot = ax3d.scatter(X[:,0], X[:,1], X[:,2], color="red")
#recon = ax3d.scatter(Xrecon[:,0], Xrecon[:,1], Xrecon[:,2], marker='*', color='green')
lineplot = ax3d.scatter(plane[:,0], plane[:,1], plane[:,2], color="black", alpha=0.5)
ax3d.set_xlabel('sepal length')
ax3d.set_ylabel('sepal width')
ax3d.set_zlabel('petal length')
#save_fig("iris-pca-3d")
plt.show()

# + id="cb7CY7C85m8N"
notebooks/pca.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # ## Generate predictions and evaluate the model # ### Introduction # I this notebook, we will run predictions on the model that we trained and deployed in the previous steps. If you recall, the model is hosted on Sagemaker realtime prediction endpoint. We will invoke that endpoint to generate the binary labels(1,0) on a few rows that we have in our test file (test_data.csv). We will then evaluate the results against the ground truth and see how the model performs. # ### Prerequisits # Before proceeding, make sure you have run the following notebook in order without any errors: # 1. variant_classifier-autopilot.ipynb # 2. SageMakerAutopilotDataExplorationNotebook.ipynb # 3. SageMakerAutopilotCandidateDefinitionNotebook.ipynb # # If not, please go back to the notebooks and run them before proceeding. # ### Setup # Lets start by importing the libraries that we will need for executing this notebook. import pandas as pd import sagemaker from sagemaker.predictor import RealTimePredictor from sagemaker.content_types import CONTENT_TYPE_CSV import boto3 from sklearn import metrics import numpy as np import seaborn as sns import matplotlib.pyplot as plt # ### Get the endpoint name # To generate predictions on test data, we need to get the endpoint name of the model that we deployed at the end of the SageMakerAutopilotCandidateDefinitionNotebook.ipynb notebook. To do this, we find the endpoint among the list of endpoints that starts with the string "AutoML-automl-vc". This is the default naming format that has been used in the variant_classifier-autopilot.ipynb and SageMakerAutopilotCandidateDefinitionNotebook.ipynb notebooks. 
# # **NOTE:** If you changed the naming convention and/or have multiple endpoints
# beginning with the string "AutoML-automl-vc", the endpoint retrieved may not be the
# correct one. You can verify by logging into the AWS console, navigating to Sagemaker
# and selecting "Endpoints" from the left hand menu. Here you will see all the
# endpoints that have been created in your account. Select the one that you created as
# part of the SageMakerAutopilotCandidateDefinitionNotebook.ipynb notebook. If the
# correct endpoint is not selected, you can overwrite the name of the variable
# "endpoint_name" with the correct endpoint name. Make sure the correct endpoint is
# selected before proceeding.

# Find the first deployed endpoint whose name follows the Autopilot naming
# convention used earlier in this workshop.
sm = boto3.client('sagemaker')
endpoints = sm.list_endpoints()['Endpoints']
for val in endpoints:
    ep = val.get("EndpointName")
    if ep.startswith('AutoML-automl-vc'):
        endpoint_name = ep
        print('Model endpoint: ' + endpoint_name)
        print('Make sure this is the correct model endpoint before proceeding')
        break
else:
    # BUG FIX: this message used to print unconditionally after the loop, even
    # when a matching endpoint had been found and `break` was taken. The
    # for/else clause only runs when the loop finishes without a `break`.
    print('No endpoint found. Make sure you have completed the steps mentioned in the prerequisits above.')

# ### Data Preprocessing
# We will now read the file "test_data.csv" into a dataframe and randomly sample
# 1000 records from it.
test_file = pd.read_csv('test_data.csv')
test_rows = test_file.sample(1000)
test_rows.head()

# As you can see, the test rows look exactly like the rows in the training dataset
# as expected. We will now separate out our target variable "CLASS" from the test
# data and store it in a new dataframe "actual".
test_rows_notarget = test_rows.drop(['CLASS'], axis=1)
actual = test_rows['CLASS'].to_frame(name="actual")
actual.reset_index(drop=True, inplace=True)

# ### Generate Predictions
# Next, we will invoke the endpoint of our model with the test rows and generate a
# prediction for each row. We will then store the results of the prediction in a
# new dataframe called "predicted".
# Invoke the deployed endpoint with the whole test batch (CSV in, CSV out).
# NOTE(review): RealTimePredictor / CONTENT_TYPE_CSV come from the legacy
# sagemaker<2.0 API -- confirm the pinned SDK version before upgrading.
sm_session = sagemaker.Session()
variant_predictor=RealTimePredictor(endpoint=endpoint_name,sagemaker_session=sm_session,content_type=CONTENT_TYPE_CSV, accept=CONTENT_TYPE_CSV)
# One request carrying all 1000 rows; the response is a whitespace-separated
# string of predicted labels, one per input row.
predicted_str=variant_predictor.predict(test_rows_notarget.to_csv(sep=',', header=False, index=False)).decode('utf-8')
predicted=pd.Series(predicted_str.split(),name='predicted').to_frame().astype(int)

# Finally, we combine "actual" and "predicted" values into a single dataframe
# called "results" (both were reset to a 0..n-1 index, so concat aligns row-wise).
results=pd.concat([actual, predicted],axis=1)
results.head()

# ### Model Evaluation
# We will now generate some evaluation metrics for our binary classification model.
# We will start with a confusion matrix and follow that up with a Receiver
# Operating Characteristic (ROC) curve.

# Confusion matrix annotated with label, count and percentage per cell.
cf_matrix = metrics.confusion_matrix(results['actual'], results['predicted'])
group_names = ['True Neg','False Pos','False Neg','True Pos']
group_counts = ["{0:0.0f}".format(value) for value in cf_matrix.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in cf_matrix.flatten()/np.sum(cf_matrix)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]
labels = np.asarray(labels).reshape(2,2)
sns.heatmap(cf_matrix, annot=labels, fmt='', cmap='Blues');

# NOTE(review): roc_curve is fed hard 0/1 labels, not scores, so the "curve" has
# a single operating point -- consider predicting probabilities if available.
fpr, tpr, thresholds = metrics.roc_curve(results['actual'], results['predicted'])

# +
# Summary metrics plus the ROC plot, with a text box of scalar scores.
roc_auc=metrics.auc(fpr, tpr)
accuracy=metrics.accuracy_score(results['actual'], results['predicted'])
precision=metrics.precision_score(results['actual'], results['predicted'])
recall=metrics.recall_score(results['actual'], results['predicted'])
f1score=metrics.f1_score(results['actual'], results['predicted'])

plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
# Diagonal = performance of a random classifier, for reference.
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC) curve')
plt.legend(loc="lower right")
# Score box placed just outside the axes (x=1.1).
plt.text(1.1,0.75,s='Accuracy: '+str(round(accuracy,2))+'\nPrecision: '+str(round(precision,2))+ '\nRecall: '+str(round(recall,2))+'\nF1 Score: '+str(round(f1score,2)),bbox=dict(boxstyle="square", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
plt.show()
source/GenomicsLearningCode/resources/notebooks/variant_predictor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import random

# Task 1: print a random number in [0, 100).
number = random.randrange(0, 100)
print("Рандомне число: ", number)

# Task 2: guessing game -- the user picks a number from 0 to 10 inclusive and
# plays against a fresh random draw each round.
while True:
    num = int(input("Введіть число від 0 до 10 включно: "))
    # BUG FIX: validate the guess BEFORE drawing. Previously the range check
    # (`elif num > 10`) ran after the win comparison, so an out-of-range guess
    # still consumed and printed a draw, and negative guesses were never
    # caught at all -- they just reported "You Lose!".
    if num < 0 or num > 10:
        print("Please enter number again")
        continue
    rand_num = random.randrange(0, 11)
    print("Рандомне число: ", rand_num)
    if num == rand_num:
        print("You Win!")
    else:
        print("You Lose!")
lesson_tasks/lesson11/num1,2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats

sns.set(style="white")
sns.set_context("paper", 1.8)
# -


def plot_mw_rmsd(csv_path, pdf_path):
    """Plot RMSD vs. molecular weight for one dataset and save it as a PDF.

    Draws a joint regression plot (regression line, no scatter) with a KDE
    overlay, and writes the linear-fit statistics into the legend.

    The three cells below were identical except for the file names, so the
    duplicated code is factored into this helper.
    """
    df = pd.read_csv(csv_path)
    slope, intercept, r_value, p_value, std_err = stats.linregress(df['MW'], df['RMSD'])
    g = sns.jointplot(df['MW'], df['RMSD'], kind="reg",
                      joint_kws={'color': 'k'}, scatter=False,
                      xlim=(100, 650), ylim=(0, 5))
    sns.kdeplot(df['MW'], df['RMSD'], ax=g.ax_joint, cmap='Blues')
    g.ax_joint.set_xlabel("Molecular weight (Da)")
    # raw string: '\A' is an invalid escape sequence in a plain literal
    g.ax_joint.set_ylabel(r"RMSD ($\AA$)")
    # g.ax_joint.legend_.remove()
    # BUG FIX: '{:+.3f}' forces an explicit sign on the intercept; the original
    # '{:.3f}' produced e.g. "y=0.123x0.456" whenever the intercept was positive.
    g.ax_joint.legend_.texts[0].set_text(
        "$R^2$: {:.2f}, y={:.3f}x{:+.3f}".format(r_value**2, slope, intercept))
    plt.tight_layout()
    plt.savefig(pdf_path)


# One figure per conformer-generation method.
plot_mw_rmsd("mw-ob-new.csv", "mw-ob-new.pdf")
plot_mw_rmsd("mw-ob-org.csv", "mw-ob-org.pdf")
plot_mw_rmsd("mw-rdkit-etkdg.csv", "mw-rdkit.pdf")
evaluation/mwplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Revelio Labs - Data Engineering Assignment # This is a simplified version of a data engineering project we had at Revelio Labs. The objective is to set up scalable pipelines using the existing stack that can ingest, update and process job posting data from our provider, GHR. You’ll be evaluated on your thought process, your SQL coding skills, how comfortable you are with basic terminal commands and your ability to explain your work. You don’t need to run any code for this exercise. Make sure you’ve read it entirely before you start so that you can see the full picture. Make the best use of online resources to catch up on technologies you are not familiar with. Choose the format that you think is best to gather your answers. # Email received from GHR: # We're in the process of re-creating an entire image of the dataset for you. It will be placed in a new folder 20200406; the files will be: # - master_[index]_[date].csv, (job_id, company, post_date, salary, city) - titles_[index]_[date].csv, (job_id, title) # - timelog_[index]_[date].csv, (job_id, remove_date) # Each of these files represents a table for our database, and for this run, each table is split across several files (identified by [index]) due to size limitations in AWS. # Going forward, new and revised job listing data will be published weekly (typically by Tuesday) in a new folder, dated for the Monday of the week (e.g. next one will be 20200413). These files will be much smaller, since they'll include only a week's worth of new and updated data. # About the data: # GHR data contains data on job postings starting in 2014. 
Each job_id is a unique identifier for a job posting (one row per job_id in each table) but sometimes the data is revised by including the same job_id in multiple deliveries. You can find a small sample of each file here (the sets of job_id are not the same among the files): https://info0.s3.us-east-2.amazonaws.com/assignment/engineering/data/master.csv https://info0.s3.us-east-2.amazonaws.com/assignment/engineering/data/title.csv https://info0.s3.us-east-2.amazonaws.com/assignment/engineering/data/timelog.csv # # Size of the entire image (20200406): master 30Gb, title 6Gb, timelog 9.4Gb # Size of a weekly update (average): master 1Gb, title 100Mb, timelog 150 Mb # # GHR data (SFTP) Access Details: # # ● Server: ghr-server # ● UID: client-revelio # ● private key: stored on EC2 at ~/.ssh/id-rsa-revelio-new # ● Port: 22 # # About Revelio Labs Stack (AWS) # # ● EC2 r5.4xlarge (remote Ubuntu machine, 16 CPU, 128Gb memory, 9.5Tb disk space). # We access it through a terminal on our local machine using the ssh protocol, everything has to be done through the command line once connected to this EC2 machine. You can connect to the GHR bucket using the SFTP protocol on this EC2 machine. # https://aws.amazon.com/ec2/ # # ● Redshift storage for structured data via PostgreSQL tables (7-node cluster, ~18Tb of disk overall) # https://aws.amazon.com/redshift/ # # For the following tasks, please provide commented scripts that perform all the necessary actions. SQL commands should be valid PostgreSQL syntax. Please make the best use of types, keys and indexes. Specify any credentials needed which have not already been specified as “<credential_name>”. # # Task 1) SFTP ingestion # # Please produce a bash script to transfer the folder YYYYMMDD on the GHR server to the Revelio EC2 instance over SFTP and save the data at “/work/ghr/YYYYMMDD”. This script should work for the entire image of the original dataset and for all following weekly updates. # Lets login to the EC2 instance. 
# Hopefully the credentials and iam roles/policies are already set.
#
#     ssh -i <credential.pem> ec2-user@revelio.bunchofawschars.compute-1.amazonaws.com
#
# Ok lets make it easy on myself. I'm just gonna write a python code that writes a
# bash. Then we will write a bash that calls that code and the bash to run on the
# EC2 server.
#
# We want to watch out for what we have and what we dont have so we only request
# gets that are needed. We don't want to redownload things we already have.

# +
import os
import datetime as dt

# Weekly folders already present under ./work/ghr/ -- these are skipped.
# BUG FIX: the directory check previously joined each entry onto './' instead of
# './work/ghr/' (the directory that was actually listed), so no existing folder
# was ever recognized and everything would have been re-downloaded.
existing = set(
    name for name in os.listdir('./work/ghr/')
    if os.path.isdir(os.path.join('./work/ghr/', name))
)

# Build the set of expected weekly folder names (one per week) in the date span.
folders = set()
start = dt.datetime.strptime("20200403", "%Y%m%d")
end = dt.datetime.strptime("20200604", "%Y%m%d")
# We can set it to the current time:
# end = dt.datetime.today()
week = start
while week < end:
    folders.add(week.strftime("%Y%m%d"))
    week += dt.timedelta(days=7)

# The folders we are missing and still need to fetch.
getfolders = folders - existing
# -

# Here we write the get commands to download the missing folders.
with open('download_files.sh', 'w') as rsh:
    rsh.write('''\
#! /bin/bash\n''')
    for folder in getfolders:
        # note if -i doesnt work, -oIdentityFile=~/.ssh/id-rsa-revelio-new could also work
        rsh.writelines(f'echo "get -r {folder}/ work/ghr/" | sftp -P 22 -i ~/.ssh/id-rsa-revelio-new client-revelio@ghr-server\n')

# here is the bash to download the files
# #!
/bin/bash python get_ghr_files.py bash download_files.sh # + #or we can call the bash commands in python instead of writing a whole new bash import subprocess for folder in getfolders: #note if -i doesnt work, -oIdentityFile=~/.ssh/id-rsa-revelio-new could also work command = f'echo "get -r {folder}/ work/ghr/" | sftp -P 22 -i ~/.ssh/id-rsa-revelio-new client-revelio@ghr-server' process = subprocess.Popen(command.split(), stdout=subprocess.PIPE) output, error = process.communicate() print(output, error) # - # # Task 2) SQL Ingestion # Create a single table in PostgreSQL called posting_20200406 containing all the data from each of the three files (job_id, company, post_date, salary, city, title, remove_date) for the initial GHR image. # ## set up staging # These are the staging tables we upload the csv files to. The last staging table is the final table we copy the joined staging tables to. # + #We use this code to see the sizes of the string to help determine the datatypes of the table import pandas as pd import numpy as np df = pd.read_csv('title.csv') measurer = np.vectorize(len) restitle = measurer(df.values.astype(str)).max(axis=0) df1 = pd.read_csv('timelog.csv') restime = measurer(df.values.astype(str)).max(axis=0) df2 = pd.read_csv('master.csv', encoding='latin-1') resmaster = measurer(df2.values.astype(str)).max(axis=0) # + CREATE TABLE public.stage_master ( job_id BIGINT, company VARCHAR(200), post_date VARCHAR(19), salary INT, city VARCHAR(60), ) DISTSTYLE KEY DISTKEY (job_id) SORTKEY (post_date); CREATE TABLE public.stage_title ( job_id BIGINT, title VARCHAR(70), ) DISTSTYLE KEY DISTKEY (job_id) SORTKEY (title); CREATE TABLE public.stage_timelog ( job_id BIGINT, remove_date VARCHAR(19), ) DISTSTYLE KEY DISTKEY (job_id) SORTKEY (remove_date); # - CREATE TABLE public.posting_20200406_stage ( job_id BIGINT, company VARCHAR(200), post_date TIMESTAMP, salary INT, city VARCHAR(60), title VARCHAR(70), remove_date TIMESTAMP, ) DISTSTYLE KEY DISTKEY (job_id) 
SORTKEY (remove_date); # ## Upload from EC2 to S3 bucket # # We could try to upload straight from the EC2 (using a manifest with endpoints and all that jazz), but Redshift is just easier to use with S3 buckets. So lets just copy it over there. Also the cost of the bucket would be like $1 a month so might as well use it. So here is how we can do it with python. # We need to make sure the IAM roles and policies are set so the EC2 has s3 access. (Side note: Probably whoever is writing this should have access to EC2 and S3. If not then the user arns should be added so they can modify the EC2 and S3 roles.) # + import boto3 #if bucket doesn't not exist we make one def create_bucket(session, bucket_name): """Creates an AWS S3 bucket in the 'us-east-1' region""" try: s3_resource = session.resource('s3') response = s3_resource.create_bucket( Bucket=bucket_name, #below is sometimes not needed when the default profile is the same region CreateBucketConfiguration={'LocationConstraint': 'us-east-1'} ) bucket = s3_resource.Bucket(bucket_name) return bucket except s3_resource.meta.client.exceptions.BucketAlreadyExists: bucket = s3_resource.Bucket(bucket_name) return bucket except Exception as e: print(e) def create_EC2_role(session, role_name, policies): """Create role with S3 full access policy""" try: iam_client = session.client('iam') policy_document = json.dumps({ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "ec2.amazonaws.com" }, "Action": "sts:AssumeRole" } ] }) role = iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=policy_document, ) for policy in policies: response = iam_client.attach_role_policy( RoleName=role_name, PolicyArn=policy ) return role except Exception as e: print(e) session = boto3.session.Session( aws_access_key_id=<credentials> aws_secret_access_key=<credentials>, region_name='us-east-1' ) bucket = (session, 'ghr_data') role_name = 'EC2toS3' policies = [ 
'arn:aws:iam::aws:policy/AmazonS3FullAccess' ] role = create_EC2_role(session, role_name, policies) # - # Now if the EC2 has AWS CLI installed with it, we can just sync the directories # ! aws s3 sync /work/ghr/ s3://ghr_data # Otherwise we could just upload it with python & boto3. And while we're here we can use the code to build a manifest for Redshift to read # + import os files = [f for f in os.listdir('./work/ghr/20200406') if os.path.isfile(f)] master_entries = [] title_entries = [] timelog_entries = [] for f in files: if "csv" in f: bucket.upload_file(Filename=f, Key=f'ghr_data/20200406/{f}') if "master" in f: manifest_master.append({"url":f"s3://ghr_data/20200406/{f}", "mandatory":true}) elif "title" in f: manifest_title.append({"url":f"s3://ghr_data/20200406/{f}", "mandatory":true}) elif "timelog" in f: manifest_timelog.append({"url":f"s3://ghr_data/20200406/{f}", "mandatory":true}) master_manifest = { "entries": master_entries } title_manifest = { "entries": title_entries } timelog_manifest = { "entries": timelog_entries } s3_client = session.client('s3') putresponse = s3_client.put_object( Body=json.dumps(master_manifest), Bucket='ghr_data', Key='20200406/master.manifest' ) putresponse = s3_client.put_object( Body=json.dumps(title_manifest), Bucket='ghr_data', Key='20200406/title.manifest' ) putresponse = s3_client.put_object( Body=json.dumps(timelog_manifest), Bucket='ghr_data', Key='20200406/timelog.manifest' ) # - # # ## copy data to staging table # Here we can run the copy commands using the manifests from S3. First lets add the policies so redshift can access S3. (Side note: Probably whoever is writing this should have access to the Redshift cluster. If not then the user arns should be added so they can modify the red shift roles.) 
def create_redshift_role(session, role_name, policies): """Create role with S3 full access policy""" try: iam_client = session.client('iam') policy_document = json.dumps({ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "redshift.amazonaws.com" }, "Action": "sts:AssumeRole" } ] }) role = iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=policy_document, ) for policy in policies: response = iam_client.attach_role_policy( RoleName=role_name, PolicyArn=policy ) return role except Exception as e: print(e) role_name = 'RedshifttoS3' policies = [ 'arn:aws:iam::aws:policy/AmazonS3FullAccess' ] role = create_redshift_role(session, role_name, policies) # here we upload the files to the tables in Spectrum. #Hopefully the role has the permissions it needs to use S3 # copy public.stage_master from 's3://ghr_data/20200406/master.manifest' iam_role 'arn:aws:iam::10912381340:role/RedshifttoS3' manifest; # copy public.stage_title from 's3://ghr_data/20200406/title.manifest' iam_role 'arn:aws:iam::10912381340:role/RedshifttoS3' manifest; # copy public.stage_timelog from 's3://ghr_data/20200406/timelog.manifest' iam_role 'arn:aws:iam::10912381340:role/RedshifttoS3' manifest; # # ## copy data from staging to posting_20200406 # Note we change the date strings to TIMESTAMP and join all the staging tables together INSERT into public.posting_20200406_stage SELECT m.job_id, m.company, TO_TIMESTAMP(m.post_date, 'YYYY-MM-DD HH24:MI:SS') as post_date, m.salary, m.city, t.title, TO_TIMESTAMP(tl.remove_date, 'YYYY-MM-DD HH24:MI:SS') as remove_date, FROM public.stage_master m LEFT JOIN public.stage_title t USING(job_id) LEFT JOIN public.stage_timelog tl USING (job_id); # Now we need to make sure there are no job duplicates. lets take the job_id with the latest remove_date. Because Redshift uploads in parallel, who knows what file was uploaded when for both the GHR database and this Redshift ingestion? 
It'd be best if there was a timestamp with the master file of when the records was entered in the GHR database. So lets best guess for duplicates with the lastest removal date as the most recent entry. # # + CREATE table public.posting_20200406 DISTSTYLE KEY DISTKEY (job_id) SORTKEY (remove_date) As SELECT job_id, company, post_date, salary, city, title, remove_date FROM ( SELECT *, row_number() over (partition by job_id order by remove_date desc) rno From public.posting_20200406_stage ) where rno = 1; # - # # Task 3) Data update # Now assume we have a history of posting data received from GHR stored in the tables posting_YYYYMMDD, where YYYYMMDD represents the date the data was received, and a full history of the posting data stored as positng_current. Please explain how you would update the posting_current table with new data stored at posting_20210601. # + #Lets assume that we did well on our part that job_id are distinct on posting_20210601 and posting_current #We make a copy because deleting is scary BEGIN; DROP TABLE IF EXISTS public.posting_current_old CREATE TABLE public.posting_current_old AS SELECT * FROM public.posting_current; #for the future we could just truncate the table and insert posting_current #We delete the jobs from posting current that exist in posting_20210601 DELETE FROM public.posting_current WHERE job_id IN( SELECT job_id FROM public.posting_20210601 ) #We insert the posting_20210601 table into public.post_current INSERT INTO public.posting_current SELECT * FROM public.posting_20210601; COMMIT; # - # # Task 4) Data processing # Starting from the posting_current table, build a table in PostgreSQL that contains the counts of the new, active and removed job postings, as well as the average salaries of the new, active and removed job postings for each month (from 2014 to now). Each row should correspond to a single month and should have the columns (month, count_new, count_active, count_removed, salary_new, salary_active, salary_removed). 
# For a given month, a posting is considered new if it was posted during that month. A posting is considered removed if it was removed during that month. A posting is considered active if it was posted in the current or a previous month and stays active through the month in which it is removed. For example, if posting_current had only one posting in it, posted in 2020-01 and removed in 2020-05, with a salary of 100K, the final aggregated table would look like: # # + BEGIN; # Here we create a new post view for the following fields: # count_new = posting date month count of job ids # salary_new = posting date month SUM of salaries CREATE MATERIALIZED VIEW public.new_post_mv AS SELECT TOCHAR(DATE_TRUNC('month', posting_date),'YYYY-MM') AS month, COUNT(job_id) AS count_new, SUM(salary) AS salary_new FROM public.posting_current GROUP BY month; # Here we create a removed post view for the following fields: # count_remove = posting removal date count of job ids # salary_remove = posting removal date SUM of salaries CREATE MATERIALIZED VIEW public.remove_post_mv AS SELECT TO_CHAR(DATE_TRUNC('month', remove_date),'YYYY-MM') AS month, COUNT(job_id) AS count_removed, SUM(salary) AS salary_removed FROM public.posting_current GROUP BY month; # Here we create a view for the following fields: # count_active = all dates from posting date to (including) posting removal month count of job ids # salary_active = all dates from posting date to (including) posting removal month SUM of salaries # we use generate_series to create dates between the times span of post_date and remove_date\ # Note that we may get nulls in the remove_date because it could still be a current post. 
# We could fill the value with NOW(), for this exercise we will used the last date CREATE MATERIALIZED VIEW public.active_post_mv AS SELECT TO_CHAR(generate_series(DATE_TRUNC('month',post_date), DATE_TRUNC('month', COALESCE(remove_date,'2020-06-01T00:00:00Z'::TIMESTAMP), '1 month'::interval), 'YYYY-MM') AS month, COUNT(job_id) AS count_active, SUM(salary) AS salary_active FROM public.posting_current GROUP BY month; # Here we create a view for the following fields: # month = all the dates from 2014 to 2020 (NOW() could be used to make it current) CREATE MATERIALIZED VIEW public.post_dates_mv AS SELECT TO_CHAR(generate_series,'YYYY-MM') as month FROM generate_series('2014-01-01T00:00:00Z'::TIMESTAMP, '2020-06-01T00:00:00Z'::TIMESTAMP, '1 month'::interval); # Here we join all the tables on the months from public.post_dates_mv # We watch out for null for months where there is no data CREATE TABLE public.post_counts AS SELECT pd.month, COALESCE(np.count_new,0) AS count_new, COALESCE(ap.count_active,0) AS count_active, COALESCE(rp.count_remove,0) AS count_remove, COALESCE(np.salary_new,0) AS salary_new, COALESCE(ap.salary_active,0) AS salary_active, COALESCE(rp.salary_remove,0) AS salary_remove, FROM public.post_dates_mv pd LEFT JOIN public.new_post_mv np USING(month) LEFT JOIN public.remove_post_mv rp USING(month) LEFT JOIN public.active_post_mv ap USING(month) ORDER BY month; COMMIT; # - # # A different approach to the problem # # Just a few thoughts on a different pipeline we could use to achieve the same goal. # 1. We could use a lambda to download the files to S3 directly by including an SFTP package. Then we could trigger it with an Eventbridge chron for once a week. Or we could mount an S3 bucket with FUSE on the EC2. 
# ```python # import paramiko # def lambda_handler(event, context): # k = paramiko.RSAKey.from_private_key_file("~/.ssh/id-rsa-revelio-new") # c = paramiko.SSHClient() # c.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # print("connecting") # c.connect( hostname = "ghr-server", username = "client-revelio", pkey = k ) # print("connected") # #we can iterate through the files # commands = [ "cp file1 s3://bucket", "cp file1 s3://bucket" ] # for command in commands: # print "Executing {}".format( command ) # stdin , stdout, stderr = c.exec_command(command) # print stdout.read() # print( "Errors") # print stderr.read() # c.close() # ``` # 2. Since its all in the S3, we can use Glue to help clean/verify data as an in between before SQL ingestion. We can also use Glue to change the data to parquet files so its faster to read into Athena and Redshift. # # 3. Once we dump everything into an S3 bucket, we could use Athena instead of Redshift. Redshift seems like overkill for this amount of data so unless there is already a Redshift cluster running and you want to squeeze this data in there, its more cost effective to use Athena. We could use a lambda like below to be triggered by the previous lambda. # ```python # import time # import boto3 # # #this only creates the table... we can add onto the query or create a ddl file and read it. # query = """CREATE TABLE public.posting_20200406 # ( # job_id BIGINT, # company VARCHAR(200), # post_date TIMESTAMP, # salary INT, # city VARCHAR(60), # title VARCHAR(70), # remove_date TIMESTAMP # ); # """ # DATABASE = 'GHR' # output='s3://ghr_data/queries' # def lambda_handler(event, context): # client = boto3.client('athena') # # Execution # response = client.start_query_execution( # QueryString=query, # QueryExecutionContext={ # 'Database': DATABASE # }, # ResultConfiguration={ # 'OutputLocation': output, # } # ) # return response # # ```
Data-Ingestion-Redshift/Revelio-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: ani # --- # # 0. Load results and tools # # * Get paths to RJMC output, import bayes_implicit_solvent, load and join the outputs... from tree_rjmc_w_elements import * import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + npz = '/Users/joshuafass/Documents/GitHub/bayes-implicit-solvent/bayes_implicit_solvent/rjmc_experiments/tree_rjmc_from_elemental_n_iter=5000_ll=student-t_job_id=1274.npz' pkl = '/Users/joshuafass/Documents/GitHub/bayes-implicit-solvent/bayes_implicit_solvent/rjmc_experiments/tree_rjmc_from_elemental_n_iter=5000_ll=student-t_job_id=1274.pkl' continued_npz = '/Users/joshuafass/Documents/GitHub/bayes-implicit-solvent/bayes_implicit_solvent/rjmc_experiments/tree_rjmc_from_elemental_n_iter=5000_ll=student-t_job_id=992_continued_.npz' continued_pkl = '/Users/joshuafass/Documents/GitHub/bayes-implicit-solvent/bayes_implicit_solvent/rjmc_experiments/tree_rjmc_from_elemental_n_iter=5000_ll=student-t_job_id=992_continued_.pkl' # - result0 = np.load(npz) result1 = np.load(continued_npz) print(result0['predictions'].shape, result1['predictions'].shape) from pickle import load with open(pkl, 'rb') as f: tree_traj0 = load(f) with open(continued_pkl, 'rb') as f: tree_traj1 = load(f) tree_traj = tree_traj0[1:] + tree_traj1[1:] print(len(tree_traj)) # ## 0.1. Form prediction traj prediction_traj = np.vstack( [result0['predictions'], result1['predictions'], ]) prediction_traj.shape # ## 0.2. Form RMSE traj get_rmse_in_kcal_per_mol(prediction_traj[0]) rmse_traj = list(map(get_rmse_in_kcal_per_mol, prediction_traj)) # # 0.3. Form within_model_trajs # # 0.3.1. Split by element # ## 0.4. Form n_types_traj tree = tree_traj[0] n_types_traj = [tree.number_of_nodes for tree in tree_traj] # # 1. 
# Exploratory / diagnostic plots

# Best fixed-dimension RMSE, used as a reference line in the plots below.
min_from_fixed_dimension_sampling = 1.6344935894012451
# Cross-model (RJMC) proposals are attempted every 5 within-model MCMC steps.
n_steps_per_cm_proposal = 5
x = np.arange(len(rmse_traj)) * n_steps_per_cm_proposal

# +
ax = plt.subplot(2,1,1)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot(x, rmse_traj, label='variable-dimension MCMC')
#plt.hlines(2.62, 0, x[-1], linestyles='dashed', color='grey', label='OBC2')
plt.hlines(min_from_fixed_dimension_sampling, 0, x[-1], linestyles='dotted', color='darkblue', label='best from fixed-dimension MCMC')
plt.ylim(1.2,)
plt.legend(loc='best')
plt.ylabel('RMSE (kcal/mol)')
plt.xlabel('iterations of variable-dimension MCMC\n(one cross-model proposal per 5 within-model steps)')
# -

len(n_types_traj)

plt.plot(x, n_types_traj)

# +
# NOTE(review): this figure cell references type_count_traj, node_to_ind,
# ind_sub_trajs, r_sub_trajs and default_r, all of which are defined in LATER
# cells -- the notebook was evidently executed out of order. Run the definition
# cells below first before re-executing this one.
def clean_ax(ax):
    # Strip the top/right spines for a cleaner look; returns the axes.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    return ax

plt.figure(figsize=(10,4))

# 1. RMSE
ax = clean_ax(plt.subplot(1,3,1))
plt.plot(x, rmse_traj, label='rjmc')
plt.title('(a) model fit')
#plt.hlines(2.62, 0, x[-1], linestyles='dashed', color='grey', label='OBC2')
plt.hlines(min_from_fixed_dimension_sampling, 0, x[-1], linestyles='dotted', color='darkblue', label='best from sampling with\nfixed obc2 types')
plt.ylim(1.2,)
#plt.yticks(np.arange(1.2, 2.5,0.2))
plt.legend(loc='best')
plt.ylabel('RMSE (kcal/mol)')
plt.xlabel('iterations')

# 2. oxygen radii
#plt.title(parent_type)
ax = clean_ax(plt.subplot(1,3,2))
plt.hlines(default_r, 0, ind_sub_trajs[-1][-1] * n_steps_per_cm_proposal, linestyles='--', label='mbondi')
# One line per oxygen sub-type; each constant-dimension segment gets its own
# color gradient.
for i in range(len(ind_sub_trajs)):
    n_dim = r_sub_trajs[i].shape[1]
    colors = sns.color_palette("GnBu_d", n_dim)
    for j in range(n_dim):
        label = None
        if i + j == 0:
            label = 'rjmc'
        plt.plot(ind_sub_trajs[i]*n_steps_per_cm_proposal, r_sub_trajs[i][:,j], color=colors[j], label=label)
    #plt.scatter([ind_sub_trajs[i][0]*n_steps_per_cm_proposal]*n_dim, r_sub_trajs[i][0], color='grey')
    #plt.scatter([ind_sub_trajs[i][-1]*n_steps_per_cm_proposal]*n_dim, r_sub_trajs[i][-1], color='grey')

# TODO: include dot at split / merge points
# TODO: include a little connector when there are splits / merges

#plt.xlabel('iteration')
plt.legend()
plt.ylabel('oxygen sub-type radii (Å)')
plt.xlabel('iterations')
plt.title('(b) continuous parameter subset')

# 3. n_types
ax = clean_ax(plt.subplot(1,3,3))
plt.plot(x, n_types_traj, label='all')
plt.plot(x, type_count_traj[:,node_to_ind['[#8]']], label='oxygen')
#plt.yticks(np.arange(11,23,2))
plt.ylabel('# types')
plt.legend()
plt.title('(c) number of types')

plt.tight_layout()
plt.xlabel('iterations')#\n(5 within-model steps per cross-model step)')
plt.savefig('rjmc_pilot_figure.png', dpi=300)
# -

tree = tree_traj[-1]

tree

def get_parent_element(tree, smirks):
    # Walk up the typing tree until we hit the direct child of the wildcard
    # root '*' -- i.e. the elemental ancestor of this SMIRKS type.
    if smirks == '*':
        return '*'
    while tree.get_parent_type(smirks) != '*':
        smirks = tree.get_parent_type(smirks)
    return smirks

for s in tree.nodes:
    print(s,'\t', get_parent_element(tree, s))

initial_tree = tree_traj[0]

initial_tree.nodes

# Map each initial (elemental) node to a stable integer index.
node_to_ind = {}
for i in range(len(initial_tree.nodes)):
    node_to_ind[initial_tree.nodes[i]] = i

node_to_ind

def get_type_counts(tree):
    # Count, per elemental ancestor, how many sub-types the tree currently has.
    counts = np.zeros((len(node_to_ind)), dtype=int)
    for s in tree.nodes:
        counts[node_to_ind[get_parent_element(tree, s)]] += 1
    return counts

get_type_counts(tree_traj[-1])

# Per-iteration count of sub-types under each elemental ancestor.
type_count_traj = []
for tree in tree_traj:
    type_count_traj.append(get_type_counts(tree))
type_count_traj = np.array(type_count_traj)

initial_tree.nodes

np.bincount(type_count_traj[:,2])

for i in range(1, len(initial_tree.nodes)):
    if len(set(type_count_traj[:,i])) == 1:
        print("didn't elaborate on type ", initial_tree.nodes[i])
    else:
        plt.plot(type_count_traj[:,i], label=initial_tree.nodes[i])
plt.legend(loc='best')
plt.ylabel('# of sub-types')

initial_tree.get_radius('*')

def get_sub_parameters(tree, parent_type='[#8]'):
    # Collect (radius, scale) for every node descended from parent_type.
    # NOTE(review): radii are divided by unit.angstrom, so values are in Å --
    # `unit` is presumed to come from the star-import; confirm.
    radii = []
    scales = []
    for s in tree.nodes:
        if get_parent_element(tree, s) == parent_type:
            radii.append(tree.get_radius(s) / unit.angstrom)
            scales.append(tree.get_scale_factor(s))
    return radii, scales

def get_sub_trajs(tree_traj, parent_type='[#8]'):
    # Split the per-iteration (radii, scales) trajectory into maximal segments
    # of constant dimension (i.e. between birth/death moves), so each segment
    # can be stacked into a rectangular array.
    param_traj = [get_sub_parameters(t, parent_type) for t in tree_traj]
    r_traj = [r for (r,s) in param_traj]
    s_traj = [s for (r,s) in param_traj]

    ind_sub_trajs = []
    r_sub_trajs = []
    s_sub_trajs = []

    r = r_traj[0]
    s = s_traj[0]
    current_ind_sub_traj = [0]
    current_r_sub_traj = [r]
    current_s_sub_traj = [s]
    current_dim = len(r)

    for i in range(1,len(param_traj)):
        r = r_traj[i]
        s = s_traj[i]
        if len(r) == current_dim:
            current_ind_sub_traj.append(i)
            current_r_sub_traj.append(r)
            current_s_sub_traj.append(s)
        else:
            # Dimension changed: close out the current segment and start anew.
            ind_sub_trajs.append(np.array(current_ind_sub_traj))
            r_sub_trajs.append(np.array(current_r_sub_traj))
            s_sub_trajs.append(np.array(current_s_sub_traj))
            current_ind_sub_traj = [i]
            current_r_sub_traj = [r]
            current_s_sub_traj = [s]
            current_dim = len(r)

    # Flush the final segment.
    ind_sub_trajs.append(np.array(current_ind_sub_traj))
    r_sub_trajs.append(np.array(current_r_sub_traj))
    s_sub_trajs.append(np.array(current_s_sub_traj))

    return ind_sub_trajs, r_sub_trajs, s_sub_trajs

get_sub_parameters(tree_traj[-1], '[#16]')

parent_type = '[#8]'
ind_sub_trajs, r_sub_trajs, s_sub_trajs = get_sub_trajs(tree_traj, parent_type)
# NOTE(review): mbondi_model is presumed to come from the star-import; confirm.
default_r = mbondi_model.get_radius(parent_type) / unit.angstrom

len(r_sub_trajs)

# NOTE(review): `sub_trajs` is never defined anywhere in this notebook -- this
# line raises NameError; it presumably meant r_sub_trajs[0].shape.
sub_trajs[0].shape

import seaborn as sns
colors = sns.color_palette('viridis')
plt.plot(type_count_traj[:,node_to_ind[parent_type]]) change_points = np.where(np.diff(type_count_traj[:,node_to_ind[parent_type]]) != 0)[0] change_points [tree_traj[0]] + [tree_traj[i+1] for i in change_points] plt.title(parent_type) plt.hlines(default_r, 0, ind_sub_trajs[-1][-1] * n_steps_per_cm_proposal, linestyles='--') for i in range(len(ind_sub_trajs)): n_dim = r_sub_trajs[i].shape[1] colors = sns.color_palette("GnBu_d", n_dim) for j in range(n_dim): plt.plot(ind_sub_trajs[i]*n_steps_per_cm_proposal, r_sub_trajs[i][:,j], color=colors[j]) plt.scatter([ind_sub_trajs[i][0]*n_steps_per_cm_proposal]*n_dim, r_sub_trajs[i][0], color='grey') plt.scatter([ind_sub_trajs[i][-1]*n_steps_per_cm_proposal]*n_dim, r_sub_trajs[i][-1], color='grey') # TODO: include dot at split / merge points # TODO: include a little connector when there are splits / merges plt.xlabel('iteration') plt.ylabel('radius (Å)') # + for i in range(len(ind_sub_trajs)): n_dim = r_sub_trajs[i].shape[1] colors = sns.color_palette("GnBu_d", n_dim) for j in range(n_dim): plt.plot(ind_sub_trajs[i], s_sub_trajs[i][:,j], color=colors[j]) # - for i in range(len(ind_sub_trajs)): n_dim = r_sub_trajs[i].shape[1] colors = sns.color_palette("GnBu_d", n_dim) for j in range(n_dim): plt.plot(r_sub_trajs[i][:,j], s_sub_trajs[i][:,j], color=colors[j]) time_colors = sns.color_palette("GnBu_d", len(ind_sub_trajs)) for i in range(len(ind_sub_trajs)): n_dim = r_sub_trajs[i].shape[1] for j in range(n_dim): plt.plot(r_sub_trajs[i][:,j], s_sub_trajs[i][:,j], color=time_colors[i]) change_points = np.diff() x_range = (1,2) y_range = (0.75, 1.5) all_x = np.hstack([r_sub_trajs[i][:,j] for j in range(n_dim)]) all_y = np.hstack([s_sub_trajs[i][:,j] for j in range(n_dim)]) plt.figure() sns.kdeplot(all_x, all_y, bw=0.01, shade=True, color=time_colors[i]) plt.xlim(*x_range) plt.ylim(*y_range) # + time_colors = sns.color_palette("GnBu_d", len(ind_sub_trajs)) x_range = (1,2) y_range = (0.75, 1.5) #x_range = (np.min(r_sub_trajs)-0.1, 
np.max(r_sub_trajs) + 0.1) #shape errors #y_range = (np.min(s_sub_trajs)-0.1, np.max(s_sub_trajs) + 0.1) #shape errors for i in range(len(ind_sub_trajs)): n_snaps = n_dim = r_sub_trajs[i].shape[1] all_x = np.hstack([r_sub_trajs[i][:,j] for j in range(n_dim)]) all_y = np.hstack([s_sub_trajs[i][:,j] for j in range(n_dim)]) plt.figure() #sns.kdeplot(all_x, all_y, bw=0.01, shade=True, color=time_colors[i]) #ax = sns.kdeplot(all_x, all_y, bw=0.01, color=time_colors[i], cut=0, shade=True) #ax.collections[0].set_alpha(0) ax = sns.kdeplot(all_x, all_y, bw=0.01, color=time_colors[i], shade=True, shade_lowest=False) plt.xlim(*x_range) plt.ylim(*y_range) # + time_colors = sns.color_palette("GnBu_d", len(ind_sub_trajs)) x_range = (1,2) y_range = (0.75, 1.5) for i in range(len(ind_sub_trajs))[::3]: n_dim = r_sub_trajs[i].shape[1] all_x = np.hstack([r_sub_trajs[i][:,j] for j in range(n_dim)]) all_y = np.hstack([s_sub_trajs[i][:,j] for j in range(n_dim)]) ax = sns.kdeplot(all_x, all_y, bw=0.01, color=time_colors[i], shade=True, shade_lowest=False) plt.xlim(*x_range) plt.ylim(*y_range) # - [len(s) for s in ind_sub_trajs] len(range(len(ind_sub_trajs))[::3]) plt.scatter(all_x, all_y) sub_trajs[-1].shape sub_trajs[0][-1], sub_trajs[1][0] sub_trajs[-2][-1], sub_trajs[-1][0] # # 2. Plot "RJMC for automatic model selection" # # * Going to need plots of radii vs. iteration # * Putting all gazillion parameter on top of each other will be crowded... Can zoom in on specific subsets of the parameters # # * Going to need to print out some of the discrete trees visited... # # * Going to need to show # of types vs. RJMC iteration # * Comment on but don't necessarily solve the convergence issue # # * Going to need to put a bincount on the right of the # types vs. iteration plot # # 3. Plot Bayesian Model Averaging for uncertainty quantification # # * Using LOO CV... # just eye-balled, since it's clearly not converged... 
equil_step = 400 plt.plot(prediction_traj[equil_step:][:,50]) from scipy.stats import norm from scipy.stats import t as student_t individual_normal_likelihood_terms = norm.logpdf(prediction_traj, loc=expt_means, scale=expt_uncertainties) individual_student_t_likelihood_terms = student_t.logpdf(prediction_traj, loc=expt_means, df=7) individual_normal_likelihood_terms.shape plt.hist(np.max(individual_normal_likelihood_terms, 0)); plt.figure() plt.hist(np.min(individual_normal_likelihood_terms, 0)); plt.hist(np.max(individual_student_t_likelihood_terms, 0)); plt.figure() plt.hist(np.min(individual_student_t_likelihood_terms, 0)); worst_fit = np.argmin(np.max(individual_student_t_likelihood_terms, 0)) print(train_smiles[worst_fit]) plt.plot(prediction_traj[:,worst_fit]) plt.hlines(expt_means[worst_fit], 0, len(prediction_traj)) best_fit = np.argmax(np.min(individual_normal_likelihood_terms, 0)) plt.plot(prediction_traj[:,best_fit]) plt.hlines(expt_means[best_fit], 0, len(prediction_traj)) train_smiles[best_fit] plt.hist(prediction_traj[equil_step:,best_fit] - expt_means[best_fit], bins=50); plt.hist(individual_student_t_likelihood_terms[:,best_fit]); plt.hist(individual_student_t_likelihood_terms[equil_step:].std(0), bins=50); # + from scipy.special import logsumexp def weights_from_log_weights(log_weights): log_Z = logsumexp(log_weights) weights = np.exp(log_weights - log_Z) return weights def ESS(log_weights): """ TODO: look also at the function whose expectation we're trying to approximate... See "Rethinking the effective sample size" https://arxiv.org/abs/1809.04129 and references therein for some inspiration in this direction... 
""" weights = weights_from_log_weights(log_weights) return 1 / np.sum(weights ** 2) # - loo_log_weights = -individual_student_t_likelihood_terms effective_sample_sizes = np.array([ESS(loo_log_weights[equil_step:,i]) for i in range(loo_log_weights.shape[1])]) min(effective_sample_sizes) len(individual_student_t_likelihood_terms[equil_step:]) plt.hist(effective_sample_sizes, bins=50); effective_sample_sizes[0] # # compare weighted LOO-CV CDF with experimental CDF... i = 5 w = weights_from_log_weights(loo_log_weights[equil_step:, i]) preds = kT_to_kcal_mol * prediction_traj[equil_step:, i] plt.hist(preds, bins=50, density=True, label='train', alpha=0.5); bin_heights, _, _ = plt.hist(preds, bins=50, density=True, weights=w, label='validation', alpha=0.5); plt.legend() plt.vlines(expt_means[i] * kT_to_kcal_mol, 0, max(bin_heights)) #plt.yscale('log') inds = np.argsort(preds) cdf_x = np.linspace(-15,5,1000) expt_cdf = norm.cdf(cdf_x, loc=expt_means[i] * kT_to_kcal_mol, scale=expt_uncertainties[i] * kT_to_kcal_mol) plt.plot(cdf_x, expt_cdf) norm.cdf(-1, loc=0) plt.plot(cdf_x, expt_cdf, label='expt cdf') plt.plot(preds[inds],np.linspace(0,1,len(preds)), label='train cdf') plt.plot(preds[inds],np.cumsum(w[inds]), label='loo-validation cdf') plt.xlim(-7,-3) plt.legend() np.interp(x=(0.025, 0.975), xp=np.cumsum(w[inds]), fp=preds[inds]) # TODO: a function that gives me an central 95% (or X%) predictive interval from weighted predictions... 
def get_central_interval(predictions, weights, fraction_included=0.95): eps = (1.0 - fraction_included) / 2 lower_quantile, upper_quantile = eps, 1.0 - eps inds = np.argsort(predictions) return np.interp(x=(lower_quantile, upper_quantile), xp=np.cumsum(weights[inds]), fp=predictions[inds]) interval = get_central_interval(preds, w) interval def get_expt_density_in_interval(interval, expt_mean, expt_unc): expt_cdf = norm.cdf(interval, loc=expt_mean, scale=expt_unc) return expt_cdf[1] - expt_cdf[0] unitd_expt_means = expt_means * kT_to_kcal_mol unitd_expt_uncs = expt_uncertainties * kT_to_kcal_mol get_expt_density_in_interval(interval, unitd_expt_means[i], unitd_expt_uncs[i]) def get_calibration_curve(i, desired_coverage=np.linspace(0,1,100)): actual_coverage = np.nan * np.zeros(len(desired_coverage)) for j in range(len(desired_coverage)): preds = kT_to_kcal_mol * prediction_traj[equil_step:, i] weights = weights_from_log_weights(loo_log_weights[equil_step:, i]) interval = get_central_interval(preds, weights, desired_coverage[j]) actual_coverage[j] = get_expt_density_in_interval(interval, unitd_expt_means[i], unitd_expt_uncs[i]) return desired_coverage, actual_coverage i = np.random.randint(0,631) x_calibration, y_calibration = get_calibration_curve(i) from tqdm import tqdm_notebook as tqdm calibration_curves = [] for i in tqdm(range(len(expt_means))): x_calibration, y_calibration = get_calibration_curve(i) calibration_curves.append((x_calibration, y_calibration)) # + plt.figure(figsize=(6,6)) ax = plt.subplot(1,1,1) plt.plot(np.linspace(0,1), np.linspace(0,1), '--') ax.axis('equal') for (x_,y_) in calibration_curves: plt.plot(x_, y_, color='blue', alpha=0.3) plt.xlim(0,1) plt.ylim(0,1) plt.xlabel('desired coverage probability') plt.ylabel('actual coverage probability') # + plt.figure(figsize=(6,6)) ax = plt.subplot(1,1,1) ax.axis('equal') y_mean = np.mean([y_ for (x_, y_) in calibration_curves], 0) plt.plot(x_, y_mean, label='with rjmc') plt.xlim(0,1) plt.ylim(0,1) 
plt.xlabel('desired coverage probability') plt.ylabel('actual coverage probability') plt.plot(np.linspace(0,1), np.linspace(0,1), '--', color='grey') # - # # TODO: repeat for within-model-sampling only # + from glob import glob rwmh_path = '/Users/joshuafass/Desktop/fig3_results/' suffix = '*.npz' rwmh_fnames = glob(rwmh_path + 'rw_mh' + suffix) def get_within_model_prediction_trajs(): results = [] for fname in rwmh_fnames: results.append(np.load(fname)) return [r['prediction_traj'] for r in results] # - within_model_prediction_trajs = get_within_model_prediction_trajs() within_model_prediction_trajs[0].shape # + # these can't be directly compared, because the amount of sampling is so different!!d # - all_within_model_predictions = np.vstack([traj[(int(len(traj)/2)):] for traj in within_model_prediction_trajs[::10]]) all_within_model_predictions.shape wm_loo_log_weights = - student_t.logpdf(all_within_model_predictions, loc=expt_means, df=7) wm_loo_log_weights def get_within_model_calibration_curve(i, desired_coverage=np.linspace(0,1,100)): actual_coverage = np.nan * np.zeros(len(desired_coverage)) for j in range(len(desired_coverage)): preds = kT_to_kcal_mol * all_within_model_predictions[:, i] weights = weights_from_log_weights(wm_loo_log_weights[:, i]) interval = get_central_interval(preds, weights, desired_coverage[j]) actual_coverage[j] = get_expt_density_in_interval(interval, unitd_expt_means[i], unitd_expt_uncs[i]) return desired_coverage, actual_coverage within_model_calibration_curves = [] for i in tqdm(range(len(expt_means))): x_calibration, y_calibration = get_within_model_calibration_curve(i) within_model_calibration_curves.append((x_calibration, y_calibration)) # + plt.figure(figsize=(6,6)) ax = plt.subplot(1,1,1) ax.axis('equal') y_mean = np.mean([y_ for (x_, y_) in calibration_curves], 0) y_mean_wm = np.mean([y_ for (x_, y_) in within_model_calibration_curves], 0) y_mean[-1] = 1 y_mean_wm[-1] = 1 plt.plot(x_, y_mean_wm, label='continuous parameters') 
plt.plot(x_, y_mean, label='continuous parameters + atom-types') plt.xlim(0,1) plt.ylim(0,1) plt.xlabel('desired coverage probability') plt.ylabel('actual coverage probability') plt.legend(title='sampling') plt.plot(np.linspace(0,1), np.linspace(0,1), '--', color='grey') plt.title('') # - # # TODO: include EXP uncertainty! max(bin_heights) expt_means[0] # + # todo: estimate the leave-one-out-CV RMSE n_molecules = loo_log_weights.shape[1] train_mean_preds = [] test_mean_preds = [] for i in range(n_molecules): preds_kcalmol = kT_to_kcal_mol * prediction_traj[equil_step:, i] #expt_kcalmol = kT_to_kcal_mol * expt_means[i] loo_weights = weights_from_log_weights(loo_log_weights[equil_step:, i]) flat_weights = np.ones(len(loo_weights)) / len(loo_weights) train_mean_preds.append(np.dot(flat_weights, preds_kcalmol)) test_mean_preds.append(np.dot(loo_weights, preds_kcalmol)) # - get_rmse_in_kcal_per_mol(np.array(train_mean_preds)) get_rmse_in_kcal_per_mol(np.array(test_mean_preds)) np.array(train_mses).shape plt.hist(np.sqrt(train_mses), alpha=0.5) plt.hist(np.sqrt(test_mses), alpha=0.5) np.mean(np.sqrt(test_mses)) np.mean(np.sqrt(train_mses)) mean_pred = np.mean(prediction_traj[equil_step:], 0) np.sqrt(np.mean((mean_pred - expt_means)**2)) initial_pred = prediction_traj[0] np.sqrt(np.mean((initial_pred - expt_means)**2)) final_pred = prediction_traj[-1] np.sqrt(np.mean((final_pred - expt_means)**2)) prediction_traj[equil_step:, 0].max() # + # pick some random indices to inspect worst_fit = np.argmin(np.max(individual_normal_likelihood_terms, 0)) train_smiles[worst_fit] # - plt.plot(prediction_traj[:,worst_fit]) plt.hlines(expt_means[worst_fit], 0, len(prediction_traj)) mols[worst_fit].smiles # # 4. Analyze pairwise comparisons...
bayes_implicit_solvent/rjmc_experiments/rjmc_pilot_figure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Structured arrays or record arrays are useful when you perform computations, and at the same time you could keep closely related data together. Structured arrays provide efficient storage for compound, heterogeneous data. #NumPy also provides powerful capabilities to create arrays of records, as multiple data types live in one NumPy array. However, one principle in NumPy that still needs to be honored is that the data type in each field (think of this as a column in the records) needs to be homogeneous. # - import numpy as np # + #Imagine that we have several categories of data on a number of students say, name, roll number, and test scores. # - name = ["Alice","Beth","Cathy","Dorothy"] studentId = [1,2,3,4] score = [85.4,90.4,87.66,78.9] # + # There's nothing here that tells us that the three arrays are related; it would be more natural if we could use a single structure to store all of this data. 
#Define the np array with the names of the 'columns' and the data format for each #U10 represents a 10-character Unicode string #i4 is short for int32 (i for int, 4 for 4 bytes) #f8 is shorthand for float64 # - #initialise with 0 and shape is 4 student_data = np.zeros(4, dtype={'names':('name', 'studentId', 'score'), 'formats':('U10', 'i4', 'f8')}) #np.zeros() for a string sets it to an empty string student_data print(student_data.dtype) #Now that we've created an empty container array, we can fill the array with our lists of values student_data['name'] = name student_data['studentId'] = studentId student_data['score'] = score print(student_data) #The handy thing with structured arrays is that you can now refer to values either by index or by name student_data['name'] student_data['studentId'] student_data['score'] #If you index student_data at position 1 you get a structure: student_data[1] #Get the name attribute from the last row student_data[-1]['name'] # + #Get names where score is above 85 # - student_data[student_data['score'] > 85]['name']
IndexingWithStringsStructuredArrays_m02_demo04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![HPE DEV Logo](Pictures/hpe-dev-logo.png) # # Version 0.14 # # # Thank you for visiting the Hack Shack # # Powered by [HPE DEV Team](https://hpedev.io) # # # Conclusion # # In this workshop, you obtained a practical experience with different methods to use Ansible for Redfish management tasks in an HPE Synergy and HPE OneView environment. The first method was with the Ansible built-in `uri` method and the second with the HPE iLOrest library. Then, you had an overview of the Ansible Galaxy collection and its three Redfish modules. The last method used the Ansible `Shell` module calling the [HPE iLOrest tool](http://hpe.com/info/resttool). # # At the end of notebooks 2,3 and 4, you validated the portability of the code against two different types of servers (Synergy and rack-mount). # # **Congratulations!** # # # Final step # # ### Please take a moment to fill out our [survey]( {{SURVEYURL}} ) so we can ensure future workshops meet your needs. # # ### And make sure you use `File->Log Out` to terminate your workshop session. # # # Thank you # ![ThankYou](Pictures/grommet.JPG)
Redfish/Current/RedfishAnsible/6-Conclusion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Working in COVID Care map # # This notebook is meant to give more general information that will help you navigate the notebooks and use the patterns and practices of the project. # ## Working with the `covidcaremap` package # # The CovidCareMap.org data science environment, which these notebooks run inside, # consists of a Docker container running Jupyter with all dependenices pre-installed. It also includes a python module `covidcaremap` which hosts reusable code that can be used in any notebook. This notebook will give some examples of how we use the `covidcaremap` package in the project. # ### Data paths from covidcaremap.data import (PUBLISHED_DATA_DIR, PROCESSED_DATA_DIR, EXTERNAL_DATA_DIR) # These three methods describe the data paths where our data files live, as described in the [data folder README](https://github.com/covidcaremap/covid19-healthsystemcapacity/tree/master/data). If you want to see what's data files are available, you can use `!ls` on these paths: # !ls $PUBLISHED_DATA_DIR # We also have methods to return the correct file path given the file name: # + from covidcaremap.data import (published_data_path, processed_data_path, external_data_path) published_data_path('us_healthcare_capacity-county-CovidCareMap.csv') # - # ### Loading realtime data # # There's also some functionaliy to pull information from updated files or live APIs in the package. For example, we use the `covidcaremap.cases` subpackage to pull in USAFacts case data per county with their latest updated data. 
this example pulls from the data to get the most recent number of cases (as updated by USAFacts) for Philadelphia County: # + from covidcaremap.cases import get_usafacts_cases_by_county # Downloads the file and converts to a dataframe df = get_usafacts_cases_by_county() df[(df['County Name'] == 'Philadelphia County')].iloc[:,-1].values[0] # - # ## GeoPandas # # We use [GeoPandas](https://geopandas.org/) to read and write [GeoJSON](https://geojson.org/) files and perform spatial operations. This includes joining facility level date into thier appropriate region utilizing a spatial join. # # You can treat a GeoDataFrame just like a DataFrame for the most part.
notebooks/00_getting_started/00_Working_in_COVID_Care_Map.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os

import pandas as pd

# Load the raw wine-quality data and derive a min/max "schema" from it:
# only the per-column minimum and maximum rows are written out as JSON.
df = pd.read_csv("../data_given/winequality.csv")
df.describe().loc[["min", "max"]].to_json("schema_in.json")


class NotInRangeError(Exception):
    """Raised when a value falls outside the range allowed by the schema."""

    def __init__(self, message="value not in range"):
        self.message = message
        super().__init__(self.message)


# Demonstrate what the exception looks like when raised.
raise NotInRangeError()
notebooks/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Plotting HYCOM Global Ocean Forecast Data
#
# _Note: this notebook requires python3._
#
# This notebook demonstrates a simple Planet OS API use case using the [HYCOM Global Ocean Forecast dataset](http://data.planetos.com/datasets/hycom_glbu0.08_91.1_global_0.08d:hycom-hybrid-coordinate-ocean-model-global-ocean-forecast?utm_source=github&utm_medium=notebook&utm_campaign=hycom-api-notebook).
#
# API documentation is available at http://docs.planetos.com. If you have questions or comments, join the [Planet OS Slack community](http://slack.planetos.com) to chat with our development team.
#
# For general information on usage of IPython/Jupyter and Matplotlib, please refer to their corresponding documentation. https://ipython.org/ and http://matplotlib.org/

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import dateutil.parser
import datetime
from urllib.request import urlopen, Request
import simplejson as json


def extract_reference_time(API_data_loc):
    """Return the reference time of the most complete forecast.

    The most complete forecast corresponds to the earliest reference
    time present across all entries of the API response.
    """
    reftimes = set()
    for entry in API_data_loc['entries']:
        reftimes.add(entry['axes']['reftime'])
    # Take the chronological minimum over *all* reference times.
    # (The original only compared the first two values it happened to see,
    # which is wrong whenever more than two reference times are present.)
    return min(reftimes, key=dateutil.parser.parse)


# Let's choose a location near Oahu, Hawaii...

location = 'Hawaii Oahu'

if location == 'Est':
    longitude = 24. + 45./60
    latitude = 59 + 25/60.
elif location == 'Au':
    longitude = 149. + 7./60
    latitude = -35. - 18./60
elif location == "Hawaii Oahu":
    latitude = 21.205
    longitude = -158.35
elif location == 'Somewhere':
    # Bug fix: these two lines used '==' (comparison) instead of '='
    # (assignment), which raised a NameError on first use and left the
    # coordinates unset for this branch.
    longitude = -20.
    latitude = 10.
# **Important!** You'll need to replace `apikey` below with your actual Planet OS API key, which you'll find on the [Planet OS account settings page](http://data.planetos.com/account/settings/?utm_source=github&utm_medium=notebook&utm_campaign=hycom-api-notebook). apikey = "YOUR-API-KEY-GOES-HERE" API_url = "http://api.planetos.com/v1/datasets/hycom_glbu0.08_91.2_global_0.08d/point?lon={0}&lat={1}&count=10000&verbose=false&apikey={2}".format(longitude,latitude,apikey) request = Request(API_url) response = urlopen(request) API_data = json.loads(response.read()) # Show the available variables and their contexts... varlist = [] print("{0:<50} {1}".format("Variable","Context")) print() for k,v in set([(j,i['context']) for i in API_data['entries'] for j in i['data'].keys()]): print("{0:<50} {1}".format(k,v)) varlist.append(k) reftime = extract_reference_time(API_data) # Now let's extract data for all variables and create a different plot for each... vardict = {} for i in varlist: vardict['time_'+i]=[] vardict['data_'+i]=[] for i in API_data['entries']: #print(i['context']) reftime = extract_reference_time(API_data) for j in i['data']: if reftime == i['axes']['reftime']: if j != 'surf_el': if i['axes']['z'] < 1.: vardict['data_'+j].append(i['data'][j]) vardict['time_'+j].append(dateutil.parser.parse(i['axes']['time'])) else: vardict['data_'+j].append(i['data'][j]) vardict['time_'+j].append(dateutil.parser.parse(i['axes']['time'])) for i in varlist: fig = plt.figure(figsize=(15,3)) plt.title(i) ax = fig.add_subplot(111) plt.plot(vardict['time_'+i],vardict['data_'+i],color='r') ax.set_ylabel(i) print(API_data['entries'][0]['data']) print(API_data['entries'][0]['axes']) print(API_data['entries'][0]['context'])
api-examples/hycom-api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [RoboND] # language: python # name: Python [RoboND] # --- # + import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline np.random.seed(42) coffee_full = pd.read_csv('coffee-dataset.csv') coffee_red = coffee_full.sample(200) # + diff = [] for _ in range(10000): bootsample = coffee_red.sample(200, replace=True) mean_coff = bootsample[bootsample['drinks_coffee'] == True]['height'].mean() mean_nocoff = bootsample[bootsample['drinks_coffee'] == False]['height'].mean() diff.append(mean_coff - mean_nocoff) np.percentile(diff, 2.5), np.percentile(diff, 97.5) # + import statsmodels.stats.api as sms X1 = coffee_red[coffee_red['drinks_coffee'] == True]['height'] X2 = coffee_red[coffee_red['drinks_coffee'] == False]['height'] cm = sms.CompareMeans(sms.DescrStatsW(X1), sms.DescrStatsW(X2)) cm.tconfint_diff(usevar='unequal') # -
Practical_Statistics/Practical_Statistics/09_Confidence Intervals/Traditional Confidence Intervals Screencast.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analysis II # # In this thesis that I am preparing is another method for analyzing the importance of hyperparameter. In this method, my new aproach is considering the below assumption: # ##### Assumtion # * Why not the Data Base of `Hyperparameter` that will be turned in by the BD team be itself considered as a Individual Dataset - This approach is called as building a `Surrogate Model` # * The dataset will consist of MetaData, Algorithm, Leaderboard(collected from H2O), Hyperparameter for the respective Algorithms # All arranged as a columns with it's corresponding respective values in it. Now, this I am going o use it to build a model around it. # Below are the columns, which are dummy as far now, as I have built it individually according to my assumptions. The DB team is yet to turn in it's DataBase, I am waiting for the good model amongst what they turn in to be considered for my thesis - to build a model. 
# # The columns that I have taken till now are as below with it's description: # # * runid - Run id for a dataset and its iteration # * dataset - The dataset and its link from where it's being fetched # * problem - if it is a classification/regression # * runtime - runtime for which H2O was run on this dataset # * columns - number of columns in the dataset # * rows - number rows in the dataset # * tags - What genre it belongs to # * algo - algorithm applied on this dataset by H2O # * model_id - the model id generated # * ntree - value for n_estimator # * max_depth - value for the hyperaparameter # * learn_rate - value for the hyperaparameter # * mean_residual_deviance - value for the hyperaparameter # * rmse - value for the metric # * mse - value for the metric # * mae - value for the metric # * rmsle - value for the metric # # # ![image.png](attachment:image.png) import h2o from h2o.automl import H2OAutoML import pandas as pd from sklearn.model_selection import train_test_split from xgboost import XGBClassifier from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedKFold import matplotlib.pyplot as plt h2o.init(min_mem_size_GB=2) df = h2o.import_file('./hyperparamter_db_test.csv') # This dataset is created on own, and the data are not true. This is just t test the thesis and the hypothesis to check if this will give us a good analysis results df.head() # I am running for a simple AutoML runtime of 100 seconds. The results will be better if I run for more time. 
aml = H2OAutoML(max_runtime_secs=100) X=df.columns # Below are the predictors we have considered X = ['runid', 'dataset', 'problem', 'runtime', 'columns', 'rows', 'tags', 'model_id', 'ntree', 'max_depth', 'learn_rate', 'mean_residual_deviance', 'rmse', 'mse', 'mae', 'rmsle'] y='algo' aml.train(x=X,y=y,training_frame=df) # On running H2O AutoML for 100 seconds, for this simple Surrogate Dataset, we are getting 68 models generated by H2O aml_leaderboard_df=aml.leaderboard.as_data_frame() aml_leaderboard_df model_set=aml_leaderboard_df['model_id'] mod_best=h2o.get_model(model_set[0]) mod_best # ##### Now, what we perform here is important and is going to give us a definite result for our analysis. Something called Variable importance is brought into the picture to analysis our thesis. # # # ##### `Variable importance is an indication of which predictors are most useful for predicting the response variable. Various measures of variable importance have been proposed in the data mining literature. The most important variables might not be the ones near the top of the tree.` # # # Below is something that H2O turns in when one of the models except `StackedEnsemble` is called # mod_best.varimp_plot() # ### Conclusion # # Thus, from the variable importance plot, we can conclude, that if our Dataset of hyperparameter itself can be used to build a `Surrogate Model` - using H2O we can try to find the variable importance plot which in turn becomes `Hyperparameter` Imporatnce plot. # # Thus from the graph if we notice, we will be able to say which hyperparameter is more important, compared to the rest. This is calculated based on `Standard coefficient` = `Coeeficient/Standard Deviance` # # Thus, the features if selected model except - RandomForrest/XGBoost models, are ranked based on their Standard coefficient. While in the case of tree based model, it will be ranked based on it's entropy. 
Here, from the above graph, we can draw a conclusion based on what H2O turns in:
Phase II/Surrogate Model using VarrImp Plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A simple analysis of the ANTARES neutrino data
#
# This example directly uses pyvo. For the full KM3NeT course see
# https://edu.km3net.de/lesson/a-simple-analysis-in-python/

import pyvo as vo
import matplotlib.pyplot as plt

# The data is directly available from the KM3NeT VO server, which is searchable with ADQL (SQL-like query language)

service = vo.dal.TAPService("http://vo.km3net.de/__system__/tap/run/tap")
resultset = service.search("SELECT * FROM ant20_01.main")

# ## distribution of number of hits

# +
# BUG FIX: the original walked the result set with a redundant nested
# while/for pair and a manually incremented index; a single pass over the
# rows collects exactly the same 'nhit' values without the fragile
# index bookkeeping.
nhits_array = [row['nhit'] for row in resultset]

plt.hist(nhits_array, bins=60 , range=[0,300] , label = "all_events")
plt.show()
# -

# ## choose the best reconstructed events (angular_error<=0.2)

# Keep only the events whose angular-error estimate ('beta') is at most 0.2.
nhits_array_gr = [row['nhit'] for row in resultset if row['beta'] <= 0.2]

plt.hist(nhits_array_gr, bins=60 , range=[0,300] , label = "angular_error<=0.2")
plt.legend()
plt.show()

# ## Create a RA vs declination scatter plot

# +
# Right ascension and declination, one value per reconstructed event.
ra_array = [row['ra'] for row in resultset]
decl_array = [row['decl'] for row in resultset]

plt.scatter( ra_array , decl_array , s=5 )
plt.show()
# -
neutrinos/pyvo_simple_ANTARES_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nemuelpalomo/Python-Crash-Course/blob/main/Chapter_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="SqTRfPnKDaX2"
# ##5-1 Conditional Tests

# + colab={"base_uri": "https://localhost:8080/"} id="31G_IPEO_Tuw" outputId="c3b70cf9-aa6f-47db-e6f2-cf93e7a03424"
def check(claim, outcome):
    # Print the English claim, then the boolean result of the test.
    print(claim)
    print(outcome)

# Cases expected to evaluate to True
car = "ferrari"
check("Is the car a ferrari? I think it is True.\n", car == "ferrari")

car2 = "bmw"
check("\nThe car is not a 'bmw', Are you sure? I think it is True.\n", car2 == "bmw")

num1 = 52
num2 = 32+20
check("\nAre both numbers Equal? True", num1 == num2)

# Cases expected to evaluate to False
car3 = "lamborghini"
check("\nThe car #3 is a Honda.", car3 == "honda")

car4 = "toyota"
check("\nThe car #4 is not a ford, its a toyota. So the answer is False", car4 == "ford")

num3 = 21
num4 = 22
check("\nnum3 and num4 are equal", num3 == num4)

# + [markdown] id="zwsAfPy-DixL"
# ##5-2 More Conditional Tests

# + colab={"base_uri": "https://localhost:8080/"} id="uf2L6Mu3Dnrd" outputId="f29e5601-6a62-471b-c55b-8ff59742f280"
string1 = "hahaha"
string2 = "HAHAHA"
# Case matters for ==; lower-casing string2 first makes the strings equal.
case_sensitive_match = string1 == string2
case_insensitive_match = string1 == string2.lower()
print(f"String 1 and String 2 are not the same because of upper case and lower case so it is {case_sensitive_match}.")
print(f"\n Now that we set string2 to .lower() which makes the variable all lower case the result is now {case_insensitive_match}.")

# + colab={"base_uri": "https://localhost:8080/"} id="kN2N8PR3GqyT" outputId="3fad20c0-9eb5-4024-945a-e252ba9e648a"
number = 52
number_1 = 32
number_2 = 22
equalizer = [52, 32, 22]
print(f"52 >= 32? {number >= number_1}.\n")
print(f"52 <= 22? {number <= number_2}.\n")
print(f"52 = 52? {equalizer[0] == number}.\n")
# The 'and' condition: both comparisons must hold for a True result.
print(f"52 = 52 and 32 = 32? {number == equalizer[0] and equalizer[1] == number_1}.\n")
# The 'or' condition: a single True comparison is enough.
print(f"22 = 22 or 32 >= 52? {equalizer[2] == number_2 or number_1 >= equalizer[1]}.")

# + colab={"base_uri": "https://localhost:8080/"} id="XW8Z2poyKndU" outputId="28c00407-2f4c-440f-8d80-fba474e1b343"
list1 = [1, 2, 3, 4, 5]
# Membership tests: laugh if 1 is present, complain because 6 is absent.
if 1 in list1:
    print("AHAHAHA\n")
if 6 not in list1:
    print("WALA GAGO")

# + [markdown] id="Dj4iPVArPJQ5"
# ##5-3 Alien Colors #1

# + colab={"base_uri": "https://localhost:8080/"} id="uU47_ZNnPNfl" outputId="779582dc-0c66-41d8-96a0-bbe0158a9d5e"
alien_color = 'green'
# Only the matching colour prints anything: the 'green' test passes,
# the 'red' test fails silently.
for shade, prize in (('green', "You just got 5 pts!"), ('red', "You just got 3 pts!")):
    if alien_color == shade:
        print(prize)

# + [markdown] id="1z6SMNFDPvkB"
# ##5-4 Alien Colors #2
#

# + colab={"base_uri": "https://localhost:8080/"} id="lfadENI0Pyel" outputId="8790a3a7-60e4-406d-8055-4064d1c34a05"
# Every alien matches its expected colour, so every reward prints.
alien_color_green = 'green'
alien_color_red = 'red'
alien_color_blue = 'blue'
rewards = [
    (alien_color_green, 'green', "You just got 10pts!"),
    (alien_color_red, 'red', "You just got 5pts!"),
    (alien_color_blue, 'blue', "You just got 3pts!\n"),
]
for actual, expected, reward in rewards:
    if actual == expected:
        print(reward)
#For loop until the green alien is shot.
alien_color = ['red', 'blue', 'green']
# Walk the list and report a hit only for the green alien.
for color in alien_color:
    message = "You just shot the green one!" if color == 'green' else "You missed the green one!"
    print(message)

# + [markdown] id="21XCr50sRh3J"
# ##5-5 Alien Colors #3

# + colab={"base_uri": "https://localhost:8080/"} id="0KXFC1aARlSX" outputId="483e1bfc-db89-4ba9-abab-bc1a2ae49548"
alien_kulay = 'yellow'
# Dictionary dispatch replaces the if/elif ladder; the .get() default
# covers every colour that is neither green nor red.
score_message = {
    'green': "The player earned 5pts",
    'red': "The player earned 15pts",
}
print(score_message.get(alien_kulay, "The player killed the yellow one and earned 10 pts."))

# + colab={"base_uri": "https://localhost:8080/"} id="0WvkagwbSmMr" outputId="4c4f2ca5-f945-4cbd-b20d-582de8810a39"
age = 22
# Ordered upper bounds; the first bracket containing the age wins, and the
# for/else prints the elder message when no bracket matched (age >= 65).
life_stages = [
    (2, "He's a baby."),
    (4, "He's a toddler."),
    (13, "He's a kid."),
    (20, "He's a teenager."),
    (65, "He's an adult."),
]
for upper_bound, description in life_stages:
    if age < upper_bound:
        print(description)
        break
else:
    print("He's an elder!")

# + [markdown] id="iXcR2HChUrzm"
# ##5-7 Favorite Fruit
#

# + colab={"base_uri": "https://localhost:8080/"} id="TDIHVxkQUum3" outputId="48152173-f3c2-4774-c624-3d63e4c65e16"
# Independent membership checks, one message per favourite fruit.
paboritong_prutas = ['saging', 'mansanas', 'ubas']
fruit_messages = [
    ('saging', "Mahilig ka sa saging tropa!\n"),
    ('mansanas', "Abay mahilig ka din sa mansanas!\n"),
    ('ubas', "Tarap nyan lods ubas.\n"),
]
for fruit, message in fruit_messages:
    if fruit in paboritong_prutas:
        print(message)
if 'ponkan' not in paboritong_prutas:
    print("Yots di mahilig sa ponkan... :(")

# + [markdown] id="2ly2-4f1Xhyt"
# ##5-8 Hello Admin

# + colab={"base_uri": "https://localhost:8080/"} id="xS9ZBQHYXrWy" outputId="bc8cadb2-ab93-4b7c-8cfb-f587e0c7c8f8"
usernames = ['Nemuel', 'Rico', 'Liam', "Hem", 'Admin']
for users in usernames:
    # Case-insensitive admin check; everyone else gets a title-cased greeting.
    is_admin = users.lower() == 'admin'
    if is_admin:
        print("Hello Admin! Would you like to see a status report?\n")
    else:
        print(f"Hello {users.title()}, How are you today?")

# + [markdown] id="INdHrA4uYn5G"
# ##5-9 No Users

# + colab={"base_uri": "https://localhost:8080/"} id="i56XmyTfYuT_" outputId="d39e8585-2245-4245-9b21-329b4bf0e508"
emptyusers = []
# The else of a for loop runs when the loop finishes without a break;
# with an empty list that happens immediately.
for users in emptyusers:
    greeting = "Hello Admin!" if users == 'admin' else f"Hello {users}!"
    print(greeting)
else:
    print("We need to find some users!")

# + [markdown] id="pWj1hJjoa8I_"
# ##5-10 Checking Usernames

# + colab={"base_uri": "https://localhost:8080/"} id="MeSbPDyfa_iV" outputId="39206ff8-fb43-42e1-e566-ee8725053d3b"
current_users = ['jologs', 'barkos', 'betlogs', 'tologs', 'jeprox']
new_users = ['burgok', 'jologs', 'awitzz', 'klong', 'jeprox', 0.2]
# Membership is a yes/no question, so two branches cover every case
# (the original's trailing else could never run).
for user in new_users:
    if user in current_users:
        print(f"Isip ka ng bago gamit na yan tropa, '{user}'.\n")
    else:
        print(f"'{user}', Pwede to tropa wala pang nakakagamit :)\n")

# + [markdown] id="Eoss9kQWcwbI"
# ##5-11 Ordinal Numbers

# + colab={"base_uri": "https://localhost:8080/"} id="FcWdH4fkcwKC" outputId="913a45e5-41bd-43b3-a1b7-d8939f6e1fe1"
number_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Lookup table for the irregular ordinals; everything else gets "th".
irregular = {1: "1st", 2: "2nd", 3: "3rd"}
for number in number_list:
    print(irregular.get(number, f"{number}th"))
Chapter_5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ml
#     language: python
#     name: ml
# ---

import pickle
import pandas as pd
import re


def _load_split(name):
    """Load one pickled list of raw emails from the interim data directory."""
    with open(f'../data/interim/{name}.pkl', 'rb') as f:
        return pickle.load(f)


# Training splits first, then validation and test, printing each size.
ham_train = _load_split('ham_train')
spam_train = _load_split('spam_train')

print(len(ham_train))
print(len(spam_train))

ham_val = _load_split('ham_val')
ham_test = _load_split('ham_test')
spam_val = _load_split('spam_val')
spam_test = _load_split('spam_test')

print(len(ham_val))
print(len(ham_test))
print(len(spam_val))
print(len(spam_test))

# Let's take a look at a ham email and a spam email.

print(ham_train[0])

print(spam_train[0])

# Let's clean up the emails, then have another look.

# +
def clean1(email):
    """Blank out header-like lines, angle-bracket tags, MIME boilerplate and
    dotted tokens (hostnames, message ids) in one regex pass."""
    pat = re.compile(r'([-\w]+: .+|<.*|.*>|.*NextPart.*|charset=.*|\w+\.\w+\.?\w*\.?\w*\.?\w*)')
    return pat.sub(' ', email)

def get_words(email, exclude_below=2):
    """Return lower-cased alphabetic tokens of *email* that are longer than
    *exclude_below* characters."""
    return [x.lower() for x in re.findall(r'[A-Za-z]+', email) if len(x) > exclude_below]
# -

print(clean1(ham_train[0]))

# Our cleaning function isn't working perfectly... we could use a built in module.

# +
import email
import email.policy

def clean2(email_):
    """Parse a raw RFC-2822 message string and return its textual content.

    BUG FIX: ``get_content()`` raises on multipart messages, so the original
    version failed on some emails.  Multipart messages are now flattened by
    concatenating their text/* leaf parts instead.
    """
    msg = email.parser.Parser(policy=email.policy.default).parsestr(email_)
    if msg.is_multipart():
        texts = [part.get_content() for part in msg.walk()
                 if part.get_content_maintype() == 'text']
        return '\n'.join(texts)
    return msg.get_content()
# -

msg = clean2(ham_train[0])
print(msg)

# This works better, and multipart messages are now flattened to their text
# parts, so `clean2` no longer fails on them.
#
# Looking at the two samples above, we see that the first line is "From ...", then there are several lines with an attribute, followed by a colon, followed by some details. Let's capture these.
# A raw training email, for eyeballing the header layout.
ham_train[0]

# Three capture groups: the literal "From", the sender address, and a date-like string.
pat_from = re.compile(r'(From)\s+([-@\w\.]+)\s+([a-zA-Z0-9: ]+)')
re.search(pat_from, ham_train[0]).groups()

# The "received" parts span multiple lines, so they will be more difficult to extract.
#
# Let's try to get this info using the email module.

msg = email.message_from_string(ham_train[0], policy=email.policy.default)
msg.is_multipart()

# Dump every header of the sample message.
for header, value in msg.items():
    print(header, ":", value)

# So we can get all of these headers using the `email` module.

msg.get_content_type()

# Grab the first multipart message in the training ham for inspection.
for mail in ham_train:
    if email.message_from_string(mail, policy=email.policy.default).is_multipart():
        multimail = mail
        break

multimail

multimsg = email.message_from_string(multimail, policy=email.policy.default)
# Walk the MIME tree; only non-multipart leaves carry printable content.
for part in multimsg.walk():
    ctype = part.get_content_type()
    print(ctype)
    if 'multipart' not in ctype:
        print(part.get_content())

import sys
sys.path.append('../src/features/')
import build_features as bf

# Word-frequency mapping over the whole training set, built by the project
# helper (presumably word -> count, judging by the Series usage below —
# confirm against build_features.create_corpus).
corpus = bf.create_corpus(lambda x: bf.get_words(bf.clean2(x)), ham_train + spam_train)

word_freq = pd.Series(corpus).sort_values(ascending=False)
word_freq.plot()

# Keep mid-frequency words only; very rare and very common words are dropped.
filt = word_freq.between(6, 2000)
word_freq[filt].plot()

len(word_freq)

len(word_freq[filt])

# Bag-of-words design matrix; label 0 = ham, 1 = spam.
X_train = bf.create_df(ham_train + spam_train, corpus, lower=6, upper=2000)
y_train = pd.Series([0] * len(ham_train) + [1] * len(spam_train))

X_train.head()

X_train.shape

import numpy as np

# Shuffle rows and labels with the same seeded permutation so they stay aligned.
rng = np.random.default_rng(seed=42)
shuf = rng.permutation(np.arange(0, X_train.shape[0]))
shuf

X_train = X_train.iloc[shuf].reset_index(drop=True)
y_train = y_train[shuf].reset_index(drop=True)

X_val = bf.create_df(ham_val + spam_val, corpus, lower=6, upper=2000)
y_val = pd.Series([0] * len(ham_val) + [1] * len(spam_val))

# Same treatment for the validation split.
rng = np.random.default_rng(seed=42)
shuf = rng.permutation(np.arange(0, X_val.shape[0]))
X_val = X_val.iloc[shuf].reset_index(drop=True)
y_val = y_val[shuf].reset_index(drop=True)

# Let's train a quick model as a sanity check.
from sklearn.linear_model import LogisticRegression

# Quick regularized baseline on the bag-of-words features.
clf = LogisticRegression(C=0.3).fit(X_train, y_train)
print("Train score:", clf.score(X_train, y_train))
print("Val score:", clf.score(X_val, y_val))

# Not bad. We might be overfitting slightly. Seems similar to previous results.
#
# Let's try L1 regularization to extract feature importances.

# L1 drives most coefficients to exactly zero, leaving a sparse word set.
clf2 = LogisticRegression(penalty='l1', solver='liblinear', C=0.3).fit(X_train, y_train)
print("Train score:", clf2.score(X_train, y_train))
print("Val score:", clf2.score(X_val, y_val))

# One coefficient per word/column, sorted descending so the most
# spam-indicating words (label 1 = spam) come first.
feature_importance = pd.Series(index=X_train.columns, data=clf2.coef_[0])
feature_importance.sort_values(inplace=True, ascending=False)

# Number of words with a non-zero coefficient.
sum(feature_importance != 0.0)

# So only 131 words are needed to get 96% accuracy (with C=0.15).
# NOTE(review): the cell above actually uses C=0.3 — these numbers look like
# they come from an earlier run with C=0.15; re-run to confirm.
#
# Let's look at what words indicate spam, and which indicate ham.

# Positive coefficients push predictions toward spam...
filt = feature_importance.gt(0.0)
feature_importance[filt][:50]

feature_importance[filt][50:100]

# ...negative coefficients push toward ham.
filt = feature_importance.lt(0.0)
feature_importance[filt][:50]

feature_importance[filt][50:100]

# A few things to notice:
# - the way we've extracted words chops up contractions; OTOH, contractions seem to indicate ham
# - there are a lot of prepositions and pronouns, which I wouldn't think are important
# - spam messages tend to use awkward/formal/impersonal prepositions and pronouns, like "yourself" and "within"
# - somehow, 'gary' indicates ham; it would be better to have a feature "PROPER_NOUN" or "NAME"
# - the phrase "spamassassin" indicates ham, which seems like it won't generalize well
# - stemming might help, since "remove" and "removed" are both of roughly equal importance for detecting spam

# Let's see how word frequency relates to these coefficients.
# Rebuild the word-frequency series and pair each word's corpus frequency
# with its L1-logit coefficient in one DataFrame.
word_freq = pd.Series(corpus).sort_values(ascending=False)
filt = word_freq.between(6, 2000)
fi_wf = pd.DataFrame({"word_freq": word_freq[filt], 'feature_importance': pd.Series(index=X_train.columns, data=clf2.coef_[0])})
fi_wf.head()

import matplotlib.pyplot as plt

# Scatter of frequency vs. coefficient for the words the L1 model kept.
plt.figure()
filt = fi_wf['feature_importance'] != 0
plt.scatter(fi_wf[filt]['word_freq'], fi_wf[filt]['feature_importance'], alpha=0.5)
plt.xlabel("word frequency in corpus")
plt.ylabel("feature importance according to logit with L1 reg")
plt.show()

# Word frequency and feature importance aren't correlated, but we can see that most of the important features (words) have frequency less than 1000.
#
# What if we remove coefficients with small absolute value.

# Same plot, restricted to strongly weighted words only.
plt.figure()
filt = abs(fi_wf['feature_importance']) > 0.9
plt.scatter(fi_wf[filt]['word_freq'], fi_wf[filt]['feature_importance'], alpha=0.5)
plt.xlabel("word frequency in corpus")
plt.ylabel("feature importance according to logit with L1 reg")
plt.show()

# The trend is less pronounced for features with weight outside of (-0.9, 0.9)
#
# What are the highest weighted words?
# Strongly weighted words, ordered by how common they are in the corpus.
filt = abs(fi_wf['feature_importance']) > 0.9
fi_wf[filt].sort_values(by='word_freq')

# Next step: automate feature creation so that we can experiment with variations

# ## Testing build_features.py
#
# Let's check how our BagOfWords class works

import importlib
importlib.reload(bf)
from build_features import BagOfWords

# Fit the bag-of-words vocabulary on train, then encode train and validation.
bow = BagOfWords()
bow.fit(ham_train + spam_train)
X_train = bow.transform(ham_train + spam_train)
y_train = pd.Series([0] * len(ham_train) + [1] * len(spam_train))
X_val = bow.transform(ham_val + spam_val)
y_val = pd.Series([0] * len(ham_val) + [1] * len(spam_val))

# Shuffle features and labels with the same seeded permutation to keep them aligned.
rng = np.random.default_rng(seed=42)
shuf = rng.permutation(X_train.shape[0])
X_train = X_train.iloc[shuf].reset_index(drop=True)
y_train = y_train[shuf].reset_index(drop=True)

rng = np.random.default_rng(seed=42)
shuf = rng.permutation(X_val.shape[0])
X_val = X_val.iloc[shuf].reset_index(drop=True)
y_val = y_val[shuf].reset_index(drop=True)

X_train.shape

# We didn't trim the corpus by word frequency, so our data has more features than before.

# +
clf = LogisticRegression().fit(X_train, y_train)
print('Training score:', clf.score(X_train, y_train))
print("Validation score:", clf.score(X_val, y_val))

# +
import random


def unzip(x):
    """Invert zip: turn an iterable of pairs into a pair of lists.

    BUG FIX: returns two empty lists for empty input; the original raised
    ValueError because zip(*[]) yields nothing to unpack.
    """
    if not x:
        return [], []
    y, z = zip(*x)
    return list(y), list(z)


def get_raw_data(type='train', seed=42):
    """Load and shuffle one (emails, labels) split; label 0 = ham, 1 = spam.

    *type* must be 'train', 'val' or 'test'; anything else raises ValueError.
    (The parameter name shadows the builtin but is kept for backward
    compatibility with existing callers.)
    """
    INT_PATH = '../data/interim/'
    if type in ('train', 'val', 'test'):
        with open(INT_PATH + f'ham_{type}.pkl', 'rb') as f:
            ham = pickle.load(f)
        with open(INT_PATH + f'spam_{type}.pkl', 'rb') as f:
            spam = pickle.load(f)
        raw = [(x, 0) for x in ham] + [(x, 1) for x in spam]
        # A private Random instance produces the same shuffle as seeding the
        # module-level RNG did, without clobbering global random state.
        random.Random(seed).shuffle(raw)
        X, y = unzip(raw)
        return X, pd.Series(y)
    else:
        raise ValueError(f'Type "{type}" not recognized. Please enter "train", "val", or "test".')
# -

# Deliberately wrong argument, to demonstrate the error message.
get_raw_data('validation')

X_train_raw, y_train = get_raw_data('train')
X_val_raw, y_val = get_raw_data('val')

X_train_raw[0]

# high=0.2 — presumably an upper document-frequency cut-off; confirm against
# build_features.BagOfWords.
bow = BagOfWords(high=0.2)

import email
import email.policy

bow.fit(X_train_raw)
X_train = bow.transform(X_train_raw)
X_val = bow.transform(X_val_raw)

X_train.shape

# +
clf = LogisticRegression(C=1.0).fit(X_train, y_train)
print('Training score:', clf.score(X_train, y_train))
print("Validation score:", clf.score(X_val, y_val))
# -

# We're fitting the training set almost exactly. I need to improve the high/low cut-offs for the bag of words.
#
# Ideally, we would cache the raw corpus and word_freq, so that we don't need to re-fit to change the filters. I don't think stemming can be turned off without re-fitting.
notebooks/data-exploration-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Q1 WAP to check if given character is vowel or not

# +
vowels=['a','e','i','o','u','A','E','I','O','U']
ip_char=input("Enter a character ")
# Guard against multi-character input (answers the question in the original
# comment: check the length before testing membership).
if len(ip_char) != 1:
    print("Please enter a single character")
elif ip_char in vowels:
    print("The character you have entered is a vowel")
else:
    print('The character you have entered is not a Vowel')
# -

# ## Q2 WAP to check if number is less than 5, if yes then print its square

# +
ip_no=int(input("Please enter number, we will print its square if its less than 5: "))
# The three branches cover <, == and > exhaustively.
if ip_no<5:
    print("Entered Number is less than 5 and its square is ",ip_no**2)
elif ip_no==5:
    print("Entered Number is equal to 5")
else:
    print("Entered Number is more than 5")
# TODO: int() raises ValueError on non-numeric input; wrap in try/except and
# re-prompt to handle that case.
# -

# ## Q3 WAP to check if number is divisible by 10

# +
ip_no=int(input('Enter number to check whether its divisible by 10: '))
if ip_no % 10 == 0:
    print('Entered Number is divisible by 10')
else:
    print('Entered Number is not divisible by 10')
# -

# ## Q4 WAP to check if given character is upper case or lower case

# +
ip_char=input('Please enter a character and we will tell you whether its uppercase or lowercase: ')
# Digits are filtered out first, because islower() is False for them and they
# would otherwise be reported as upper case.
if ip_char.isdigit():
    print("Please enter a character we don't accept numbers")
elif ip_char.islower():
    print('Entered character is in Lower case')
else:
    print('Entered character is in Upper case')
# -

# ## Q5 WAP to check if given character is digit or not

# +
input_char=input('Please enter a character and we will tell you whether its a digit or not: ')
# str.isdigit covers the whole check; no need for the manual 0/1 comparisons.
if input_char.isdigit():
    print("The character you have entered is a digit")
else:
    print("The character you have entered is NOT a digit")
# -

# ## Q6 WAP to print greatest number between given four numbers

# +
a=int(input('Enter number 1: '))
b=int(input('Enter number 2: '))
c=int(input('Enter number 3: '))
d=int(input('Enter number 4: '))
# BUG FIX: the original chain of strict comparisons printed nothing when the
# maximum was shared by two or more inputs; max()/index() always reports the
# first position holding the largest value.
entries = [a, b, c, d]
largest = max(entries)
position = entries.index(largest) + 1
print(f"Number {position} that is:", largest, ", is the largest of all")
# -

# # LOOP Statements

# ## Q1 WAP to count number of digits in any number.

# +
num=int(input('Enter a number and we will tell you the number of digits in the number: '))
# BUG FIX: the original raised the threshold with cmp**10 each pass
# (10, 1e10, 1e100, ...), started the count at 0, and left a debug print in
# the loop, so nearly every answer was wrong.  Multiply the threshold by 10
# instead and start at 1, since any number below 10 already has one digit;
# abs() makes negative input work too.
count = 1
threshold = 10
while abs(num) >= threshold:
    threshold *= 10
    count += 1
print("The number of digits in the number is: ", count)
# -
ITWS 2/.ipynb_checkpoints/2_Assignments-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Unit Testing `GiRaFFE_NRPy`: Fluxes of $\tilde{S}_i$ # # ### Author: <NAME> # # This notebook validates our new, NRPyfied HLLE solver against the function from the original `GiRaFFE` that calculates the flux for $\tilde{S}_i$ according to the the method of Harten, Lax, von Leer, and Einfeldt (HLLE), assuming that we have calculated the values of the flux on the cell faces according to the piecewise-parabolic method (PPM) of [Colella and Woodward (1984)](https://crd.lbl.gov/assets/pubs_presos/AMCS/ANAG/A141984.pdf), modified for the case of GRFFE. # # **Module Status:** <font color=green><b> Validated: </b></font> This code has passed unit tests against the original `GiRaFFE` version. # # **Validation Notes:** This demonstrates the validation of [Tutorial-GiRaFFE_NRPy_Ccode_library-Stilde-flux](../Tutorial-GiRaFFE_NRPy_Ccode_library-Stilde-flux.ipynb). # # It is, in general, good coding practice to unit test functions individually to verify that they produce the expected and intended output. Here, we expect our functions `GRFFE__S_i__flux_in_dir_*.h` to produce identical output to the function `GRFFE__S_i__flux.C` in the original `GiRaFFE`. It should be noted that the two codes handle the parameter `flux_dirn` (the direction in which the code is presently calculating the flux through the cell) differently; in the original `GiRaFFE`, the function `GRFFE__S_i__flux()` expects a parameter `flux_dirn` with value 1, 2, or 3, corresponding to the functions `GRFFE__S_i__flux_in_dir_0()`, `GRFFE__S_i__flux_in_dir_1()`, and `GRFFE__S_i__flux_in_dir_2()`, respectively, in `GiRaFFE_NRPy`. 
# # ### NRPy+ Source Code for this module: # * [GiRaFFE_NRPy/GiRaFFE_NRPy_Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde-flux.ipynb) Generates the code to compute the fluxes. # # ## Introduction: # # This notebook validates the NRPyfied C2P and P2C solvers against the original `GiRaFFE` code. This will be done at a point with a random but realistic spacetime and a variety of Poynting fluxes and Valencia velocities to test edge cases. # # We'll write this in C because the codes we want to test are already written that way, and we would like to avoid modifying the files as much as possible. To do so, we will print the C code to a file. We will begin by including core functionality. We will also define standard parameters needed for GRFFE and NRPy+. # # When this notebook is run, the significant digits of agreement between the old `GiRaFFE` and new `GiRaFFE_NRPy` versions of the algorithm will be printed to the screen right after the code is run [here](#compile_run). # # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows # # 1. [Step 1](#setup): Set up core functions and parameters for unit testing the C2P and P2C algorithms # 1. [Step 1.a](#c_flux) Write the C functions to compute the flux # 1. [Step 1.b](#free_params) Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` # 1. [Step 1.c](#download) Download files from `GiRaFFE` for comparison # 1. [Step 2](#mainc): `Stilde_flux_unit_test.c`: The Main C Code # 1. [Step 2.a](#compile_run): Compile and run the code to validate the output # 1. 
[Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='setup'></a> # # # Step 1: Set up core functions and parameters for unit testing the C2P and P2C algorithms \[Back to [top](#toc)\] # # $$\label{setup}$$ # # We'll start by appending the relevant paths to `sys.path` so that we can access sympy modules in other places. Then, we'll import NRPy+ core functionality and set up a directory in which to carry out our test. We will also declare the gridfunctions that are needed for this portion of the code. import os,sys nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # The last things NRPy+ will require are the definition of type `REAL` and, of course, the functions we are testing. These files are generated on the fly. # + import shutil, os, sys # Standard Python modules for multiplatform OS-level functions nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) from outputC import outCfunction, lhrh # NRPy+: Core C code output module import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import loop as lp # NRPy+: Generate C code loops import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface out_dir = "Validation" cmd.mkdir(out_dir) thismodule = "Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux" import GiRaFFE_NRPy.Stilde_flux as Sf # We will pass values of the gridfunction on the cell faces into the function. This requires us # to declare them as C parameters in NRPy+. We will denote this with the _face infix/suffix. 
alpha_face,gammadet_face = gri.register_gridfunctions("AUXEVOL",["alpha_face","gammadet_face"]) gamma_faceDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceDD","sym01") gamma_faceUU = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceUU","sym01") beta_faceU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","beta_faceU") # We'll need some more gridfunctions, now, to represent the reconstructions of BU and ValenciavU # on the right and left faces Valenciav_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_rU",DIM=3) B_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_rU",DIM=3) Valenciav_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_lU",DIM=3) B_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_lU",DIM=3) sqrt4pi = par.Cparameters("REAL",thismodule,"sqrt4pi","sqrt(4.0*M_PI)") # And the function to which we'll write the output data: Stilde_fluxD = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Stilde_fluxD",DIM=3) # - # <a id='c_flux'></a> # # ## Step 1.a: Write the C functions to compute the flux \[Back to [top](#toc)\] # # $$\label{c_flux}$$ # Here, we will write out each of the three files that we need to generate to calculate the flux of $\tilde{S}_i$ in NRPy+. # + # And now, we'll write the files # In practice, the C functions should only loop over the interior; here, however, we can loop over # all points and set fewer parameters. 
for flux_dirn in range(3): desc="Compute the flux of all 3 components of tilde{S}_i in the "+str(flux_dirn)+"th direction" name="GRFFE__S_i__flux_in_dir_"+str(flux_dirn) Sf.calculate_Stilde_flux(flux_dirn,True,alpha_face,gamma_faceDD,beta_faceU,\ Valenciav_rU,B_rU,Valenciav_lU,B_lU,sqrt4pi) Stilde_flux_to_print = [\ lhrh(lhs=gri.gfaccess("auxevol_gfs","Stilde_fluxD0"),rhs=Sf.Stilde_fluxD[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","Stilde_fluxD1"),rhs=Sf.Stilde_fluxD[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","Stilde_fluxD2"),rhs=Sf.Stilde_fluxD[2]),\ ] outCfunction( outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *auxevol_gfs", body =fin.FD_outputC("returnstring",Stilde_flux_to_print,\ params="outCverbose=False").replace("IDX4","IDX4S"), loopopts ="AllPoints") # - # <a id='free_params'></a> # # ## Step 1.b: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\] # # $$\label{free_params}$$ # # Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`. # # Then we output `free_parameters.h`, which sets some basic grid parameters as well as the speed limit parameter we need for this function. # + # Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h # par.generate_Cparameters_Ccodes(os.path.join(out_dir)) # Step 3.d.ii: Set free_parameters.h with open(os.path.join(out_dir,"free_parameters.h"),"w") as file: file.write(""" // Set free-parameter values. 
params.Nxx_plus_2NGHOSTS0 = 1; params.Nxx_plus_2NGHOSTS1 = 1; params.Nxx_plus_2NGHOSTS2 = 1; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; \n""") # Generates declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(out_dir)) # - # <a id='download'></a> # # ## Step 1.c: Download files from `GiRaFFE` for comparison \[Back to [top](#toc)\] # # $$\label{download}$$ # # We'll also need to download the files in question from the `GiRaFFE` bitbucket repository. This code was originally written by <NAME> in the IllinoisGRMHD documentation; we have modified it to download the files we want. Of note is the addition of the `for` loop since we need three files (The function `GRFFE__S_i__flux()` depends on two other files for headers and functions). # + # First download the original IllinoisGRMHD source code import urllib original_file_url = ["https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/GiRaFFE/src/GiRaFFE_headers.h",\ "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/GiRaFFE/src/inlined_functions.C",\ "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/GiRaFFE/src/GRFFE__S_i__flux.C"\ ] original_file_name = ["GiRaFFE_headers.h",\ "inlined_functions.C",\ "GRFFE__S_i__flux.C"\ ] for i in range(len(original_file_url)): original_file_path = os.path.join(out_dir,original_file_name[i]) # Then download the original IllinoisGRMHD source code # We try it here in a couple of ways in an attempt to keep # the code more portable try: original_file_code = urllib.request.urlopen(original_file_url[i]).read().decode('utf-8') except: original_file_code = urllib.urlopen(original_file_url[i]).read().decode('utf-8') # Write down the file the original IllinoisGRMHD source code with open(original_file_path,"w") as file: file.write(original_file_code) # - # <a id='mainc'></a> # # # 
Step 2: `Stilde_flux_unit_test.c`: The Main C Code \[Back to [top](#toc)\] # # $$\label{mainc}$$ # # Now that we have our vector potential and analytic magnetic field to compare against, we will start writing our unit test. We'll also import common C functionality, define `REAL`, the number of ghost zones, and the faces, and set the standard macros for NRPy+ style memory access.Now we can write our C code. First, we will import our usual libraries and define the various constants and macros we need, taking care to imitate CCTK functionality wherever necessary. In the main function, we will fill all relevant arrays with (appropriate) random values. That is, if a certain gridfunction should never be negative, we will make sure to only generate positive numbers for it. We must also contend with the fact that in NRPy+, we chose to use the Valencia 3-velocity $v^i_{(n)}$, while in ETK, we used the drift velocity $v^i$; the two are related by $$v^i = \alpha v^i_{(n)} - \beta^i.$$ # + # %%writefile $out_dir/Stilde_flux_unit_test.C // These are common packages that we are likely to need. #include "stdio.h" #include "stdlib.h" #include "math.h" #include "string.h" // Needed for strncmp, etc. #include "stdint.h" // Needed for Windows GCC 6.x compatibility #include <time.h> // Needed to set a random seed. 
// Standard GRFFE parameters: const double GAMMA_SPEED_LIMIT = 2000.0; // Standard NRPy+ memory access: #define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) // Let's also #define the NRPy+ gridfunctions #define ALPHA_FACEGF 0 #define GAMMADET_FACEGF 1 #define GAMMA_FACEDD00GF 2 #define GAMMA_FACEDD01GF 3 #define GAMMA_FACEDD02GF 4 #define GAMMA_FACEDD11GF 5 #define GAMMA_FACEDD12GF 6 #define GAMMA_FACEDD22GF 7 #define GAMMA_FACEUU00GF 8 #define GAMMA_FACEUU11GF 9 #define GAMMA_FACEUU22GF 10 #define BETA_FACEU0GF 11 #define BETA_FACEU1GF 12 #define BETA_FACEU2GF 13 #define VALENCIAV_RU0GF 14 #define VALENCIAV_RU1GF 15 #define VALENCIAV_RU2GF 16 #define B_RU0GF 17 #define B_RU1GF 18 #define B_RU2GF 19 #define VALENCIAV_LU0GF 20 #define VALENCIAV_LU1GF 21 #define VALENCIAV_LU2GF 22 #define B_LU0GF 23 #define B_LU1GF 24 #define B_LU2GF 25 #define U4UPPERZERO_LGF 26 #define U4UPPERZERO_RGF 27 #define STILDE_FLUXD0GF 28 #define STILDE_FLUXD1GF 29 #define STILDE_FLUXD2GF 30 #define NUM_AUXEVOL_GFS 31 // The NRPy+ versions of the function. These should require relatively little modification. // We will need this define, though: #define REAL double #include "declare_Cparameters_struct.h" #include "GRFFE__S_i__flux_in_dir_0.h" #include "GRFFE__S_i__flux_in_dir_1.h" #include "GRFFE__S_i__flux_in_dir_2.h" // Some needed workarounds to get the ETK version of the code to work #define CCTK_REAL double #define DECLARE_CCTK_PARAMETERS // struct cGH{}; const cGH cctkGH; // And include the code we want to test against #include "GiRaFFE_headers.h" #include "inlined_functions.C" #include "GRFFE__S_i__flux.C" int main() { paramstruct params; #include "set_Cparameters_default.h" // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // We'll define all indices to be 0. 
No need to complicate memory access const int i0 = 0; const int i1 = 0; const int i2 = 0; // This is the array to which we'll write the NRPy+ variables. REAL *auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS); // These are the arrays to which we will write the ETK variables. CCTK_REAL METRIC_LAP_PSI4[NUMVARS_METRIC_AUX]; CCTK_REAL Ur[MAXNUMVARS]; CCTK_REAL Ul[MAXNUMVARS]; CCTK_REAL FACEVAL[NUMVARS_FOR_METRIC_FACEVALS]; CCTK_REAL cmax; CCTK_REAL cmin; CCTK_REAL st_x_flux; CCTK_REAL st_y_flux; CCTK_REAL st_z_flux; // Now, it's time to make the random numbers. //const long int seed = time(NULL); // seed = 1570632212; is an example of a seed that produces // bad agreement for high speeds //const long int seed = 1574393335; //srand(seed); // Set the seed //printf("seed for random number generator = %ld; RECORD IF AGREEMENT IS BAD\n\n",seed); // We take care to make sure the corresponding quantities have the SAME value. auxevol_gfs[IDX4S(ALPHA_FACEGF, i0,i1,i2)] = 1.0;//(double)rand()/RAND_MAX; const double alpha = auxevol_gfs[IDX4S(ALPHA_FACEGF, i0,i1,i2)]; METRIC_LAP_PSI4[LAPSE] = alpha; //METRIC_LAP_PSI4[LAPM1] = METRIC_LAP_PSI4[LAPSE]-1; auxevol_gfs[IDX4S(GAMMA_FACEDD00GF, i0,i1,i2)] = 1.0;//1.0+(double)rand()/RAND_MAX*0.2-0.1; auxevol_gfs[IDX4S(GAMMA_FACEDD01GF, i0,i1,i2)] = 0.0;//(double)rand()/RAND_MAX*0.2-0.1; auxevol_gfs[IDX4S(GAMMA_FACEDD02GF, i0,i1,i2)] = 0.0;//(double)rand()/RAND_MAX*0.2-0.1; auxevol_gfs[IDX4S(GAMMA_FACEDD11GF, i0,i1,i2)] = 1.0;//1.0+(double)rand()/RAND_MAX*0.2-0.1; auxevol_gfs[IDX4S(GAMMA_FACEDD12GF, i0,i1,i2)] = 0.0;//(double)rand()/RAND_MAX*0.2-0.1; auxevol_gfs[IDX4S(GAMMA_FACEDD22GF, i0,i1,i2)] = 1.0;//1.0+(double)rand()/RAND_MAX*0.2-0.1; // Generated by NRPy+: const double gammaDD00 = auxevol_gfs[IDX4S(GAMMA_FACEDD00GF, i0,i1,i2)]; const double gammaDD01 = auxevol_gfs[IDX4S(GAMMA_FACEDD01GF, i0,i1,i2)]; const double gammaDD02 = auxevol_gfs[IDX4S(GAMMA_FACEDD02GF, i0,i1,i2)]; const double gammaDD11 = 
auxevol_gfs[IDX4S(GAMMA_FACEDD11GF, i0,i1,i2)]; const double gammaDD12 = auxevol_gfs[IDX4S(GAMMA_FACEDD12GF, i0,i1,i2)]; const double gammaDD22 = auxevol_gfs[IDX4S(GAMMA_FACEDD22GF, i0,i1,i2)]; /* * NRPy+ Finite Difference Code Generation, Step 2 of 1: Evaluate SymPy expressions and write to main memory: */ const double tmp0 = gammaDD11*gammaDD22; const double tmp1 = pow(gammaDD12, 2); const double tmp2 = gammaDD02*gammaDD12; const double tmp3 = pow(gammaDD01, 2); const double tmp4 = pow(gammaDD02, 2); const double tmp5 = gammaDD00*tmp0 - gammaDD00*tmp1 + 2*gammaDD01*tmp2 - gammaDD11*tmp4 - gammaDD22*tmp3; const double tmp6 = 1.0/tmp5; auxevol_gfs[IDX4S(GAMMA_FACEUU00GF, i0, i1, i2)] = tmp6*(tmp0 - tmp1); auxevol_gfs[IDX4S(GAMMA_FACEUU11GF, i0, i1, i2)] = tmp6*(gammaDD00*gammaDD22 - tmp4); auxevol_gfs[IDX4S(GAMMA_FACEUU22GF, i0, i1, i2)] = tmp6*(gammaDD00*gammaDD11 - tmp3); auxevol_gfs[IDX4S(GAMMADET_FACEGF, i0, i1, i2)] = tmp5; auxevol_gfs[IDX4S(GAMMADET_FACEGF, i0,i1,i2)] = tmp5; METRIC_LAP_PSI4[PSI6] = sqrt(auxevol_gfs[IDX4S(GAMMADET_FACEGF, i0,i1,i2)]); METRIC_LAP_PSI4[PSI2] = pow(METRIC_LAP_PSI4[PSI6],1.0/3.0); METRIC_LAP_PSI4[PSI4] = METRIC_LAP_PSI4[PSI2]*METRIC_LAP_PSI4[PSI2]; const double Psim4 = 1.0/METRIC_LAP_PSI4[PSI4]; METRIC_LAP_PSI4[PSIM4] = Psim4; // Copied from the ETK implementation CCTK_REAL gtxxL = gammaDD00*Psim4; CCTK_REAL gtxyL = gammaDD01*Psim4; CCTK_REAL gtxzL = gammaDD02*Psim4; CCTK_REAL gtyyL = gammaDD11*Psim4; CCTK_REAL gtyzL = gammaDD12*Psim4; CCTK_REAL gtzzL = gammaDD22*Psim4; /********************************* * Apply det gtij = 1 constraint * *********************************/ const CCTK_REAL gtijdet = gtxxL * gtyyL * gtzzL + gtxyL * gtyzL * gtxzL + gtxzL * gtxyL * gtyzL - gtxzL * gtyyL * gtxzL - gtxyL * gtxyL * gtzzL - gtxxL * gtyzL * gtyzL; /*const CCTK_REAL gtijdet_Fm1o3 = fabs(1.0/cbrt(gtijdet)); gtxxL = gtxxL * gtijdet_Fm1o3; gtxyL = gtxyL * gtijdet_Fm1o3; gtxzL = gtxzL * gtijdet_Fm1o3; gtyyL = gtyyL * gtijdet_Fm1o3; gtyzL = 
gtyzL * gtijdet_Fm1o3; gtzzL = gtzzL * gtijdet_Fm1o3;*/ FACEVAL[GXX] = gtxxL; FACEVAL[GXY] = gtxyL; FACEVAL[GXZ] = gtxzL; FACEVAL[GYY] = gtyyL; FACEVAL[GYZ] = gtyzL; FACEVAL[GZZ] = gtzzL; FACEVAL[GUPXX] = ( gtyyL * gtzzL - gtyzL * gtyzL )/gtijdet; FACEVAL[GUPYY] = ( gtxxL * gtzzL - gtxzL * gtxzL )/gtijdet; FACEVAL[GUPZZ] = ( gtxxL * gtyyL - gtxyL * gtxyL )/gtijdet; auxevol_gfs[IDX4S(BETA_FACEU0GF, i0,i1,i2)] = 0.0;//(double)rand()/RAND_MAX*2.0-1.0; const double betax = auxevol_gfs[IDX4S(BETA_FACEU0GF, i0,i1,i2)]; FACEVAL[SHIFTX] = betax; auxevol_gfs[IDX4S(BETA_FACEU1GF, i0,i1,i2)] = 0.0;//(double)rand()/RAND_MAX*2.0-1.0; const double betay = auxevol_gfs[IDX4S(BETA_FACEU1GF, i0,i1,i2)]; FACEVAL[SHIFTY] = betay; auxevol_gfs[IDX4S(BETA_FACEU2GF, i0,i1,i2)] = 0.0;//(double)rand()/RAND_MAX*2.0-1.0; const double betaz = auxevol_gfs[IDX4S(BETA_FACEU2GF, i0,i1,i2)]; FACEVAL[SHIFTZ] = betaz; /* Generate physically meaningful speeds */ auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)] = -6.609724564008164e-01;//(double)rand()/RAND_MAX*2.0-1.0; auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)] = 5.872129306790587e-01;//(double)rand()/RAND_MAX*2.0-1.0; auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)] = -2.394409383043848e-01;//(double)rand()/RAND_MAX*2.0-1.0; /* Superluminal speeds for testing */ /*auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0; auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0; auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;*/ Ur[VX] = alpha*auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)]-betax; Ur[VY] = alpha*auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)]-betay; Ur[VZ] = alpha*auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)]-betaz; auxevol_gfs[IDX4S(B_RU0GF, i0,i1,i2)] = 9.999999999999999e-01;//(double)rand()/RAND_MAX*2.0-1.0; Ur[BX_CENTER] = auxevol_gfs[IDX4S(B_RU0GF, i0,i1,i2)]; auxevol_gfs[IDX4S(B_RU1GF, i0,i1,i2)] = 1.732050807568877e+00;//(double)rand()/RAND_MAX*2.0-1.0; 
Ur[BY_CENTER] = auxevol_gfs[IDX4S(B_RU1GF, i0,i1,i2)]; // Set Bz to enforce orthogonality of u and B; lower index on drift v^i, then set // Bz = -(ux*Bx+uy*By)/uz REAL ux,uy,uz; ux = gammaDD00*(Ur[VX]+betax) + gammaDD01*(Ur[VY]+betay) + gammaDD02*(Ur[VZ]+betaz); uy = gammaDD01*(Ur[VX]+betax) + gammaDD11*(Ur[VY]+betay) + gammaDD12*(Ur[VZ]+betaz); uz = gammaDD02*(Ur[VX]+betax) + gammaDD12*(Ur[VY]+betay) + gammaDD22*(Ur[VZ]+betaz); auxevol_gfs[IDX4S(B_RU2GF, i0,i1,i2)] = 1.485468574153978e+00;//-(ux*Ur[BX_CENTER] + uy*Ur[BY_CENTER])/uz; Ur[BZ_CENTER] = auxevol_gfs[IDX4S(B_RU2GF, i0,i1,i2)]; /* Generate physically meaningful speeds */ auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)] = -6.609724564008164e-01;//(double)rand()/RAND_MAX*2.0-1.0; auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)] = 5.872129306790587e-01;//(double)rand()/RAND_MAX*2.0-1.0; auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)] = -2.394409383043848e-01;//(double)rand()/RAND_MAX*2.0-1.0; /* Superluminal speeds for testing */ /*auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0; auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0; auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;*/ Ul[VX] = alpha*auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)]-betax; Ul[VY] = alpha*auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)]-betay; Ul[VZ] = alpha*auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)]-betaz; auxevol_gfs[IDX4S(B_LU0GF, i0,i1,i2)] = 9.999999999999999e-01;//(double)rand()/RAND_MAX*2.0-1.0; Ul[BX_CENTER] = auxevol_gfs[IDX4S(B_LU0GF, i0,i1,i2)]; auxevol_gfs[IDX4S(B_LU1GF, i0,i1,i2)] = 1.732050807568877e+00;//(double)rand()/RAND_MAX*2.0-1.0; Ul[BY_CENTER] = auxevol_gfs[IDX4S(B_LU1GF, i0,i1,i2)]; // Set Bz to enforce orthogonality of u and B; lower index on drift v^i, then set // Bz = -(ux*Bx+uy*By)/uz ux = gammaDD00*(Ul[VX]+betax) + gammaDD01*(Ul[VY]+betay) + gammaDD02*(Ul[VZ]+betaz); uy = gammaDD01*(Ul[VX]+betax) + gammaDD11*(Ul[VY]+betay) 
+ gammaDD12*(Ul[VZ]+betaz); uz = gammaDD02*(Ul[VX]+betax) + gammaDD12*(Ul[VY]+betay) + gammaDD22*(Ul[VZ]+betaz); auxevol_gfs[IDX4S(B_LU2GF, i0,i1,i2)] = 1.485468574153978e+00;//-(ux*Ul[BX_CENTER] + uy*Ul[BY_CENTER])/uz; Ul[BZ_CENTER] = auxevol_gfs[IDX4S(B_LU2GF, i0,i1,i2)]; printf("Valencia 3-velocity (right): %.4e, %.4e, %.4e\n",auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)]); printf("Valencia 3-velocity (left): %.4e, %.4e, %.4e\n\n",auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)]); printf("Below are the numbers we care about. These are the Significant Digits of Agreement \n"); printf("between the HLLE fluxes computed by NRPy+ and ETK. Each row represents a flux \n"); printf("direction; each entry therein corresponds to a component of StildeD. Each pair \n"); printf("of outputs should show at least 10 significant digits of agreement. \n\n"); // Now, we'll run the NRPy+ and ETK functions, once in each flux_dirn. 
// We'll compare the output in-between each GRFFE__S_i__flux_in_dir_0(&params,auxevol_gfs); GRFFE__S_i__flux(0,0,0,1,Ul,Ur,FACEVAL,METRIC_LAP_PSI4,cmax,cmin,st_x_flux,st_y_flux,st_z_flux); printf("SDA: %.1f, %.1f, %.1f\n",1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)]-st_x_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux))), 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD1GF, i0,i1,i2)]-st_y_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux))), 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD2GF, i0,i1,i2)]-st_z_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux)))); //printf("NRPy+ Results: %.16e, %.16e, %.16e\n",auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)],auxevol_gfs[IDX4S(STILDE_FLUXD1GF, i0,i1,i2)],auxevol_gfs[IDX4S(STILDE_FLUXD2GF, i0,i1,i2)]); GRFFE__S_i__flux_in_dir_1(&params,auxevol_gfs); GRFFE__S_i__flux(0,0,0,2,Ul,Ur,FACEVAL,METRIC_LAP_PSI4,cmax,cmin,st_x_flux,st_y_flux,st_z_flux); printf("SDA: %.1f, %.1f, %.1f\n",1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)]-st_x_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux))), 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD1GF, i0,i1,i2)]-st_y_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux))), 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD2GF, i0,i1,i2)]-st_z_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux)))); //printf("NRPy+ Results: %.16e, %.16e, %.16e\n",auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)],auxevol_gfs[IDX4S(STILDE_FLUXD1GF, i0,i1,i2)],auxevol_gfs[IDX4S(STILDE_FLUXD2GF, i0,i1,i2)]); GRFFE__S_i__flux_in_dir_2(&params,auxevol_gfs); GRFFE__S_i__flux(0,0,0,3,Ul,Ur,FACEVAL,METRIC_LAP_PSI4,cmax,cmin,st_x_flux,st_y_flux,st_z_flux); printf("SDA: %.1f, %.1f, %.1f\n",1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)]-st_x_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux))), 
1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD1GF, i0,i1,i2)]-st_y_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux))), 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(STILDE_FLUXD2GF, i0,i1,i2)]-st_z_flux)/(fabs(auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)])+fabs(st_x_flux)))); //printf("NRPy+ Results: %.16e, %.16e, %.16e\n",auxevol_gfs[IDX4S(STILDE_FLUXD0GF, i0,i1,i2)],auxevol_gfs[IDX4S(STILDE_FLUXD1GF, i0,i1,i2)],auxevol_gfs[IDX4S(STILDE_FLUXD2GF, i0,i1,i2)]); printf("Note that in the case of very high velocities, numerical error will accumulate \n"); printf("and reduce agreement significantly due to a catastrophic cancellation. \n\n"); //printf("ETK Results: %.16e, %.16e, %.16e\n\n",st_x_flux,st_y_flux,st_z_flux); } # - # <a id='compile_run'></a> # # ## Step 2.a: Compile and run the code to validate the output \[Back to [top](#toc)\] # # $$\label{compile_run}$$ # # And now, we will compile and run the C code. We also make python calls to time how long each of these steps takes. # + import cmdline_helper as cmd import time print("Now compiling, should take ~2 seconds...\n") start = time.time() cmd.C_compile(os.path.join(out_dir,"Stilde_flux_unit_test.C"), os.path.join(out_dir,"Stilde_flux_unit_test")) end = time.time() print("Finished in "+str(end-start)+" seconds.\n\n") # os.chdir(out_dir) print("Now running...\n") start = time.time() # cmd.Execute(os.path.join("Stilde_flux_unit_test")) # !./Validation/Stilde_flux_unit_test end = time.time() print("Finished in "+str(end-start)+" seconds.\n\n") # os.chdir(os.path.join("../")) # - # <a id='latex_pdf_output'></a> # # # Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux.pdf](Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux")
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux.ipynb
# --- # jupyter: # jupyterbook: # pre_code: import numpy as _np; _np.random.seed(42) # jupytext: # notebook_metadata_filter: all,-language_info # split_at_heading: true # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # One of the great advantages of using simulation is that you can test the # assertions your teachers make. # # For example, in the [permutation and t-test page]({{ site.baseurl # }}/chapters/05/permutation_and_t_test), we asserted that the t-test is not # valid when the underlying distribution of the numbers is not close to the # [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution). # # We can investigate this claim by simulating numbers from the null (ideal) # world, and seeing what results we get from the t-test. import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # The particular variant of the t-test that we were looking at in the page above # was the *independent sample* t test for groups with similar variance. Similar # variance means that the distribution of the values in the first group is # roughly equal to the distribution in the second group. # # For example, soon we will be testing again for a mean difference between the # numbers of mosquitoes attracted to each of the 25 volunteers who drank beer, # and the equivalent numbers for each of the 18 volunteers who drank water. # # See [the data # page](https://github.com/matthew-brett/datasets/tree/master/mosquito_beer) for # more details on the dataset, and [the data license page]({{ site.baseurl # }}/data/license). # # For an equal variance test, we assume that the spread of the beer values is # roughly equal to the spread of the water values, as measured by the *standard # deviation*, or, equivalently, the *variance*. Remember the variance is the # squared standard deviation. 
#
# We can pull together the code in [permutation and t-test page]({{ site.baseurl
# }}/chapters/05/permutation_and_t_test) to implement our own t-test.

# Import the Scipy statistics routines.
import scipy.stats as sps


def t_test(group1, group2):
    """Independent two-sample t statistic and one-tail (upper-tail) p value.

    Parameters
    ----------
    group1, group2 : array-like of float
        The two independent samples of observations to compare.

    Returns
    -------
    list
        Two-element list ``[t_stat, upper_tail_p]`` — the equal-variance
        t statistic and the probability, in the null world, of a t value
        this large or larger.
    """
    # The observed mean difference between the two groups.
    omd = np.mean(group1) - np.mean(group2)
    # Pool the deviations of every observation from its own group mean.
    errors = np.append(group1 - np.mean(group1), group2 - np.mean(group2))
    n1, n2 = len(group1), len(group2)  # Observations per group.
    df = n1 + n2 - 2  # The "degrees of freedom".
    # Estimate of the (assumed shared) population standard deviation.
    estimated_sd = np.sqrt(np.sum(errors ** 2) / df)
    t_stat = omd / (estimated_sd * np.sqrt(1 / n1 + 1 / n2))
    upper_tail_p = 1 - sps.t.cdf(t_stat, df)
    return [t_stat, upper_tail_p]


# The only new thing in the implementation above is the second-to-last line,
# where we are using a routine in Scipy to calculate the probability value from
# the t statistic; the details of this are not important for our purpose.
#
# First we go back to the logic of this p value, which is very similar to the
# logic for permutation test p values:
#
# * Notice that the function calculates `omd = np.mean(group1) -
#   np.mean(group2)`. Call `omd` the *observed mean difference*.
# * Assume that we are in the null (ideal) world where the numbers from `group1`
#   and the numbers from `group2` have been drawn at random from the *same*
#   distribution.
# * The p value is the probability, in this null world, of seeing a mean
#   difference that is equal to or greater than the observed mean difference
#   `omd`.
#
# You can also think of a p value as an *index of surprise*. The p value tells
# you how often you would expect to see an observed mean difference this large, or
# larger, in the null (ideal) world. If the p value is small, then the observed
# mean difference is surprising.
# 
For example, if the p value is 0.05, it means
# that such a difference only occurs 5% of the time by chance in the null world, or
# 1 in 20 times. You could say it was surprising at a 5% level. Similarly a p
# value of 0.01 means the result would only occur 1% of the time in the null
# world, and it is surprising at a 1% level.
#
# Here we recreate the mosquito, beer, water data from the [permutation and
# t-test page]({{ site.baseurl }}/chapters/05/permutation_and_t_test):

# Numbers of mosquitoes attracted to each of the 25 volunteers who drank beer.
beer_activated = np.array([14, 33, 27, 11, 12, 27, 26, 25,
                           27, 27, 22, 36, 37, 3, 23, 7,
                           25, 17, 36, 31, 30, 22, 20, 29, 23])
# Numbers of mosquitoes attracted to each of the 18 volunteers who drank water.
water_activated = np.array([33, 23, 23, 13, 24, 8, 4, 21,
                            24, 21, 26, 27, 22, 21, 25, 20, 7, 3])

# We run our t-test over these data to get the same result you saw in the
# [permutation and t-test page]({{ site.baseurl
# }}/chapters/05/permutation_and_t_test).

t, p = t_test(beer_activated, water_activated)
print('t statistic:', t)
print('Upper-tail p value:', p)

# To check our function is doing the correct calculation, we show that the t and
# p values are the same as the ones we get from using the standard Scipy function
# for independent t-tests:

result = sps.ttest_ind(beer_activated, water_activated)
print('Scipy t statistic:', result.statistic)
# Scipy's p value is two-sided; halve it for the upper-tail value.
print('Scipy upper-tail p value:', result.pvalue / 2)

# Here is the observed difference in means:

# Observed mean difference
np.mean(beer_activated) - np.mean(water_activated)

# The t-test p value above asserts that a difference in means as large as the
# observed difference, or larger, would only occur about 5% of the time in a null
# (ideal) world, where the beer and water values come from the same distribution.
# The observed result is surprising at around the 5% level.
#
# How would we check the assertion that the t-test is valid for normal
# distributions?
#
# If it is valid, then consider the situation where we do in fact draw two
# samples from *the same* normal distribution, and then ask the t test for a p
# value. 
If the p value is 5%, it means that such a result should only occur by
# chance, in the null world, 5% of the time.
#
# So, we can repeat this procedure, drawing numbers that do in fact come from the
# null world, and check that the t-test only tells us that the result is
# surprising at the 5% level --- about 5% of the time.

n_iters = 10000
p_values = np.zeros(n_iters)  # Store the p values
for i in np.arange(n_iters):
    # Make 40 numbers from a normal distribution with mean 10, sd 2.
    # These are our numbers from the null world.
    randoms = np.random.normal(10, 2, size=40)
    # Split into two groups of size 20, and do a t-test.
    t, p = t_test(randoms[:20], randoms[20:])
    # Store the p value from the t-test.
    p_values[i] = p

# Show the first 5 p values.
p_values[:5]

# If the t-test calculation is correct, then we should only see a p value of 0.05
# or smaller about 5% of the time.

# Proportion of times the t-test said: surprising at 5% level.
np.count_nonzero(p_values <= 0.05) / n_iters

# Here the t-test is doing a good job --- it labels the result as surprising, at
# the 5% level, about 5% of the time.
#
# Now we ask - does it matter if the group sizes are unequal? To test this, we
# do the same calculation, but split the numbers from the null world into one
# group of 3 and another of 37:

# t-test working on unequal group sizes.
p_values = np.zeros(n_iters)  # Store the p values
for i in np.arange(n_iters):
    # Make 40 numbers from a normal distribution with mean 10, sd 2.
    randoms = np.random.normal(10, 2, size=40)
    # Split into two groups of size 3 and 37, and do a t-test.
    t, p = t_test(randoms[:3], randoms[3:])
    # Store the p value from the t-test.
    p_values[i] = p

# Show the first 5 p values.
p_values[:5]

# How good a job is it doing now, with unequal group sizes?

# Proportion of times the t-test said: surprising at 5% level.
# This time with unequal group sizes.
np.count_nonzero(p_values <= 0.05) / n_iters

# The proportion is still around 5%, close to what it should be.
# # What happens if we use a distribution other than the normal distribution? # # Here we use some random numbers from a [Chi-squared # distribution](https://en.wikipedia.org/wiki/Chi-squared_distribution). The # distribution looks like this, with a $k$ value of 2 (see the Wikipedia page): some_chi2_numbers = np.random.chisquare(2, size=1000) plt.hist(some_chi2_numbers) plt.title('1000 random samples from chi-squared distribution, k=2') # We use this highly not-normal distribution to provide numbers to our t-test: # t-test working on unequal group sizes and not-normal distribution. p_values = np.zeros(n_iters) # Store the p values for i in np.arange(n_iters): # Make 40 numbers from a chi-squared distribution with k=2 randoms = np.random.chisquare(2, size=40) # Split into two groups of size 3 and 37, and do a t-test. t, p = t_test(randoms[:3], randoms[3:]) # Store the p value from the t-test. p_values[i] = p # Show the first 5 p values. p_values[:5] # In this situation the t-test starts to be less accurate - labeling too many # random differences as being surprising at the 5% level: # Proportion of times the t-test said: surprising at 5% level. # This time wih unequal group sizes. np.count_nonzero(p_values <= 0.05) / n_iters # Does a permutation test do a better job in this situation? # # We can test! 
# # Here is a function that does a permutation test: def permutation(group1, group2, niters=10000): omd = np.mean(group1) - np.mean(group2) g1_n = len(group1) fake_mds = np.zeros(niters) pooled = np.append(group1, group2) for i in np.arange(niters): np.random.shuffle(pooled) fake_mds[i] = np.mean(pooled[:g1_n]) - np.mean(pooled[g1_n:]) return np.count_nonzero(fake_mds >= omd) / niters # Test this on the mosquito data: permutation(beer_activated, water_activated) # This is very similar to the t-statistic p value --- *for these data* that have # fairly equal group size, and a distribution not far from normal: t_test(beer_activated, water_activated) # Now let's check how the permutation test does when there are unequal group # sizes and a not-normal distribution. # # The code below will take a few tens of seconds to run, because you are running # many loops in the `permutation` function, each time you go through the main # loop. # Permutation working on unequal group sizes and not-normal distribution. # This is slow - do fewer iterations. n_iters = 1000 p_values = np.zeros(n_iters) # Store the p values for i in np.arange(n_iters): # Make 40 numbers from a chi-squared distribution with k=2 randoms = np.random.chisquare(2, size=40) # Split into two groups of size 3 and 37, and do a t-test. # Use fewer iterations than usual to save computation time. p = permutation(randoms[:3], randoms[3:], niters=1000) # Store the p value from the permutation test. p_values[i] = p # Show the first 5 p values. p_values[:5] # How does the permutation test do? # Proportion of times the permutation test said: surprising at 5% level. # With unequal group sizes, not-normal distribution. np.count_nonzero(p_values <= 0.05) / n_iters # It is more accurate than the t-test. In general the permutation method is # more accurate for data from not-normal distributions, as well being accurate # for normal distributions.
notebooks/05/testing_t.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # #!/usr/bin/env python3 import scipy import scipy.stats import numpy as np import nilearn.plotting import nilearn import nibabel import sys import os import glob import argparse import toleranceinterval as ti import warnings import json warnings.simplefilter(action='ignore', category=FutureWarning) NaN = float('nan') preproc_extension = '_desc-preproc_T1w.nii.gz' brain_mask_extension = '_desc-brain_mask.nii.gz' # + def error(msg): print(msg, file=sys.stderr) sys.exit(1) class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def print_info(args, pass_test, pass_test_msg, ratio): if args.template == 'MNI152NLin6Asym': template = f'{bcolors.OKBLUE}{args.template:24}{bcolors.ENDC}' else: template = f'{bcolors.OKCYAN}{args.template:24}{bcolors.ENDC}' if pass_test: header = f'{bcolors.OKGREEN}{pass_test_msg:^7}{bcolors.ENDC}' succ = bcolors.OKGREEN else: header = f'{bcolors.FAIL}{pass_test_msg:^7}{bcolors.ENDC}' succ = bcolors.FAIL dataset = f'{bcolors.WARNING}{args.dataset:10}{bcolors.ENDC}' subject = f'{bcolors.WARNING}{args.subject:16}{bcolors.ENDC}' datatype = f'{bcolors.HEADER}{args.data_type}{bcolors.ENDC}' ratio = f'{bcolors.BOLD}{succ}{100*ratio:.2f}%{bcolors.ENDC}' info = f'dataset:{dataset} subject:{subject} datatype:{datatype} template:{template} target:{args.target} ' print(f"[{header}] {ratio} {info}") # - def load_image(path): return nibabel.load(path) # + def get_normality_mask(args): if args.normality_mask_dir is None: return None regexp = os.path.join(f'{args.normality_mask_dir}', f'{args.dataset}', f'{args.subject}', f'*{args.template}*') path = glob.glob(regexp)[0] image = 
    # NOTE(review): this chunk starts mid-function — the two statements below
    # are the tail of a mask-loading helper defined above this chunk
    # (presumably get_normality_mask, since it returns a boolean voxel mask);
    # the assignment target of np.load was cut off by the chunk boundary.
    # Confirm against the full file before editing.
    np.load(path, allow_pickle=True)
    return image.tolist().get_fdata().astype('bool')


def get_reference(args):
    ''' Gather images used as reference.

    Globs fmriprep outputs under args.reference for the given
    dataset/subject/data-type/template, loads each preprocessed image and its
    brain mask, masks out voxels failing the Shapiro-Wilk normality test (when
    a normality mask is available), and stacks the masked images into one
    array of shape (n_images, ...).
    '''
    data = []
    # Mask where True values are voxels failings Shapiro-Wilk test
    normality_mask = get_normality_mask(args)
    # preproc_extension / brain_mask_extension are module-level constants
    # defined above this chunk.
    preproc_re = f'{args.subject}*{args.template}{preproc_extension}'
    brain_mask_re = f'{args.subject}*{args.template}{brain_mask_extension}'
    regexp = os.path.join(
        args.reference,
        f'*{args.dataset}*',
        'fmriprep',
        args.subject,
        args.data_type)
    paths = glob.glob(regexp)
    for path in paths:
        image = load_image(glob.glob(os.path.join(path, preproc_re))[0])
        brain_mask = load_image(glob.glob(os.path.join(path, brain_mask_re))[0])
        # Keep voxels that are inside the brain AND pass the normality test.
        mask = brain_mask if normality_mask is None else np.ma.logical_and(brain_mask, ~normality_mask)
        image_masked = np.ma.where(mask, image.get_fdata(), False)
        data.append(image_masked)
    return np.array(data)


def get_target(args):
    ''' Gather images used as target.

    Same gathering logic as get_reference, but rooted at args.target.
    '''
    data = []
    # Mask where True values are voxels failings Shapiro-Wilk test
    normality_mask = get_normality_mask(args)
    preproc_re = f'{args.subject}*{args.template}{preproc_extension}'
    brain_mask_re = f'{args.subject}*{args.template}{brain_mask_extension}'
    regexp = os.path.join(
        args.target,
        f'*{args.dataset}*',
        'fmriprep',
        args.subject,
        args.data_type)
    paths = glob.glob(regexp)
    for path in paths:
        image = load_image(glob.glob(os.path.join(path, preproc_re))[0])
        brain_mask = load_image(glob.glob(os.path.join(path, brain_mask_re))[0])
        mask = brain_mask if normality_mask is None else np.ma.logical_and(brain_mask, ~normality_mask)
        image_masked = np.ma.where(mask, image.get_fdata(), False)
        data.append(image_masked)
    return np.array(data)


def get_mean_reference(reference):
    ''' Voxel-wise masked mean across the reference sample (axis 0). '''
    return np.ma.mean(reference, axis=0, dtype=np.float64)


def c4(n):
    ''' c4(n) = sqrt(2/n-1) (gamma(n/2)/gamma(n-1/2))

    Bias-correction factor so that s/c4(n) is an unbiased estimator of the
    standard deviation for a normal sample of size n.
    '''
    gamma = scipy.special.gamma
    return np.sqrt(2/(n-1)) * (gamma(n/2)/gamma((n-1)/2))


def get_std_reference(reference):
    ''' Unbiased estimator for standard deviation with small sample size. '''
    return np.ma.std(reference, axis=0, ddof=1, dtype=np.float64) / c4(reference.shape[0])


# +
def compute_prediction_interval(mean, std, value, n, confidence):
    ''' Compute prediction interval.

    Returns a boolean (masked) array: True where `value` falls inside the
    two-sided Student-t prediction interval built from the reference
    mean/std with n observations.
    '''
    alpha = 1 - confidence
    # Two-sided Student-t quantile with n-1 degrees of freedom.
    coef = scipy.stats.t.ppf(1-alpha/2, df=n-1)
    bounds_inf = mean - coef * np.sqrt((1+1/n)) * std
    bounds_sup = mean + coef * np.sqrt((1+1/n)) * std
    success = np.ma.logical_and(bounds_inf <= value, value <= bounds_sup)
    return success


def compute_k2(n, confidence, population):
    ''' Factor for tolerance interval under normal hypothesis '''
    # `ti` (tolerance-interval package) is imported above this chunk.
    return ti.twoside.normal_factor(n, population, confidence)


def compute_tolerance_interval(mean, std, value, n, confidence, population):
    ''' Compute tolerance interval.

    Returns a boolean (masked) array: True where `value` falls inside the
    two-sided normal tolerance interval.
    '''
    coef = compute_k2(n, confidence, population)
    # NOTE(review): abs() is applied to the lower-bound factor only; if coef
    # could be negative the interval would be asymmetric — confirm intended.
    bounds_inf = mean - abs(coef) * std
    bounds_sup = mean + coef * std
    success = np.ma.logical_and(bounds_inf <= value, value <= bounds_sup)
    return success


def compute_test(mean, std, target, n, confidence, population):
    '''
    Compute the test.
    If only confidence is given, computes the prediction interval.
    If confidence and population are given, computes the tolerance interval.

    Returns (success mask, number of in-interval voxels, number of valid voxels).
    '''
    if population:
        success = compute_tolerance_interval(
            mean, std, target, n, confidence, population)
    else:
        success = compute_prediction_interval(
            mean, std, target, n, confidence)
    nb_success = np.ma.sum(success)
    nb_voxels = np.ma.count(success)  # count of non-masked voxels only
    return success, nb_success, nb_voxels


# +
def test(reference, target, confidence, population):
    ''' Compute the test. if target or reference is missing, returns NaN. '''
    if target.size == 0 or reference.size == 0:
        # NOTE(review): bare `NaN` must be provided by an import above this
        # chunk (e.g. `from numpy import NaN`); otherwise this line raises
        # NameError at runtime — verify against the full file.
        return None,NaN,NaN
    n = reference.shape[0]
    mean = get_mean_reference(reference)
    std = get_std_reference(reference)
    return compute_test(target=target, mean=mean, std=std,
                        confidence=confidence, n=n, population=population)


def test_against_sample(args):
    '''
    Test the sample with itself.
    Let X the sample made of N observation X_k, 0<k<N.
    This function uses one observation X_i as target and computes the SI with
    the last N-1 X_j observations, 0<j<N, i!=j. Do this for each i < N.
    '''
    confidence = args.confidence
    population = args.population
    sample = get_reference(args)
    sample_size = sample.shape[0]
    pass_test = True
    for i, observation in enumerate(sample):
        # Leave-one-out: every observation except i forms the reference.
        index = list(range(sample_size))
        index.pop(i)
        reference = sample[index]
        target = observation
        success, nb_success, nb_voxels = test(reference, target, confidence, population)
        ratio = nb_success/nb_voxels
        # Pass when the fraction of in-interval voxels reaches the requested
        # confidence level.
        _pass_test = ratio >= args.confidence
        pass_test_msg = 'Success' if _pass_test else 'Fail'
        # print_info is defined above this chunk.
        print_info(args, _pass_test, pass_test_msg, ratio)
        pass_test = pass_test and _pass_test
    return pass_test


def test_against_reference(args):
    ''' Test that the target is in the SI computed with the reference. '''
    confidence = args.confidence
    population = args.population
    reference = get_reference(args)
    targets = get_target(args)
    pass_test = True
    for target in targets:
        success, nb_success, nb_voxels = test(
            reference, target, confidence, population)
        ratio = nb_success/nb_voxels
        _pass_test = ratio >= args.confidence
        pass_test_msg = 'Success' if _pass_test else 'Fail'
        print_info(args, _pass_test, pass_test_msg, ratio)
        pass_test = pass_test and _pass_test
    return pass_test


# -
def parse_args(sargs):
    ''' Build the CLI parser and parse the given argument list. '''
    parser = argparse.ArgumentParser(
        description="Test target image is in a confidence interval"
    )
    parser.add_argument(
        "--confidence", action="store", default=0.95, type=float,
        help="Confidence"
    )
    parser.add_argument(
        "--population", action="store", type=float, help="Population"
    )
    parser.add_argument('--reference', action='store',
                        required=True, help="Reference directory")
    parser.add_argument("--target", action="store",
                        help="Target directory (if empty, target is reference)")
    parser.add_argument("--dataset", action="store",
                        required=True, help="Dataset")
    parser.add_argument("--subject", action="store",
                        required=True, help="Subject")
    parser.add_argument("--data-type", action="store",
                        required=True, help="Data type")
    parser.add_argument("--template", action="store",
                        required=True, help="Template")
    parser.add_argument('--normality-mask-dir', action='store',
                        help='Use normality test as a mask')
    args = parser.parse_args(sargs)
    return args


def main(sargs):
    ''' Entry point: self-test when no target is given, else reference-vs-target. '''
    args = parse_args(sargs)
    if args.target is None:
        pass_test = test_against_sample(args)
    else:
        pass_test = test_against_reference(args)
    # NOTE(review): pass_test is computed but neither returned nor used as an
    # exit status in this chunk.


def run(confidence, population, reference, target, dataset, subject,
        data_type, template, normality_mask_dir):
    ''' Build a CLI-style argument list from Python values and call main(). '''
    sargs = [f'--confidence={confidence}',
             f'--reference={reference}',
             f'--dataset={dataset}',
             f'--subject={subject}',
             f'--data-type={data_type}',
             f'--template={template}']
    # Optional flags are appended only when a value was provided.
    sargs += [f'--population={population}'] if population is not None else []
    sargs += [f'--target={target}'] if target is not None else []
    sargs += [f'--normality-mask-dir={normality_mask_dir}'] if normality_mask_dir is not None else []
    main(sargs)


def run_all(confidence, population, reference, target, datasets, data_type,
            templates, normality_mask_dir):
    ''' Run the test for every template x dataset x subject combination. '''
    for template in templates:
        for dataset, subjects in datasets.items():
            for subject in subjects.keys():
                run(confidence, population, reference, target, dataset,
                    subject, data_type, template, normality_mask_dir)


# Script section: run every reference/target pairing.
# NOTE(review): the JSON file handle is opened but never closed in this chunk.
fi = open('fmriprep-reproducibility/fmriprep-cmd.json')
datasets = json.load(fi)
data_type = 'anat'
templates = ['MNI152NLin2009cAsym','MNI152NLin6Asym']
confidence = 0.95
population = 0.95

# fuzzy outputs as reference vs the three ieee variants as target.
reference='outputs/fuzzy'
target='outputs/ieee'
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

reference='outputs/fuzzy'
target='outputs/ieee_update'
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

reference='outputs/fuzzy'
target='outputs/ieee_seed'
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

# ieee_seed outputs as reference.
reference='outputs/ieee_seed'
target='outputs/ieee'
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

reference='outputs/ieee_seed'
target='outputs/ieee_update'
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

reference='outputs/ieee_seed'
target='outputs/fuzzy'
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

# Self-tests: target=None means the sample is tested against itself.
reference='outputs/fuzzy'
target=None
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)

reference='outputs/ieee_seed'
target=None
normality_mask_dir='outputs/test_normality'
run_all(confidence, population, reference, target, datasets, data_type, templates, normality_mask_dir)
test_SI.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
### Create interactive map of residents living within 400 - 1600 m (+ 250 m because of the grid resolution) walking distance from
### metro and train station in Finland's capital region with a value slider. Data for the map is loaded from open data services of
### Maanmittauslaitos (MML) and Helsinki Region Environmental Services Authority (HSY).

### IMPORT DATA ###

# Import modules
# NOTE(review): several of these imports (geocode, Point/Polygon, CRS,
# cascaded_union, mapclassify, contextily, make_axes_locatable,
# folium.plugins, branca) are not used within this chunk; cascaded_union is
# also deprecated in recent shapely in favor of unary_union — verify before
# pruning.
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, Polygon
from geopandas.tools import geocode
import numpy as np
from pyproj import CRS
import requests
import geojson
import matplotlib.pyplot as plt
from shapely.ops import cascaded_union
import mapclassify
import contextily as ctx
from mpl_toolkits.axes_grid1 import make_axes_locatable
import folium
import folium.plugins
import branca
import branca.colormap as cm

## Read shape file containing the capital region as polygons into variable 'grid'
## (Data from https://tiedostopalvelu.maanmittauslaitos.fi/tp/kartta)

# File path
fp_grid = "data/pkseutu.shp"

# Read in data
grid = gpd.read_file(fp_grid)

# Check if crs is correct and set crs to ETRS89 / TM35FIN if the crs is not defined correctly
if (grid.crs != "epsg:3067"):
    grid = grid.set_crs(epsg=3067)

# Reproject to WGS 84 / Pseudo-Mercator if the crs is not defined correctly
if (grid.crs != "epsg:4326"):
    grid = grid.to_crs(epsg=4326)

# Combine polygons of each city to form one polygon of the whole capial region
grid['constant'] = 0
boundary = grid.dissolve(by='constant')

# Check the data
#print(grid.head())
#print(grid.crs)
#print(boundary)

# +
## Read population grid data for 2018 into a variable `pop`.

# Specify the url for web feature service
url = 'https://kartta.hsy.fi/geoserver/wfs'

# Specify parameters (read data in json format).
params = dict(service='WFS',
              version='2.0.0',
              request='GetFeature',
              typeName='asuminen_ja_maankaytto:Vaestotietoruudukko_2018',
              outputFormat='json')

# Fetch data from WFS using requests
r = requests.get(url, params=params)

# Create GeoDataFrame from geojson
pop = gpd.GeoDataFrame.from_features(geojson.loads(r.content))

# Clean out unnecessary columns ('asukkaita' = residents per grid cell)
pop = pop[["asukkaita", "geometry"]]

# Set crs to ETRS89 / GK25FIN and reproject to WGS 84 / Pseudo-Mercator if the crs is not defined correctly
if (pop.crs == None):
    pop = pop.set_crs(epsg=3879)
if (pop.crs != "epsg:4326"):
    pop = pop.to_crs(epsg=4326)

# Check the data
#print(pop.head())
#print(pop.crs)

# +
## Read buffer polygons that describe 400 m, 800 m, 1200 m and 1600 m accessibilities via pedestrian and bicycle ways from metro and
## train stations

# Save wanted buffer sizes in a list which is used in loading the data
dists = ['400', '800', '1200', '1600']

# Create an empty geopandas GeoDataFrame for the data
buffs = gpd.GeoDataFrame()

# Iterate through wanted buffer distance list
for dist in dists:
    # Specify the url for web feature service and typeName of the data layer
    url_buff = 'https://kartta.hsy.fi/geoserver/wfs'
    type_name = dist + 'm_verkostobufferi'

    # Specify parameters (read data in json format).
    params_buff = dict(service='WFS',
                       version='2.0.0',
                       request='GetFeature',
                       typeName=type_name,
                       outputFormat='json')

    # Fetch data from WFS using requests
    r = requests.get(url_buff, params=params_buff)

    # Create GeoDataFrame from geojson
    buff = gpd.GeoDataFrame.from_features(geojson.loads(r.content))

    # Clean out unnecessary columns ('asema' = station name)
    buff = buff[["asema", "geometry"]]

    # Set crs to ETRS89 / GK25FIN and reproject to WGS 84 / Pseudo-Mercator if the crs is not defined correctly
    if (buff.crs == None):
        buff = buff.set_crs(epsg=3879)
    if (buff.crs != "epsg:4326"):
        buff = buff.to_crs(epsg=4326)

    # Clip out stations that are located outside the capital region
    clip_mask = buff.within(boundary.at[0,'geometry'])
    buff = buff.loc[clip_mask]

    # Create column which indicates buffer distance for the slider
    buff['dist'] = dist

    # Check the data
    #print(buff.head(1))
    #print(len(buff))

    # Add the data to combined GeoDataFrame
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this would
    # need pd.concat under newer pandas — left unchanged here.
    buffs = buffs.append(buff)

# Remove manually one station that wasn't clipped out
buffs = buffs[buffs.asema != 'Mankki']

# Check the data
#buffs.head()

# +
### PROCESS DATA ###

# Create new column to 'buffs' where total resident amounts within each buffer areas are stored
buffs["residents_sum"] = None

# Create a spatial join between grid layer and buffer layer. "Intersects" option used here to include all grid cells which
# touch the buffer area (NOTE that with this choice the accuracy of the buffers is lost due to the grid resolution)
# NOTE(review): the `op=` keyword of gpd.sjoin is deprecated in newer
# geopandas in favor of `predicate=` — left unchanged here.
pop_combined = gpd.sjoin(pop, buffs, how="left", op="intersects")

# Group the data by both train and metro station names AND distance classes
groupedA = pop_combined.groupby(['asema','dist'])

# Check the data
#groupedA.head()

# +
# Store sum of residents living approximately 400 m, 800 m, 1200 m and 1600 m from station to column "sum"
# (the distance doesn't stay constant in performed analysis but accurate enough for this visualization)
for name, group in groupedA:
    buffs.loc[(buffs["asema"]==name[0]) & (buffs['dist']==name[1]),'residents_sum'] = group["asukkaita"].agg("sum")

## Convert the buffer polygons to points (location set as centroids of 400 m buffers, approximate of the station locations)
point_data = buffs
point_data = point_data.reset_index()

# Replace NoData in residents_sum column with 0
point_data["residents_sum"] = point_data["residents_sum"].replace(to_replace=np.nan, value=0)

# Group the data by only train and metro station names
groupedB = point_data.groupby('asema')

# Convert to points based on centroids
for name, group in groupedB:
    point_data.loc[point_data["asema"]==name,'geometry'] = group['geometry'].centroid # NOTE: raises an warning which is ignored in this case

# Reorganize the column order
point_data = point_data[["geometry","asema","residents_sum", "dist"]]

# Check the data
#point_data.head()

# +
### PREPARE AND DIVIDE DATA

# Divide data from each buffer distances into separate GeoDataFrames
buff400 = point_data.loc[point_data['dist']=='400']
buff800 = point_data.loc[point_data['dist']=='800']
buff1200 = point_data.loc[point_data['dist']=='1200']
buff1600 = point_data.loc[point_data['dist']=='1600']

# Sort rows by station name
buff400 = buff400.sort_values(by=['asema'])
buff800 = buff800.sort_values(by=['asema'])
buff1200 = buff1200.sort_values(by=['asema'])
buff1600 = buff1600.sort_values(by=['asema'])

# Get x and y coordinates for each point in buff400
buff400["x"] = buff400["geometry"].apply(lambda geom: geom.x)
buff400["y"] = buff400["geometry"].apply(lambda geom: geom.y)

# Set same station coordinates for each buffer distances
# (relies on every frame being sorted by 'asema' above so rows align)
buff800['x'] = buff400['x'].values
buff800['y'] = buff400['y'].values
buff1200['x'] = buff400['x'].values
buff1200['y'] = buff400['y'].values
buff1600['x'] = buff400['x'].values
buff1600['y'] = buff400['y'].values

# +
### PLOT DATA

# Plot the basemap
m = folium.Map(location=[60.24026, 24.96179], tiles = 'cartodbpositron',
               zoom_start=10, control_scale=True, prefer_canvas=True)

# Create colormap
colormap = cm.LinearColormap(colors=['yellow','red'], index=[0,85000],vmin=0,vmax=max(point_data['residents_sum']),
                             caption='Residents living within defined walking distance from metro or train station')

# Define tool which creates point markers from input GeoDataSeries
def station_style(station):
    # `station` is a namedtuple row (from itertuples) carrying x, y, asema
    # and residents_sum; marker color encodes the resident count.
    return folium.CircleMarker(
        radius=3,
        location=(station.y, station.x),
        color=colormap(station.residents_sum),
        tooltip=station.asema + ",\n" + str(station.residents_sum) + " residents",
        fill=True,
        smooth_factor=1.0,
        fill_opacity=.8)

# Create empty FeatureGroup for each buffer
stations400 = folium.FeatureGroup(name='Residents within 400 m from metro and train stations')
stations800 = folium.FeatureGroup(name='Residents within 800 m from metro and train stations')
stations1200 = folium.FeatureGroup(name='Residents within 1200 m from metro and train stations')
stations1600 = folium.FeatureGroup(name='Residents within 1600 m from metro and train stations')

# Add data of each buffer to separate FeatureGroups
for station in buff400.itertuples():
    station_style(station).add_to(stations400)
for station in buff800.itertuples():
    station_style(station).add_to(stations800)
for station in buff1200.itertuples():
    station_style(station).add_to(stations1200)
for station in buff1600.itertuples():
    station_style(station).add_to(stations1600)

# Add point layers to map
stations400.add_to(m)
stations800.add_to(m)
stations1200.add_to(m)
stations1600.add_to(m)

# Create and add a layer control object
folium.LayerControl('topleft').add_to(m)

# Add legend
colormap.add_to(m)

# Save the map
outfp = "choropleth_map.html"
m.save(outfp)

m
docs/prob_2_v2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### ROT: Detecting and Forecasting the Occlusion Events between the Sun and Clouds in Realtime - Cont.

# Focus: Measuring the accuracy of the forecasts vs the SSD detection ground truths.

# NOTE(review): indentation in this chunk was reconstructed from a
# line-collapsed source; the nesting of the while-loop body (steps (0)-(2))
# follows the numbered protocol comments — confirm against the original
# notebook.

import os
import queue
import threading

import numpy as np
from matplotlib import pyplot as plt

# This is needed to display the images.
# %matplotlib inline

import tensorflow as tf

print(tf.__version__)

# +
# Project-local helpers: image producer thread, SSD detector, LSTM forecaster.
from rot_helper.thread_functions import sky_images_generator
from rot_helper.ssd_detection import detection_data, post_process_detection_data
from rot_helper.lstm_forecast import forecast_inferrence, return_sequence_ssd_detection_data
from rot_helper.lstm_forecast import n_windowed_forecast_inferrence, forecasted_occlusion_position
from rot_helper.lstm_forecast import forecast_report_on_occlusions_predictions_area
from rot_helper.lstm_forecast import longterm_forecast_report_on_occlusions_predictions_area
from rot_helper.lstm_forecast import post_process_ssd_detection_data_for_forecasting
from rot_helper.lstm_forecast import one_forecast_accuracy, multi_forecast_accuracy
# -

DEBUG=True

# Maps forecast horizon (as a str key) -> list of per-sample accuracies.
forecast_accuracy_dict = {}

# #### Sky Camera Files for Images Captured on the <2017_07_15> Day.

sky_camera_images_dir = '../experiments/experiment1/2017_07_15'

sky_camera_images_files = os.listdir(sky_camera_images_dir)
sky_camera_images_files.sort() # Sorted

sky_camera_images_files[:5]

len(sky_camera_images_files)

if DEBUG:
    # Extract a sequence of images that are part of the test part of the forecast model dataset.
    # Train: (7024, 50, 30)
    # Test: (3030, 50, 30)
    # Forecast_Test_Range [-3030: -2030] ~ about 1000 sample images and their sequences.
    sky_camera_images_files = sky_camera_images_files[-3130: -2030]

len(sky_camera_images_files)

sky_camera_images_files[0]

# #### Trained ML models hyper parameters

seq_num = 50
num_bboxes = 5
max_bboxes_vec_size = 6 * num_bboxes # About 6 points data for each of the 5 bboxes per an image.

forecasting_model_name = '../exported_ml_models/lstm_forecasting_model/toesc_forecasting_keras_lambda_model.h5' # A simple forecasting model to predict the next bboxes

# #### Data Structures to hold intermediary data

# To capture inbound images from the ground sky camera in realtime (i.e., a new image per second).
queue_of_inbound_sky_images = queue.Queue()

# To capture a sequence of images of length = seq_num (i.e., 50) to be used to forecast the next image detection data.
queue_of_images_sequence_for_forecast_task = queue.Queue(maxsize=seq_num)

# To save in memory the current ssd detection data
ssd_detection_data_dict = {}

# #### Spawn the producer thread

queue_of_inbound_sky_images.qsize() # Test<1>

# NOTE(review): the producer thread is never joined and is not marked daemon
# in this chunk.
t = threading.Thread(target=sky_images_generator, args=(sky_camera_images_files, queue_of_inbound_sky_images,))
t.start()

queue_of_inbound_sky_images.qsize() # Test<2,3, ..., k>

# #### Get an image file from the queue in a FIFO format

sample_image_filename = queue_of_inbound_sky_images.get()
sample_image_filename

# ### Sample Image Detection Data

# #### Env setup

sample_image_file = os.path.join(sky_camera_images_dir, sample_image_filename)

results_detection_data = detection_data(sample_image_file)
results_detection_data

ssd_detection_data = post_process_detection_data(results_detection_data)

ssd_detection_data_dict[list(ssd_detection_data.keys())[0]] = list(ssd_detection_data.values())[0]
ssd_detection_data_dict

# ### Simulate inbound sky images
#
# - Run SSD detection
# - Create sequence of sky images to run forecast/predictions of future frame detection data and visualize them
# - Run 24/7 or until stopped

# +
# Initial our dictionary for keeping track of the forecast accuracy
forecast_windows = [1, 5, 10, 25, 50, 75, 100]
for i in forecast_windows:
    forecast_accuracy_dict[str(i)] = []
# -

sim_counter = 0
while(1):
    get_the_next_sky_image_filename = queue_of_inbound_sky_images.get() # FIFO mode
    print("\n:: Current sky image name: ", get_the_next_sky_image_filename)
    current_sky_image_filepath = os.path.join(sky_camera_images_dir, get_the_next_sky_image_filename)

    # Run SSD Detection
    results_detection_data = detection_data(current_sky_image_filepath)
    ssd_detection_data = post_process_detection_data(results_detection_data)

    # Append it into the dict of ssd detection data
    ssd_detection_data_dict[list(ssd_detection_data.keys())[0]] = list(ssd_detection_data.values())[0]
    candidate_key = list(ssd_detection_data.keys())[0]

    #(0) Fill the fixed-size sequence queue until it holds seq_num entries.
    if queue_of_images_sequence_for_forecast_task.qsize() < seq_num:
        queue_of_images_sequence_for_forecast_task.put(candidate_key)

    #(1) If the forecast queue reaches seqNum, run the forecast model for the next 5, 10, 20, 40, 60, 80, 100 frames
    # And plot the frequency of predicted number of occlusion occurences in those predicted n-frames.
    if queue_of_images_sequence_for_forecast_task.qsize() == seq_num:
        print("\n\nTime to begin the forecast ...")

        # Pop the oldest entry from the queue and use it to extract seq_num detection data from the
        # ssd_detection_data_dict
        popped_sky_image_filename = queue_of_images_sequence_for_forecast_task.get()
        print("Popped: ", popped_sky_image_filename)

        # Create the sequence of detection data to be used for the prediction/forecast task
        extracted_seq_num_detection_data = return_sequence_ssd_detection_data(popped_sky_image_filename,
                                                                              ssd_detection_data_dict,
                                                                              seq_num,
                                                                              max_bboxes_vec_size)
        #print("\nExtracted: ", extracted_seq_num_detection_data)
        print("\nExtracted data hidden.\n")

        # Next 1st Frame Forecast
        next_frame_forecast = forecast_inferrence(extracted_seq_num_detection_data, forecasting_model_name) # Later compare it with the candidtate_key (visually)
        print(":: Next 1st Frame Forecast: ", next_frame_forecast, "\n")

        num_occlusions_bool = forecast_report_on_occlusions_predictions_area(next_frame_forecast)
        print("\n:: Next 1st Frame Forecast -- Summary Report:\n\t >> In regards to the presence of an occlusion event in the next Frame, the forecast says: ", num_occlusions_bool, "\n")

        forecast_accuracy = one_forecast_accuracy(candidate_key, num_occlusions_bool)
        print("\n:: forecast_accuracy: ", forecast_accuracy)
        forecast_accuracy_dict["1"].append(forecast_accuracy)

        ### Generic Forecast model for n-horizon window: Next [5, 10, 25, 50, 75, 100] Frames Forecast
        forecast_window = [5, 10, 25, 50, 75, 100]
        for i in forecast_window:
            print("\n\n:: Auto forecast the next <", i, "> frames.\n")
            next_n_frames_forecasts = n_windowed_forecast_inferrence(i, extracted_seq_num_detection_data, forecasting_model_name)

            # Forecast Report
            #print("\n:: Summary of the ", i, " forecasts:\n", next_n_frames_forecasts, "\n")
            print("\n:: Summary of the ", i, " forecasts data is hidden.\n")

            occlusion_bool_list = longterm_forecast_report_on_occlusions_predictions_area(next_n_frames_forecasts)
            print("\n:: Next", i, "Frames Forecast -- Summary Report:\n\t >> In regards to the presence of an occlusion event in the next Frames, the forecast says: ", occlusion_bool_list, "\n")
            print(":: Among the ", i, "Frames Forecasts, the Occlusion events appear in these next images: ", forecasted_occlusion_position(occlusion_bool_list))

            n_forecast_accuracy = multi_forecast_accuracy(candidate_key, occlusion_bool_list, i, sky_camera_images_files, sky_camera_images_dir)
            forecast_accuracy_dict[str(i)].append(n_forecast_accuracy)
            print("\n:: ", i, "th forecast_accuracy: ", n_forecast_accuracy)

        sim_counter += 1
        if sim_counter == 1000:
            break # Only collecting up to 1000 to evaluate the forecast accuracy.

        #(2) Put new entry in the forecast queue (0-1)
        queue_of_images_sequence_for_forecast_task.put(candidate_key)
        print("\n\n:: Newly Put: \n\t", candidate_key)

# ### Forecast accuracy evaluation at different forecast time window [1, 5, ..., 100]

forecast_windows = [1, 5, 10, 25, 50, 75, 100]
summary_accuracy_dict = {}
for i in forecast_windows:
    # Average accuracy over all collected samples for this horizon.
    avg_accuracy = sum(forecast_accuracy_dict[str(i)])/len(forecast_accuracy_dict[str(i)])
    summary_accuracy_dict[i] = avg_accuracy

summary_accuracy_dict

plt.plot(list(summary_accuracy_dict.keys()), list(summary_accuracy_dict.values()))
notebooks/ROT-Lean-Accuracy-Measure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_teachopencadd) # language: python # name: conda_teachopencadd # --- # # トークトリアル 3 # # # 化合物フィルタリング:好ましくない部分構造 # # #### Developed in the CADD seminars 2017 and 2018, AG Volkamer, Charité/FU Berlin # # <NAME> and <NAME> # ## このトークトリアルの目的 # # いくつか私たちのスクリーニングライブラリーに含めたくない部分構造があります。このトークトリアルでは、そのような好ましくない部分構造の様々なタイプを学び、そしてRDKitを使ってそれらの部分構造を見つけ、ハイライトする方法を学びます。 # # ## 学習の目標 # # ### 理論 # # * 好ましくない部分構造とは何か? # * 汎分析干渉化合物 Pan Assay Interference Compounds (PAINS) # # ### 実践 # # * ChEMBLデータベースから化合物のセットを読み込む(**トークトリアル 2** で準備したもの) # * RDKitでの実装を利用し好ましくない部分構造をフィルターにかけ取り除く # * 独自の好ましくない部分構造のリストを作成し、フィルタリングを実施 # * 部分構造の検索とハイライト # # ## レファレンス # # * Brenk et al.: "Lessons learnt from assembling screening libraries for drug discovery for neglected diseases" <i>Chem. Med. Chem.</i> (2008), <b>3</b>,435-444 (https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139) # * Brenk et al.: 好ましくない構造のSMARTS定義 (<i>Chem. Med. Chem.</i> (2008), <b>3</b>,435-444 のSupporting InformationのTable 1(https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139) # * Baell et al.: "New substructure filters for removal of Pan Assay Interference Compounds (PAINS) from screening libraries and for their exclusion in bioassays" <i>J. Med. Chem.</i> (2010), <b>53(7)</b>,2719-2740 (https://pubs.acs.org/doi/abs/10.1021/jm901137j) # * Rajarshi GuhaによるPAINSのフォーマットのSLN(Baell et al. 
publication) からSMARTSへの変換: http://blog.rguha.net/?p=850; RDKit使われているSMARTSはGreg LandrumがRajarshi GuhaのKNIMEワークフローから収集・整理したものです: http://rdkit.blogspot.com/2015/08/curating-pains-filters.html # * https://en.wikipedia.org/wiki/Pan-assay_interference_compounds # * TDT -Tutorial2014 - (https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb) # _____________________________________________________________________________________________________________________ # # # ## 理論 # # ### 好ましくない部分構造 # # 部分構造には好ましくないものがあります。例えば毒性あるいは反応性があるといった理由や、薬物動態学的特性が好ましくないという理由、あるいは特定のアッセイに干渉する可能性が高いという理由です。 # 今日の医薬品探索ではよくハイスループットスクリーニング ([HTS wikipedia](https://en.wikipedia.org/wiki/High-throughput_screening)) を実施します。好ましくない部分構造をフィルタリングすることで、より望ましいスクリーニングライブラリを構築することができます。これにより、スクリーニングの前にライブラリの数を減らすことができ、時間と資源の節約につながります。 # # Brenkらは顧みられない病気の治療のための化合物の探索に使用するスクリーニングライブラリーをフィルタリングするため、好ましくない部分構造のリストを作成しました([<i>Chem. Med. Chem.</i> (2008), <b>3</b>,435-444](https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139))。好ましくない特徴の例として、ニトロ基(変異原性の問題)、スルホン酸基とリン酸基(薬物動態学的特性が好ましくない可能性が高い)、2-ハロピリジンとチオール基(反応性の問題)、といったものが挙げられます。 # # 好ましくない部分構造のリストは上記の文献に報告されており、このトークトリアルの実践編でも使用します。 # ### 汎分析干渉化合物 Pan Assay Interference Compounds (PAINS) # # #### 概要 # # PAINS ([PAINS wikipedia](https://en.wikipedia.org/wiki/Pan-assay_interference_compounds)) は実際には偽陽性にも関わらず、HTSでしばしばヒットとして見出される化合物です。PAINSは特定の一つのターゲット分子と反応するというよりもむしろ、非特異的に多数のターゲット分子と反応する傾向があります。通常、非特異的な結合もしくはアッセイの構成要素との相互作用により、様々なアッセイで様々なタンパク質に対して見かけ上結合します。 # [![PAINS](./images/PAINS_Figure.jpeg)](https://commons.wikimedia.org/wiki/File:PAINS_Figure.tif) # <div align="center">Figure 1:汎分析干渉化合物(Pan Assay Interference Compounds、PAINS)の観点における特異的結合と非特異的結合。図は[wikipedia](https://commons.wikimedia.org/wiki/File:PAINS_Figure.tif)より引用 </div> # #### Baellらによる文献で用いられたフィルター # # Baellらはアッセイのシグナルに干渉する部分構造に焦点を当てました ([<i>J. Med. 
Chem.</i> (2010), <b>53(7)</b>,2719-2740](https://pubs.acs.org/doi/abs/10.1021/jm901137j))。そのようなPAINSを見つけるのに役立つ部分構造について記述し、部分構造フィルタリングに利用できるリストを提供しました。 # ## 実践 # # ### データの読み込みと可視化 # まず、必要なライブラリをインポートし、**トークトリアル T2** でフィルタリングしたデータセットを読み込み、はじめの分子群を描画します。 import pandas from rdkit import Chem from rdkit.Chem.Draw import IPythonConsole from rdkit.Chem import rdFMCS from rdkit.Chem import AllChem from rdkit.Chem import Descriptors from rdkit.Chem import Draw from rdkit import DataStructs from rdkit.Chem import PandasTools import matplotlib.pyplot as plt filteredData = pandas.read_csv("../data/T2/EGFR_compounds_lipinski.csv", delimiter=";", index_col=0) filteredData.drop(['HBD','HBA','MW','LogP'], inplace=True, axis=1) # 不必要な情報の除去 print ('Dataframe shape: ', filteredData.shape) # データフレームの次元をプリント filteredData.head(5) PandasTools.AddMoleculeColumnToFrame(filteredData, smilesCol='smiles') # 分子を列に追加 # 最初の20個の分子を描画 Draw.MolsToGridImage(list(filteredData.ROMol[0:20]), legends=list(filteredData.molecule_chembl_id[0:20]), molsPerRow=4) # ### RDKitを使用してPAINSをフィルタリング # # PAINSフィルターはすでにRDKitに実装されています([RDKit Documentation](http://rdkit.org/docs/source/rdkit.Chem.rdfiltercatalog.html))。使ってみましょう! 
from rdkit.Chem.FilterCatalog import *

# Build a filter catalog containing all PAINS classes (A, B and C).
params = FilterCatalogParams()
params.AddCatalog(FilterCatalogParams.FilterCatalogs.PAINS)
catalog = FilterCatalog(params)

# +
# Collect the matches in plain Python lists and build the DataFrames once at
# the end: the original appended row-by-row with `df.loc[len(df)] = ...`,
# which reallocates the frame on every insert and is quadratic in the number
# of compounds.
highlight_rows = []   # (CompID, CompMol, unwantedID) for compounds with a PAINS hit
no_pains_rows = []    # (ChEMBL_ID, smiles, pIC50) for clean compounds
with_pains_rows = []  # (ChEMBL_ID, smiles, pIC50, unwantedID) for flagged compounds

# Loop over index and rows of the filtered dataframe.
for i, row in filteredData.iterrows():
    curMol = Chem.MolFromSmiles(row.smiles)  # current molecule
    # Only the FIRST matching PAINS pattern is checked (GetFirstMatch), which
    # is enough to decide keep/flag; use catalog.GetMatches(curMol) if every
    # hit per molecule is needed.
    entry = catalog.GetFirstMatch(curMol)
    if entry is not None:
        description = entry.GetDescription().capitalize()
        highlight_rows.append((row.molecule_chembl_id, curMol, description))
        with_pains_rows.append((row.molecule_chembl_id, row.smiles, row.pIC50, description))
    else:
        no_pains_rows.append((row.molecule_chembl_id, row.smiles, row.pIC50))

# Assemble the result frames in one pass (linear time).
rdkit_highLightFramePAINS = pandas.DataFrame(highlight_rows, columns=('CompID', 'CompMol', 'unwantedID'))
rdkit_noPAINS = pandas.DataFrame(no_pains_rows, columns=('ChEMBL_ID', 'smiles', 'pIC50'))
rdkit_withPAINS = pandas.DataFrame(with_pains_rows, columns=('ChEMBL_ID', 'smiles', 'pIC50', 'unwantedID'))

print("Number of compounds with PAINS: %i"%(len(rdkit_withPAINS)))
print("Number of compounds without PAINS: %i (=remaining compounds)"%(len(rdkit_noPAINS)))
rdkit_highLightFramePAINS.head(10)
# -

# **Note (translated from the Japanese annotation, 04/2020)**
#
# The original translator's note explained the `df.loc[len(df)] = ...` idiom
# (indexing one past the last row to append). The rewrite above instead
# accumulates plain tuples and constructs each DataFrame once, which is the
# recommended and linear-time way to build a frame row by row.
#
# A similar FilterCatalog usage example can be found in the RDKit
# documentation:
# http://rdkit.org/docs/source/rdkit.Chem.rdfiltercatalog.html#rdkit.Chem.rdfiltercatalog.FilterCatalogEntry
#
#
**訳注ここまで** # ### 好ましくない/毒性の懸念される部分構造でフィルタリング(Brenkのリスト) # # RDKitには、PAINSのような好ましくない部分構造のリストがいくつかすでに実装されています。ですが、実装されていないRDKit外部のリストを使って部分構造の一致検索を行うことも可能です。ここではBrenkらによる文献([<i>Chem. Med. Chem.</i> (2008), <b>3</b>,435-444](https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139))のsupporting informationで提供されているリストを使用します。 # # 注) データをダウンロードし、dataフォルダにcsvファイルとして保存済みです(形式は name-space-SMARTS です)。まず、データを読み込みます。 # + unwantedSubs = [] unwantedNames = [] for line in open('../data/T3/unwantedSubstructures.csv', 'r'): if not line.startswith("#"): # ヘッダーを無視 splitted = line.strip().split(" ") # 各行を分割 m = Chem.MolFromSmarts(splitted[1]) # SMARTSから分子を生成 name = splitted[0].capitalize() # nameに名前を保存 unwantedNames.append(name) # 好ましくない部分構造の名前をリストに追加 unwantedSubs.append(m) # 好ましくない部分構造をリストに追加 print("Number of unwanted substructures in list =", len(unwantedSubs)) # 好ましくない部分構造の数を表示 # - # **訳注(04/2020)** # # Pythonやプログラミング初心者仲間のための蛇足・・・ # 「unwantedSubstructures.csv」ファイルは1行目のみ「#name smart」となっています。`if not line.starwith(#)`の部分で「#」から始まる1行目(header)を飛ばしています。 # `strip()`は文字列の先頭・末尾の余分な文字を削除するメソッドで、空白文字としてスペース、タブ、改行も取り除けるのでcsvの改行文字を除いているのだと思います。 # `split(" ")`はスペースで分割しているので、改行文字を取り除いた各行の`name-space-SMARTS`形式を`space`で分割し`name`と`SMARTS`にしているのだと思います。 # こう考えると`splitted[0]`が`name`、`splitted[1]`が`SMARTS`になっていることがわかりやすい気がします。 # # **訳注ここまで** # 2、3個部分構造を見てみましょう (すべてのSMARTSが表示できるわけではありません。一部を選んでいます。)。 Chem.Draw.MolsToGridImage(list(unwantedSubs[2:5]), subImgSize=(200, 300), legends=unwantedNames[2:5]) # これらの好ましくない部分構造とマッチするものがあるか、フィルタリングしたデータフレームの中を検索してみます。 # + # フィルタリングしたデータを格納するためのデータフレームを作成 highLightFrameUNW = pandas.DataFrame(columns=('CompID', 'CompMol', 'unwantedID', 'unwSubstr')) noUnwanted = pandas.DataFrame(columns=('ChEMBL_ID', 'smiles','pIC50')) withUnwanted = pandas.DataFrame(columns=('ChEMBL_ID', 'smiles', 'pIC50','unwantedID')) molsToDraw = [] # データセットの各化合物に対して for i,row in filteredData.iterrows(): # フィルタリングしたデータフレームのインデックスと行についてfor ループを回す curMol = Chem.MolFromSmiles(row.smiles) # 
現在の分子 match = False # Falseにmatchを設定 unwantedList = [] molsToDraw.append(curMol) # 全ての好ましくない部分構造を検索 for idx, unwSub in enumerate(unwantedSubs): # 部分構造があるかチェック if curMol.HasSubstructMatch(unwSub): # 現在の分子が好ましくない部分構造を有する場合 match = True # matchをTrueに設定 unwantedList.append(unwantedNames[idx]) # 好ましくない部分構造の名前をリストに追加 # 関連する情報をデータフレームに追加 highLightFrameUNW.loc[len(highLightFrameUNW)] = [row.molecule_chembl_id, curMol, unwantedNames[idx], unwSub] if not match: # 一致する部分構造が見つからない場合 noUnwanted.loc[len(noUnwanted)] = [row.molecule_chembl_id, row.smiles, row.pIC50] # 欲しい部分構造のデータフレームに関連情報を追加 else: # 一致が見つかった場合 withUnwanted.loc[len(withUnwanted)] = [row.molecule_chembl_id, row.smiles, row.pIC50, unwantedList] # 好ましくない部分構造データフレームに関連する情報を追加 print("Number of compounds with unwanted substructures: %i"%(len(withUnwanted))) print("Number of compounds without unwanted substructures: %i (=remaining compounds)"%(len(noUnwanted))) # - highLightFrameUNW.head(4) # 部分構造を分子の中で直接ハイライトすることもできます。 # + first_highLightFrameUNW = highLightFrameUNW.head(8) # リストの最初の8エントリーのサブセット # 分子を描画し、好ましくない部分構造をハイライトする Draw.MolsToGridImage(list(first_highLightFrameUNW["CompMol"]), subImgSize=(400,300), molsPerRow=2, highlightAtomLists= [m.GetSubstructMatch(first_highLightFrameUNW["unwSubstr"][i]) for i,m in enumerate(first_highLightFrameUNW["CompMol"])], legends=list(first_highLightFrameUNW["CompID"]+": "+first_highLightFrameUNW["unwantedID"])) # - # **訳注(04/2020)** # # Pythonやプログラミング初心者仲間のための蛇足・・・ # `[m.GetSubstructMatch(first_highLightFrameUNW["unwSubstr"][i]) for i,m in enumerate(first_highLightFrameUNW["CompMol"])]`の部分ですが、`enumerate`はインデックス番号と要素を同時に取得するメソッドで、ここではそれぞれ**i**と**m**を割り当てています。 # 要素**m**分子がもつ好ましくない部分構造を見つけるために、RDKitの`GetSubstructMatch`を使用しており、検索対象の好ましくない部分構造を`unwSubstr`列のインデックス**i**を参照するという形で実行されています。 # # **訳注ここまで** # 例をSVGファイルとして保存します。 # + # イメージをファイルに保存 img = Draw.MolsToGridImage(list(first_highLightFrameUNW["CompMol"]), subImgSize=(400,300), molsPerRow=3, highlightAtomLists= 
[m.GetSubstructMatch(first_highLightFrameUNW["unwSubstr"][i]) for i,m in enumerate(first_highLightFrameUNW["CompMol"])], legends=list(first_highLightFrameUNW["unwantedID"]), useSVG=True) # SVGデータの取得 molsvg = img.data # 不透明な背景を透明に置き換える molsvg = molsvg.replace("opacity:1.0", "opacity:0.0"); molsvg = molsvg.replace("12px", "24px"); # 変更されたSVGデータをファイルに保存 f = open("../data/T3/substructures.svg", "w") f.write(molsvg) f.close() # - # 好ましくない部分構造を持つ化合物と持たない化合物のリストを保存。 # + # 好ましくない部分構造を有する化合物をcsvファイルに書き出し withUnwanted.to_csv("../data/T3/EGFR_compounds_lipinski_noPAINS.csv", sep=',') # 好ましくない部分構造を持たない化合物をcsvファイルに書き出し noUnwanted.to_csv("../data/T3/EGFR_compounds_lipinski_noPAINS_noBrenk.csv", sep=',') # 好ましくない部分構造を有する化合物のcsvファイルの最初のいくつかを表示 noUnwanted.head() # - # 見つかった好ましくない部分構造をさらに解析します。 # + # 最も頻度の高い化合物の数を数えます。 unwCounts = {} for ele in highLightFrameUNW.values: unw = ele[2] # highLightFrameUNWデータフレームに含まれる好ましくない部分構造のID if unwCounts.get(unw, "empty") == "empty": # 好ましくない構造のIDがまだ辞書に含まれていない場合 unwCounts[unw] = [1, ele[3]] # 1を付与し、辞書の好ましくない構造に追加 else: # もしkey (好ましくない構造のID)がすでに存在しているなら、出現回数の値を1増やす unwCounts[unw][0] += 1 frequentUNW = [] frequentUNWNames = [] # unwCountsに含まれる構造: IDをkey、出現頻度と分子をvalueとしてもつ辞書 # 例 ('acyclic-C=C-O', [7, <rdkit.Chem.rdchem.Mol object at 0x7fa58fc06710>]) # 部分構造の頻度で辞書をソートする for key, value in sorted(unwCounts.items(), key=lambda kv: kv[1][0], reverse=True): frequentUNW.append(value[1]) # 部分構造 frequentUNWNames.append(key) # - # **訳注(04/2020)** # # Pythonやプログラミング初心者仲間のための蛇足・・・ # `for ele in highLightFrameUNW.values`の部分ですが、`values`はDataFrameの実際のデータの値で、NumPyの配列`ndarray`です。 # DataFrame`highLightFrameUNW`を参照していただければわかると思いますが、`ele[2]`は2列目`unwantedID`列を、`ele[3]`は3列目`unwSubstr`列の要素を指定しています。 # `unwCounts.get(unw, "empty")`の部分ですが、`get(key[, default])`は辞書型においてkey が辞書にあれば key に対する値を、そうでなければ default を返します。ですので、今回は辞書`unwCounts`に**key**`unw(unwntedID, ele[2])`があるかどうかを検索し、なければ**defalut**として`empyt`を返すというようになっています。辞書`unwCounts`のkeyは`unwntedID`、valueはリスト`[出現回数, 
好ましくない部分構造]`となっているので、**辞書に含まれていない好ましくない部分構造**の場合は新しくエントリーを辞書に追加して値を出現回数を1に設定、**すでに辞書に含まれている構造**の場合は出現回数`[unw][0]`をインクリメントするという手順になっているのだと思います。 # # **訳注ここまで** # 頻度の高い部分構造の上位8個を描画します。 # 上位8個の頻度の高い部分構造 Draw.MolsToGridImage(mols=list(frequentUNW[0:8]), legends=list(frequentUNWNames[0:8]), molsPerRow=4) # ## ディスカッション # # このトークトリアルでは、好ましくない部分構造の検索を行う2つの方法を学びました。: # * RDKitにすでに実装されている `FilterCatalog`クラス、そして、 # * RDKitに含まれていない外部のリストと`HasSubstructMatch()`関数です。 # # 実際のところ、PAINSを部分構造検索で実装することもできます。また、BrenkらによるリストもすでにRDKitに実装されています。実装済みのその他のリストについては ([RDKit Documentation](http://rdkit.org/docs/source/rdkit.Chem.rdfiltercatalog.html))を参照してください。 # # これまで、`HasSubstructMatch()`関数を使用してきましたが、これは一つの化合物あたり一つのマッチ結果しか出しません。[`GetSubstructMatches()`](http://www.rdkit.org/Python_Docs/rdkit.Chem.rdchem.Mol-class.html#GetSubstructMatches) 関数を使用すると、一つの化合物に含まれる全ての部分構造を見つけることができます。 # 同様にPAINSに関して、一つの分子あたり、最初にマッチしたもの `GetFirstMatch()` だけをみてきました。もし、全てのPAINSをフィルタリングして除去したいのであればこれで十分です。ですが、ある分子のもつ全ての危険な部分構造を見るために、`GetMatches()`を使用することもできます。一つの分子あたり全てのマッチする部分構造を考慮しているわけではないので、最後に描画した部分構造が実際に最も頻度の高いものであったと言うことはできません。ですが、頻度が高いということだけは明らかです。 # # 見つかった部分構造は2つの異なる方法で処理できます。 # * 部分構造検索をフィルターとして適用し、引っかかった化合物を、資金と時間の節約のため、さらなる試験からは除外する # * あるいは警告として使用することもできます。好ましくない部分構造を有する分子にフラグを立てることができます。(例えば、化学者や毒性学者•・・といった)専門家の目から見て、経験に基づき判断することができるかもしれません。もし、各部分構造がそこまでクリティカルなものでなければ、スクリーニングの対象化合物として含めることも可能です。 # # ここでは、機械学習に使用するために、あまりに多くの化合物を削除することはしたくなかったので、好ましくない部分構造をフィルタリングで除去することはしません。また、部分構造によるフィルタリングは、後ほど実際のスクリーニング実験を行うまえに適用することもできます。警告(アラート)のフラッグを設定することも可能で、そうすれば(PAINSやBrenk等どのようなリストにでも順じて)好ましくない部分構造についての情報を保持し、あとで考慮することが可能です。 # ## クイズ # * なぜスクリーニングライブラリーから「PAINS」を取り除くことを考えるべきなのでしょうか?これらの化合物の問題点とは何でしょうか? # * ある好ましくない部分構造を取り除く必要がないような状況を見つけることはできますか? # * このチュートリアルで使用した部分構造はどうやってエンコードされていましたか?
3_substructure_filtering/T3_substructure_filtering_JP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # Generate text with RuGPTs in huggingface
# How to generate text with pretrained RuGPTs models with huggingface.
#
# This notebook is valid for all RuGPTs models except RuGPT3XL.
# ## Install env

# +
# Point /usr/local/cuda at the CUDA 10.1 toolkit expected by the pinned
# torch 1.7.1 build below.
# !rm -rf /usr/local/cuda
# !ln -s /usr/local/cuda-10.1 /usr/local/cuda

# + language="bash"
# export LD_LIBRARY_PATH=/usr/lib/

# +
# !apt-get install clang-9 llvm-9 llvm-9-dev llvm-9-tools

# +
import subprocess

# Parse the toolkit release out of `nvcc --version` to pick a matching
# torch wheel suffix.
CUDA_version = [
    s
    for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ")
    if s.startswith("release")
][0].split(" ")[-1]
print("CUDA version:", CUDA_version)

if CUDA_version == "10.0":
    torch_version_suffix = "+cu100"
elif CUDA_version == "10.1":
    torch_version_suffix = "+cu101"
elif CUDA_version == "10.2":
    torch_version_suffix = ""  # default torch 1.7.1 wheels target CUDA 10.2
else:
    torch_version_suffix = "+cu110"

# + [markdown]
# If code below doesn't work, check your cuda version and installation here https://pytorch.org/get-started/previous-versions/

# +
# !pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html

# +
# !pip3 install transformers==3.5.0

# +
# !git clone https://github.com/sberbank-ai/ru-gpts

# + [markdown]
# ## Generate

# +
import numpy as np
import torch

# Fix the RNG seeds so that sampling-based generation is reproducible.
np.random.seed(42)
torch.manual_seed(42)

from transformers import GPT2LMHeadModel, GPT2Tokenizer


def load_tokenizer_and_model(model_name_or_path):
    """Return a (tokenizer, model) pair for `model_name_or_path`; the model is moved to the GPU."""
    return (
        GPT2Tokenizer.from_pretrained(model_name_or_path),
        GPT2LMHeadModel.from_pretrained(model_name_or_path).cuda(),
    )


def generate(
    model,
    tok,
    text,
    do_sample=True,
    max_length=50,
    repetition_penalty=5.0,
    top_k=5,
    top_p=0.95,
    temperature=1,
    num_beams=None,
    no_repeat_ngram_size=3,
):
    """Generate continuations of `text` with `model` and decode them to strings.

    The keyword arguments mirror `transformers` `model.generate`; see its
    documentation for the sampling / beam-search semantics.
    """
    # Encode the prompt and move it to the GPU once. (The original code
    # called .cuda() a second time inside model.generate, which was a
    # redundant no-op transfer.)
    input_ids = tok.encode(text, return_tensors="pt").cuda()
    out = model.generate(
        input_ids,
        max_length=max_length,
        repetition_penalty=repetition_penalty,
        do_sample=do_sample,
        top_k=top_k,
        top_p=top_p,
        temperature=temperature,
        num_beams=num_beams,
        no_repeat_ngram_size=no_repeat_ngram_size,
    )
    return list(map(tok.decode, out))


# + [markdown]
# ### RuGPT2Large

# +
tok, model = load_tokenizer_and_model("sberbank-ai/rugpt2large")
generated = generate(model, tok, "<NAME> родился в ", num_beams=10)
generated[0]

# + [markdown]
# ### RuGPT3Small

# +
tok, model = load_tokenizer_and_model("sberbank-ai/rugpt3small_based_on_gpt2")
generated = generate(model, tok, "<NAME> родился в ", num_beams=10)

# +
generated[0]

# + [markdown]
# ### RuGPT3Medium

# +
tok, model = load_tokenizer_and_model("sberbank-ai/rugpt3medium_based_on_gpt2")
generated = generate(model, tok, "<NAME> родился в ", num_beams=10)

# +
generated[0]

# + [markdown]
# ### RuGPT3Large

# +
tok, model = load_tokenizer_and_model("sberbank-ai/rugpt3large_based_on_gpt2")
generated = generate(model, tok, "<NAME> родился в ", num_beams=10)

# +
generated[0]
examples/Generate_text_with_RuGPTs_HF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import scipy.stats as st
import numpy as np
import pandas as pd
import pymc3 as pm
from quap import quap  # project-local quadratic-approximation helper
import arviz as az
import statsmodels.api as sm
import math

# Divorce-rate dataset; columns used below: Divorce, Marriage,
# MedianAgeMarriage.
data = pd.read_csv("Data/WaffleDivorce.csv", delimiter=";")


def standardize(series):
    """Standardize a pandas series"""
    # z-score: center on the mean, scale by the sample standard deviation.
    std_series = (series - series.mean()) / series.std()
    return std_series


# Standardize regressors and target
data["Divorce_std"] = standardize(data["Divorce"])
data["Marriage_std"] = standardize(data["Marriage"])
data["MedianAgeMarriage_std"] = standardize(data["MedianAgeMarriage"])

# Linear regression of (standardized) divorce rate on BOTH marriage rate
# and median marriage age.
with pm.Model() as m_age_mrate:
    a = pm.Normal("a", 0, 0.2)
    b1 = pm.Normal("b1", 0, 0.5)
    b2 = pm.Normal("b2", 0, 0.5)
    sigma = pm.Exponential("sigma", 1)
    mu = a + b1 * data.Marriage_std + b2 * data.MedianAgeMarriage_std
    divorce_rate_std = pm.Normal(
        "divorce_rate_std", mu=mu, sigma=sigma, observed=data.Divorce_std
    )
    prior_samples = pm.sample_prior_predictive()
    # NOTE(review): indentation was lost in this source; `quap` and
    # `sample_prior_predictive` are placed inside the model context, which
    # PyMC3 requires — confirm against the original notebook.
    idata,dist = quap(vars=[a,b1,b2,sigma],n_samples=10_000)

samples = az.InferenceData.to_dataframe(idata)
az.summary(idata, kind="stats",hdi_prob=0.89)

az.plot_forest(
    idata,
    var_names=["a","b1", "b2"],
)
plt.axvline(0)  # reference line at zero effect

# +
# Simulate strongly collinear predictors: both legs are ~40-50% of height
# plus a tiny measurement error.
N = 100  # number of individuals
height = np.random.normal(10, 2, N)  # sim total height of each
leg_prop = np.random.uniform(0.4, 0.5, N)  # leg as proportion of height
leg_left = leg_prop * height + np.random.normal(0, 0.02, N)  # sim left leg as proportion + error
leg_right = leg_prop * height + np.random.normal(0, 0.02, N)  # sim right leg as proportion + error

d = pd.DataFrame(
    np.vstack([height, leg_left, leg_right]).T,
    columns=["height", "leg_left", "leg_right"],
)  # combine into data frame
d.head()
# -

# Height regressed on the LEFT leg only.
with pm.Model() as left_leg:
    a = pm.Normal("a", 10, 100)
    b_left = pm.Normal("b_left", 2, 10)
    sigma = pm.Exponential("sigma", 1)
    mu = a + b_left * d.leg_left
    height = pm.Normal("height", mu=mu, sigma=sigma, observed=d.height)
    idata,dist = quap(vars=[a,b_left,sigma],n_samples=10_000)

samples = az.InferenceData.to_dataframe(idata)
az.summary(idata, kind="stats",hdi_prob=0.89)

az.plot_forest(
    idata,
    var_names=["a","b_left"],
)
plt.axvline(0)

# Height regressed on the RIGHT leg only.
with pm.Model() as right_leg:
    a = pm.Normal("a", 10, 100)
    b_right = pm.Normal("b_right", 2, 10)
    sigma = pm.Exponential("sigma", 1)
    mu = a + b_right * d.leg_right
    height = pm.Normal("height", mu=mu, sigma=sigma, observed=d.height)
    idata,dist = quap(vars=[a,b_right,sigma],n_samples=10_000)

samples = az.InferenceData.to_dataframe(idata)
az.summary(idata, kind="stats",hdi_prob=0.89)

az.plot_forest(
    idata,
    var_names=["a","b_right"],
)
plt.axvline(0)

# Both legs together: because the predictors are nearly identical, the two
# slope posteriors become wide and anti-correlated.
with pm.Model() as both_legs:
    a = pm.Normal("a", 10, 100)
    b_left = pm.Normal("b_left", 2, 10)
    b_right = pm.Normal("b_right", 2, 10)
    sigma = pm.Exponential("sigma", 1)
    mu = a + b_left * d.leg_left + b_right * d.leg_right
    height = pm.Normal("height", mu=mu, sigma=sigma, observed=d.height)
    idata,dist = quap(vars=[a,b_left,b_right,sigma],n_samples=10_000)

samples = az.InferenceData.to_dataframe(idata)
az.summary(idata, kind="stats",hdi_prob=0.89)

az.plot_forest(
    idata,
    var_names=["a","b_left","b_right"],
)
plt.axvline(0)

# +
# Post-treatment variable example: treatment reduces fungus, fungus reduces
# growth.
# number of plants
N = 100

# simulate initial heights
h0 = np.random.normal(10, 2, N)

# assign treatments and simulate fungus and growth
treatment = np.repeat([0, 1], N / 2)
fungus = np.random.binomial(n=1, p=0.5 - treatment * 0.4, size=N)
h1 = h0 + np.random.normal(5 - 3 * fungus, size=N)

# compose a clean data frame
d = pd.DataFrame.from_dict({"h0": h0, "h1": h1, "treatment": treatment, "fungus": fungus})
az.summary(d.to_dict(orient="list"), kind="stats", round_to=2)
# -

treatment  # display the treatment assignment vector

# Growth as a pure multiplicative proportion p of initial height
# (no predictors).
with pm.Model() as m6_6:
    p = pm.Lognormal("p", 0, 0.25)
    sigma = pm.Exponential("sigma", 1)
    mu = h0*p
    # NOTE(review): the observed variable is named "height" here but "h1" in
    # m_6_7 below — an inconsistency in the original, left unchanged.
    h1 = pm.Normal("height", mu=mu, sigma=sigma, observed=d.h1)
    idata,dist = quap(vars=[p,sigma],n_samples=10_000)

samples = az.InferenceData.to_dataframe(idata)
az.summary(idata, round_to=2, kind='stats', hdi_prob=0.89)

# +
# Growth proportion modeled on BOTH treatment and fungus; conditioning on
# the post-treatment variable (fungus) masks the treatment effect.
with pm.Model() as m_6_7:
    a = pm.Normal("a", 0, 0.2)
    b1 = pm.Normal("b1", 0, 0.5)
    b2 = pm.Normal("b2", 0, 0.5)
    sigma = pm.Exponential("sigma", 1)
    p = a + b1 * d.treatment + b2 * d.fungus
    mu = p * d.h0
    h1 = pm.Normal("h1", mu=mu, sigma=sigma, observed=d.h1)
    idata,dist = quap(vars=[a,b1,b2,sigma],n_samples=10_000)

samples = az.InferenceData.to_dataframe(idata)
az.summary(idata, kind="stats",round_to=2)
# -

az.plot_forest(
    idata,
    var_names=["a","b1","b2"],
)
plt.axvline(0)
Week 5 lecture.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.8 64-bit
#     language: python
#     name: python36864bita056cf26f1db41c3aa481369b2a4c10e
# ---

# + [markdown]
# # CRIME PREDICTION USING ML
#
# Predict the type of crime from the (averaged) coordinates of the incident
# using a K-nearest-neighbours classifier.

# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from matplotlib.colors import ListedColormap
from sklearn.metrics import confusion_matrix
import seaborn as sns
# %matplotlib inline

df = pd.read_csv('crime.csv')
df
# -

# # DROPPING UNWANTED COLUMNS

dataset = pd.DataFrame(df)
dataset = dataset.drop(
    ['YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'HUNDRED_BLOCK',
     'NEIGHBOURHOOD', 'X', 'Y', 'Latitude'],
    axis=1,
)
dataset

# # A DEMO VIEW OF 30 ROWS

df.head(30)

# # DEMO DATA

demo_view = df.iloc[1]
demo_view

# # REDUCING SIZE OF DATASET (first 2000 rows)

# **INDEPENDENT VARIABLE**
x = df.head(2000)
x

# **LATITUDE** — column 10, kept two-dimensional: shape (2000, 1).
a = x.iloc[:, 10:11].values
a

# **LONGITUDE** — column 11, one-dimensional: shape (2000,).
b = x.iloc[:, 11].values
b

# # TAKING AVERAGE OF LATITUDE AND LONGITUDE
# NOTE(review): collapsing (lat, long) into their mean is a lossy scalar
# feature; kept exactly as in the original analysis.
X = []
for i in range(0, 2000):
    ab = a[i] + b[i]
    X.append(ab / 2)
X

# # DEPENDENT VARIABLE
# **REDUCING SIZE TO 2K**
y = df.head(2000)
y

# **DEPENDENT VARIABLE TAKEN** — crime type, column 0, shape (2000, 1).
y = y.iloc[:, 0:1].values
y

# # FINDING DIFFERENT TYPES OF CRIME
YY = []
ynum = 0
for i in y:
    if i not in YY:
        ynum += 1
        YY.append(i)
YY

# **THERE ARE 7 DIFFERENT TYPES OF CRIMES IN THE DATASET**
ynum

# # REPLACING EACH CRIME WITH A NUMBER FOR EASY COMPUTATION
Y = y
for i in range(0, 2000):
    if Y[i] == 'Other Theft':
        Y[i] = 1
    elif Y[i] == 'Break and Enter Residential/Other':
        Y[i] = 2
    elif Y[i] == 'Mischief':
        Y[i] = 3
    elif Y[i] == 'Break and Enter Commercial':
        Y[i] = 4
    elif Y[i] == 'Offence Against a Person':
        Y[i] = 5
    elif Y[i] == 'Theft from Vehicle':
        Y[i] = 6
    else:
        Y[i] = 7
Y

# # DATA VISUALIZATION
X

# **MANY NULL VALUES WERE FOUND**

# # DATA CLEANING
# Rows with missing coordinates carry (0, 0), whose average is >= 0, while
# valid coordinate averages in this dataset are negative — so `i >= 0`
# flags the null rows.
nullvals = 0
for i in X:
    if i >= 0:
        nullvals += 1
nullvals

# **REMOVING THOSE ROWS IN BOTH X AND Y**
# BUGFIX: the original popped elements out of X while iterating over it,
# which skips elements and removes the wrong indices. Rebuild both lists
# instead, keeping only rows with a valid (negative) averaged coordinate.
YY = Y.tolist()
kept = [(xi, yi) for xi, yi in zip(X, YY) if xi < 0]
X = [xi for xi, _ in kept]
YY = [yi for _, yi in kept]

XXvals = len(X)
XXvals

X

YYvals = len(YY)
YYvals

# BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `int` is the intended dtype.
YY = np.array(YY, dtype=int)

YYvals = len(YY)
YYvals

# # FINDING THE NUMBER OF EACH TYPE OF CRIME
theft = 0
breakin = 0
mischief = 0
commercial = 0
offence = 0
vehicle = 0
collision = 0
for i in YY:
    if i == 1:
        theft += 1
    elif i == 2:
        breakin += 1
    elif i == 3:
        mischief += 1
    elif i == 4:
        commercial += 1
    elif i == 5:
        offence += 1
    elif i == 6:
        vehicle += 1
    elif i == 7:
        collision += 1
print("Number of thefts= ", theft)
print("Number of break and Enter Residential/Other = ", breakin)
print("Number of mischief = ", mischief)
print("Number of Break and Enter Commercial= ", commercial)
print("Number of Offence Against a Person = ", offence)
print("Number of Theft from Vehicle = ", vehicle)
print("Number of Vehicle Collision or Pedestrian Struck (with Injury) = ", collision)

# # VIEWING THE CRIME PLOT GRAPH

# **BAR GRAPH**
labels = ['Theft', 'Residential', 'Mischief', 'Commercial', 'Person',
          'Theft Vehicle', 'Vehicle Collide']
number = [theft, breakin, mischief, commercial, offence, vehicle, collision]
fig, ax = plt.subplots()
width = 0.4
ax.bar(labels, number, width, color='Orange')
ax.set_ylabel('NUMBER', color='aqua')
ax.set_title('CRIME NUMBER GRAPH', color='yellow')
plt.show()

# **PIE CHART**
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
labels = 'Theft', 'Residential', 'Mischief', 'Commercial', 'Person', 'Theft Vehicle', 'Vehicle Collide'
sizes = [theft, breakin, mischief, commercial, offence, vehicle, collision]
explode = (0, 0, 0, 0, 0, 0, 0.1)  # pull the last slice out slightly
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
ax1.axis('equal')  # equal aspect ratio so the pie is drawn as a circle
plt.show()

# # USING KNN
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X_train, X_test, y_train, y_test = train_test_split(X, YY, test_size=0.30)

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train.ravel())
pred = knn.predict(X_test)

from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score

print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))

# 10-fold cross-validated accuracy for K = 1..39 (will take some time).
accuracy_rate = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    score = cross_val_score(knn, X, YY.ravel(), cv=10)
    accuracy_rate.append(score.mean())

# Error rate for the same K values (1 - accuracy).
error_rate = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    score = cross_val_score(knn, X, YY.ravel(), cv=10)
    error_rate.append(1 - score.mean())

plt.figure(figsize=(20, 12))
# plt.plot(range(1,80),error_rate,color='blue', linestyle='dashed', marker='o',
#          markerfacecolor='red', markersize=10)
plt.plot(range(1, 40), accuracy_rate, color='blue', linestyle='dashed',
         marker='o', markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')

# Refit with K=32, chosen from the accuracy curve above.
# BUGFIX: the original comment and printed banner said "K=8" although
# n_neighbors was 32.
knn = KNeighborsClassifier(n_neighbors=32)
knn.fit(X_train, y_train.ravel())
pred = knn.predict(X_test)
print('WITH K=32')
print('\n')
print(confusion_matrix(y_test, pred))
print('\n')
print(classification_report(y_test, pred))

# # CRIME PREDICTING
# **THE DATASET IS CONCENTRATED IN ONE PLACE ONLY, DUE TO WHICH THE
# AVERAGED LATITUDE-LONGITUDE VALUE CAN VARY ONLY FROM -30 TO -40**
print("Give the desired Longitude")
longi = float(input())
print("Give the desired Latitude")
latit = float(input())
longlang = (longi + latit) / 2
inp = np.array([longlang])
inp = inp.reshape(1, -1)
prediction = knn.predict(inp)
if (prediction == 1):
    print("Crime is Theft")
elif (prediction == 3):
    print("Crime is mischief")
elif (prediction == 2):
    print("Crime is Break and Enter Residential")
elif (prediction == 4):
    print("Crime is Break and Enter commercial")
elif (prediction == 5):
    print("Crime is Offence against a person")
elif (prediction == 6):
    print("Crime is Theft from vehicle")
else:
    print("Crime is Vehicle Collision or pedestrian struck/injured")

# # THE OUTPUT USUALLY GIVES ONLY "Vehicle Collision or pedestrian
# struck/injured" BECAUSE THAT IS THE MOST FREQUENT CRIME IN THE DATASET;
# A BETTER DATASET WOULD GIVE BETTER RESULTS
crime-predict/.ipynb_checkpoints/CrimePredictioner-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Storage Benchmark
#
# Compare read/write speed and on-disk size of three storage formats:
# - CSV: comma-separated, standard flat text file format.
# - HDF5: hierarchical data format (PyTables backend in pandas), tested in
#   both the 'fixed' and the queryable/appendable 'table' flavor.
# - Parquet: binary, columnar format from the Apache Hadoop ecosystem,
#   accessed through the `pyarrow` library.
#
# ## Usage
#
# To recreate the book's charts, run this notebook twice up to the
# 'Store Results' section with:
# 1. `data_type='Numeric'`: `numerical_cols=2000`, `text_cols=0` (default)
# 2. `data_type='Mixed'`: `numerical_cols=1000`, `text_cols=1000`

# ## Imports & Settings

import warnings

warnings.filterwarnings('ignore')

# + pycharm={"is_executing": true}
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
import string

# + pycharm={"is_executing": true}
sns.set_style('whitegrid')

# + pycharm={"is_executing": true}
# Benchmark numbers, keyed by storage backend.
results = {}
# -

# ## Generate Test Data
#
# A DataFrame configurable to contain numerical or text data, or both.


# + pycharm={"is_executing": true}
def generate_test_data(nrows=100000, numerical_cols=2000, text_cols=0, text_length=10):
    """Build a test frame of `numerical_cols` random-float columns followed by
    `text_cols` columns that all hold one random string of `text_length`
    ASCII letters. Column labels are stringified integers."""
    filler = ''.join(random.choice(string.ascii_letters) for _ in range(text_length))
    numeric = pd.DataFrame(np.random.random(size=(nrows, numerical_cols)))
    text = pd.DataFrame(np.full(shape=(nrows, text_cols), fill_value=filler))
    data = pd.concat([numeric, text], axis=1, ignore_index=True)
    data.columns = [str(label) for label in data.columns]
    return data


# + pycharm={"is_executing": true}
data_type = 'Numeric'

# + pycharm={"is_executing": true}
df = generate_test_data(numerical_cols=1000, text_cols=1000)
df.info()
# -

# ## Parquet

# ### Size

# + pycharm={"is_executing": true}
parquet_file = Path('test.parquet')

# + pycharm={"is_executing": true}
df.to_parquet(parquet_file)
size = parquet_file.stat().st_size
# -

# ### Read

# + pycharm={"is_executing": true}
# %%timeit -o
df = pd.read_parquet(parquet_file)

# + pycharm={"is_executing": true}
# `_` holds the TimeitResult produced by the `-o` flag (IPython only).
read = _

# + pycharm={"is_executing": true}
parquet_file.unlink()
# -

# ### Write

# + pycharm={"is_executing": true}
# %%timeit -o
df.to_parquet(parquet_file)
parquet_file.unlink()

# + pycharm={"is_executing": true}
write = _
# -

# ### Results

# + pycharm={"is_executing": true}
results['Parquet'] = {'read': np.mean(read.all_runs),
                      'write': np.mean(write.all_runs),
                      'size': size}
# -

# ## HDF5

# + pycharm={"is_executing": true}
test_store = Path('index.h5')
# -

# ### Fixed Format

# #### Size

# + pycharm={"is_executing": true}
with pd.HDFStore(test_store) as store:
    store.put('file', df)
size = test_store.stat().st_size
# -

# #### Read

# + pycharm={"is_executing": true}
# %%timeit -o
with pd.HDFStore(test_store) as store:
    store.get('file')

# + pycharm={"is_executing": true}
read = _

# + pycharm={"is_executing": true}
test_store.unlink()
# -

# #### Write

# + pycharm={"is_executing": true}
# %%timeit -o
with pd.HDFStore(test_store) as store:
    store.put('file', df)
test_store.unlink()

# + pycharm={"is_executing": true}
write = _
# -

# #### Results

# + pycharm={"is_executing": true}
results['HDF Fixed'] = {'read': np.mean(read.all_runs),
                        'write': np.mean(write.all_runs),
                        'size': size}
# -

# ### Table Format

# #### Size

# + pycharm={"is_executing": true}
with pd.HDFStore(test_store) as store:
    store.append('file', df, format='t')
size = test_store.stat().st_size
# -

# #### Read

# + pycharm={"is_executing": true}
# %%timeit -o
with pd.HDFStore(test_store) as store:
    df = store.get('file')

# + pycharm={"is_executing": true}
read = _

# + pycharm={"is_executing": true}
test_store.unlink()
# -

# #### Write
#
# Note that `write` in table format does not work with text data.

# + pycharm={"is_executing": true}
# %%timeit -o
with pd.HDFStore(test_store) as store:
    store.append('file', df, format='t')
test_store.unlink()

# + pycharm={"is_executing": true}
write = _
# -

# #### Results

# + pycharm={"is_executing": true}
results['HDF Table'] = {'read': np.mean(read.all_runs),
                        'write': np.mean(write.all_runs),
                        'size': size}
# -

# ### Table Select

# #### Size

# + pycharm={"is_executing": true}
# NOTE(review): 'company' and 'form' are not columns of the generated test
# frame (its labels are '0', '1', ...), so this append raises a KeyError
# unless run against the EDGAR data this section was adapted from — confirm
# intended data source.
with pd.HDFStore(test_store) as store:
    store.append('file', df, format='t', data_columns=['company', 'form'])
size = test_store.stat().st_size
# -

# #### Read

# + pycharm={"is_executing": true}
company = 'APPLE INC'

# + pycharm={"is_executing": true}
# BUG FIX: this cell used plain `%%timeit`, which exposes no result object, so
# the following `read = _` silently reused the previous section's timing.
# `-o` makes the TimeitResult available via `_`.
# %%timeit -o
with pd.HDFStore(test_store) as store:
    s = store.get('file')

# + pycharm={"is_executing": true}
read = _

# + pycharm={"is_executing": true}
test_store.unlink()
# -

# #### Write

# + pycharm={"is_executing": true}
# BUG FIX: added `-o` here as well (see the read cell above).
# %%timeit -o
with pd.HDFStore(test_store) as store:
    store.append('file', df, format='t', data_columns=['company', 'form'])
test_store.unlink()

# + pycharm={"is_executing": true}
write = _
# -

# #### Results

# + pycharm={"is_executing": true}
results['HDF Select'] = {'read': np.mean(read.all_runs),
                         'write': np.mean(write.all_runs),
                         'size': size}
# -

# ## CSV

# + pycharm={"is_executing": true}
test_csv = Path('test.csv')
# -

# ### Size

# + pycharm={"is_executing": true}
df.to_csv(test_csv)
# BUG FIX: the original discarded the result of `stat().st_size`, so
# results['CSV']['size'] silently reported the size of the last HDF5 store.
size = test_csv.stat().st_size
# -

# ### Read

# + pycharm={"is_executing": true}
# %%timeit -o
df = pd.read_csv(test_csv)

# + pycharm={"is_executing": true}
read = _

# + pycharm={"is_executing": true}
test_csv.unlink()
# -

# ### Write

# + pycharm={"is_executing": true}
# %%timeit -o
df.to_csv(test_csv)
test_csv.unlink()

# + pycharm={"is_executing": true}
write = _
# -

# ### Results

# + pycharm={"is_executing": true}
results['CSV'] = {'read': np.mean(read.all_runs),
                  'write': np.mean(write.all_runs),
                  'size': size}
# -

# ## Store Results

# + pycharm={"is_executing": true}
pd.DataFrame(results).assign(Data=data_type).to_csv(f'{data_type}.csv')
# -

# ## Display Results
#
# Run the notebook twice as described under `Usage` to create the two `csv`
# files with results for the different test data.

# + pycharm={"is_executing": true}
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the
# drop-in replacement and yields the same stacked frame.
df = (pd.concat([pd.read_csv('Numeric.csv', index_col=0),
                 pd.read_csv('Mixed.csv', index_col=0)])
      .rename(columns=str.capitalize))
df.index.name = 'Storage'
df = df.set_index('Data', append=True).unstack()
df.Size /= 1e9  # bytes -> GB

# + pycharm={"is_executing": true}
fig, axes = plt.subplots(ncols=3, figsize=(16, 4))
for i, op in enumerate(['Read', 'Write', 'Size']):
    flag = op in ['Read', 'Write']  # timings get a log scale; sizes do not
    df.loc[:, op].plot.barh(title=op, ax=axes[i], logx=flag)
    axes[i].set_xlabel('seconds (log scale)' if flag else 'GB')
fig.tight_layout()
fig.savefig('storage', dpi=300);
ml4trading-2ed/02_market_and_fundamental_data/05_storage_benchmark/storage_benchmark.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + tags=[] language="sh"
# # rm -rf my_models

# + tags=[]
from datafiles import datafile


# A datafile-backed model: each instance is persisted to a YAML file whose
# path is derived from its key.
@datafile("my_models/{self.my_key}.yml")
class MyModel:
    my_key: str
    my_value: int = 0


# + tags=[]
MyModel('foo')

# + [markdown] tags=[]
# # `get_or_none(*args, **kwargs)`

# + tags=[]
MyModel.objects.get_or_none('foo')

# + tags=[]
# Looking up a key with no backing file yields None rather than raising.
item = MyModel.objects.get_or_none('bar')
assert item is None

# + [markdown] tags=[]
# # `get_or_create(*args, **kwargs)`

# + tags=[]
MyModel.objects.get_or_create('bar', 42)

# + tags=[]
MyModel.objects.get_or_create('bar')

# + [markdown] tags=[]
# # `all()`

# + tags=[]
# `all()` is lazy — materialize it to see every stored instance.
generator = MyModel.objects.all()
list(generator)

# + [markdown] tags=[]
# # `filter(**kwargs)`

# + tags=[]
generator = MyModel.objects.filter(my_value=42)
list(generator)
notebooks/manager_api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Profitable App Profile Analysis for the App Store and Google Play
# ***
#
# In this project we will analyse what makes a mobile app popular and how this
# could be used in order to build more profitable apps. Our main goal is to
# come up with an app suggestion that can be profitable for both the App Store
# and Google Play markets. The app should be free and make its profit out of
# advertising, and because of that we need to analyse the data to see what
# makes an app popular.
#
# As of September 2018, there were approximately 2 million iOS apps available
# on the App Store, and 2.1 million Android apps on Google Play.
#
# Collecting data for over four million apps requires a significant amount of
# time and money, so we'll analyze a sample of data instead. To avoid spending
# resources on collecting new data ourselves, we should first try to see
# whether we can find any relevant existing data at no cost. Luckily, there
# are two data sets that seem suitable for our purpose:
#
# A data set containing data about approximately ten thousand Android apps
# from Google Play. You can download the data set directly from
# [this link](https://www.kaggle.com/lava18/google-play-store-apps).
#
# A data set containing data about approximately seven thousand iOS apps from
# the App Store. You can download the data set directly from
# [this link](https://www.kaggle.com/ramamet4/app-store-apple-data-set-10k-apps).
#
# ![](https://www.imagemhost.com.br/images/2020/04/06/image.png)
#
# Image: <a href="https://www.freepik.com/free-photos-vectors/technology">Technology vector created by stories - www.freepik.<EMAIL></a>

# # Exploring the Data
#
# We will begin by importing the libraries we'll use and reading the datasets
# into two dataframes.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

ios = pd.read_csv('datasets/AppleStore.csv', encoding='utf8')
android = pd.read_csv('datasets/googleplaystore.csv', encoding='utf8')

ios.head()
# -

ios.describe()

android.head()

android.describe(include='all')

# # Cleaning the Data
#
# The [Google Play data set dedicated discussion section](https://www.kaggle.com/lava18/google-play-store-apps/discussion/66015)
# reports that row 10472 is missing the 'Category' column, which shifts every
# other column and leaves the row with fewer values than it should have.
# We can see that below.

android.iloc[10472]

# The `Category` column in this row contains a numeric value when it should
# contain a string: it holds the data from the `Rating` column, `Rating` holds
# the `Reviews` data, and so on until the last column, `Android Ver`, ends up
# null. After the drop, we print the unique values in the `Category` column
# to confirm the row was deleted.

# +
# Remove the malformed row.
android.drop(index=10472, axis=0, inplace=True)

# Confirm the stray numeric value is gone from 'Category'.
print(android['Category'].unique())
# -

# The numeric value is no longer in the column; the row was successfully
# deleted.

# ## Duplicates
#
# We now check both datasets for duplicate data, in two steps: first detect
# the duplicates, then delete them.

ios_dups = ios[ios.iloc[:, 0].duplicated()]
ios_dups.shape

android_dups = android[android.iloc[:, 0].duplicated()]
android_dups.shape

# There are 1181 duplicated apps in the Android dataset and none in the iOS
# dataset.

android_dups.head(10)

# Using the Google Ads app example below, we can see that the only difference
# between the duplicated rows is the fourth column, which corresponds to the
# number of reviews.
# In order not to delete the duplicate rows arbitrarily, we use the review
# count as the criterion: for each duplicated app we keep only the row with
# the highest number of reviews, since more reviews means more recent and
# more reliable ratings.

android[android['App'] == 'Google Ads']

# To drop the duplicate rows we'll:
#
# * Convert `Reviews` to a numeric dtype (it was read as strings because of
#   the malformed row);
# * Sort by `Reviews` so the highest count comes first for every app;
# * Use `DataFrame.drop_duplicates()` keeping that first (highest) row;
# * Restore the original row order and reset the index;
# * Check the result.

# +
# BUG FIX + speed-up: the original looped over every duplicated app name and
# compared the *string* maximum of `Reviews`, which is both O(n^2) and wrong
# for counts with different digit lengths (lexicographically '99' > '1000'),
# so it could keep the wrong row. The vectorized version below keeps the row
# with the numerically largest review count per app.
android['Reviews'] = pd.to_numeric(android['Reviews'])

android = (android.sort_values('Reviews', ascending=False)
                  .drop_duplicates(subset='App', keep='first')
                  .sort_index()
                  .reset_index(drop=True))

# Checking the result
android_dups = android[android.iloc[:, 0].duplicated()]
print(android_dups.shape)
# -

# No duplicate rows left, as expected.
# ## Removing Non-English apps
#
# If you explore the data set long enough, you'll notice that the names of
# some apps suggest they are not directed at the English-speaking audience,
# as we can see in the examples below:

# +
print(ios.iloc[814, 2])
print(android.iloc[3749, 0])
# -

# As we're not interested in these apps, we will now write a function to
# identify them. We'll take advantage of the
# [ASCII standard](https://pt.wikipedia.org/wiki/ASCII) and the built-in
# `ord()` to do it. First, let's test the function.

# +
def is_en(string):
    """Return False if any character of `string` is outside the ASCII range."""
    return all(ord(char) <= 127 for char in string)


print(is_en('Facebook'))
print(is_en('中国語 AQリスニング'))
# -

# The function seems to work, but it still misclassifies some English app
# names, as the examples below show:

print(is_en('Docs To Go™ Free Office Suite'))
print(is_en('Instachat 😜'))

# That happened because emoji and characters like `™` fall outside the ASCII
# range we chose. We'll adapt `is_en()` to flag an app as non-English only
# when its name has more than three characters outside the ASCII range
# (0 - 127), which minimizes the amount of data loss.

# +
def is_en(string):
    """Return False only when more than three characters are non-ASCII."""
    non_ascii = sum(1 for char in string if ord(char) > 127)
    return non_ascii <= 3


print(is_en('Facebook'))
print(is_en('Docs To Go™ Free Office Suite'))
print(is_en('Instachat 😜'))
print(is_en('中国語 AQリスニング'))
# -

# Although the function is not perfect, it is now good enough to minimize
# data loss during the cleaning process.
#
# We'll modify the function once more: if a name contains more than three
# characters outside the ASCII range (0 - 127), a null value will be assigned
# to it. Then we'll use the `Series.apply()` method to test every app at once
# in each dataframe and exclude the null values from the dataframes. After
# we're done, we will see how many apps we have left.
# +
def is_en(name):
    """Return `name` unchanged, or NaN when it contains more than three
    characters outside the ASCII range (0-127)."""
    outside = 0
    for char in name:
        if ord(char) > 127:
            outside += 1
    if outside > 3:
        return np.nan
    return name


# Flag non-English names with NaN in both dataframes.
ios['track_name'] = ios['track_name'].apply(is_en)
android['App'] = android['App'].apply(is_en)

# Drop the flagged rows.
ios = ios[ios['track_name'].notnull()]
android = android[android['App'].notnull()]

# How many apps remain?
print(ios.shape)
print(android.shape)
# -

# We're now left with 9614 Android apps and 6183 iOS apps.

# ## Filtering free apps
#
# For this project we only analyze the free apps. The Android data already
# has a column that says whether the app is free; the iOS data does not, so
# there we select the apps whose price is zero. Afterwards we check how many
# apps are left in each dataframe.

# +
android_free = android.loc[android['Type'] == 'Free'].copy()
ios_free = ios.loc[ios['price'] == 0].copy()

print(android_free.shape)
print(ios_free.shape)
# -

# Finally, we're down to 8861 Android apps and 3222 iOS apps.

# # Analysing the Data

# ## Most popular apps by genre
#
# As mentioned in the introduction, the main goal of this project is to
# determine what kind of app is most likely to attract users. That matters
# because the revenue of a free app comes essentially from advertisement:
# the more users, the higher the revenue.
# Since the end goal is to publish the app on both Google Play and the App
# Store, we need app profiles that succeed in both markets. We start by
# analyzing the apps by genre in the iOS dataset.
#

# +
# Share (%) of each genre among the free English iOS apps.
ios_gen_per = ios_free['prime_genre'].value_counts() / ios_free.shape[0] * 100
print(round(ios_gen_per, 2))
# -

ios_gen_per.index[:10]

# +
# Fixed palette for the ten bars.
colors = [(114/255, 158/255, 206/255), (255/255, 158/255, 74/255),
          (103/255, 191/255, 92/255), (237/255, 102/255, 93/255),
          (173/255, 139/255, 201/255), (109/255, 204/255, 218/255),
          (205/255, 204/255, 93/255), (162/255, 162/255, 162/255),
          (237/255, 151/255, 202/255), (168/255, 120/255, 110/255)]

fig, ax = plt.subplots()
ax.bar(x=ios_gen_per.index[:10], height=ios_gen_per[:10], color=colors)
ax.tick_params(bottom=False, top=False, left=False, right=False,
               labelsize=12, rotation=90)
ax.set_yticks([30, 60])
ax.set_title('iOS Genre %')
for spine in ax.spines.values():
    spine.set_visible(False)
plt.show()
# -

# Games clearly dominate the English free-app market (58.16%). Entertainment
# apps are close to 8%, while Photo & Video apps do not reach 5%. Education
# (3.66%) and Social Networking (3.28%) round out the top five. However, we
# cannot draw conclusions yet: a large number of apps in a genre does not
# imply a large number of users. Demand might not match supply, and a large
# supply makes the market tougher to succeed in.
#
# Now let's look at the Google Play data set. For that, we'll analyse two
# columns that seem to be related: the 'Category' and 'Genres' columns.
# Share (%) of each category among the free English Android apps.
android_cat_per = android_free['Category'].value_counts() / android_free.shape[0] * 100
print(round(android_cat_per, 2))

# +
fig, ax = plt.subplots()
ax.bar(x=android_cat_per.index[:10], height=android_cat_per[:10], color=colors)
ax.tick_params(bottom=False, top=False, left=False, right=False,
               labelsize=12, rotation=90)
ax.set_yticks([10, 20])
ax.set_title('Android Category %')
for spine in ax.spines.values():
    spine.set_visible(False)
plt.show()
# -

# Same breakdown for the finer-grained 'Genres' column.
android_gen_per = android_free['Genres'].value_counts() / android_free.shape[0] * 100
print(round(android_gen_per, 2))

# +
fig, ax = plt.subplots()
ax.bar(x=android_gen_per.index[:10], height=android_gen_per[:10], color=colors)
ax.tick_params(bottom=False, top=False, left=False, right=False,
               labelsize=12, rotation=90)
ax.set_yticks([5, 10])
ax.set_title('Android Genre %')
for spine in ax.spines.values():
    spine.set_visible(False)
plt.show()
# -

# The scenario is completely different for the Google Play dataset: its
# market is more evenly divided among categories and genres than the iOS
# market, although the Family category accounts for almost 19% of the apps.
# Games (9.72%), Tools (8.46%), Business (4.59%) and Lifestyle (3.90%)
# complete a diversified top five. The Android data also seems to contain
# more apps designed for practical purposes than pure gaming.
# The 'Genres' column has many more categories, which makes it harder to
# analyse and draw conclusions from; that's why from now on we only use the
# 'Category' column for the Android apps.

# ## Most popular apps by genre in the App Store
#
# As the iOS dataset does not contain the number of installs per app, we use
# the rating count to measure an app's popularity. Let's see the average
# rating count for each genre.
# Average total rating count per genre, most-reviewed first.
ios_gen_rat = (ios_free.groupby('prime_genre')['rating_count_tot']
               .mean()
               .sort_values(ascending=False))
print(round(ios_gen_rat, 2))

# Navigation apps have the highest number of user reviews on average,
# followed by reference and social networking apps, each averaging over
# 70,000 reviews per app. We should check, however, how concentrated these
# reviews are in one or two big apps, so we display the average rating count
# per app in each of the top three categories.

for gen in ios_gen_rat.index[:3]:
    genre_apps = ios_free[ios_free['prime_genre'] == gen]
    print(gen)
    print('\n')
    print(genre_apps.groupby('track_name')['rating_count_tot'].mean())
    print('\n')

# Navigation and social networking are dominated by a few famous apps —
# navigation alone has almost half a million reviews just for Waze and Google
# Maps. That is why reference apps look like the better option: the genre is
# heavily influenced by the Bible app, but since its review count dwarfs the
# rest, reference is effectively a one-app genre, which suggests room for new
# apps to succeed.
#
# We could replicate the Bible's formula with a different famous book, adding
# new features and making the app more interactive for the user.

# ## Most popular apps by genre on Google Play
#
# The Google Play dataset reports the number of installs per app, but most
# values are open-ended (100+, 1,000+, 5,000+, etc.), which makes them look
# imprecise.

android_inst = android_free['Installs'].value_counts()
android_inst.sort_values(ascending=False)

# We only need a rough idea of which genres attract the most users, so this
# data does not have to be absolutely precise. We therefore treat an app with
# 100,000+ installs as having 100,000 installs, one with 1,000,000+ installs
# as having 1,000,000 installs, and so on.
#
# During this process we'll need to convert the number of installs to float.
#
# We'll then display the average number of installs for each category.

# +
# Modifying the Installs column.
# `regex=False` makes the intent explicit: '+' and ',' are literal characters
# here, not regex metacharacters (the default for `str.replace` changed to
# regex=False in pandas 1.4).
android_free.loc[:, 'Installs'] = android_free.loc[:, 'Installs'].str.replace('+', '', regex=False)
android_free.loc[:, 'Installs'] = android_free['Installs'].str.replace(',', '', regex=False)
android_free.loc[:, 'Installs'] = android_free['Installs'].astype(float)

# Calculating the average per category
android_cat_inst = android_free.groupby('Category')['Installs'].mean()
print(round(android_cat_inst, 2).sort_values(ascending=False))
# -

# Communication, video players and social apps dominate the Google Play
# market. We know, however, that these categories are already built around
# huge apps like Facebook, WhatsApp and YouTube. Considering that, and that
# we suggested the Reference genre for the iOS market, we should look at the
# Books and Reference category.

# +
# BUG FIX: the boolean mask must be computed on `android_free`, not on the
# larger `android` frame — indexing `android_free` with a mask whose index
# does not match raises "Unalignable boolean Series" in modern pandas.
android_books = android_free.loc[android_free['Category'] == 'BOOKS_AND_REFERENCE'].copy()

# Checking the number of apps in the new dataframe
print(android_books.shape)

# Calculating the number of installs per app
books_install = android_books.groupby('App')['Installs'].sum()
books_install
# -

# There are 190 apps in this category. Let's check how many of them have over
# 100,000,000 installs.

android_books_100 = android_books.loc[android_books['Installs'] >= 100000000]
android_books_100

# This category has just a few very popular apps, and many of the apps are
# software for processing and reading ebooks, dictionaries and libraries,
# which suggests room for new apps to succeed.
# A similar idea to the one we suggested for the iOS market may therefore
# have potential here as well — and it is part of our goal to suggest the
# same app for both platforms.
# Selecting a popular book and making an app out of it, with new features and
# user interactions, might be the way to go here.

# # Conclusions
#
# In this project we explored, cleaned and analysed data about App Store and
# Google Play mobile apps with the goal of recommending an app profile that
# can be profitable in both markets.
#
# We concluded that creating an app out of a popular book could be profitable
# for both the App Store and Google Play. This is a big market that is not
# dominated by a handful of hugely popular apps, and it is full of libraries
# and e-readers — a market with room for a new app to become popular.
# Therefore, a fun, interactive app about a book that many people care about
# could really draw attention, especially with nice features and new forms of
# interaction with the book's content.
#
Profitable App Profile Analysis for the App Store and Google Play.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Dependencies
import pandas as pd
import matplotlib.pyplot as plt
import requests
import json
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials

# Import Keys
from config import ckey
from config import skey
# -

# Setting up Spotify API credentials.
cid = ckey
secret = skey
client_credentials_manager = SpotifyClientCredentials(client_id=cid,
                                                      client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)

# +
# One chart CSV per season in the Resources folder.
paths = ['spring2019', 'summer2019', 'fall2019', 'winter2019', 'spring2020',
         'summer2020', 'fall2020', 'winter2020', 'spring2021']

# Per-season aggregates collected by the loop below.
totdanceb = []
totvalence = []
totenergy = []
maxstreams = []

for path in paths:
    info_df = pd.read_csv(f"Resources/{path}.csv")

    # The first data row actually holds the column names; promote it.
    header_row = 0
    info_df.columns = info_df.iloc[header_row]
    info_df = info_df.drop(header_row)
    info_df = info_df.reset_index(drop=True)

    # Resolve each track name to a Spotify track id; drop tracks the search
    # cannot find so the id column stays aligned with the frame.
    track_names = info_df['Track Name'].to_list()
    track_ids = []
    for track in track_names:
        song_results = sp.search(q=track, type='track', limit=1)
        try:
            track_ids.append(song_results['tracks']['items'][0]['id'])
        except IndexError:
            print(f"Song {track} not found")
            info_df = info_df[info_df['Track Name'] != track]
    info_df['Track ID'] = track_ids

    # Fetch the audio features for every track.
    danceability = []
    energy = []
    valence = []
    for track in track_ids:
        feat_results = sp.audio_features([track])
        danceability.append(feat_results[0]['danceability'])
        energy.append(feat_results[0]['energy'])
        valence.append(feat_results[0]['valence'])
    info_df['Danceability'] = danceability
    # BUG FIX: the original assigned `energy` to the Valence column and
    # `valence` to the Energy column, swapping the two metrics in every
    # downstream average and plot.
    info_df['Valence'] = valence
    info_df['Energy'] = energy

    # Season averages for the three audio features.
    meand = info_df['Danceability'].mean()
    meanv = info_df['Valence'].mean()
    meane = info_df['Energy'].mean()

    # BUG FIX: `Streams` is object dtype after the header shuffle, so the
    # original `.max()` compared the values lexicographically (e.g. '99' >
    # '1000'); convert to numbers before taking the maximum.
    maxstr = pd.to_numeric(info_df['Streams']).max()

    totdanceb.append(meand)
    totvalence.append(meanv)
    totenergy.append(meane)
    maxstreams.append(maxstr)

# Display the collected values.
print(totdanceb)
print(totvalence)
print(totenergy)
print(maxstreams)
# -

# Assemble one row per season.
seasons = ['Spring 2019', 'Summer 2019', 'Fall 2019', 'Winter 2019',
           'Spring 2020', 'Summer 2020', 'Fall 2020', 'Winter 2020',
           'Spring 2021']
global_df = pd.DataFrame({"Date": seasons,
                          "Average Danceability": totdanceb,
                          "Average Valence": totvalence,
                          "Average Energy": totenergy,
                          "Max Streams": maxstreams})
global_df

# Plot the three indicators over time.
plt.plot('Date', 'Average Danceability', data=global_df, marker='o',
         markerfacecolor='blue', markersize=12, color='skyblue',
         linewidth=4, label='Average Danceability')
plt.plot('Date', 'Average Valence', data=global_df, marker='', color='olive',
         linewidth=2, label='Average Valence')
plt.plot('Date', 'Average Energy', data=global_df, marker='', color='olive',
         linewidth=2, linestyle='dashed', label='Average Energy')
plt.grid()
plt.title('Danceability, Valence and Energy Through Time')
plt.xticks(rotation=45)
plt.legend(loc='best')
plt.show()
spotify-analysis/spotify-test-global.ipynb